tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

pruthr.c (51989B)


      1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #include "primpl.h"
      7 #include <signal.h>
      8 #include <string.h>
      9 
     10 #if defined(WIN95)
     11 /*
     12 ** Some local variables report warnings on Win95 because the code paths
     13 ** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
     14 ** The pragma suppresses the warning.
     15 **
     16 */
     17 #  pragma warning(disable : 4101)
     18 #endif
     19 
     20 /* _pr_activeLock protects the following global variables */
     21 PRLock* _pr_activeLock;
     22 PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
     23                                    * waits until all other user (non-system)
     24                                    * threads have terminated before it exits.
     25                                    * So whenever we decrement _pr_userActive,
     26                                    * it is compared with
     27                                    * _pr_primordialExitCount.
     28                                    * If the primordial thread is a system
     29                                    * thread, then _pr_primordialExitCount
     30                                    * is 0.  If the primordial thread is
     31                                    * itself a user thread, then
     32                                    * _pr_primordialExitCount is 1.
     33                                    */
     34 PRCondVar* _pr_primordialExitCVar; /* When _pr_userActive is decremented to
     35                                    * _pr_primordialExitCount, this condition
     36                                    * variable is notified.
     37                                    */
     38 
     39 PRLock* _pr_deadQLock;
     40 PRUint32 _pr_numNativeDead;
     41 PRUint32 _pr_numUserDead;
     42 PRCList _pr_deadNativeQ;
     43 PRCList _pr_deadUserQ;
     44 
     45 PRUint32 _pr_join_counter;
     46 
     47 PRUint32 _pr_local_threads;
     48 PRUint32 _pr_global_threads;
     49 
     50 PRBool suspendAllOn = PR_FALSE;
     51 PRThread* suspendAllThread = NULL;
     52 
     53 extern PRCList _pr_active_global_threadQ;
     54 extern PRCList _pr_active_local_threadQ;
     55 
     56 static void _PR_DecrActiveThreadCount(PRThread* thread);
     57 static PRThread* _PR_AttachThread(PRThreadType, PRThreadPriority,
     58                                  PRThreadStack*);
     59 static void _PR_InitializeNativeStack(PRThreadStack* ts);
     60 static void _PR_InitializeRecycledThread(PRThread* thread);
     61 static void _PR_UserRunThread(void);
     62 
/*
** Bootstrap the NSPR threading package and attach the primordial thread.
** Creates the global locks/condvar, builds a PRThreadStack describing the
** primordial (native) stack, attaches the calling thread as the current
** thread, and registers it on the appropriate active-thread queue.
**
** `maxPTDs` is accepted for historical reasons but is unused here.
*/
void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
                     PRUintn maxPTDs) {
  PRThread* thread;
  PRThreadStack* stack;

  PR_ASSERT(priority == PR_PRIORITY_NORMAL);

  _pr_terminationCVLock = PR_NewLock();
  _pr_activeLock = PR_NewLock();

#ifndef HAVE_CUSTOM_USER_THREADS
  stack = PR_NEWZAP(PRThreadStack);
  /* NOTE(review): stack may be NULL on OOM; the stores below would then
   * crash.  Only the !thread case aborts explicitly, further down. */
#  ifdef HAVE_STACK_GROWING_UP
  /* Approximate the top of the primordial stack by rounding the address
   * of a stack local down to a page boundary. */
  stack->stackTop =
      (char*)((((PRWord)&type) >> _pr_pageShift) << _pr_pageShift);
#  else
#    if defined(SOLARIS)
  stack->stackTop = (char*)&thread;
#    else
  /* Stack grows down: round up to the next page boundary instead. */
  stack->stackTop =
      (char*)((((PRWord)&type + _pr_pageSize - 1) >> _pr_pageShift)
              << _pr_pageShift);
#    endif
#  endif
#else
  /* If stack is NULL, we're using custom user threads like NT fibers. */
  stack = PR_NEWZAP(PRThreadStack);
  if (stack) {
    stack->stackSize = 0;
    _PR_InitializeNativeStack(stack);
  }
#endif /* HAVE_CUSTOM_USER_THREADS */

  thread = _PR_AttachThread(type, priority, stack);
  if (thread) {
    _PR_MD_SET_CURRENT_THREAD(thread);

    if (type == PR_SYSTEM_THREAD) {
      thread->flags = _PR_SYSTEM;
      _pr_systemActive++;
      _pr_primordialExitCount = 0;
    } else {
      _pr_userActive++;
      /* The primordial thread itself counts as one user thread, so
       * PR_Cleanup() waits for _pr_userActive to drop to 1, not 0. */
      _pr_primordialExitCount = 1;
    }
    /* Keep this thread/stack from being rescheduled or re-used until the
     * first context switch clears the flag. */
    thread->no_sched = 1;
    _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
  }

  if (!thread) {
    PR_Abort();
  }
#ifdef _PR_LOCAL_THREADS_ONLY
  thread->flags |= _PR_PRIMORDIAL;
#else
  thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
#endif

  /*
   * Needs _PR_PRIMORDIAL flag set before calling
   * _PR_MD_INIT_THREAD()
   */
  if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
    /*
     * XXX do what?
     */
  }

  /* Register the primordial thread on the matching active queue. */
  if (_PR_IS_NATIVE_THREAD(thread)) {
    PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
    _pr_global_threads++;
  } else {
    PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
    _pr_local_threads++;
  }

  /* Initialize the dead-thread recycling machinery (empty, recycling off). */
  _pr_recycleThreads = 0;
  _pr_deadQLock = PR_NewLock();
  _pr_numNativeDead = 0;
  _pr_numUserDead = 0;
  PR_INIT_CLIST(&_pr_deadNativeQ);
  PR_INIT_CLIST(&_pr_deadUserQ);
}
    146 
    147 void _PR_CleanupThreads(void) {
    148  if (_pr_terminationCVLock) {
    149    PR_DestroyLock(_pr_terminationCVLock);
    150    _pr_terminationCVLock = NULL;
    151  }
    152  if (_pr_activeLock) {
    153    PR_DestroyLock(_pr_activeLock);
    154    _pr_activeLock = NULL;
    155  }
    156  if (_pr_primordialExitCVar) {
    157    PR_DestroyCondVar(_pr_primordialExitCVar);
    158    _pr_primordialExitCVar = NULL;
    159  }
    160  /* TODO _pr_dead{Native,User}Q need to be deleted */
    161  if (_pr_deadQLock) {
    162    PR_DestroyLock(_pr_deadQLock);
    163    _pr_deadQLock = NULL;
    164  }
    165 }
    166 
/*
** Initialize a stack for a native thread
**
** Fills in allocBase/allocSize/stackTop/stackBottom for a stack the OS
** allocated (rather than NSPR).  The base is derived from the address of
** a local variable, rounded to a page boundary.  Runs at most once per
** stack (guarded by stackTop == 0).
*/
static void _PR_InitializeNativeStack(PRThreadStack* ts) {
  if (ts && (ts->stackTop == 0)) {
    ts->allocSize = ts->stackSize;

    /*
    ** Setup stackTop and stackBottom values.
    */
#ifdef HAVE_STACK_GROWING_UP
    /* &ts lives on the current native stack; round down to a page. */
    ts->allocBase = (char*)((((PRWord)&ts) >> _pr_pageShift) << _pr_pageShift);
    ts->stackBottom = ts->allocBase + ts->stackSize;
    ts->stackTop = ts->allocBase;
#else
    /* Stack grows down: round up to the next page boundary. */
    ts->allocBase = (char*)((((PRWord)&ts + _pr_pageSize - 1) >> _pr_pageShift)
                            << _pr_pageShift);
    ts->stackTop = ts->allocBase;
    ts->stackBottom = ts->allocBase - ts->stackSize;
#endif
  }
}
    189 
void _PR_NotifyJoinWaiters(PRThread* thread) {
  /*
  ** Handle joinable threads.  Change the state to waiting for join.
  ** Remove from our run Q and put it on global waiting to join Q.
  ** Notify on our "termination" condition variable so that joining
  ** thread will know about our termination.  Switch our context and
  ** come back later on to continue the cleanup.
  */
  PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
  /* thread->term is only non-NULL for joinable threads. */
  if (thread->term != NULL) {
    /* Lock order: termination CV lock first, then the thread lock. */
    PR_Lock(_pr_terminationCVLock);
    _PR_THREAD_LOCK(thread);
    thread->state = _PR_JOIN_WAIT;
    if (!_PR_IS_NATIVE_THREAD(thread)) {
      _PR_MISCQ_LOCK(thread->cpu);
      _PR_ADD_JOINQ(thread, thread->cpu);
      _PR_MISCQ_UNLOCK(thread->cpu);
    }
    _PR_THREAD_UNLOCK(thread);
    PR_NotifyCondVar(thread->term);
    PR_Unlock(_pr_terminationCVLock);
    /* Block until the joiner wakes us; it changes our state first. */
    _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
    PR_ASSERT(thread->state != _PR_JOIN_WAIT);
  }
}
    215 
    216 /*
    217 * Zero some of the data members of a recycled thread.
    218 *
    219 * Note that we can do this either when a dead thread is added to
    220 * the dead thread queue or when it is reused.  Here, we are doing
    221 * this lazily, when the thread is reused in _PR_CreateThread().
    222 */
    223 static void _PR_InitializeRecycledThread(PRThread* thread) {
    224  /*
    225   * Assert that the following data members are already zeroed
    226   * by _PR_CleanupThread().
    227   */
    228 #ifdef DEBUG
    229  if (thread->privateData) {
    230    unsigned int i;
    231    for (i = 0; i < thread->tpdLength; i++) {
    232      PR_ASSERT(thread->privateData[i] == NULL);
    233    }
    234  }
    235 #endif
    236  PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
    237  PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
    238  PR_ASSERT(thread->errorStringLength == 0);
    239  PR_ASSERT(thread->name == 0);
    240 
    241  /* Reset data members in thread structure */
    242  thread->errorCode = thread->osErrorCode = 0;
    243  thread->io_pending = thread->io_suspended = PR_FALSE;
    244  thread->environment = 0;
    245  PR_INIT_CLIST(&thread->lockList);
    246 }
    247 
    248 PRStatus _PR_RecycleThread(PRThread* thread) {
    249  if (_PR_IS_NATIVE_THREAD(thread) && _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
    250    _PR_DEADQ_LOCK;
    251    PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
    252    _PR_INC_DEADNATIVE;
    253    _PR_DEADQ_UNLOCK;
    254    return (PR_SUCCESS);
    255  } else if (!_PR_IS_NATIVE_THREAD(thread) &&
    256             _PR_NUM_DEADUSER < _pr_recycleThreads) {
    257    _PR_DEADQ_LOCK;
    258    PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
    259    _PR_INC_DEADUSER;
    260    _PR_DEADQ_UNLOCK;
    261    return (PR_SUCCESS);
    262  }
    263  return (PR_FAILURE);
    264 }
    265 
    266 /*
    267 * Decrement the active thread count, either _pr_systemActive or
    268 * _pr_userActive, depending on whether the thread is a system thread
    269 * or a user thread.  If all the user threads, except possibly
    270 * the primordial thread, have terminated, we notify the primordial
    271 * thread of this condition.
    272 *
    273 * Since this function will lock _pr_activeLock, do not call this
    274 * function while holding the _pr_activeLock lock, as this will result
    275 * in a deadlock.
    276 */
    277 
    278 static void _PR_DecrActiveThreadCount(PRThread* thread) {
    279  PR_Lock(_pr_activeLock);
    280  if (thread->flags & _PR_SYSTEM) {
    281    _pr_systemActive--;
    282  } else {
    283    _pr_userActive--;
    284    if (_pr_userActive == _pr_primordialExitCount) {
    285      PR_NotifyCondVar(_pr_primordialExitCVar);
    286    }
    287  }
    288  PR_Unlock(_pr_activeLock);
    289 }
    290 
/*
** Free the final per-thread resources: the machine-dependent thread
** lock and the PRThread structure itself.  Callers must have torn down
** everything else (term condvar, TPD, stack) first.
*/
static void _PR_DestroyThread(PRThread* thread) {
  _PR_MD_FREE_LOCK(&thread->threadLock);
  PR_DELETE(thread);
}
    298 
    299 void _PR_NativeDestroyThread(PRThread* thread) {
    300  if (thread->term) {
    301    PR_DestroyCondVar(thread->term);
    302    thread->term = 0;
    303  }
    304  if (NULL != thread->privateData) {
    305    PR_ASSERT(0 != thread->tpdLength);
    306    PR_DELETE(thread->privateData);
    307    thread->tpdLength = 0;
    308  }
    309  PR_DELETE(thread->stack);
    310  _PR_DestroyThread(thread);
    311 }
    312 
/*
** Release the resources of a dead user-level (local) thread: its join
** condvar, thread-private data, MD lock, and -- when the PRThread lives
** inside its own allocated stack -- the stack itself.
*/
void _PR_UserDestroyThread(PRThread* thread) {
  if (thread->term) {
    PR_DestroyCondVar(thread->term);
    thread->term = 0;
  }
  if (NULL != thread->privateData) {
    PR_ASSERT(0 != thread->tpdLength);
    PR_DELETE(thread->privateData);
    thread->tpdLength = 0;
  }
  _PR_MD_FREE_LOCK(&thread->threadLock);
  if (thread->threadAllocatedOnStack == 1) {
    _PR_MD_CLEAN_THREAD(thread);
    /*
     *  Because the no_sched field is set, this thread/stack will
     *  not be re-used until the flag is cleared by the thread
     *  we will context switch to.
     */
    _PR_FreeStack(thread->stack);
  } else {
#ifdef WINNT
    _PR_MD_CLEAN_THREAD(thread);
#else
    /*
     * This assertion does not apply to NT.  On NT, every fiber
     * has its threadAllocatedOnStack equal to 0.  Elsewhere,
     * only the primordial thread has its threadAllocatedOnStack
     * equal to 0.
     */
    PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
#endif
  }
}
    346 
/*
** Run a thread's start function. When the start function returns the
** thread is done executing and no longer needs the CPU. If there are no
** more user threads running then we can exit the program.
**
** Entry point for threads backed 1:1 by native OS threads.  The outer
** loop supports recycling: a recycled thread parks in _PR_MD_WAIT() and
** re-runs a new startFunc when reactivated.
*/
void _PR_NativeRunThread(void* arg) {
  PRThread* thread = (PRThread*)arg;

  _PR_MD_SET_CURRENT_THREAD(thread);

  /* Native threads are not bound to an NSPR virtual cpu. */
  _PR_MD_SET_CURRENT_CPU(NULL);

  /* Set up the thread stack information */
  _PR_InitializeNativeStack(thread->stack);

  /* Set up the thread md information */
  if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
    /*
     * thread failed to initialize itself, possibly due to
     * failure to allocate per-thread resources
     */
    return;
  }

  while (1) {
    thread->state = _PR_RUNNING;

    /*
     * Add to list of active threads
     */
    PR_Lock(_pr_activeLock);
    PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
    _pr_global_threads++;
    PR_Unlock(_pr_activeLock);

    (*thread->startFunc)(thread->arg);

    /*
     * The following two assertions are meant for NT asynch io.
     *
     * The thread should have no asynch io in progress when it
     * exits, otherwise the overlapped buffer, which is part of
     * the thread structure, would become invalid.
     */
    PR_ASSERT(thread->io_pending == PR_FALSE);
    /*
     * This assertion enforces the programming guideline that
     * if an io function times out or is interrupted, the thread
     * should close the fd to force the asynch io to abort
     * before it exits.  Right now, closing the fd is the only
     * way to clear the io_suspended flag.
     */
    PR_ASSERT(thread->io_suspended == PR_FALSE);

    /*
     * remove thread from list of active threads
     */
    PR_Lock(_pr_activeLock);
    PR_REMOVE_LINK(&thread->active);
    _pr_global_threads--;
    PR_Unlock(_pr_activeLock);

    PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

    /* All done, time to go away */
    _PR_CleanupThread(thread);

    /* Wake any PR_JoinThread() callers (blocks until the joiner runs). */
    _PR_NotifyJoinWaiters(thread);

    _PR_DecrActiveThreadCount(thread);

    thread->state = _PR_DEAD_STATE;

    if (!_pr_recycleThreads || (_PR_RecycleThread(thread) == PR_FAILURE)) {
      /*
       * thread not recycled
       * platform-specific thread exit processing
       *        - for stuff like releasing native-thread resources, etc.
       */
      _PR_MD_EXIT_THREAD(thread);
      /*
       * Free memory allocated for the thread
       */
      _PR_NativeDestroyThread(thread);
      /*
       * thread gone, cannot de-reference thread now
       */
      return;
    }

    /* Now wait for someone to activate us again... */
    _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
  }
}
    441 
/*
** Entry point for user-level (local) threads scheduled by NSPR itself.
** Mirrors _PR_NativeRunThread(); the outer loop supports recycling.
*/
static void _PR_UserRunThread(void) {
  PRThread* thread = _PR_MD_CURRENT_THREAD();
  PRIntn is;

  /* Allow the thread we context-switched away from to be scheduled. */
  if (_MD_LAST_THREAD()) {
    _MD_LAST_THREAD()->no_sched = 0;
  }

#ifdef HAVE_CUSTOM_USER_THREADS
  /* Fibers get a stack descriptor lazily, built from the live stack. */
  if (thread->stack == NULL) {
    thread->stack = PR_NEWZAP(PRThreadStack);
    _PR_InitializeNativeStack(thread->stack);
  }
#endif /* HAVE_CUSTOM_USER_THREADS */

  while (1) {
    /* Run thread main */
    if (!_PR_IS_NATIVE_THREAD(thread)) {
      _PR_MD_SET_INTSOFF(0);
    }

    /*
     * Add to list of active threads
     */
    if (!(thread->flags & _PR_IDLE_THREAD)) {
      PR_Lock(_pr_activeLock);
      PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
      _pr_local_threads++;
      PR_Unlock(_pr_activeLock);
    }

    (*thread->startFunc)(thread->arg);

    /*
     * The following two assertions are meant for NT asynch io.
     *
     * The thread should have no asynch io in progress when it
     * exits, otherwise the overlapped buffer, which is part of
     * the thread structure, would become invalid.
     */
    PR_ASSERT(thread->io_pending == PR_FALSE);
    /*
     * This assertion enforces the programming guideline that
     * if an io function times out or is interrupted, the thread
     * should close the fd to force the asynch io to abort
     * before it exits.  Right now, closing the fd is the only
     * way to clear the io_suspended flag.
     */
    PR_ASSERT(thread->io_suspended == PR_FALSE);

    PR_Lock(_pr_activeLock);
    /*
     * remove thread from list of active threads
     */
    if (!(thread->flags & _PR_IDLE_THREAD)) {
      PR_REMOVE_LINK(&thread->active);
      _pr_local_threads--;
    }
    PR_Unlock(_pr_activeLock);
    PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

    /* All done, time to go away */
    _PR_CleanupThread(thread);

    /* Disable interrupts for the teardown sequence.  NOTE(review): `is`
     * is never passed back to _PR_INTSON on this path; interrupts stay
     * off until the context switch below hands control elsewhere. */
    _PR_INTSOFF(is);

    _PR_NotifyJoinWaiters(thread);

    _PR_DecrActiveThreadCount(thread);

    thread->state = _PR_DEAD_STATE;

    if (!_pr_recycleThreads || (_PR_RecycleThread(thread) == PR_FAILURE)) {
      /*
      ** Destroy the thread resources
      */
      _PR_UserDestroyThread(thread);
    }

    /*
    ** Find another user thread to run. This cpu has finished the
    ** previous threads main and is now ready to run another thread.
    */
    {
      /* NOTE(review): this inner `is` shadows the outer declaration. */
      PRInt32 is;
      _PR_INTSOFF(is);
      _PR_MD_SWITCH_CONTEXT(thread);
    }

    /* Will land here when we get scheduled again if we are recycling... */
  }
}
    534 
/*
** Change a thread's scheduling priority.  Native threads delegate to
** the platform scheduler; local threads are moved between run queues
** and a reschedule is requested when the change makes preemption due.
*/
void _PR_SetThreadPriority(PRThread* thread, PRThreadPriority newPri) {
  PRThread* me = _PR_MD_CURRENT_THREAD();
  PRIntn is;

  if (_PR_IS_NATIVE_THREAD(thread)) {
    _PR_MD_SET_PRIORITY(&(thread->md), newPri);
    return;
  }

  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSOFF(is);
  }
  _PR_THREAD_LOCK(thread);
  if (newPri != thread->priority) {
    _PRCPU* cpu = thread->cpu;

    switch (thread->state) {
      case _PR_RUNNING:
        /* Change my priority */

        _PR_RUNQ_LOCK(cpu);
        thread->priority = newPri;
        /* Any ready thread now outranking us triggers a reschedule. */
        if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
          if (!_PR_IS_NATIVE_THREAD(me)) {
            _PR_SET_RESCHED_FLAG();
          }
        }
        _PR_RUNQ_UNLOCK(cpu);
        break;

      case _PR_RUNNABLE:

        _PR_RUNQ_LOCK(cpu);
        /* Move to different runQ */
        _PR_DEL_RUNQ(thread);
        thread->priority = newPri;
        PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
        _PR_ADD_RUNQ(thread, cpu, newPri);
        _PR_RUNQ_UNLOCK(cpu);

        if (newPri > me->priority) {
          if (!_PR_IS_NATIVE_THREAD(me)) {
            _PR_SET_RESCHED_FLAG();
          }
        }

        break;

      case _PR_LOCK_WAIT:
      case _PR_COND_WAIT:
      case _PR_IO_WAIT:
      case _PR_SUSPENDED:

        /* Blocked threads just record the new priority for later. */
        thread->priority = newPri;
        break;

        /* NOTE(review): no default case -- other states (e.g.
         * _PR_JOIN_WAIT, _PR_DEAD_STATE) silently keep their old
         * priority; presumably intentional, but unverified here. */
    }
  }
  _PR_THREAD_UNLOCK(thread);
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }
}
    597 
/*
** Suspend the named thread.  Runnable local threads move to the suspend
** queue; threads blocked on a lock/cv/io are flagged _PR_SUSPENDING so
** they stay suspended when they wake.  (The stale mention of copying gc
** registers into a regBuf was removed -- there is no such parameter.)
*/
static void _PR_Suspend(PRThread* thread) {
  PRIntn is;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  PR_ASSERT(thread != me);
  /* A native thread must not be bound to an NSPR cpu here. */
  PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));

  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSOFF(is);
  }
  _PR_THREAD_LOCK(thread);
  switch (thread->state) {
    case _PR_RUNNABLE:
      if (!_PR_IS_NATIVE_THREAD(thread)) {
        /* Move the thread from its run queue to the suspend queue. */
        _PR_RUNQ_LOCK(thread->cpu);
        _PR_DEL_RUNQ(thread);
        _PR_RUNQ_UNLOCK(thread->cpu);

        _PR_MISCQ_LOCK(thread->cpu);
        _PR_ADD_SUSPENDQ(thread, thread->cpu);
        _PR_MISCQ_UNLOCK(thread->cpu);
      } else {
        /*
         * Only LOCAL threads are suspended by _PR_Suspend
         */
        PR_ASSERT(0);
      }
      thread->state = _PR_SUSPENDED;
      break;

    case _PR_RUNNING:
      /*
       * The thread being suspended should be a LOCAL thread with
       * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
       */
      PR_ASSERT(0);
      break;

    case _PR_LOCK_WAIT:
    case _PR_IO_WAIT:
    case _PR_COND_WAIT:
      /* Already blocked: mark it so it stays suspended on wakeup. */
      if (_PR_IS_NATIVE_THREAD(thread)) {
        _PR_MD_SUSPEND_THREAD(thread);
      }
      thread->flags |= _PR_SUSPENDING;
      break;

    default:
      PR_Abort();
  }
  _PR_THREAD_UNLOCK(thread);
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }
}
    656 
/*
** Undo a _PR_Suspend: make the thread schedulable again, or clear the
** _PR_SUSPENDING flag if it was suspended while blocked.
*/
static void _PR_Resume(PRThread* thread) {
  PRThreadPriority pri;
  PRIntn is;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSOFF(is);
  }
  _PR_THREAD_LOCK(thread);
  switch (thread->state) {
    case _PR_SUSPENDED:
      thread->state = _PR_RUNNABLE;
      thread->flags &= ~_PR_SUSPENDING;
      if (!_PR_IS_NATIVE_THREAD(thread)) {
        /* Move from the suspend queue back onto the run queue. */
        _PR_MISCQ_LOCK(thread->cpu);
        _PR_DEL_SUSPENDQ(thread);
        _PR_MISCQ_UNLOCK(thread->cpu);

        pri = thread->priority;

        _PR_RUNQ_LOCK(thread->cpu);
        _PR_ADD_RUNQ(thread, thread->cpu, pri);
        _PR_RUNQ_UNLOCK(thread->cpu);

        /* Request preemption if the resumed thread outranks us. */
        if (pri > _PR_MD_CURRENT_THREAD()->priority) {
          if (!_PR_IS_NATIVE_THREAD(me)) {
            _PR_SET_RESCHED_FLAG();
          }
        }
      } else {
        PR_ASSERT(0);
      }
      break;

    case _PR_IO_WAIT:
    case _PR_COND_WAIT:
      thread->flags &= ~_PR_SUSPENDING;
      /*      PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
      break;

    case _PR_LOCK_WAIT: {
      PRLock* wLock = thread->wait.lock;

      thread->flags &= ~_PR_SUSPENDING;

      /* If the contended lock is now free, let this waiter take it. */
      _PR_LOCK_LOCK(wLock);
      if (thread->wait.lock->owner == 0) {
        _PR_UnblockLockWaiter(thread->wait.lock);
      }
      _PR_LOCK_UNLOCK(wLock);
      break;
    }
    case _PR_RUNNABLE:
      break;
    case _PR_RUNNING:
      /*
       * The thread being suspended should be a LOCAL thread with
       * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
       */
      PR_ASSERT(0);
      break;

    default:
      /*
       * thread should have been in one of the above-listed blocked states
       * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
       */
      PR_Abort();
  }
  _PR_THREAD_UNLOCK(thread);
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }
}
    731 
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
/*
** Steal a runnable local thread from another cpu's run queues.
** Returns the thread pulled off its run queue, or NULL if none is
** eligible.  Sets *wakeup_cpus when a no_sched thread was skipped, so
** the caller can wake all cpus and avoid missing a runnable thread.
*/
static PRThread* get_thread(_PRCPU* cpu, PRBool* wakeup_cpus) {
  PRThread* thread;
  PRIntn pri;
  PRUint32 r;
  PRCList* qp;
  PRIntn priMin, priMax;

  _PR_RUNQ_LOCK(cpu);
  /* Use the ready-mask to narrow the priority range we must scan. */
  r = _PR_RUNQREADYMASK(cpu);
  if (r == 0) {
    priMin = priMax = PR_PRIORITY_FIRST;
  } else if (r == (1 << PR_PRIORITY_NORMAL)) {
    priMin = priMax = PR_PRIORITY_NORMAL;
  } else {
    priMin = PR_PRIORITY_FIRST;
    priMax = PR_PRIORITY_LAST;
  }
  thread = NULL;
  for (pri = priMax; pri >= priMin; pri--) {
    if (r & (1 << pri)) {
      for (qp = _PR_RUNQ(cpu)[pri].next; qp != &_PR_RUNQ(cpu)[pri];
           qp = qp->next) {
        thread = _PR_THREAD_PTR(qp);
        /*
         * skip non-schedulable threads
         */
        PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
        if (thread->no_sched) {
          thread = NULL;
          /*
           * Need to wake up cpus to avoid missing a runnable
           * thread.  Waking up all CPUs needs to happen only once.
           */

          *wakeup_cpus = PR_TRUE;
          continue;
        } else if (thread->flags & _PR_BOUND_THREAD) {
          /*
           * Thread bound to cpu 0
           */

          thread = NULL;
          continue;
        } else if (thread->io_pending == PR_TRUE) {
          /*
           * A thread that is blocked for I/O needs to run
           * on the same cpu on which it was blocked. This is because
           * the cpu's ioq is accessed without lock protection and scheduling
           * the thread on a different cpu would preclude this optimization.
           */
          thread = NULL;
          continue;
        } else {
          /* Pull thread off of its run queue */
          _PR_DEL_RUNQ(thread);
          _PR_RUNQ_UNLOCK(cpu);
          return (thread);
        }
      }
    }
    thread = NULL;
  }
  _PR_RUNQ_UNLOCK(cpu);
  return (thread);
}
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
    800 
/*
** Schedule this native thread by finding the highest priority nspr
** thread that is ready to run.
**
** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls
**       PR_Schedule() rather than calling PR_Schedule.  Otherwise if there
**       is initialization required for switching from SWITCH_CONTEXT,
**       it will not get done!
*/
void _PR_Schedule(void) {
  PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
  _PRCPU* cpu = _PR_MD_CURRENT_CPU();
  PRIntn pri;
  PRUint32 r;
  PRCList* qp;
  PRIntn priMin, priMax;
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
  PRBool wakeup_cpus;
#endif

  /* Interrupts must be disabled */
  PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

  /* Since we are rescheduling, we no longer want to */
  _PR_CLEAR_RESCHED_FLAG();

  /*
  ** Find highest priority thread to run. Bigger priority numbers are
  ** higher priority threads
  */
  _PR_RUNQ_LOCK(cpu);
  /*
   *  if we are in SuspendAll mode, can schedule only the thread
   *    that called PR_SuspendAll
   *
   *  The thread may be ready to run now, after completing an I/O
   *  operation, for example
   */
  if ((thread = suspendAllThread) != 0) {
    if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
      /* Pull thread off of its run queue */
      _PR_DEL_RUNQ(thread);
      _PR_RUNQ_UNLOCK(cpu);
      goto found_thread;
    } else {
      thread = NULL;
      _PR_RUNQ_UNLOCK(cpu);
      goto idle_thread;
    }
  }
  /* Use the ready-mask to narrow the priority range we must scan. */
  r = _PR_RUNQREADYMASK(cpu);
  if (r == 0) {
    priMin = priMax = PR_PRIORITY_FIRST;
  } else if (r == (1 << PR_PRIORITY_NORMAL)) {
    priMin = priMax = PR_PRIORITY_NORMAL;
  } else {
    priMin = PR_PRIORITY_FIRST;
    priMax = PR_PRIORITY_LAST;
  }
  thread = NULL;
  for (pri = priMax; pri >= priMin; pri--) {
    if (r & (1 << pri)) {
      for (qp = _PR_RUNQ(cpu)[pri].next; qp != &_PR_RUNQ(cpu)[pri];
           qp = qp->next) {
        thread = _PR_THREAD_PTR(qp);
        /*
         * skip non-schedulable threads
         */
        PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
        if ((thread->no_sched) && (me != thread)) {
          thread = NULL;
          continue;
        } else {
          /* Pull thread off of its run queue */
          _PR_DEL_RUNQ(thread);
          _PR_RUNQ_UNLOCK(cpu);
          goto found_thread;
        }
      }
    }
    thread = NULL;
  }
  _PR_RUNQ_UNLOCK(cpu);

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)

  /* Nothing runnable locally: try to steal a thread from another cpu. */
  wakeup_cpus = PR_FALSE;
  _PR_CPU_LIST_LOCK();
  for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
    if (cpu != _PR_CPU_PTR(qp)) {
      if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus)) != NULL) {
        thread->cpu = cpu;
        _PR_CPU_LIST_UNLOCK();
        if (wakeup_cpus == PR_TRUE) {
          _PR_MD_WAKEUP_CPUS();
        }
        goto found_thread;
      }
    }
  }
  _PR_CPU_LIST_UNLOCK();
  if (wakeup_cpus == PR_TRUE) {
    _PR_MD_WAKEUP_CPUS();
  }

#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

idle_thread:
  /*
  ** There are no threads to run. Switch to the idle thread
  */
  PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
  thread = _PR_MD_CURRENT_CPU()->idle_thread;

found_thread:
  PR_ASSERT((me == thread) ||
            ((thread->state == _PR_RUNNABLE) && (!(thread->no_sched))));

  /* Resume the thread */
  PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("switching to %d[%p]", thread->id, thread));
  PR_ASSERT(thread->state != _PR_RUNNING);
  thread->state = _PR_RUNNING;

  /* If we are on the runq, it just means that we went to sleep on some
   * resource, and by the time we got here another real native thread had
   * already given us the resource and put us back on the runqueue
   */
  PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
  if (thread != me) {
    _PR_MD_RESTORE_CONTEXT(thread);
  }
#if 0
  /* XXXMB; with setjmp/longjmp it is impossible to land here, but
   * it is not with fibers... Is this a bad thing?  I believe it is
   * still safe.
   */
  PR_NOT_REACHED("impossible return from schedule");
#endif
}
    940 
    941 /*
    942 ** Attaches a thread.
    943 ** Does not set the _PR_MD_CURRENT_THREAD.
    944 ** Does not specify the scope of the thread.
    945 */
    946 static PRThread* _PR_AttachThread(PRThreadType type, PRThreadPriority priority,
    947                                  PRThreadStack* stack) {
    948  PRThread* thread;
    949  char* mem;
    950 
    951  if (priority > PR_PRIORITY_LAST) {
    952    priority = PR_PRIORITY_LAST;
    953  } else if (priority < PR_PRIORITY_FIRST) {
    954    priority = PR_PRIORITY_FIRST;
    955  }
    956 
    957  mem = (char*)PR_CALLOC(sizeof(PRThread));
    958  if (mem) {
    959    thread = (PRThread*)mem;
    960    thread->priority = priority;
    961    thread->stack = stack;
    962    thread->state = _PR_RUNNING;
    963    PR_INIT_CLIST(&thread->lockList);
    964    if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
    965      PR_DELETE(thread);
    966      return 0;
    967    }
    968 
    969    return thread;
    970  }
    971  return 0;
    972 }
    973 
/*
** Create a fresh native (global-scope) thread.  Allocates the thread
** object, bumps the system/user active-thread counter, allocates a stack
** descriptor and (for joinable threads) a termination condition variable,
** then asks the machine layer to spawn the OS thread.  On any failure the
** active count is decremented and the partially built thread destroyed;
** returns NULL in that case.
*/
PR_IMPLEMENT(PRThread*)
_PR_NativeCreateThread(PRThreadType type, void (*start)(void* arg), void* arg,
                       PRThreadPriority priority, PRThreadScope scope,
                       PRThreadState state, PRUint32 stackSize,
                       PRUint32 flags) {
  PRThread* thread;

  thread = _PR_AttachThread(type, priority, NULL);

  if (thread) {
    /* Assign an id and count this thread as active under _pr_activeLock. */
    PR_Lock(_pr_activeLock);
    thread->flags = (flags | _PR_GLOBAL_SCOPE);
    thread->id = ++_pr_utid;
    if (type == PR_SYSTEM_THREAD) {
      thread->flags |= _PR_SYSTEM;
      _pr_systemActive++;
    } else {
      _pr_userActive++;
    }
    PR_Unlock(_pr_activeLock);

    thread->stack = PR_NEWZAP(PRThreadStack);
    if (!thread->stack) {
      PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
      goto done;
    }
    thread->stack->stackSize = stackSize ? stackSize : _MD_DEFAULT_STACK_SIZE;
    thread->stack->thr = thread;
    thread->startFunc = start;
    thread->arg = arg;

    /*
      Set thread flags related to scope and joinable state. If joinable
      thread, allocate a "termination" condition variable.
     */
    if (state == PR_JOINABLE_THREAD) {
      thread->term = PR_NewCondVar(_pr_terminationCVLock);
      if (thread->term == NULL) {
        PR_DELETE(thread->stack);
        goto done;
      }
    }

    thread->state = _PR_RUNNING;
    if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority, scope,
                             state, stackSize) == PR_SUCCESS) {
      return thread;
    }
    /* OS-level creation failed: undo the cond var and stack, then fall
     * through to the common failure path below. */
    if (thread->term) {
      PR_DestroyCondVar(thread->term);
      thread->term = NULL;
    }
    PR_DELETE(thread->stack);
  }

done:
  /* Common failure path: the active count was already incremented above,
   * so decrement it before destroying the thread object. */
  if (thread) {
    _PR_DecrActiveThreadCount(thread);
    _PR_DestroyThread(thread);
  }
  return NULL;
}
   1036 
   1037 /************************************************************************/
   1038 
/*
** Core thread-creation routine behind PR_CreateThread.  Depending on the
** requested scope and platform support, creates either a native (global)
** thread or a user-level (local) thread that runs on an NSPR "CPU".
** Both paths first try to recycle a previously terminated thread of the
** matching kind from the dead-thread queues before allocating anew.
** Returns the new thread, or NULL on failure.
*/
PR_IMPLEMENT(PRThread*)
_PR_CreateThread(PRThreadType type, void (*start)(void* arg), void* arg,
                 PRThreadPriority priority, PRThreadScope scope,
                 PRThreadState state, PRUint32 stackSize, PRUint32 flags) {
  PRThread* me;
  PRThread* thread = NULL;
  PRThreadStack* stack;
  char* top;
  PRIntn is;
  PRIntn native = 0;
  PRIntn useRecycled = 0; /* set when a dead user thread is reused; not
                           * otherwise consulted in this function */
  PRBool status;

  /*
  First, pin down the priority.  Not all compilers catch passing out of
  range enum here.  If we let bad values thru, priority queues won't work.
  */
  if (priority > PR_PRIORITY_LAST) {
    priority = PR_PRIORITY_LAST;
  } else if (priority < PR_PRIORITY_FIRST) {
    priority = PR_PRIORITY_FIRST;
  }

  if (!_pr_initialized) {
    _PR_ImplicitInitialization();
  }

  /* NOTE: "me" is only initialized for non-idle creators; every later use
   * of "me" is short-circuit guarded by a test on _PR_IDLE_THREAD. */
  if (!(flags & _PR_IDLE_THREAD)) {
    me = _PR_MD_CURRENT_THREAD();
  }

#if defined(_PR_GLOBAL_THREADS_ONLY)
  /*
   * can create global threads only
   */
  if (scope == PR_LOCAL_THREAD) {
    scope = PR_GLOBAL_THREAD;
  }
#endif

  if (_native_threads_only) {
    scope = PR_GLOBAL_THREAD;
  }

  native =
      (((scope == PR_GLOBAL_THREAD) || (scope == PR_GLOBAL_BOUND_THREAD)) &&
       _PR_IS_NATIVE_THREAD_SUPPORTED());

  _PR_ADJUST_STACKSIZE(stackSize);

  if (native) {
    /*
     * clear the IDLE_THREAD flag which applies to LOCAL
     * threads only
     */
    flags &= ~_PR_IDLE_THREAD;
    flags |= _PR_GLOBAL_SCOPE;
    /* Fast path: recycle a dead native thread if one is available. */
    if (_PR_NUM_DEADNATIVE > 0) {
      _PR_DEADQ_LOCK;

      if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
        _PR_DEADQ_UNLOCK;
      } else {
        thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
        PR_REMOVE_LINK(&thread->links);
        _PR_DEC_DEADNATIVE;
        _PR_DEADQ_UNLOCK;

        _PR_InitializeRecycledThread(thread);
        thread->startFunc = start;
        thread->arg = arg;
        thread->flags = (flags | _PR_GLOBAL_SCOPE);
        if (type == PR_SYSTEM_THREAD) {
          thread->flags |= _PR_SYSTEM;
          PR_ATOMIC_INCREMENT(&_pr_systemActive);
        } else {
          PR_ATOMIC_INCREMENT(&_pr_userActive);
        }

        /* Reconcile the recycled thread's join state with the request:
         * allocate a termination CV if it needs one, drop it otherwise. */
        if (state == PR_JOINABLE_THREAD) {
          if (!thread->term) {
            thread->term = PR_NewCondVar(_pr_terminationCVLock);
          }
        } else {
          if (thread->term) {
            PR_DestroyCondVar(thread->term);
            thread->term = 0;
          }
        }

        thread->priority = priority;
        _PR_MD_SET_PRIORITY(&(thread->md), priority);
        /* XXX what about stackSize? */
        thread->state = _PR_RUNNING;
        /* The recycled native thread is parked waiting; wake it up. */
        _PR_MD_WAKEUP_WAITER(thread);
        return thread;
      }
    }
    thread = _PR_NativeCreateThread(type, start, arg, priority, scope, state,
                                    stackSize, flags);
  } else {
    /* Local (user-level) thread: try the dead-user recycle queue first. */
    if (_PR_NUM_DEADUSER > 0) {
      _PR_DEADQ_LOCK;

      if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
        _PR_DEADQ_UNLOCK;
      } else {
        PRCList* ptr;

        /* Go down list checking for a recycled thread with a
         * large enough stack.  XXXMB - this has a bad degenerate case.
         */
        ptr = _PR_DEADUSERQ.next;
        while (ptr != &_PR_DEADUSERQ) {
          thread = _PR_THREAD_PTR(ptr);
          if ((thread->stack->stackSize >= stackSize) && (!thread->no_sched)) {
            PR_REMOVE_LINK(&thread->links);
            _PR_DEC_DEADUSER;
            break;
          } else {
            ptr = ptr->next;
            thread = NULL;
          }
        }

        _PR_DEADQ_UNLOCK;

        if (thread) {
          _PR_InitializeRecycledThread(thread);
          thread->startFunc = start;
          thread->arg = arg;
          thread->priority = priority;
          /* Same join-state reconciliation as the native recycle path. */
          if (state == PR_JOINABLE_THREAD) {
            if (!thread->term) {
              thread->term = PR_NewCondVar(_pr_terminationCVLock);
            }
          } else {
            if (thread->term) {
              PR_DestroyCondVar(thread->term);
              thread->term = 0;
            }
          }
          useRecycled++;
        }
      }
    }
    if (thread == NULL) {
#ifndef HAVE_CUSTOM_USER_THREADS
      stack = _PR_NewStack(stackSize);
      if (!stack) {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
      }

      /* Allocate thread object and per-thread data off the top of the stack*/
      top = stack->stackTop;
#  ifdef HAVE_STACK_GROWING_UP
      thread = (PRThread*)top;
      top = top + sizeof(PRThread);
      /*
       * Make stack 64-byte aligned
       */
      if ((PRUptrdiff)top & 0x3f) {
        top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
      }
#  else
      top = top - sizeof(PRThread);
      thread = (PRThread*)top;
      /*
       * Make stack 64-byte aligned
       */
      if ((PRUptrdiff)top & 0x3f) {
        top = (char*)((PRUptrdiff)top & ~0x3f);
      }
#  endif
      stack->thr = thread;
      memset(thread, 0, sizeof(PRThread));
      thread->threadAllocatedOnStack = 1;
#else
      /* Custom user threads: the MD layer supplies the thread object. */
      thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
      if (!thread) {
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
      }
      thread->threadAllocatedOnStack = 0;
      stack = NULL;
      top = NULL;
#endif

      /* Initialize thread */
      thread->tpdLength = 0;
      thread->privateData = NULL;
      thread->stack = stack;
      thread->priority = priority;
      thread->startFunc = start;
      thread->arg = arg;
      PR_INIT_CLIST(&thread->lockList);

      if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        if (thread->threadAllocatedOnStack == 1) {
          _PR_FreeStack(thread->stack);
        } else {
          PR_DELETE(thread);
        }
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
        return NULL;
      }

      if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
        if (thread->threadAllocatedOnStack == 1) {
          _PR_FreeStack(thread->stack);
        } else {
          PR_DELETE(thread->privateData);
          PR_DELETE(thread);
        }
        PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
        return NULL;
      }

      /* Build the machine context that will start _PR_UserRunThread. */
      _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);

      if (status == PR_FALSE) {
        _PR_MD_FREE_LOCK(&thread->threadLock);
        if (thread->threadAllocatedOnStack == 1) {
          _PR_FreeStack(thread->stack);
        } else {
          PR_DELETE(thread->privateData);
          PR_DELETE(thread);
        }
        return NULL;
      }

      /*
        Set thread flags related to scope and joinable state. If joinable
        thread, allocate a "termination" condition variable.
      */
      if (state == PR_JOINABLE_THREAD) {
        thread->term = PR_NewCondVar(_pr_terminationCVLock);
        if (thread->term == NULL) {
          _PR_MD_FREE_LOCK(&thread->threadLock);
          if (thread->threadAllocatedOnStack == 1) {
            _PR_FreeStack(thread->stack);
          } else {
            PR_DELETE(thread->privateData);
            PR_DELETE(thread);
          }
          return NULL;
        }
      }
    }

    /* Update thread type counter */
    PR_Lock(_pr_activeLock);
    thread->flags = flags;
    thread->id = ++_pr_utid;
    if (type == PR_SYSTEM_THREAD) {
      thread->flags |= _PR_SYSTEM;
      _pr_systemActive++;
    } else {
      _pr_userActive++;
    }

    /* Make thread runnable */
    thread->state = _PR_RUNNABLE;
    /*
     * Add to list of active threads
     */
    PR_Unlock(_pr_activeLock);

    /* Pick the CPU the new local thread will run on: a native creator
     * hands it to the primordial CPU, otherwise it stays on ours. */
    if ((!(thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me)) {
      thread->cpu = _PR_GetPrimordialCPU();
    } else {
      thread->cpu = _PR_MD_CURRENT_CPU();
    }

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

    if ((!(thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
      _PR_INTSOFF(is);
      _PR_RUNQ_LOCK(thread->cpu);
      _PR_ADD_RUNQ(thread, thread->cpu, priority);
      _PR_RUNQ_UNLOCK(thread->cpu);
    }

    if (thread->flags & _PR_IDLE_THREAD) {
      /*
      ** If the creating thread is a kernel thread, we need to
      ** awaken the user thread idle thread somehow; potentially
      ** it could be sleeping in its idle loop, and we need to poke
      ** it.  To do so, wake the idle thread...
      */
      _PR_MD_WAKEUP_WAITER(NULL);
    } else if (_PR_IS_NATIVE_THREAD(me)) {
      _PR_MD_WAKEUP_WAITER(thread);
    }
    /* Re-enable "interrupts" only if we disabled them above. */
    if ((!(thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
      _PR_INTSON(is);
    }
  }

  return thread;
}
   1341 
   1342 PR_IMPLEMENT(PRThread*)
   1343 PR_CreateThread(PRThreadType type, void (*start)(void* arg), void* arg,
   1344                PRThreadPriority priority, PRThreadScope scope,
   1345                PRThreadState state, PRUint32 stackSize) {
   1346  return _PR_CreateThread(type, start, arg, priority, scope, state, stackSize,
   1347                          0);
   1348 }
   1349 
   1350 /*
   1351 ** Associate a thread object with an existing native thread.
   1352 **     "type" is the type of thread object to attach
   1353 **     "priority" is the priority to assign to the thread
   1354 **     "stack" defines the shape of the threads stack
   1355 **
   1356 ** This can return NULL if some kind of error occurs, or if memory is
   1357 ** tight.
   1358 **
   1359 ** This call is not normally needed unless you create your own native
   1360 ** thread. PR_Init does this automatically for the primordial thread.
   1361 */
PRThread* _PRI_AttachThread(PRThreadType type, PRThreadPriority priority,
                            PRThreadStack* stack, PRUint32 flags) {
  PRThread* thread;

  /* Already attached?  Just hand back the existing thread object. */
  if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
    return thread;
  }
  _PR_MD_SET_CURRENT_THREAD(NULL);

  /* Clear out any state if this thread was attached before */
  _PR_MD_SET_CURRENT_CPU(NULL);

  thread = _PR_AttachThread(type, priority, stack);
  if (thread) {
    PRIntn is;

    _PR_MD_SET_CURRENT_THREAD(thread);

    thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;

    /* No caller-supplied stack description: fabricate a default one. */
    if (!stack) {
      thread->stack = PR_NEWZAP(PRThreadStack);
      if (!thread->stack) {
        _PR_DestroyThread(thread);
        return NULL;
      }
      thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
    }
    PR_INIT_CLIST(&thread->links);

    if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
      PR_DELETE(thread->stack);
      _PR_DestroyThread(thread);
      return NULL;
    }

    _PR_MD_SET_CURRENT_CPU(NULL);

    /* Bump the system/user active count.  The lock/interrupt dance is
     * conditional on a current CPU existing — NOTE(review): presumably
     * this guards very-early attachment before CPU setup; confirm. */
    if (_PR_MD_CURRENT_CPU()) {
      _PR_INTSOFF(is);
      PR_Lock(_pr_activeLock);
    }
    if (type == PR_SYSTEM_THREAD) {
      thread->flags |= _PR_SYSTEM;
      _pr_systemActive++;
    } else {
      _pr_userActive++;
    }
    if (_PR_MD_CURRENT_CPU()) {
      PR_Unlock(_pr_activeLock);
      _PR_INTSON(is);
    }
  }
  return thread;
}
   1417 
   1418 PR_IMPLEMENT(PRThread*)
   1419 PR_AttachThread(PRThreadType type, PRThreadPriority priority,
   1420                PRThreadStack* stack) {
   1421  return PR_GetCurrentThread();
   1422 }
   1423 
   1424 PR_IMPLEMENT(void) PR_DetachThread(void) {
   1425  /*
   1426   * On Solaris, and Windows, foreign threads are detached when
   1427   * they terminate.
   1428   */
   1429 #if !defined(WIN32) && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
   1430  PRThread* me;
   1431  if (_pr_initialized) {
   1432    me = _PR_MD_GET_ATTACHED_THREAD();
   1433    if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
   1434      _PRI_DetachThread();
   1435    }
   1436  }
   1437 #endif
   1438 }
   1439 
/*
** Detach the calling (previously attached, native) thread: run its NSPR
** cleanup, drop it from the active count, tear down machine state, and
** free the thread object.  The primordial thread is never detached.
*/
void _PRI_DetachThread(void) {
  PRThread* me = _PR_MD_CURRENT_THREAD();

  if (me->flags & _PR_PRIMORDIAL) {
    /*
     * ignore, if primordial thread
     */
    return;
  }
  PR_ASSERT(me->flags & _PR_ATTACHED);
  PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
  /* Tear down this thread's NSPR state before freeing the object. */
  _PR_CleanupThread(me);
  PR_DELETE(me->privateData);

  _PR_DecrActiveThreadCount(me);

  _PR_MD_CLEAN_THREAD(me);
  _PR_MD_SET_CURRENT_THREAD(NULL);
  /* The stack descriptor is only separately allocated when the thread
   * object itself was not carved out of its stack. */
  if (!me->threadAllocatedOnStack) {
    PR_DELETE(me->stack);
  }
  _PR_MD_FREE_LOCK(&me->threadLock);
  PR_DELETE(me);
}
   1464 
   1465 /*
   1466 ** Wait for thread termination:
   1467 **     "thread" is the target thread
   1468 **
   1469 ** This can return PR_FAILURE if no joinable thread could be found
   1470 ** corresponding to the specified target thread.
   1471 **
   1472 ** The calling thread is suspended until the target thread completes.
   1473 ** Several threads cannot wait for the same thread to complete; one thread
** will complete successfully and the others will terminate with an error of
** PR_FAILURE.
   1476 ** The calling thread will not be blocked if the target thread has already
   1477 ** terminated.
   1478 */
PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread* thread) {
  PRIntn is;
  PRCondVar* term;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  /* Local threads mask scheduler "interrupts" while inspecting the
   * target's join state. */
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSOFF(is);
  }
  term = thread->term;
  /* can't join a non-joinable thread */
  if (term == NULL) {
    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
    goto ErrorExit;
  }

  /* multiple threads can't wait on the same joinable thread */
  if (term->condQ.next != &term->condQ) {
    goto ErrorExit;
  }
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }

  /* wait for the target thread's termination cv invariant */
  PR_Lock(_pr_terminationCVLock);
  while (thread->state != _PR_JOIN_WAIT) {
    (void)PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
  }
  (void)PR_Unlock(_pr_terminationCVLock);

  /*
   Remove target thread from global waiting to join Q; make it runnable
   again and put it back on its run Q.  When it gets scheduled later in
   _PR_RunThread code, it will clean up its stack.
  */
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSOFF(is);
  }
  thread->state = _PR_RUNNABLE;
  if (!_PR_IS_NATIVE_THREAD(thread)) {
    _PR_THREAD_LOCK(thread);

    _PR_MISCQ_LOCK(thread->cpu);
    _PR_DEL_JOINQ(thread);
    _PR_MISCQ_UNLOCK(thread->cpu);

    _PR_AddThreadToRunQ(me, thread);
    _PR_THREAD_UNLOCK(thread);
  }
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }

  /* Let the joined thread resume so it can finish its own teardown. */
  _PR_MD_WAKEUP_WAITER(thread);

  return PR_SUCCESS;

ErrorExit:
  /* Restore "interrupts" for local threads before failing. */
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_INTSON(is);
  }
  return PR_FAILURE;
}
   1542 
   1543 PR_IMPLEMENT(void)
   1544 PR_SetThreadPriority(PRThread* thread, PRThreadPriority newPri) {
   1545  /*
   1546  First, pin down the priority.  Not all compilers catch passing out of
   1547  range enum here.  If we let bad values thru, priority queues won't work.
   1548  */
   1549  if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
   1550    newPri = PR_PRIORITY_LAST;
   1551  } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
   1552    newPri = PR_PRIORITY_FIRST;
   1553  }
   1554 
   1555  if (_PR_IS_NATIVE_THREAD(thread)) {
   1556    thread->priority = newPri;
   1557    _PR_MD_SET_PRIORITY(&(thread->md), newPri);
   1558  } else {
   1559    _PR_SetThreadPriority(thread, newPri);
   1560  }
   1561 }
   1562 
   1563 PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char* name) {
   1564  PRThread* thread;
   1565  size_t nameLen;
   1566 
   1567  if (!name) {
   1568    PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
   1569    return PR_FAILURE;
   1570  }
   1571 
   1572  thread = PR_GetCurrentThread();
   1573  if (!thread) {
   1574    return PR_FAILURE;
   1575  }
   1576 
   1577  PR_Free(thread->name);
   1578  nameLen = strlen(name);
   1579  thread->name = (char*)PR_Malloc(nameLen + 1);
   1580  if (!thread->name) {
   1581    return PR_FAILURE;
   1582  }
   1583  memcpy(thread->name, name, nameLen + 1);
   1584  _PR_MD_SET_CURRENT_THREAD_NAME(thread->name);
   1585  return PR_SUCCESS;
   1586 }
   1587 
   1588 PR_IMPLEMENT(const char*) PR_GetThreadName(const PRThread* thread) {
   1589  if (!thread) {
   1590    return NULL;
   1591  }
   1592  return thread->name;
   1593 }
   1594 
   1595 /*
   1596 ** This routine prevents all other threads from running. This call is needed by
   1597 ** the garbage collector.
   1598 */
PR_IMPLEMENT(void) PR_SuspendAll(void) {
  PRThread* me = _PR_MD_CURRENT_THREAD();
  PRCList* qp;

  /*
   * Stop all user and native threads which are marked GC able.
   */
  /* NOTE: _pr_activeLock is deliberately held from here until the
   * matching PR_ResumeAll() releases it. */
  PR_Lock(_pr_activeLock);
  suspendAllOn = PR_TRUE;
  suspendAllThread = _PR_MD_CURRENT_THREAD();
  _PR_MD_BEGIN_SUSPEND_ALL();
  /* Suspend every GC-able local thread except ourselves. */
  for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; qp != &_PR_ACTIVE_LOCAL_THREADQ();
       qp = qp->next) {
    if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
        _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
      _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
      PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
    }
  }
  /* Suspend every GC-able global (native) thread except ourselves,
   * using the machine-dependent suspend directly. */
  for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
       qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
    if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
        _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
    /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
    {
      _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
  }
  _PR_MD_END_SUSPEND_ALL();
}
   1629 
   1630 /*
   1631 ** This routine unblocks all other threads that were suspended from running by
   1632 ** PR_SuspendAll(). This call is needed by the garbage collector.
   1633 */
PR_IMPLEMENT(void) PR_ResumeAll(void) {
  PRThread* me = _PR_MD_CURRENT_THREAD();
  PRCList* qp;

  /*
   * Resume all user and native threads which are marked GC able.
   */
  _PR_MD_BEGIN_RESUME_ALL();
  /* Resume the GC-able local threads suspended by PR_SuspendAll(). */
  for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; qp != &_PR_ACTIVE_LOCAL_THREADQ();
       qp = qp->next) {
    if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
        _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
      _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
    }
  }
  /* Resume the GC-able global (native) threads via the MD layer. */
  for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
       qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
    if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
        _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
      _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
  }
  _PR_MD_END_RESUME_ALL();
  suspendAllThread = NULL;
  suspendAllOn = PR_FALSE;
  /* Release the lock acquired by the matching PR_SuspendAll(). */
  PR_Unlock(_pr_activeLock);
}
   1661 
/*
** Invoke "func" on every GC-able active thread (local then global),
** passing a running index and "arg".  Stops and returns the callback's
** status on the first non-success.  Must be called while PR_SuspendAll()
** is in effect (asserted below).
*/
PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void* arg) {
  PRCList *qp, *qp_next;
  PRIntn i = 0;
  PRStatus rv = PR_SUCCESS;
  PRThread* t;

  /*
  ** Currently Enumerate threads happen only with suspension and
  ** pr_activeLock held
  */
  PR_ASSERT(suspendAllOn);

  /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
   * qp->next after applying the function "func".  In particular, "func"
   * might remove the thread from the queue and put it into another one in
   * which case qp->next no longer points to the next entry in the original
   * queue.
   *
   * To get around this problem, we save qp->next in qp_next before applying
   * "func" and use that saved value as the next value after applying "func".
   */

  /*
   * Traverse the list of local and global threads
   */
  for (qp = _PR_ACTIVE_LOCAL_THREADQ().next; qp != &_PR_ACTIVE_LOCAL_THREADQ();
       qp = qp_next) {
    qp_next = qp->next;
    t = _PR_ACTIVE_THREAD_PTR(qp);
    if (_PR_IS_GCABLE_THREAD(t)) {
      rv = (*func)(t, i, arg);
      if (rv != PR_SUCCESS) {
        return rv;
      }
      i++;
    }
  }
  /* Same traversal, save-next pattern for the global thread queue. */
  for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
       qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next) {
    qp_next = qp->next;
    t = _PR_ACTIVE_THREAD_PTR(qp);
    if (_PR_IS_GCABLE_THREAD(t)) {
      rv = (*func)(t, i, arg);
      if (rv != PR_SUCCESS) {
        return rv;
      }
      i++;
    }
  }
  return rv;
}
   1713 
   1714 /* FUNCTION: _PR_AddSleepQ
   1715 ** DESCRIPTION:
   1716 **    Adds a thread to the sleep/pauseQ.
   1717 ** RESTRICTIONS:
   1718 **    Caller must have the RUNQ lock.
   1719 **    Caller must be a user level thread
   1720 */
PR_IMPLEMENT(void)
_PR_AddSleepQ(PRThread* thread, PRIntervalTime timeout) {
  _PRCPU* cpu = thread->cpu;

  if (timeout == PR_INTERVAL_NO_TIMEOUT) {
    /* append the thread to the global pause Q */
    PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
    thread->flags |= _PR_ON_PAUSEQ;
  } else {
    PRIntervalTime sleep;
    PRCList* q;
    PRThread* t;

    /* sort onto global sleepQ */
    /* The sleepQ stores each entry's sleep time as a delta relative to
     * its predecessor; _PR_SLEEPQMAX(cpu) is the queue's total span. */
    sleep = timeout;

    /* Check if we are longest timeout */
    if (timeout >= _PR_SLEEPQMAX(cpu)) {
      /* Longest sleeper: append at the tail and extend the max. */
      PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
      thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
      _PR_SLEEPQMAX(cpu) = timeout;
    } else {
      /* Sort thread into global sleepQ at appropriate point */
      q = _PR_SLEEPQ(cpu).next;

      /* Now scan the list for where to insert this entry */
      while (q != &_PR_SLEEPQ(cpu)) {
        t = _PR_THREAD_PTR(q);
        if (sleep < t->sleep) {
          /* Found sleeper to insert in front of */
          break;
        }
        /* Consume this entry's delta as we walk past it. */
        sleep -= t->sleep;
        q = q->next;
      }
      thread->sleep = sleep;
      PR_INSERT_BEFORE(&thread->links, q);

      /*
      ** Subtract our sleep time from the sleeper that follows us (there
      ** must be one) so that they remain relative to us.
      */
      PR_ASSERT(thread->links.next != &_PR_SLEEPQ(cpu));

      t = _PR_THREAD_PTR(thread->links.next);
      PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
      t->sleep -= sleep;
    }

    thread->flags |= _PR_ON_SLEEPQ;
  }
}
   1773 
   1774 /* FUNCTION: _PR_DelSleepQ
   1775 ** DESCRIPTION:
   1776 **    Removes a thread from the sleep/pauseQ.
   1777 ** INPUTS:
   1778 **    If propogate_time is true, then the thread following the deleted
**    thread will get the time from the deleted thread.  This is used
   1780 **    when deleting a sleeper that has not timed out.
   1781 ** RESTRICTIONS:
   1782 **    Caller must have the RUNQ lock.
   1783 **    Caller must be a user level thread
   1784 */
PR_IMPLEMENT(void)
_PR_DelSleepQ(PRThread* thread, PRBool propogate_time) {
  _PRCPU* cpu = thread->cpu;

  /* Remove from pauseQ/sleepQ */
  if (thread->flags & (_PR_ON_PAUSEQ | _PR_ON_SLEEPQ)) {
    if (thread->flags & _PR_ON_SLEEPQ) {
      /* Sleep times are delta-encoded, so removing an entry means
       * either passing its delta to the next sleeper or shrinking the
       * queue's total (_PR_SLEEPQMAX). */
      PRCList* q = thread->links.next;
      if (q != &_PR_SLEEPQ(cpu)) {
        if (propogate_time == PR_TRUE) {
          PRThread* after = _PR_THREAD_PTR(q);
          after->sleep += thread->sleep;
        } else {
          _PR_SLEEPQMAX(cpu) -= thread->sleep;
        }
      } else {
        /* Check if prev is the beginning of the list; if so,
         * we are the only element on the list.
         */
        if (thread->links.prev != &_PR_SLEEPQ(cpu)) {
          _PR_SLEEPQMAX(cpu) -= thread->sleep;
        } else {
          _PR_SLEEPQMAX(cpu) = 0;
        }
      }
      thread->flags &= ~_PR_ON_SLEEPQ;
    } else {
      thread->flags &= ~_PR_ON_PAUSEQ;
    }
    PR_REMOVE_LINK(&thread->links);
  } else {
    /* Caller violated the contract: the thread must be on one queue. */
    PR_ASSERT(0);
  }
}
   1819 
/*
** Place a local (user-level) thread on a run queue so the scheduler can
** pick it up.  On non-NT platforms, also request a reschedule if the new
** thread outranks the current one on the same CPU.
*/
void _PR_AddThreadToRunQ(
    PRThread* me,     /* the current thread */
    PRThread* thread) /* the local thread to be added to a run queue */
{
  PRThreadPriority pri = thread->priority;
  _PRCPU* cpu = thread->cpu;

  PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

#if defined(WINNT)
  /*
   * On NT, we can only reliably know that the current CPU
   * is not idle.  We add the awakened thread to the run
   * queue of its CPU if its CPU is the current CPU.
   * For any other CPU, we don't really know whether it
   * is busy or idle.  So in all other cases, we just
   * "post" the awakened thread to the IO completion port
   * for the next idle CPU to execute (this is done in
   * _PR_MD_WAKEUP_WAITER).
   * Threads with a suspended I/O operation remain bound to
   * the same cpu until I/O is cancelled
   *
   * NOTE: the boolean expression below must be the exact
   * opposite of the corresponding boolean expression in
   * _PR_MD_WAKEUP_WAITER.
   */
  if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
      (thread->md.thr_bound_cpu)) {
    PR_ASSERT(!thread->md.thr_bound_cpu || (thread->md.thr_bound_cpu == cpu));
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thread, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
  }
#else
  _PR_RUNQ_LOCK(cpu);
  _PR_ADD_RUNQ(thread, cpu, pri);
  _PR_RUNQ_UNLOCK(cpu);
  /* If the awakened thread has higher priority than the current local
   * thread on this CPU, ask the scheduler to preempt soon. */
  if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
    if (pri > me->priority) {
      _PR_SET_RESCHED_FLAG();
    }
  }
#endif
}