tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

prucpu.c (10528B)


      1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #include "primpl.h"
      7 
/* The first CPU created; NULL until _PR_InitCPUs has run. */
_PRCPU* _pr_primordialCPU = NULL;

PRInt32 _pr_md_idle_cpus; /* number of idle cpus */
/*
 * The idle threads in MxN models increment/decrement _pr_md_idle_cpus.
 * If _PR_HAVE_ATOMIC_OPS is not defined, they can't use the atomic
 * increment/decrement routines (which are based on PR_Lock/PR_Unlock),
 * because PR_Lock asserts that the calling thread is not an idle thread.
 * So we use a _MDLock to protect _pr_md_idle_cpus.
 */
#if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#  ifndef _PR_HAVE_ATOMIC_OPS
static _MDLock _pr_md_idle_cpus_lock;
#  endif
#endif
PRUintn _pr_numCPU;    /* number of CPUs; raised by PR_SetConcurrency */
PRInt32 _pr_cpus_exit; /* set nonzero by _PR_CleanupCPUs to stop the CPUs */
PRUint32 _pr_cpu_affinity_mask = 0;

#if !defined(_PR_GLOBAL_THREADS_ONLY)

/* Next CPU id; incremented under _PR_CPU_LIST_LOCK in _PR_StartCPU. */
static PRUintn _pr_cpuID;

/* Idle-thread body for each CPU (defined below). */
static void PR_CALLBACK _PR_CPU_Idle(void*);

static _PRCPU* _PR_CreateCPU(void);
static PRStatus _PR_StartCPU(_PRCPU* cpu, PRThread* thread);

#  if !defined(_PR_LOCAL_THREADS_ONLY)
static void _PR_RunCPU(void* arg);
#  endif
     39 
/*
 * Initialize the user-level CPU machinery during NSPR startup.
 *
 * In the local-threads-only model the primordial native thread itself
 * becomes the first CPU; in the combined MxN model a dedicated global
 * thread is created to run it (see _PR_RunCPU).  A no-op when only
 * native threads are used.
 */
void _PR_InitCPUs() {
  PRThread* me = _PR_MD_CURRENT_THREAD();

  if (_native_threads_only) {
    return;
  }

  _pr_cpuID = 0;
  _MD_NEW_LOCK(&_pr_cpuLock);
#  if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#    ifndef _PR_HAVE_ATOMIC_OPS
  /* See comment at top of file: idle threads may not use PR_Lock. */
  _MD_NEW_LOCK(&_pr_md_idle_cpus_lock);
#    endif
#  endif

#  ifdef _PR_LOCAL_THREADS_ONLY

#    ifdef HAVE_CUSTOM_USER_THREADS
  _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
#    endif

  /* Now start the first CPU. */
  /* NOTE(review): _PR_CreateCPU may return NULL on OOM and _PR_StartCPU's
   * status is ignored here — confirm failure cannot happen this early. */
  _pr_primordialCPU = _PR_CreateCPU();
  _pr_numCPU = 1;
  _PR_StartCPU(_pr_primordialCPU, me);

  _PR_MD_SET_CURRENT_CPU(_pr_primordialCPU);

  /* Initialize cpu for current thread (could be different from me) */
  _PR_MD_CURRENT_THREAD()->cpu = _pr_primordialCPU;

  _PR_MD_SET_LAST_THREAD(me);

#  else /* Combined MxN model */

  /* The primordial CPU structure is created here (see the comment above
   * _PR_CreateCPU for why the native thread cannot do it), but is run by
   * a separate global thread so NSPR init never depends on it running. */
  _pr_primordialCPU = _PR_CreateCPU();
  _pr_numCPU = 1;
  _PR_CreateThread(PR_SYSTEM_THREAD, _PR_RunCPU, _pr_primordialCPU,
                   PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, PR_UNJOINABLE_THREAD,
                   0, _PR_IDLE_THREAD);

#  endif /* _PR_LOCAL_THREADS_ONLY */

  _PR_MD_INIT_CPUS();
}
     85 
     86 #  ifdef WINNT
     87 /*
     88 * Right now this function merely stops the CPUs and does
     89 * not do any other cleanup.
     90 *
     91 * It is only implemented for WINNT because bug 161998 only
     92 * affects the WINNT version of NSPR, but it would be nice
     93 * to implement this function for other platforms too.
     94 */
/*
 * Stop all CPU threads at shutdown (WINNT only; see comment above).
 *
 * Protocol: set the global exit flag, issue one wakeup per CPU so every
 * idle thread pausing in _PR_MD_PAUSE_CPU notices the flag (see the
 * _pr_cpus_exit check in _PR_CPU_Idle), then join each CPU's native
 * thread.  No memory or locks are reclaimed here.
 */
void _PR_CleanupCPUs(void) {
  PRUintn i;
  PRCList* qp;
  _PRCPU* cpu;

  _pr_cpus_exit = 1;
  for (i = 0; i < _pr_numCPU; i++) {
    _PR_MD_WAKEUP_WAITER(NULL);
  }
  /* Walk the global CPU list and wait for each CPU thread to exit. */
  for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
    cpu = _PR_CPU_PTR(qp);
    _PR_MD_JOIN_THREAD(&cpu->thread->md);
  }
}
    109 #  endif
    110 
    111 static _PRCPUQueue* _PR_CreateCPUQueue(void) {
    112  PRInt32 index;
    113  _PRCPUQueue* cpuQueue;
    114  cpuQueue = PR_NEWZAP(_PRCPUQueue);
    115 
    116  _MD_NEW_LOCK(&cpuQueue->runQLock);
    117  _MD_NEW_LOCK(&cpuQueue->sleepQLock);
    118  _MD_NEW_LOCK(&cpuQueue->miscQLock);
    119 
    120  for (index = 0; index < PR_ARRAY_SIZE(cpuQueue->runQ); index++) {
    121    PR_INIT_CLIST(&(cpuQueue->runQ[index]));
    122  }
    123  PR_INIT_CLIST(&(cpuQueue->sleepQ));
    124  PR_INIT_CLIST(&(cpuQueue->pauseQ));
    125  PR_INIT_CLIST(&(cpuQueue->suspendQ));
    126  PR_INIT_CLIST(&(cpuQueue->waitingToJoinQ));
    127 
    128  cpuQueue->numCPUs = 1;
    129 
    130  return cpuQueue;
    131 }
    132 
    133 /*
    134 * Create a new CPU.
    135 *
    136 * This function initializes enough of the _PRCPU structure so
    137 * that it can be accessed safely by a global thread or another
    138 * CPU.  This function does not create the native thread that
    139 * will run the CPU nor does it initialize the parts of _PRCPU
    140 * that must be initialized by that native thread.
    141 *
    142 * The reason we cannot simply have the native thread create
    143 * and fully initialize a new CPU is that we need to be able to
    144 * create a usable _pr_primordialCPU in _PR_InitCPUs without
    145 * assuming that the primordial CPU thread we created can run
    146 * during NSPR initialization.  For example, on Windows while
    147 * new threads can be created by DllMain, they won't be able
    148 * to run during DLL initialization.  If NSPR is initialized
    149 * by DllMain, the primordial CPU thread won't run until DLL
    150 * initialization is finished.
    151 */
    152 static _PRCPU* _PR_CreateCPU(void) {
    153  _PRCPU* cpu;
    154 
    155  cpu = PR_NEWZAP(_PRCPU);
    156  if (cpu) {
    157    cpu->queue = _PR_CreateCPUQueue();
    158    if (!cpu->queue) {
    159      PR_DELETE(cpu);
    160      return NULL;
    161    }
    162  }
    163  return cpu;
    164 }
    165 
    166 /*
    167 * Start a new CPU.
    168 *
    169 * 'cpu' is a _PRCPU structure created by _PR_CreateCPU().
    170 * 'thread' is the native thread that will run the CPU.
    171 *
    172 * If this function fails, 'cpu' is destroyed.
    173 */
static PRStatus _PR_StartCPU(_PRCPU* cpu, PRThread* thread) {
  /*
  ** Start a new cpu. The assumption this code makes is that the
  ** underlying operating system creates a stack to go with the new
  ** native thread. That stack will be used by the cpu when pausing.
  */

  PR_ASSERT(!_native_threads_only);

  cpu->last_clock = PR_IntervalNow();

  /* Before we create any threads on this CPU we have to
   * set the current CPU
   */
  _PR_MD_SET_CURRENT_CPU(cpu);
  _PR_MD_INIT_RUNNING_CPU(cpu);
  thread->cpu = cpu;

  /* Local idle thread for this CPU; its body is _PR_CPU_Idle below. */
  cpu->idle_thread = _PR_CreateThread(
      PR_SYSTEM_THREAD, _PR_CPU_Idle, (void*)cpu, PR_PRIORITY_NORMAL,
      PR_LOCAL_THREAD, PR_UNJOINABLE_THREAD, 0, _PR_IDLE_THREAD);

  if (!cpu->idle_thread) {
    /* didn't clean up CPU queue XXXMB */
    /* NOTE(review): cpu->queue and its locks leak here — known issue
     * left as-is (matches the XXXMB marker above). */
    PR_DELETE(cpu);
    return PR_FAILURE;
  }
  PR_ASSERT(cpu->idle_thread->cpu == cpu);

  /* Allow the idle thread to be scheduled from now on. */
  cpu->idle_thread->no_sched = 0;

  cpu->thread = thread;

  if (_pr_cpu_affinity_mask) {
    PR_SetThreadAffinityMask(thread, _pr_cpu_affinity_mask);
  }

  /* Created and started a new CPU */
  /* Assign its id and publish it on the global CPU list under the lock. */
  _PR_CPU_LIST_LOCK();
  cpu->id = _pr_cpuID++;
  PR_APPEND_LINK(&cpu->links, &_PR_CPUQ());
  _PR_CPU_LIST_UNLOCK();

  return PR_SUCCESS;
}
    219 
    220 #  if !defined(_PR_GLOBAL_THREADS_ONLY) && !defined(_PR_LOCAL_THREADS_ONLY)
    221 /*
    222 ** This code is used during a cpu's initial creation.
    223 */
static void _PR_RunCPU(void* arg) {
  _PRCPU* cpu = (_PRCPU*)arg;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  PR_ASSERT(NULL != me);

  /*
   * _PR_StartCPU calls _PR_CreateThread to create the
   * idle thread.  Because _PR_CreateThread calls PR_Lock,
   * the current thread has to remain a global thread
   * during the _PR_StartCPU call so that it can wait for
   * the lock if the lock is held by another thread.  If
   * we clear the _PR_GLOBAL_SCOPE flag in
   * _PR_MD_CREATE_PRIMORDIAL_THREAD, the current thread
   * will be treated as a local thread and have trouble
   * waiting for the lock because the CPU is not fully
   * constructed yet.
   *
   * After the CPU is started, it is safe to mark the
   * current thread as a local thread.
   */

#    ifdef HAVE_CUSTOM_USER_THREADS
  _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
#    endif

  /* Keep this thread pinned while the CPU is still under construction. */
  me->no_sched = 1;
  /* NOTE(review): _PR_StartCPU's status is ignored; on failure it frees
   * 'cpu', which is still used below — confirm failure cannot occur. */
  _PR_StartCPU(cpu, me);

#    ifdef HAVE_CUSTOM_USER_THREADS
  /* CPU is started; safe to demote to a local thread (see comment above). */
  me->flags &= (~_PR_GLOBAL_SCOPE);
#    endif

  _PR_MD_SET_CURRENT_CPU(cpu);
  _PR_MD_SET_CURRENT_THREAD(cpu->thread);
  me->cpu = cpu;

  /* Hand control to the scheduler forever; this thread never returns. */
  while (1) {
    PRInt32 is;
    if (!_PR_IS_NATIVE_THREAD(me)) {
      _PR_INTSOFF(is);
    }
    _PR_MD_START_INTERRUPTS();
    _PR_MD_SWITCH_CONTEXT(me);
  }
}
    270 #  endif
    271 
/*
 * Body of a CPU's idle thread (created as a local thread by
 * _PR_StartCPU).  Each iteration: mark the CPU idle, decide how long it
 * may pause (no wait if the run queue is non-empty, otherwise until the
 * earliest sleeper's deadline or forever), pause in the MD layer, then
 * drive the clock interrupt and switch back to the scheduler.
 */
static void PR_CALLBACK _PR_CPU_Idle(void* _cpu) {
  _PRCPU* cpu = (_PRCPU*)_cpu;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  PR_ASSERT(NULL != me);

  me->cpu = cpu;
  cpu->idle_thread = me;
  if (_MD_LAST_THREAD()) {
    _MD_LAST_THREAD()->no_sched = 0;
  }
  if (!_PR_IS_NATIVE_THREAD(me)) {
    _PR_MD_SET_INTSOFF(0);
  }
  while (1) {
    PRInt32 is;
    PRIntervalTime timeout;
    if (!_PR_IS_NATIVE_THREAD(me)) {
      _PR_INTSOFF(is);
    }

    _PR_RUNQ_LOCK(cpu);
    /* Count this CPU as idle while it pauses.  Without atomic ops the
     * counter is guarded by a _MDLock (see comment at top of file). */
#  if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#    ifdef _PR_HAVE_ATOMIC_OPS
    _PR_MD_ATOMIC_INCREMENT(&_pr_md_idle_cpus);
#    else
    _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
    _pr_md_idle_cpus++;
    _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
#    endif /* _PR_HAVE_ATOMIC_OPS */
#  endif
    /* If someone on runq; do a nonblocking PAUSECPU */
    if (_PR_RUNQREADYMASK(me->cpu) != 0) {
      _PR_RUNQ_UNLOCK(cpu);
      timeout = PR_INTERVAL_NO_WAIT;
    } else {
      _PR_RUNQ_UNLOCK(cpu);

      /* Nothing runnable: sleep until the first sleeper is due, or
       * forever if the sleep queue is empty. */
      _PR_SLEEPQ_LOCK(cpu);
      if (PR_CLIST_IS_EMPTY(&_PR_SLEEPQ(me->cpu))) {
        timeout = PR_INTERVAL_NO_TIMEOUT;
      } else {
        PRThread* wakeThread;
        wakeThread = _PR_THREAD_PTR(_PR_SLEEPQ(me->cpu).next);
        timeout = wakeThread->sleep;
      }
      _PR_SLEEPQ_UNLOCK(cpu);
    }

    /* Wait for an IO to complete */
    (void)_PR_MD_PAUSE_CPU(timeout);

#  ifdef WINNT
    if (_pr_cpus_exit) {
      /* _PR_CleanupCPUs tells us to exit */
      _PR_MD_END_THREAD();
    }
#  endif

    /* No longer idle: undo the increment above. */
#  if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
#    ifdef _PR_HAVE_ATOMIC_OPS
    _PR_MD_ATOMIC_DECREMENT(&_pr_md_idle_cpus);
#    else
    _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
    _pr_md_idle_cpus--;
    _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
#    endif /* _PR_HAVE_ATOMIC_OPS */
#  endif

    _PR_ClockInterrupt();

    /* Now schedule any thread that is on the runq
     * INTS must be OFF when calling PR_Schedule()
     */
    me->state = _PR_RUNNABLE;
    _PR_MD_SWITCH_CONTEXT(me);
    if (!_PR_IS_NATIVE_THREAD(me)) {
      _PR_FAST_INTSON(is);
    }
  }
}
    353 #endif /* _PR_GLOBAL_THREADS_ONLY */
    354 
    355 PR_IMPLEMENT(void) PR_SetConcurrency(PRUintn numCPUs) {
    356 #if defined(_PR_GLOBAL_THREADS_ONLY) || defined(_PR_LOCAL_THREADS_ONLY)
    357 
    358  /* do nothing */
    359 
    360 #else /* combined, MxN thread model */
    361 
    362  PRUintn newCPU;
    363  _PRCPU* cpu;
    364  PRThread* thr;
    365 
    366  if (!_pr_initialized) {
    367    _PR_ImplicitInitialization();
    368  }
    369 
    370  if (_native_threads_only) {
    371    return;
    372  }
    373 
    374  _PR_CPU_LIST_LOCK();
    375  if (_pr_numCPU < numCPUs) {
    376    newCPU = numCPUs - _pr_numCPU;
    377    _pr_numCPU = numCPUs;
    378  } else {
    379    newCPU = 0;
    380  }
    381  _PR_CPU_LIST_UNLOCK();
    382 
    383  for (; newCPU; newCPU--) {
    384    cpu = _PR_CreateCPU();
    385    thr = _PR_CreateThread(PR_SYSTEM_THREAD, _PR_RunCPU, cpu,
    386                           PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
    387                           PR_UNJOINABLE_THREAD, 0, _PR_IDLE_THREAD);
    388  }
    389 #endif
    390 }
    391 
    392 PR_IMPLEMENT(_PRCPU*) _PR_GetPrimordialCPU(void) {
    393  if (_pr_primordialCPU) {
    394    return _pr_primordialCPU;
    395  } else {
    396    return _PR_MD_CURRENT_CPU();
    397  }
    398 }