tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

pthreads_user.c (10908B)


      1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #include "primpl.h"
      7 #include <sys/types.h>
      8 #include <unistd.h>
      9 #include <signal.h>
     10 #include <pthread.h>
     11 
/* Signal set blocked while user-level threads run critical sections
   (filled in by _MD_EarlyInit with SIGALRM/SIGIO/SIGCLD). */
sigset_t ints_off;
/* Mutex initialized in _MD_InitLocks; presumably guards NSPR's private
   heap — named for it, actual users are outside this file. */
pthread_mutex_t _pr_heapLock;
/* Per-pthread TLS slots created in _MD_EarlyInit for the user-level
   scheduler's bookkeeping (current thread/CPU, last thread, ints-off). */
pthread_key_t current_thread_key;
pthread_key_t current_cpu_key;
pthread_key_t last_thread_key;
pthread_key_t intsoff_key;

/* Counters updated in _MD_CreateThread: native pthreads successfully
   created, creation failures, and the live count. */
PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
PRInt32 _pr_md_pthreads = 1; /* the primordial thread counts as one */
     21 
     22 void _MD_EarlyInit(void) {
     23  extern PRInt32 _nspr_noclock;
     24 
     25  if (pthread_key_create(&current_thread_key, NULL) != 0) {
     26    perror("pthread_key_create failed");
     27    exit(1);
     28  }
     29  if (pthread_key_create(&current_cpu_key, NULL) != 0) {
     30    perror("pthread_key_create failed");
     31    exit(1);
     32  }
     33  if (pthread_key_create(&last_thread_key, NULL) != 0) {
     34    perror("pthread_key_create failed");
     35    exit(1);
     36  }
     37  if (pthread_key_create(&intsoff_key, NULL) != 0) {
     38    perror("pthread_key_create failed");
     39    exit(1);
     40  }
     41 
     42  sigemptyset(&ints_off);
     43  sigaddset(&ints_off, SIGALRM);
     44  sigaddset(&ints_off, SIGIO);
     45  sigaddset(&ints_off, SIGCLD);
     46 
     47  /*
     48   * disable clock interrupts
     49   */
     50  _nspr_noclock = 1;
     51 }
     52 
     53 void _MD_InitLocks() {
     54  if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
     55    perror("pthread_mutex_init failed");
     56    exit(1);
     57  }
     58 }
     59 
     60 PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock* lockp) {
     61  PRIntn _is;
     62  PRThread* me = _PR_MD_CURRENT_THREAD();
     63 
     64  if (me && !_PR_IS_NATIVE_THREAD(me)) {
     65    _PR_INTSOFF(_is);
     66  }
     67  pthread_mutex_destroy(&lockp->mutex);
     68  if (me && !_PR_IS_NATIVE_THREAD(me)) {
     69    _PR_FAST_INTSON(_is);
     70  }
     71 }
     72 
     73 PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock* lockp) {
     74  PRStatus rv;
     75  PRIntn is;
     76  PRThread* me = _PR_MD_CURRENT_THREAD();
     77 
     78  if (me && !_PR_IS_NATIVE_THREAD(me)) {
     79    _PR_INTSOFF(is);
     80  }
     81  rv = pthread_mutex_init(&lockp->mutex, NULL);
     82  if (me && !_PR_IS_NATIVE_THREAD(me)) {
     83    _PR_FAST_INTSON(is);
     84  }
     85  return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
     86 }
     87 
/*
 * "Home" thread t's registers into its saved context so the garbage
 * collector can scan them for roots.  For the current thread, setjmp
 * spills the live callee-saved registers into CONTEXT(t) first; for any
 * other (already-suspended) thread the stored context is used as-is.
 *
 * *np receives the size of the context in PRWord units; the return
 * value is the context itself viewed as a PRWord array.
 */
PRWord* _MD_HomeGCRegisters(PRThread* t, int isCurrent, int* np) {
  if (isCurrent) {
    (void)setjmp(CONTEXT(t));
  }
  *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
  return (PRWord*)CONTEXT(t);
}
     95 
     96 PR_IMPLEMENT(void)
     97 _MD_SetPriority(_MDThread* thread, PRThreadPriority newPri) {
     98  /*
     99   * XXX - to be implemented
    100   */
    101  return;
    102 }
    103 
    104 PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread* thread) {
    105  struct sigaction sigact;
    106 
    107  if (thread->flags & _PR_GLOBAL_SCOPE) {
    108    thread->md.pthread = pthread_self();
    109 #if 0
    110        /*
    111         * set up SIGUSR1 handler; this is used to save state
    112         * during PR_SuspendAll
    113         */
    114        sigact.sa_handler = save_context_and_block;
    115        sigact.sa_flags = SA_RESTART;
    116        /*
    117         * Must mask clock interrupts
    118         */
    119        sigact.sa_mask = timer_set;
    120        sigaction(SIGUSR1, &sigact, 0);
    121 #endif
    122  }
    123 
    124  return PR_SUCCESS;
    125 }
    126 
    127 PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread* thread) {
    128  if (thread->flags & _PR_GLOBAL_SCOPE) {
    129    _MD_CLEAN_THREAD(thread);
    130    _MD_SET_CURRENT_THREAD(NULL);
    131  }
    132 }
    133 
    134 PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread* thread) {
    135  if (thread->flags & _PR_GLOBAL_SCOPE) {
    136    pthread_mutex_destroy(&thread->md.pthread_mutex);
    137    pthread_cond_destroy(&thread->md.pthread_cond);
    138  }
    139 }
    140 
    141 PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread* thread) {
    142  PRInt32 rv;
    143 
    144  PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) && _PR_IS_GCABLE_THREAD(thread));
    145 #if 0
    146    thread->md.suspending_id = getpid();
    147    rv = kill(thread->md.id, SIGUSR1);
    148    PR_ASSERT(rv == 0);
    149    /*
    150     * now, block the current thread/cpu until woken up by the suspended
    151     * thread from it's SIGUSR1 signal handler
    152     */
    153    blockproc(getpid());
    154 #endif
    155 }
    156 
    157 PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread* thread) {
    158  PRInt32 rv;
    159 
    160  PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) && _PR_IS_GCABLE_THREAD(thread));
    161 #if 0
    162    rv = unblockproc(thread->md.id);
    163 #endif
    164 }
    165 
    166 PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU* thread) {
    167  PRInt32 rv;
    168 
    169 #if 0
    170    cpu->md.suspending_id = getpid();
    171    rv = kill(cpu->md.id, SIGUSR1);
    172    PR_ASSERT(rv == 0);
    173    /*
    174     * now, block the current thread/cpu until woken up by the suspended
    175     * thread from it's SIGUSR1 signal handler
    176     */
    177    blockproc(getpid());
    178 #endif
    179 }
    180 
    181 PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU* thread) {
    182 #if 0
    183    unblockproc(cpu->md.id);
    184 #endif
    185 }
    186 
#define PT_NANOPERMICRO 1000UL
#define PT_BILLION 1000000000UL

/*
 * Block the calling native thread on its per-thread condition variable
 * until _MD_WakeupWaiter signals it or the timeout expires.
 *
 * thread  - target thread; md.pthread_mutex/md.pthread_cond must be
 *           initialized (done in _MD_CreateThread)
 * timeout - interval ticks to wait, or PR_INTERVAL_NO_TIMEOUT for an
 *           unbounded wait
 *
 * Returns PR_SUCCESS when woken (or when a wakeup was already pending),
 * PR_FAILURE when the wait timed out or failed.
 */
PR_IMPLEMENT(PRStatus)
_pt_wait(PRThread* thread, PRIntervalTime timeout) {
  int rv;
  struct timeval now;
  struct timespec tmo;
  PRUint32 ticks = PR_TicksPerSecond();

  if (timeout != PR_INTERVAL_NO_TIMEOUT) {
    /* Split the interval into whole seconds plus leftover ticks, then
       scale the leftover into nanoseconds (microsecond conversion of
       ticks * 1000). */
    tmo.tv_sec = timeout / ticks;
    tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
    tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO * tmo.tv_nsec);

    /* pthreads wants this in absolute time, off we go ... */
    (void)GETTIMEOFDAY(&now);
    /* that one's usecs, this one's nsecs - grrrr! */
    tmo.tv_sec += now.tv_sec;
    tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
    /* normalize so tv_nsec stays below one billion */
    tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
    tmo.tv_nsec %= PT_BILLION;
  }

  pthread_mutex_lock(&thread->md.pthread_mutex);
  /* md.wait is a wakeup counter: decrementing below zero means no
     wakeup is pending and we must block; otherwise a prior
     _MD_WakeupWaiter already accounted for us and we return at once. */
  thread->md.wait--;
  if (thread->md.wait < 0) {
    if (timeout != PR_INTERVAL_NO_TIMEOUT) {
      rv = pthread_cond_timedwait(&thread->md.pthread_cond,
                                  &thread->md.pthread_mutex, &tmo);
    } else
      rv = pthread_cond_wait(&thread->md.pthread_cond,
                             &thread->md.pthread_mutex);
    if (rv != 0) {
      /* timed out or failed: undo our decrement so the counter stays
         balanced against future wakeups */
      thread->md.wait++;
    }
  } else {
    rv = 0;
  }
  pthread_mutex_unlock(&thread->md.pthread_mutex);

  return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
    230 
/*
 * Put |thread| to sleep for up to |ticks|.  Global-scope (native)
 * threads block on their pthread condition variable via _pt_wait;
 * local threads just switch context to the next runnable thread.
 * Always reports PR_SUCCESS to the caller.
 */
PR_IMPLEMENT(PRStatus)
_MD_wait(PRThread* thread, PRIntervalTime ticks) {
  if (thread->flags & _PR_GLOBAL_SCOPE) {
    _MD_CHECK_FOR_EXIT();
    if (_pt_wait(thread, ticks) == PR_FAILURE) {
      _MD_CHECK_FOR_EXIT();
      /*
       * wait timed out
       */
      _PR_THREAD_LOCK(thread);
      if (thread->wait.cvar) {
        /*
         * The thread will remove itself from the waitQ
         * of the cvar in _PR_WaitCondVar
         */
        thread->wait.cvar = NULL;
        thread->state = _PR_RUNNING;
        _PR_THREAD_UNLOCK(thread);
      } else {
        /* NOTE(review): wait.cvar already cleared — presumably a waker
           raced with the timeout and a wakeup is in flight; absorb it
           with an untimed wait so the md.wait counter stays balanced.
           Confirm against _PR_WaitCondVar/_MD_WakeupWaiter. */
        _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
        _PR_THREAD_UNLOCK(thread);
      }
    }
  } else {
    _PR_MD_SWITCH_CONTEXT(thread);
  }
  return PR_SUCCESS;
}
    259 
    260 PR_IMPLEMENT(PRStatus)
    261 _MD_WakeupWaiter(PRThread* thread) {
    262  PRThread* me = _PR_MD_CURRENT_THREAD();
    263  PRInt32 pid, rv;
    264  PRIntn is;
    265 
    266  PR_ASSERT(_pr_md_idle_cpus >= 0);
    267  if (thread == NULL) {
    268    if (_pr_md_idle_cpus) {
    269      _MD_Wakeup_CPUs();
    270    }
    271  } else if (!_PR_IS_NATIVE_THREAD(thread)) {
    272    /*
    273     * If the thread is on my cpu's runq there is no need to
    274     * wakeup any cpus
    275     */
    276    if (!_PR_IS_NATIVE_THREAD(me)) {
    277      if (me->cpu != thread->cpu) {
    278        if (_pr_md_idle_cpus) {
    279          _MD_Wakeup_CPUs();
    280        }
    281      }
    282    } else {
    283      if (_pr_md_idle_cpus) {
    284        _MD_Wakeup_CPUs();
    285      }
    286    }
    287  } else {
    288    PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
    289    if (!_PR_IS_NATIVE_THREAD(me)) {
    290      _PR_INTSOFF(is);
    291    }
    292 
    293    pthread_mutex_lock(&thread->md.pthread_mutex);
    294    thread->md.wait++;
    295    rv = pthread_cond_signal(&thread->md.pthread_cond);
    296    PR_ASSERT(rv == 0);
    297    pthread_mutex_unlock(&thread->md.pthread_mutex);
    298 
    299    if (!_PR_IS_NATIVE_THREAD(me)) {
    300      _PR_FAST_INTSON(is);
    301    }
    302  }
    303  return PR_SUCCESS;
    304 }
    305 
    306 /* These functions should not be called for AIX */
    307 PR_IMPLEMENT(void)
    308 _MD_YIELD(void) { PR_NOT_REACHED("_MD_YIELD should not be called for AIX."); }
    309 
    310 PR_IMPLEMENT(PRStatus)
    311 _MD_CreateThread(PRThread* thread, void (*start)(void*),
    312                 PRThreadPriority priority, PRThreadScope scope,
    313                 PRThreadState state, PRUint32 stackSize) {
    314  PRIntn is;
    315  int rv;
    316  PRThread* me = _PR_MD_CURRENT_THREAD();
    317  pthread_attr_t attr;
    318 
    319  if (!_PR_IS_NATIVE_THREAD(me)) {
    320    _PR_INTSOFF(is);
    321  }
    322 
    323  if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
    324    if (!_PR_IS_NATIVE_THREAD(me)) {
    325      _PR_FAST_INTSON(is);
    326    }
    327    return PR_FAILURE;
    328  }
    329 
    330  if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
    331    pthread_mutex_destroy(&thread->md.pthread_mutex);
    332    if (!_PR_IS_NATIVE_THREAD(me)) {
    333      _PR_FAST_INTSON(is);
    334    }
    335    return PR_FAILURE;
    336  }
    337  thread->flags |= _PR_GLOBAL_SCOPE;
    338 
    339  pthread_attr_init(&attr); /* initialize attr with default attributes */
    340  if (pthread_attr_setstacksize(&attr, (size_t)stackSize) != 0) {
    341    pthread_mutex_destroy(&thread->md.pthread_mutex);
    342    pthread_cond_destroy(&thread->md.pthread_cond);
    343    pthread_attr_destroy(&attr);
    344    if (!_PR_IS_NATIVE_THREAD(me)) {
    345      _PR_FAST_INTSON(is);
    346    }
    347    return PR_FAILURE;
    348  }
    349 
    350  thread->md.wait = 0;
    351  rv = pthread_create(&thread->md.pthread, &attr, start, (void*)thread);
    352  if (0 == rv) {
    353    _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
    354    _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
    355    if (!_PR_IS_NATIVE_THREAD(me)) {
    356      _PR_FAST_INTSON(is);
    357    }
    358    return PR_SUCCESS;
    359  } else {
    360    pthread_mutex_destroy(&thread->md.pthread_mutex);
    361    pthread_cond_destroy(&thread->md.pthread_cond);
    362    pthread_attr_destroy(&attr);
    363    _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
    364    if (!_PR_IS_NATIVE_THREAD(me)) {
    365      _PR_FAST_INTSON(is);
    366    }
    367    PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
    368    return PR_FAILURE;
    369  }
    370 }
    371 
/*
 * Record the calling pthread as the one running |cpu|, and register the
 * read end of the MD wakeup pipe with the CPU's I/O machinery so that
 * a blocked select/poll can be interrupted.
 */
PR_IMPLEMENT(void)
_MD_InitRunningCPU(struct _PRCPU* cpu) {
  extern int _pr_md_pipefd[2];  /* wakeup pipe, created elsewhere */

  _MD_unix_init_running_cpu(cpu);
  cpu->md.pthread = pthread_self();
  if (_pr_md_pipefd[0] >= 0) {
    /* ensure the pipe fd falls inside the CPU's max-fd bound */
    _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
    /* select()-based builds also add it to the read fd_set */
    FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
#endif
  }
}
    385 
/*
 * Pre-exit cleanup hook.  The entire body is IRIX-era multi-CPU and
 * recycled-native-thread teardown that has been disabled with #if 0,
 * so on this platform the function is a no-op.  Kept for reference.
 */
void _MD_CleanupBeforeExit(void) {
#if 0
   extern PRInt32    _pr_cpus_exit;

   _pr_irix_exit_now = 1;
   if (_pr_numCPU > 1) {
       /*
        * Set a global flag, and wakeup all cpus which will notice the flag
        * and exit.
        */
       _pr_cpus_exit = getpid();
       _MD_Wakeup_CPUs();
       while(_pr_numCPU > 1) {
           _PR_WAIT_SEM(_pr_irix_exit_sem);
           _pr_numCPU--;
       }
   }
   /*
    * cause global threads on the recycle list to exit
    */
   _PR_DEADQ_LOCK;
   if (_PR_NUM_DEADNATIVE != 0) {
       PRThread *thread;
       PRCList *ptr;

       ptr = _PR_DEADNATIVEQ.next;
       while( ptr != &_PR_DEADNATIVEQ ) {
           thread = _PR_THREAD_PTR(ptr);
           _MD_CVAR_POST_SEM(thread);
           ptr = ptr->next;
       }
   }
   _PR_DEADQ_UNLOCK;
   while(_PR_NUM_DEADNATIVE > 1) {
       _PR_WAIT_SEM(_pr_irix_exit_sem);
       _PR_DEC_DEADNATIVE;
   }
#endif
}