tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ntthread.c (14457B)


      1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* This Source Code Form is subject to the terms of the Mozilla Public
      3 * License, v. 2.0. If a copy of the MPL was not distributed with this
      4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      5 
      6 #include "primpl.h"
      7 #include <process.h> /* for _beginthreadex() */
      8 
      9 /* --- globals ------------------------------------------------ */
     10 PRLock* _pr_schedLock = NULL;
     11 _PRInterruptTable _pr_interruptTable[] = {{0}};
     12 
     13 BOOL _pr_use_static_tls = TRUE;
     14 __declspec(thread) PRThread* _pr_current_fiber;
     15 __declspec(thread) PRThread* _pr_fiber_last_run;
     16 __declspec(thread) _PRCPU* _pr_current_cpu;
     17 __declspec(thread) PRUintn _pr_ints_off;
     18 DWORD _pr_currentFiberIndex;
     19 DWORD _pr_lastFiberIndex;
     20 DWORD _pr_currentCPUIndex;
     21 DWORD _pr_intsOffIndex;
     22 
     23 _MDLock _nt_idleLock;
     24 PRCList _nt_idleList;
     25 PRUint32 _nt_idleCount;
     26 
     27 extern __declspec(thread) PRThread* _pr_io_restarted_io;
     28 extern DWORD _pr_io_restartedIOIndex;
     29 
     30 typedef HRESULT(WINAPI* SETTHREADDESCRIPTION)(HANDLE, PCWSTR);
     31 static SETTHREADDESCRIPTION sSetThreadDescription = NULL;
     32 
/*
 * POST_SWITCH_WORK -- run immediately after every fiber/context switch.
 *
 * If the previously-running thread parked a suspended IO in the per-thread
 * _pr_io_restarted_io slot (static TLS or a TlsAlloc'd index, depending on
 * _pr_use_static_tls), hand it to _nt_handle_restarted_io() so it can be
 * requeued.  Must check the restarted_io *before* decrementing no_sched to 0:
 * once no_sched drops, the last thread becomes schedulable again.
 */
#define POST_SWITCH_WORK()                                                    \
  PR_BEGIN_MACRO                                                              \
  PRThread* restarted_io =                                                    \
      (_pr_use_static_tls ? _pr_io_restarted_io                               \
                          : (PRThread*)TlsGetValue(_pr_io_restartedIOIndex)); \
  if (restarted_io) {                                                         \
    _nt_handle_restarted_io(restarted_io);                                    \
  }                                                                           \
  _PR_MD_LAST_THREAD()->no_sched = 0;                                         \
  PR_END_MACRO
     44 
/*
 * Requeue a thread whose IO was suspended across a context switch.
 *
 * Called from POST_SWITCH_WORK with the thread stored in the per-thread
 * restarted-io TLS slot.  Depending on whether its IO has already completed,
 * the thread goes back on its CPU's run queue (runnable) or sleep queue
 * (still waiting), and the suspended/bound-CPU markers are cleared.
 * Lock order (thread lock, then runq/sleepq lock) must be preserved.
 */
void _nt_handle_restarted_io(PRThread* restarted_io) {
  /* After the switch we can resume an IO if needed.
   * XXXMB - this needs to be done in create thread, since that could
   * be the result for a context switch too..
   */
  PR_ASSERT(restarted_io->io_suspended == PR_TRUE);
  PR_ASSERT(restarted_io->md.thr_bound_cpu == restarted_io->cpu);

  _PR_THREAD_LOCK(restarted_io);
  if (restarted_io->io_pending == PR_FALSE) {
    /* The IO already completed, put us back on the runq. */
    int pri = restarted_io->priority;

    restarted_io->state = _PR_RUNNABLE;
    _PR_RUNQ_LOCK(restarted_io->cpu);
    _PR_ADD_RUNQ(restarted_io, restarted_io->cpu, pri);
    _PR_RUNQ_UNLOCK(restarted_io->cpu);
  } else {
    /* IO still pending: park the thread on the sleep queue until it fires. */
    _PR_SLEEPQ_LOCK(restarted_io->cpu);
    _PR_ADD_SLEEPQ(restarted_io, restarted_io->sleep);
    _PR_SLEEPQ_UNLOCK(restarted_io->cpu);
  }
  restarted_io->io_suspended = PR_FALSE;
  restarted_io->md.thr_bound_cpu = NULL;

  _PR_THREAD_UNLOCK(restarted_io);

  /* Clear the TLS slot so POST_SWITCH_WORK does not see this thread twice. */
  if (_pr_use_static_tls) {
    _pr_io_restarted_io = NULL;
  } else {
    TlsSetValue(_pr_io_restartedIOIndex, NULL);
  }
}
     78 
     79 void _PR_MD_EARLY_INIT() {
     80  HMODULE hModule;
     81 
     82  _MD_NEW_LOCK(&_nt_idleLock);
     83  _nt_idleCount = 0;
     84  PR_INIT_CLIST(&_nt_idleList);
     85 
     86 #if 0
     87    /* Make the clock tick at least once per millisecond */
     88    if ( timeBeginPeriod(1) == TIMERR_NOCANDO) {
     89        /* deep yoghurt; clock doesn't tick fast enough! */
     90        PR_ASSERT(0);
     91    }
     92 #endif
     93 
     94  if (!_pr_use_static_tls) {
     95    _pr_currentFiberIndex = TlsAlloc();
     96    _pr_lastFiberIndex = TlsAlloc();
     97    _pr_currentCPUIndex = TlsAlloc();
     98    _pr_intsOffIndex = TlsAlloc();
     99    _pr_io_restartedIOIndex = TlsAlloc();
    100  }
    101 
    102  // SetThreadDescription is Windows 10 build 1607+
    103  hModule = GetModuleHandleW(L"kernel32.dll");
    104  if (hModule) {
    105    sSetThreadDescription =
    106        (SETTHREADDESCRIPTION)GetProcAddress(hModule, "SetThreadDescription");
    107  }
    108 }
    109 
    110 void _PR_MD_CLEANUP_BEFORE_EXIT(void) {
    111  _PR_NT_FreeSids();
    112 
    113  WSACleanup();
    114 
    115  if (!_pr_use_static_tls) {
    116    TlsFree(_pr_currentFiberIndex);
    117    TlsFree(_pr_lastFiberIndex);
    118    TlsFree(_pr_currentCPUIndex);
    119    TlsFree(_pr_intsOffIndex);
    120    TlsFree(_pr_io_restartedIOIndex);
    121  }
    122 }
    123 
    124 PRStatus _PR_MD_INIT_THREAD(PRThread* thread) {
    125  thread->md.overlapped.ioModel = _MD_BlockingIO;
    126  thread->md.overlapped.data.mdThread = &thread->md;
    127 
    128  if (thread->flags & _PR_GLOBAL_SCOPE) {
    129    if (thread->flags & (_PR_PRIMORDIAL | _PR_ATTACHED)) {
    130      /*
    131      ** Warning:
    132      ** --------
    133      ** NSPR requires a real handle to every thread.
    134      ** GetCurrentThread() returns a pseudo-handle which
    135      ** is not suitable for some thread operations (e.g.,
    136      ** suspending).  Therefore, get a real handle from
    137      ** the pseudo handle via DuplicateHandle(...)
    138      */
    139      DuplicateHandle(GetCurrentProcess(),    /* Process of source handle */
    140                      GetCurrentThread(),     /* Pseudo Handle to dup */
    141                      GetCurrentProcess(),    /* Process of handle */
    142                      &(thread->md.handle),   /* resulting handle */
    143                      0L,                     /* access flags */
    144                      FALSE,                  /* Inheritable */
    145                      DUPLICATE_SAME_ACCESS); /* Options */
    146    }
    147 
    148    /* Create the blocking IO semaphore */
    149    thread->md.blocked_sema = CreateSemaphore(NULL, 0, 1, NULL);
    150    if (thread->md.blocked_sema == NULL) {
    151      return PR_FAILURE;
    152    }
    153    if (_native_threads_only) {
    154      /* Create the blocking IO semaphore */
    155      thread->md.thr_event = CreateEvent(NULL, TRUE, FALSE, NULL);
    156      if (thread->md.thr_event == NULL) {
    157        return PR_FAILURE;
    158      }
    159    }
    160  }
    161 
    162  return PR_SUCCESS;
    163 }
    164 
    165 static unsigned __stdcall pr_root(void* arg) {
    166  PRThread* thread = (PRThread*)arg;
    167  thread->md.start(thread);
    168  return 0;
    169 }
    170 
    171 PRStatus _PR_MD_CREATE_THREAD(PRThread* thread, void (*start)(void*),
    172                              PRThreadPriority priority, PRThreadScope scope,
    173                              PRThreadState state, PRUint32 stackSize) {
    174  thread->md.start = start;
    175  thread->md.handle = (HANDLE)_beginthreadex(
    176      NULL, thread->stack->stackSize, pr_root, (void*)thread,
    177      CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, &(thread->id));
    178  if (!thread->md.handle) {
    179    PRErrorCode prerror;
    180    thread->md.fiber_last_error = GetLastError();
    181    switch (errno) {
    182      case ENOMEM:
    183        prerror = PR_OUT_OF_MEMORY_ERROR;
    184        break;
    185      case EAGAIN:
    186        prerror = PR_INSUFFICIENT_RESOURCES_ERROR;
    187        break;
    188      case EINVAL:
    189        prerror = PR_INVALID_ARGUMENT_ERROR;
    190        break;
    191      default:
    192        prerror = PR_UNKNOWN_ERROR;
    193    }
    194    PR_SetError(prerror, errno);
    195    return PR_FAILURE;
    196  }
    197 
    198  thread->md.id = thread->id;
    199  /*
    200   * On windows, a thread is created with a thread priority of
    201   * THREAD_PRIORITY_NORMAL.
    202   */
    203  if (priority != PR_PRIORITY_NORMAL) {
    204    _PR_MD_SET_PRIORITY(&(thread->md), priority);
    205  }
    206 
    207  /* Activate the thread */
    208  if (ResumeThread(thread->md.handle) != -1) {
    209    return PR_SUCCESS;
    210  }
    211 
    212  PR_SetError(PR_UNKNOWN_ERROR, GetLastError());
    213  return PR_FAILURE;
    214 }
    215 
    216 void _PR_MD_JOIN_THREAD(_MDThread* md) {
    217  DWORD rv;
    218 
    219  rv = WaitForSingleObject(md->handle, INFINITE);
    220  PR_ASSERT(WAIT_OBJECT_0 == rv);
    221 }
    222 
    223 void _PR_MD_END_THREAD(void) { _endthreadex(0); }
    224 
    225 void _PR_MD_YIELD(void) {
    226  /* Can NT really yield at all? */
    227  Sleep(0);
    228 }
    229 
    230 void _PR_MD_SET_PRIORITY(_MDThread* thread, PRThreadPriority newPri) {
    231  int nativePri;
    232  BOOL rv;
    233 
    234  if (newPri < PR_PRIORITY_FIRST) {
    235    newPri = PR_PRIORITY_FIRST;
    236  } else if (newPri > PR_PRIORITY_LAST) {
    237    newPri = PR_PRIORITY_LAST;
    238  }
    239  switch (newPri) {
    240    case PR_PRIORITY_LOW:
    241      nativePri = THREAD_PRIORITY_BELOW_NORMAL;
    242      break;
    243    case PR_PRIORITY_NORMAL:
    244      nativePri = THREAD_PRIORITY_NORMAL;
    245      break;
    246    case PR_PRIORITY_HIGH:
    247      nativePri = THREAD_PRIORITY_ABOVE_NORMAL;
    248      break;
    249    case PR_PRIORITY_URGENT:
    250      nativePri = THREAD_PRIORITY_HIGHEST;
    251  }
    252  rv = SetThreadPriority(thread->handle, nativePri);
    253  PR_ASSERT(rv);
    254  if (!rv) {
    255    PR_LOG(_pr_thread_lm, PR_LOG_MIN,
    256           ("PR_SetThreadPriority: can't set thread priority\n"));
    257  }
    258  return;
    259 }
    260 
/* Magic exception code the Visual Studio debugger recognizes as a
 * "set thread name" request (see MSDN: "How to: Set a Thread Name"). */
const DWORD MS_VC_EXCEPTION = 0x406D1388;

/* Payload raised with MS_VC_EXCEPTION; the layout and 8-byte packing are
 * fixed by the debugger protocol and must not change. */
#pragma pack(push, 8)
typedef struct tagTHREADNAME_INFO {
  DWORD dwType;      // Must be 0x1000.
  LPCSTR szName;     // Pointer to name (in user addr space).
  DWORD dwThreadID;  // Thread ID (-1=caller thread).
  DWORD dwFlags;     // Reserved for future use, must be zero.
} THREADNAME_INFO;
#pragma pack(pop)
    271 
    272 void _PR_MD_SET_CURRENT_THREAD_NAME(const char* name) {
    273 #ifdef _MSC_VER
    274  THREADNAME_INFO info;
    275 #endif
    276 
    277  if (sSetThreadDescription) {
    278    WCHAR wideName[MAX_PATH];
    279    if (MultiByteToWideChar(CP_ACP, 0, name, -1, wideName, MAX_PATH)) {
    280      sSetThreadDescription(GetCurrentThread(), wideName);
    281    }
    282  }
    283 
    284 #ifdef _MSC_VER
    285  if (!IsDebuggerPresent()) {
    286    return;
    287  }
    288 
    289  info.dwType = 0x1000;
    290  info.szName = (char*)name;
    291  info.dwThreadID = -1;
    292  info.dwFlags = 0;
    293 
    294  __try {
    295    RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR),
    296                   (ULONG_PTR*)&info);
    297  } __except (EXCEPTION_CONTINUE_EXECUTION) {
    298  }
    299 #endif
    300 }
    301 
/*
 * Release a dead thread's machine-dependent resources: IO scratch buffers,
 * kernel synchronization objects, and the thread handle.  Local (fiber)
 * threads are then queued on the idle list instead of being freed here --
 * see the comment below for why.
 */
void _PR_MD_CLEAN_THREAD(PRThread* thread) {
  BOOL rv;

  /* Per-thread AcceptEx / TransmitFile scratch buffers, if ever allocated. */
  if (thread->md.acceptex_buf) {
    PR_DELETE(thread->md.acceptex_buf);
  }

  if (thread->md.xmit_bufs) {
    PR_DELETE(thread->md.xmit_bufs);
  }

  if (thread->md.blocked_sema) {
    rv = CloseHandle(thread->md.blocked_sema);
    PR_ASSERT(rv);
    thread->md.blocked_sema = 0;
  }
  if (_native_threads_only) {
    if (thread->md.thr_event) {
      rv = CloseHandle(thread->md.thr_event);
      PR_ASSERT(rv);
      thread->md.thr_event = 0;
    }
  }

  if (thread->md.handle) {
    rv = CloseHandle(thread->md.handle);
    PR_ASSERT(rv);
    thread->md.handle = 0;
  }

  /* Don't call DeleteFiber on current fiber or we'll kill the whole thread.
   * Don't call free(thread) until we've switched off the thread.
   * So put this fiber (or thread) on a list to be deleted by the idle
   * fiber next time we have a chance.
   */
  if (!(thread->flags & (_PR_ATTACHED | _PR_GLOBAL_SCOPE))) {
    _MD_LOCK(&_nt_idleLock);
    _nt_idleCount++;
    PR_APPEND_LINK(&thread->links, &_nt_idleList);
    _MD_UNLOCK(&_nt_idleLock);
  }
}
    344 
    345 void _PR_MD_EXIT_THREAD(PRThread* thread) {
    346  BOOL rv;
    347 
    348  if (thread->md.acceptex_buf) {
    349    PR_DELETE(thread->md.acceptex_buf);
    350  }
    351 
    352  if (thread->md.xmit_bufs) {
    353    PR_DELETE(thread->md.xmit_bufs);
    354  }
    355 
    356  if (thread->md.blocked_sema) {
    357    rv = CloseHandle(thread->md.blocked_sema);
    358    PR_ASSERT(rv);
    359    thread->md.blocked_sema = 0;
    360  }
    361 
    362  if (_native_threads_only) {
    363    if (thread->md.thr_event) {
    364      rv = CloseHandle(thread->md.thr_event);
    365      PR_ASSERT(rv);
    366      thread->md.thr_event = 0;
    367    }
    368  }
    369 
    370  if (thread->md.handle) {
    371    rv = CloseHandle(thread->md.handle);
    372    PR_ASSERT(rv);
    373    thread->md.handle = 0;
    374  }
    375 
    376  if (thread->flags & _PR_GLOBAL_SCOPE) {
    377    _MD_SET_CURRENT_THREAD(NULL);
    378  }
    379 }
    380 
    381 void _PR_MD_EXIT(PRIntn status) { _exit(status); }
    382 
    383 #ifdef HAVE_FIBERS
    384 
/*
 * Entry point for every fiber created by _PR_MD_INIT_CONTEXT.
 *
 * POST_SWITCH_WORK() must run first: this fiber was just switched to, so
 * any restarted IO left by the previous thread has to be requeued and the
 * last thread's no_sched flag cleared before user code runs.
 */
void _pr_fiber_mainline(void* unused) {
  PRThread* fiber = _PR_MD_CURRENT_THREAD();

  POST_SWITCH_WORK();

  /* Run the user start routine; fibers never return from here normally. */
  fiber->md.fiber_fn(fiber->md.fiber_arg);
}
    392 
    393 PRThread* _PR_MD_CREATE_USER_THREAD(PRUint32 stacksize, void (*start)(void*),
    394                                    void* arg) {
    395  PRThread* thread;
    396 
    397  if ((thread = PR_NEW(PRThread)) == NULL) {
    398    return NULL;
    399  }
    400 
    401  memset(thread, 0, sizeof(PRThread));
    402  thread->md.fiber_fn = start;
    403  thread->md.fiber_arg = arg;
    404  thread->md.fiber_stacksize = stacksize;
    405  return thread;
    406 }
    407 
    408 void _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(PRThread* thread) {
    409  thread->md.fiber_id = ConvertThreadToFiber(NULL);
    410  PR_ASSERT(thread->md.fiber_id);
    411  _MD_SET_CURRENT_THREAD(thread);
    412  _MD_SET_LAST_THREAD(thread);
    413  thread->no_sched = 1;
    414  return;
    415 }
    416 
    417 void _PR_MD_INIT_CONTEXT(PRThread* thread, char* top, void (*start)(void),
    418                         PRBool* status) {
    419  thread->md.fiber_fn = (void (*)(void*))start;
    420  thread->md.fiber_id =
    421      CreateFiber(thread->md.fiber_stacksize,
    422                  (LPFIBER_START_ROUTINE)_pr_fiber_mainline, NULL);
    423  if (thread->md.fiber_id != 0) {
    424    *status = PR_TRUE;
    425  } else {
    426    DWORD oserror = GetLastError();
    427    PRErrorCode prerror;
    428    if (oserror == ERROR_NOT_ENOUGH_MEMORY) {
    429      prerror = PR_OUT_OF_MEMORY_ERROR;
    430    } else {
    431      prerror = PR_UNKNOWN_ERROR;
    432    }
    433    PR_SetError(prerror, oserror);
    434    *status = PR_FALSE;
    435  }
    436 }
    437 
/*
 * Yield the current user-level (fiber) thread to the scheduler.
 *
 * The Win32 last-error value is per native thread, not per fiber, so it is
 * saved here and restored by _PR_MD_RESTORE_CONTEXT when this fiber is
 * switched back in.
 */
void _PR_MD_SWITCH_CONTEXT(PRThread* thread) {
  PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

  thread->md.fiber_last_error = GetLastError();
  _PR_Schedule();
}
    444 
/*
 * Switch execution to `thread`'s fiber, restoring its saved last-error
 * value and updating the current/last thread bookkeeping.  The statement
 * order here is load-bearing: TLS state and no_sched must be set before
 * SwitchToFiber, and POST_SWITCH_WORK runs when control eventually returns
 * to this fiber.
 */
void _PR_MD_RESTORE_CONTEXT(PRThread* thread) {
  PRThread* me = _PR_MD_CURRENT_THREAD();

  PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

  /* The user-level code for yielding will happily add ourselves to the runq
   * and then switch to ourselves; the NT fibers can't handle switching to
   * ourselves.
   */
  if (thread != me) {
    SetLastError(thread->md.fiber_last_error);
    _MD_SET_CURRENT_THREAD(thread);
    _PR_MD_SET_LAST_THREAD(me);
    thread->no_sched = 1;
    SwitchToFiber(thread->md.fiber_id);
    /* Control has come back to this fiber: requeue restarted IO and
     * re-enable scheduling of the thread we just switched away from. */
    POST_SWITCH_WORK();
  }
}
    463 
    464 #endif /* HAVE_FIBERS */
    465 
    466 PRInt32 _PR_MD_SETTHREADAFFINITYMASK(PRThread* thread, PRUint32 mask) {
    467  int rv;
    468 
    469  rv = SetThreadAffinityMask(thread->md.handle, mask);
    470 
    471  return rv ? 0 : -1;
    472 }
    473 
    474 PRInt32 _PR_MD_GETTHREADAFFINITYMASK(PRThread* thread, PRUint32* mask) {
    475  PRInt32 rv, system_mask;
    476 
    477  rv = GetProcessAffinityMask(GetCurrentProcess(), mask, &system_mask);
    478 
    479  return rv ? 0 : -1;
    480 }
    481 
    482 void _PR_MD_SUSPEND_CPU(_PRCPU* cpu) { _PR_MD_SUSPEND_THREAD(cpu->thread); }
    483 
    484 void _PR_MD_RESUME_CPU(_PRCPU* cpu) { _PR_MD_RESUME_THREAD(cpu->thread); }
    485 
    486 void _PR_MD_SUSPEND_THREAD(PRThread* thread) {
    487  if (_PR_IS_NATIVE_THREAD(thread)) {
    488    /*
    489    ** There seems to be some doubt about whether or not SuspendThread
    490    ** is a synchronous function. The test afterwards is to help veriry
    491    ** that it is, which is what Microsoft says it is.
    492    */
    493    PRUintn rv = SuspendThread(thread->md.handle);
    494    PR_ASSERT(0xffffffffUL != rv);
    495  }
    496 }
    497 
    498 void _PR_MD_RESUME_THREAD(PRThread* thread) {
    499  if (_PR_IS_NATIVE_THREAD(thread)) {
    500    ResumeThread(thread->md.handle);
    501  }
    502 }
    503 
    504 PRThread* _MD_CURRENT_THREAD(void) {
    505  PRThread* thread;
    506 
    507  thread = _MD_GET_ATTACHED_THREAD();
    508 
    509  if (NULL == thread) {
    510    thread = _PRI_AttachThread(PR_USER_THREAD, PR_PRIORITY_NORMAL, NULL, 0);
    511  }
    512  PR_ASSERT(thread != NULL);
    513  return thread;
    514 }