WasmProcess.cpp (6986B)
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2017 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmProcess.h"

#include "gc/Memory.h"
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#include "vm/Runtime.h"
#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmStaticTypeDefs.h"

using namespace js;
using namespace wasm;

// Process-wide flag: set to true when a non-empty CodeBlock is registered
// (see RegisterCodeBlock below) and cleared when the number of registered
// blocks drops back to zero (see UnregisterCodeBlock).
mozilla::Atomic<bool> wasm::CodeExists(false);

// Per-process map from values of program-counter (pc) to CodeBlocks.
//
// Whenever a new CodeBlock is ready to use, it has to be registered so that
// we can have fast lookups from pc to CodeBlocks in numerous places. Since
// wasm compilation may be tiered, and the second tier doesn't have access to
// any JSContext/JS::Compartment/etc lying around, we have to use a process-wide
// map instead.

// This field is only atomic to handle buggy scenarios where we crash during
// startup or shutdown and thus racily perform wasm::LookupCodeBlock() from
// the crashing thread.

static mozilla::Atomic<ThreadSafeCodeBlockMap*> sThreadSafeCodeBlockMap(
    nullptr);

// Register a CodeBlock in the process-wide pc -> CodeBlock map so stack
// walkers and signal handlers can find it. Empty blocks are ignored.
// Returns false if the map insertion failed (OOM).
bool wasm::RegisterCodeBlock(const CodeBlock* cs) {
  if (cs->length() == 0) {
    return true;
  }

  // This function cannot race with startup/shutdown.
  ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap;
  MOZ_RELEASE_ASSERT(map);
  bool result = map->insert(cs);
  if (result) {
    CodeExists = true;
  }
  return result;
}

// Remove a previously-registered CodeBlock; clears CodeExists when the last
// registered block is removed.
void wasm::UnregisterCodeBlock(const CodeBlock* cs) {
  if (cs->length() == 0) {
    return;
  }

  // This function cannot race with startup/shutdown.
  ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap;
  MOZ_RELEASE_ASSERT(map);
  size_t newCount = map->remove(cs);
  if (newCount == 0) {
    // No wasm code blocks remain registered anywhere in the process.
    CodeExists = false;
  }
}

// Look up the CodeBlock (and optionally the CodeRange) containing pc.
// Unlike Register/Unregister, this may race with startup/shutdown from a
// crashing thread, hence the null check rather than an assertion.
const CodeBlock* wasm::LookupCodeBlock(
    const void* pc, const CodeRange** codeRange /*= nullptr */) {
  ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap;
  if (!map) {
    return nullptr;
  }

  return map->lookup(pc, codeRange);
}

// Convenience wrapper over LookupCodeBlock that returns the owning Code.
const Code* wasm::LookupCode(const void* pc,
                             const CodeRange** codeRange /* = nullptr */) {
  const CodeBlock* found = LookupCodeBlock(pc, codeRange);
  MOZ_ASSERT_IF(!found && codeRange, !*codeRange);
  return found ? found->code : nullptr;
}

// True if pc falls inside registered wasm code or a builtin thunk.
bool wasm::InCompiledCode(void* pc) {
  if (LookupCodeBlock(pc)) {
    return true;
  }

  const CodeRange* codeRange;
  const uint8_t* codeBase;
  return LookupBuiltinThunk(pc, &codeRange, &codeBase);
}

#ifdef WASM_SUPPORTS_HUGE_MEMORY
#  if defined(__riscv)
// On riscv64, Sv39 is not enough for huge memory, so we require at least Sv48.
static const size_t MinAddressBitsForHugeMemory = 47;
#  else
/*
 * Some 64 bit systems greatly limit the range of available virtual memory. We
 * require about 6GiB for each wasm huge memory, which can exhaust the address
 * spaces of these systems quickly.
In order to avoid this, we only enable huge 117 * memory if we observe a large enough address space. 118 * 119 * This number is conservatively chosen to continue using huge memory on our 120 * smallest address space system, Android on ARM64 (39 bits), along with a bit 121 * for error in detecting the address space limit. 122 */ 123 static const size_t MinAddressBitsForHugeMemory = 38; 124 # endif 125 126 /* 127 * In addition to the above, some systems impose an independent limit on the 128 * amount of virtual memory that may be used. 129 */ 130 static const size_t MinVirtualMemoryLimitForHugeMemory = 131 size_t(1) << MinAddressBitsForHugeMemory; 132 #endif 133 134 static bool sHugeMemoryEnabled32 = false; 135 136 bool wasm::IsHugeMemoryEnabled(wasm::AddressType t, wasm::PageSize sz) { 137 if (t == AddressType::I64 || sz != wasm::PageSize::Standard) { 138 // No support for huge memory with 64-bit memories or custom page sizes. 139 return false; 140 } 141 return sHugeMemoryEnabled32; 142 } 143 144 void ConfigureHugeMemory() { 145 #ifdef WASM_SUPPORTS_HUGE_MEMORY 146 MOZ_ASSERT(!sHugeMemoryEnabled32); 147 148 if (JS::Prefs::wasm_disable_huge_memory()) { 149 return; 150 } 151 152 if (gc::SystemAddressBits() < MinAddressBitsForHugeMemory) { 153 return; 154 } 155 156 if (gc::VirtualMemoryLimit() != size_t(-1) && 157 gc::VirtualMemoryLimit() < MinVirtualMemoryLimitForHugeMemory) { 158 return; 159 } 160 161 sHugeMemoryEnabled32 = true; 162 #endif 163 } 164 165 const TagType* wasm::sWrappedJSValueTagType = nullptr; 166 167 static bool InitTagForJSValue() { 168 MutableTagType type = js_new<TagType>(); 169 if (!type || !type->initialize(StaticTypeDefs::jsTag)) { 170 return false; 171 } 172 MOZ_ASSERT(WrappedJSValueTagType_ValueOffset == type->argOffsets()[0]); 173 174 type.forget(&sWrappedJSValueTagType); 175 176 return true; 177 } 178 179 bool wasm::Init() { 180 MOZ_RELEASE_ASSERT(!sThreadSafeCodeBlockMap); 181 182 // Assert invariants that should universally hold true, but 
cannot be checked 183 // at compile time. 184 uintptr_t pageSize = gc::SystemPageSize(); 185 MOZ_RELEASE_ASSERT(wasm::NullPtrGuardSize <= pageSize); 186 MOZ_RELEASE_ASSERT(intptr_t(nullptr) == AnyRef::NullRefValue); 187 188 ConfigureHugeMemory(); 189 190 AutoEnterOOMUnsafeRegion oomUnsafe; 191 ThreadSafeCodeBlockMap* map = js_new<ThreadSafeCodeBlockMap>(); 192 if (!map) { 193 oomUnsafe.crash("js::wasm::Init"); 194 } 195 196 if (!StaticTypeDefs::init()) { 197 oomUnsafe.crash("js::wasm::Init"); 198 } 199 200 // This uses StaticTypeDefs 201 if (!BuiltinModuleFuncs::init()) { 202 oomUnsafe.crash("js::wasm::Init"); 203 } 204 205 sThreadSafeCodeBlockMap = map; 206 207 if (!InitTagForJSValue()) { 208 oomUnsafe.crash("js::wasm::Init"); 209 } 210 211 return true; 212 } 213 214 void wasm::ShutDown() { 215 // If there are live runtimes then we are already pretty much leaking the 216 // world, so to avoid spurious assertions (which are valid and valuable when 217 // there are not live JSRuntimes), don't bother releasing anything here. 218 if (JSRuntime::hasLiveRuntimes()) { 219 return; 220 } 221 222 BuiltinModuleFuncs::destroy(); 223 StaticTypeDefs::destroy(); 224 PurgeCanonicalTypes(); 225 226 if (sWrappedJSValueTagType) { 227 sWrappedJSValueTagType->Release(); 228 sWrappedJSValueTagType = nullptr; 229 } 230 231 // After signalling shutdown by clearing sThreadSafeCodeBlockMap, wait for 232 // concurrent wasm::LookupCodeBlock()s to finish. 233 ThreadSafeCodeBlockMap* map = sThreadSafeCodeBlockMap; 234 MOZ_RELEASE_ASSERT(map); 235 sThreadSafeCodeBlockMap = nullptr; 236 while (map->numActiveLookups() > 0) { 237 } 238 239 ReleaseBuiltinThunks(); 240 js_delete(map); 241 }