regexp-stack.cc (6365B)
// Copyright 2009 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "irregexp/imported/regexp-stack.h"


namespace v8 {
namespace internal {

// Scope object guarding a use of the isolate's RegExpStack: records the
// stack-pointer delta on entry so the destructor can verify the stack was
// unwound back to the same depth.
RegExpStackScope::RegExpStackScope(Isolate* isolate)
    : regexp_stack_(isolate->regexp_stack()),
      old_sp_top_delta_(regexp_stack_->sp_top_delta()) {
  DCHECK(regexp_stack_->IsValid());
}

RegExpStackScope::~RegExpStackScope() {
  // The stack pointer must be back where it was when the scope was entered.
  CHECK_EQ(old_sp_top_delta_, regexp_stack_->sp_top_delta());
  regexp_stack_->ResetIfEmpty();
}

RegExpStack::RegExpStack() : thread_local_(this) {}

RegExpStack::~RegExpStack() { thread_local_.FreeAndInvalidate(); }

#ifndef COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER

// static
RegExpStack* RegExpStack::New() {
#ifdef V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
  // TODO(426514762): RegExpStack objects must currently be accessible to
  // sandboxed code (which is unsafe). As such we need to register them as
  // sandbox extension memory, which requires allocating them on full OS pages.
  VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
  // The object must fit within the single page allocated below.
  CHECK_LT(sizeof(RegExpStack), vas->allocation_granularity());
  Address regexp_stack_memory = vas->AllocatePages(
      VirtualAddressSpace::kNoHint, vas->allocation_granularity(),
      vas->allocation_granularity(), PagePermissions::kReadWrite);
  SandboxHardwareSupport::RegisterUnsafeSandboxExtensionMemory(
      regexp_stack_memory, vas->allocation_granularity());
  // Placement-new into the freshly mapped page; paired with the explicit
  // destructor call in Delete() below.
  return new (reinterpret_cast<void*>(regexp_stack_memory)) RegExpStack();
#else
  return new RegExpStack();
#endif  // V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
}

// static
void RegExpStack::Delete(RegExpStack* instance) {
#ifdef V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
  // TODO(426514762): we currently allocate RegExpStack objects on full pages.
  // Explicit destructor call because the instance was created via placement
  // new in New(); the backing page is then returned to the address space.
  instance->~RegExpStack();
  VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
  Address page = reinterpret_cast<Address>(instance);
  DCHECK(IsAligned(page, vas->allocation_granularity()));
  vas->FreePages(page, vas->allocation_granularity());
#else
  delete instance;
#endif
}

#endif  // !COMPILING_IRREGEXP_FOR_EXTERNAL_EMBEDDER

// Copies this thread's stack state (the ThreadLocal struct) out to `to` for
// thread archival, then resets this stack to a fresh per-instance state.
// Returns the position one past the archived data.
char* RegExpStack::ArchiveStack(char* to) {
  if (!thread_local_.owns_memory_) {
    // Force dynamic stacks prior to archiving. Any growth will do. A dynamic
    // stack is needed because stack archival & restoration rely on `memory_`
    // pointing at a fixed-location backing store, whereas the static stack is
    // tied to a RegExpStack instance.
    EnsureCapacity(thread_local_.memory_size_ + 1);
    DCHECK(thread_local_.owns_memory_);
  }

  MemCopy(reinterpret_cast<void*>(to), &thread_local_, kThreadLocalSize);
  thread_local_ = ThreadLocal(this);
  return to + kThreadLocalSize;
}

// Restores thread-local stack state previously written by ArchiveStack().
// Returns the position one past the consumed data.
char* RegExpStack::RestoreStack(char* from) {
  MemCopy(&thread_local_, reinterpret_cast<void*>(from), kThreadLocalSize);
  return from + kThreadLocalSize;
}

// Releases any dynamically allocated backing store and points the
// thread-local state back at the in-object static stack.
void RegExpStack::ThreadLocal::ResetToStaticStack(RegExpStack* regexp_stack) {
  DeleteDynamicStack();

  memory_ = regexp_stack->static_stack_;
  memory_top_ = regexp_stack->static_stack_ + kStaticStackSize;
  memory_size_ = kStaticStackSize;
  // The stack pointer starts at the top; the limit sits kStackLimitSlackSize
  // above the bottom of the backing store.
  stack_pointer_ = memory_top_;
  limit_ = reinterpret_cast<Address>(regexp_stack->static_stack_) +
           kStackLimitSlackSize;
  owns_memory_ = false;
}

void RegExpStack::ThreadLocal::FreeAndInvalidate() {
  DeleteDynamicStack();

  // This stack may not be used after being freed. Just reset to invalid values
  // to ensure we don't accidentally use old memory areas.
  memory_ = nullptr;
  memory_top_ = nullptr;
  memory_size_ = 0;
  stack_pointer_ = nullptr;
  limit_ = kMemoryTop;
}

// static
// Allocates a new dynamic backing store of (at least) `size` bytes.
uint8_t* RegExpStack::ThreadLocal::NewDynamicStack(size_t size) {
#ifdef V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
  // Stack memory must be accessible to sandboxed code, so we must register it
  // as sandbox extension memory. As such, we need to allocate full OS pages.
  // TODO(426514762): determine if stack memory is always safe to be
  // corrupted by an attacker. If so, consider moving it into the sandbox.
  // TODO(426514762): if we're anyway switching this to full OS pages, would
  // there be a benefit from adding guard regions around the stack memory to
  // catch stack overflows and similar bugs?
  VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
  size_t allocation_size = RoundUp(size, vas->allocation_granularity());
  uint8_t* new_memory = reinterpret_cast<uint8_t*>(vas->AllocatePages(
      VirtualAddressSpace::kNoHint, allocation_size,
      vas->allocation_granularity(), PagePermissions::kReadWrite));
  SandboxHardwareSupport::RegisterUnsafeSandboxExtensionMemory(
      reinterpret_cast<Address>(new_memory), allocation_size);
#else
  uint8_t* new_memory = NewArray<uint8_t>(size);
#endif  // V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
  return new_memory;
}

// Frees the dynamic backing store, if this thread-local state owns one
// (no-op while still on the static stack, i.e. owns_memory_ == false).
// Must mirror the allocation path taken in NewDynamicStack().
void RegExpStack::ThreadLocal::DeleteDynamicStack() {
  if (owns_memory_) {
#ifdef V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
    VirtualAddressSpace* vas = GetPlatformVirtualAddressSpace();
    size_t allocation_size =
        RoundUp(memory_size_, vas->allocation_granularity());
    vas->FreePages(reinterpret_cast<Address>(memory_), allocation_size);
#else
    DeleteArray(memory_);
#endif  // V8_ENABLE_SANDBOX_HARDWARE_SUPPORT
  }
}

// Grows the stack to at least `size` bytes (but no less than
// kMinimumDynamicStackSize), moving the live contents to the top of the new
// backing store. Returns the (possibly new) memory top, or kNullAddress if
// `size` exceeds kMaximumStackSize.
Address RegExpStack::EnsureCapacity(size_t size) {
  if (size > kMaximumStackSize) return kNullAddress;
  if (thread_local_.memory_size_ < size) {
    if (size < kMinimumDynamicStackSize) size = kMinimumDynamicStackSize;
    uint8_t* new_memory = ThreadLocal::NewDynamicStack(size);
    if (thread_local_.memory_size_ > 0) {
      // Copy original memory into top of new memory.
      MemCopy(new_memory + size - thread_local_.memory_size_,
              thread_local_.memory_, thread_local_.memory_size_);
      thread_local_.DeleteDynamicStack();
    }
    // Preserve the stack pointer's offset from the top across the move; the
    // delta must be captured before memory_top_ is updated.
    ptrdiff_t delta = sp_top_delta();
    thread_local_.memory_ = new_memory;
    thread_local_.memory_top_ = new_memory + size;
    thread_local_.memory_size_ = size;
    thread_local_.stack_pointer_ = thread_local_.memory_top_ + delta;
    thread_local_.limit_ =
        reinterpret_cast<Address>(new_memory) + kStackLimitSlackSize;
    thread_local_.owns_memory_ = true;
  }
  return reinterpret_cast<Address>(thread_local_.memory_top_);
}


}  // namespace internal
}  // namespace v8