ModuloBuffer.h (26575B)
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ 2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */ 3 /* This Source Code Form is subject to the terms of the Mozilla Public 4 * License, v. 2.0. If a copy of the MPL was not distributed with this 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 7 #ifndef ModuloBuffer_h 8 #define ModuloBuffer_h 9 10 #include "mozilla/leb128iterator.h" 11 #include "mozilla/Maybe.h" 12 #include "mozilla/MemoryReporting.h" 13 #include "mozilla/NotNull.h" 14 #include "mozilla/PowerOfTwo.h" 15 #include "mozilla/ProfileBufferEntrySerialization.h" 16 #include "mozilla/UniquePtr.h" 17 18 #include <functional> 19 #include <iterator> 20 #include <limits> 21 #include <type_traits> 22 23 namespace mozilla { 24 25 // The ModuloBuffer class is a circular buffer that holds raw byte values, with 26 // data-read/write helpers. 27 // 28 // OffsetT: Type of the internal offset into the buffer of bytes, it should be 29 // large enough to access all bytes of the buffer. It will also be used as 30 // Length (in bytes) of the buffer and of any subset. Default uint32_t 31 // IndexT: Type of the external index, it should be large enough that overflows 32 // should not happen during the lifetime of the ModuloBuffer. 33 // 34 // The basic usage is to create an iterator-like object with `ReaderAt(Index)` 35 // or `WriterAt(Index)`, and use it to read/write data blobs. Iterators 36 // automatically manage the wrap-around (through "Modulo", which is effectively 37 // an AND-masking with the PowerOfTwo buffer size.) 38 // 39 // There is zero safety: No thread safety, no checks that iterators may be 40 // overwriting data that's still to be read, etc. It's up to the caller to add 41 // adequate checks. 42 // The intended use is as an underlying buffer for a safer container. 
template <typename OffsetT = uint32_t, typename IndexT = uint64_t>
class ModuloBuffer {
 public:
  using Byte = uint8_t;
  static_assert(sizeof(Byte) == 1, "ModuloBuffer::Byte must be 1 byte");
  using Offset = OffsetT;
  static_assert(!std::numeric_limits<Offset>::is_signed,
                "ModuloBuffer::Offset must be an unsigned integral type");
  using Length = Offset;
  using Index = IndexT;
  static_assert(!std::numeric_limits<Index>::is_signed,
                "ModuloBuffer::Index must be an unsigned integral type");
  static_assert(sizeof(Index) >= sizeof(Offset),
                "ModuloBuffer::Index size must >= Offset");

  // Create a buffer of the given length.
  explicit ModuloBuffer(PowerOfTwo<Length> aLength)
      : mMask(aLength.Mask()),
        mBuffer(WrapNotNull(new Byte[aLength.Value()])),
        mBufferDeleter([](Byte* aBuffer) { delete[] aBuffer; }) {}

  // Take ownership of an existing buffer. Existing contents is ignored.
  // Done by extracting the raw pointer from UniquePtr<Byte[]>, and adding
  // an equivalent `delete[]` in `mBufferDeleter`.
  ModuloBuffer(UniquePtr<Byte[]> aExistingBuffer, PowerOfTwo<Length> aLength)
      : mMask(aLength.Mask()),
        mBuffer(WrapNotNull(aExistingBuffer.release())),
        mBufferDeleter([](Byte* aBuffer) { delete[] aBuffer; }) {}

  // Use an externally-owned buffer. Existing contents is ignored.
  // Note: No `mBufferDeleter` is installed, so this ModuloBuffer never frees
  // `aExternalBuffer`; the caller retains ownership.
  ModuloBuffer(Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
      : mMask(aLength.Mask()), mBuffer(WrapNotNull(aExternalBuffer)) {}

  // Disallow copying, as we may uniquely own the resource.
  ModuloBuffer(const ModuloBuffer& aOther) = delete;
  ModuloBuffer& operator=(const ModuloBuffer& aOther) = delete;

  // Allow move-construction. Stealing ownership if the original had it.
  // This effectively prevents copy construction, and all assignments; needed so
  // that a ModuloBuffer may be initialized from a separate construction.
  // The moved-from ModuloBuffer still points at the resource but doesn't own
  // it, so it won't try to free it; but accesses are not guaranteed, so it
  // should not be used anymore.
  ModuloBuffer(ModuloBuffer&& aOther)
      : mMask(std::move(aOther.mMask)),
        mBuffer(std::move(aOther.mBuffer)),
        mBufferDeleter(std::move(aOther.mBufferDeleter)) {
    // The above move leaves `aOther.mBufferDeleter` in a valid state but with
    // an unspecified value, so it could theoretically still contain the
    // original function, which would be bad because we don't want aOther to
    // delete the resource that `this` now owns.
    if (aOther.mBufferDeleter) {
      // `aOther` still had a non-empty deleter, reset it.
      aOther.mBufferDeleter = nullptr;
    }
  }

  // Disallow assignment, as we have some `const` members.
  ModuloBuffer& operator=(ModuloBuffer&& aOther) = delete;

  // Destructor, deletes the resource if we uniquely own it.
  ~ModuloBuffer() {
    if (mBufferDeleter) {
      mBufferDeleter(mBuffer);
    }
  }

  // Length of the buffer in bytes. Always a power of two, reconstructed from
  // the stored mask (mask value + 1).
  PowerOfTwo<Length> BufferLength() const {
    return PowerOfTwo<Length>(mMask.MaskValue() + 1);
  }

  // Size of external resources.
  // Note: `mBufferDeleter`'s potential external data (for its captures) is not
  // included, as it's hidden in the `std::function` implementation.
  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    if (!mBufferDeleter) {
      // If we don't have a buffer deleter, assume we don't own the data, so
      // it's probably on the stack, or should be reported by its owner.
      return 0;
    }
    return aMallocSizeOf(mBuffer);
  }

  // As above, but also counting this object itself.
  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  // Return an entry reader over the byte range [aStart, aEnd[, made of one
  // span, or two spans if the range wraps around the end of the buffer.
  // `aBlockIndex`/`aNextBlockIndex` are forwarded to the reader as-is.
  ProfileBufferEntryReader EntryReaderFromTo(
      Index aStart, Index aEnd, ProfileBufferBlockIndex aBlockIndex,
      ProfileBufferBlockIndex aNextBlockIndex) const {
    using EntrySpan = Span<const ProfileBufferEntryReader::Byte>;
    if (aStart == aEnd) {
      return ProfileBufferEntryReader{};
    }
    // Don't allow over-wrapping.
    MOZ_ASSERT(aEnd - aStart <= mMask.MaskValue() + 1);
    // Start offset in 0 .. (buffer size - 1)
    Offset start = static_cast<Offset>(aStart) & mMask;
    // End offset in 1 .. (buffer size)
    Offset end = (static_cast<Offset>(aEnd - 1) & mMask) + 1;
    if (start < end) {
      // Segment doesn't cross buffer threshold, one span is enough.
      return ProfileBufferEntryReader{EntrySpan(&mBuffer[start], end - start),
                                      aBlockIndex, aNextBlockIndex};
    }
    // Segment crosses buffer threshold, we need one span until the end and one
    // span restarting at the beginning of the buffer.
    return ProfileBufferEntryReader{
        EntrySpan(&mBuffer[start], mMask.MaskValue() + 1 - start),
        EntrySpan(&mBuffer[0], end), aBlockIndex, aNextBlockIndex};
  }

  // Return an entry writer for the given range.
  ProfileBufferEntryWriter EntryWriterFromTo(Index aStart, Index aEnd) const {
    using EntrySpan = Span<ProfileBufferEntryReader::Byte>;
    if (aStart == aEnd) {
      return ProfileBufferEntryWriter{};
    }
    // Don't allow over-wrapping.
    MOZ_ASSERT(aEnd - aStart <= mMask.MaskValue() + 1);
    // Start offset in 0 .. (buffer size - 1)
    Offset start = static_cast<Offset>(aStart) & mMask;
    // End offset in 1 .. (buffer size)
    Offset end = (static_cast<Offset>(aEnd - 1) & mMask) + 1;
    if (start < end) {
      // Segment doesn't cross buffer threshold, one span is enough.
      return ProfileBufferEntryWriter{
          EntrySpan(&mBuffer[start], end - start),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd)};
    }
    // Segment crosses buffer threshold, we need one span until the end and one
    // span restarting at the beginning of the buffer.
    return ProfileBufferEntryWriter{
        EntrySpan(&mBuffer[start], mMask.MaskValue() + 1 - start),
        EntrySpan(&mBuffer[0], end),
        ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
        ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd)};
  }

  // Emplace an entry writer into `aMaybeEntryWriter` for the given range.
  // Leaves `aMaybeEntryWriter` as Nothing when the range is empty.
  void EntryWriterFromTo(Maybe<ProfileBufferEntryWriter>& aMaybeEntryWriter,
                         Index aStart, Index aEnd) const {
    MOZ_ASSERT(aMaybeEntryWriter.isNothing(),
               "Reference entry writer should be Nothing.");
    using EntrySpan = Span<ProfileBufferEntryReader::Byte>;
    if (aStart == aEnd) {
      return;
    }
    // Don't allow over-wrapping.
    MOZ_ASSERT(aEnd - aStart <= mMask.MaskValue() + 1);
    // Start offset in 0 .. (buffer size - 1)
    Offset start = static_cast<Offset>(aStart) & mMask;
    // End offset in 1 .. (buffer size)
    Offset end = (static_cast<Offset>(aEnd - 1) & mMask) + 1;
    if (start < end) {
      // Segment doesn't cross buffer threshold, one span is enough.
      aMaybeEntryWriter.emplace(
          EntrySpan(&mBuffer[start], end - start),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd));
    } else {
      // Segment crosses buffer threshold, we need one span until the end and
      // one span restarting at the beginning of the buffer.
      aMaybeEntryWriter.emplace(
          EntrySpan(&mBuffer[start], mMask.MaskValue() + 1 - start),
          EntrySpan(&mBuffer[0], end),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aStart),
          ProfileBufferBlockIndex::CreateFromProfileBufferIndex(aEnd));
    }
  }

  // All ModuloBuffer operations should be done through this iterator, which has
  // an effectively infinite range. The underlying wrapping-around is hidden.
  // Use `ReaderAt(Index)` or `WriterAt(Index)` to create it.
  //
  // `const Iterator<...>` means the iterator itself cannot change, i.e., it
  // cannot move, and only its const methods are available. Note that these
  // const methods may still be used to modify the buffer contents (e.g.:
  // `operator*()`, `Poke()`).
  //
  // `Iterator</*IsBufferConst=*/true>` means the buffer contents cannot be
  // modified, i.e., write operations are forbidden, but the iterator may still
  // move if non-const itself.
  template <bool IsBufferConst>
  class Iterator {
    // Alias to const- or mutable-`ModuloBuffer` depending on `IsBufferConst`.
    using ConstOrMutableBuffer =
        std::conditional_t<IsBufferConst, const ModuloBuffer, ModuloBuffer>;

    // Implementation note about the strange enable-if's below:
    // `template <bool NotIBC = !IsBufferConst> enable_if_t<NotIBC>`
    // which intuitively could be simplified to:
    // `enable_if_t<!IsBufferConst>`
    // The former extra-templated syntax is in fact necessary to delay
    // instantiation of these functions until they are actually needed.
    //
    // If we were just doing `enable_if_t<!IsBufferConst>`, this would only
    // depend on the *class* (`ModuloBuffer<...>::Iterator`), which gets
    // instantiated when a `ModuloBuffer` is created with some template
    // arguments; at that point, all non-templated methods get instantiated, so
    // there's no "SFINAE" happening, and `enable_if_t<...>` is actually doing
    // `typename enable_if<...>::type` on the spot, but there is no `type` if
    // `IsBufferConst` is true, so it just fails right away. E.g.:
    //   error: no type named 'type' in 'std::enable_if<false, void>';
    //          'enable_if' cannot be used to disable this declaration
    //   note: in instantiation of template type alias 'enable_if_t'
    //   > std::enable_if_t<!IsBufferConst> WriteObject(const T& aObject) {
    //   in instantiation of template class
    //          'mozilla::ModuloBuffer<...>::Iterator<true>'
    //   > auto it = mb.ReaderAt(1);
    //
    // By adding another template level `template <bool NotIsBufferConst =
    // !IsBufferConst>`, the instantiation is delayed until the function is
    // actually invoked somewhere, e.g. `it.Poke(...);`.
    // So at that invocation point, the compiler looks for a "Poke" name in it,
    // and considers potential template instantiations that could work. The
    // `enable_if_t` is *now* attempted, with `NotIsBufferConst` taking its
    // value from `!IsBufferConst`:
    // - If `IsBufferConst` is false, `NotIsBufferConst` is true,
    // `enable_if<NotIsBufferConst>` does define a `type` (`void` by default),
    // so `enable_if_t` happily becomes `void`, the function exists and may be
    // called.
    // - Otherwise if `IsBufferConst` is true, `NotIsBufferConst` is false,
    // `enable_if<NotIsBufferConst>` does *not* define a `type`, therefore
    // `enable_if_t` produces an error because there is no `type`. Now "SFINAE"
    // happens: This "Substitution Failure Is Not An Error" (by itself)... But
    // then, there are no other functions named "Poke" as requested in the
    // `it.Poke(...);` call, so we are now getting an error (can't find
    // function), as expected because `it` had `IsBufferConst`==true. (But at
    // least the compiler waited until this invocation attempt before outputting
    // an error.)
    //
    // C++ is fun!

   public:
    // These definitions are expected by std functions, to recognize this as an
    // iterator. See https://en.cppreference.com/w/cpp/iterator/iterator_traits
    using difference_type = Index;
    using value_type = Byte;
    using pointer = std::conditional_t<IsBufferConst, const Byte*, Byte*>;
    using reference = std::conditional_t<IsBufferConst, const Byte&, Byte&>;
    using iterator_category = std::random_access_iterator_tag;

    // Can always copy/assign from the same kind of iterator.
    Iterator(const Iterator& aRhs) = default;
    Iterator& operator=(const Iterator& aRhs) = default;

    // Can implicitly copy an Iterator-to-mutable (reader+writer) to
    // Iterator-to-const (reader-only), but not the reverse.
    template <bool IsRhsBufferConst,
              typename = std::enable_if_t<(!IsRhsBufferConst) && IsBufferConst>>
    MOZ_IMPLICIT Iterator(const Iterator<IsRhsBufferConst>& aRhs)
        : mModuloBuffer(aRhs.mModuloBuffer), mIndex(aRhs.mIndex) {}

    // Can implicitly assign from an Iterator-to-mutable (reader+writer) to
    // Iterator-to-const (reader-only), but not the reverse.
    template <bool IsRhsBufferConst,
              typename = std::enable_if_t<(!IsRhsBufferConst) && IsBufferConst>>
    Iterator& operator=(const Iterator<IsRhsBufferConst>& aRhs) {
      mModuloBuffer = aRhs.mModuloBuffer;
      mIndex = aRhs.mIndex;
      return *this;
    }

    // Current location of the iterator in the `Index` range.
    // Note that due to wrapping, multiple indices may effectively point at the
    // same byte in the buffer.
    Index CurrentIndex() const { return mIndex; }

    // Location comparison in the `Index` range. I.e., two `Iterator`s may look
    // unequal, but refer to the same buffer location.
    // Must be on the same buffer.
    bool operator==(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex == aRhs.mIndex;
    }
    bool operator!=(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex != aRhs.mIndex;
    }
    bool operator<(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex < aRhs.mIndex;
    }
    bool operator<=(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex <= aRhs.mIndex;
    }
    bool operator>(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex > aRhs.mIndex;
    }
    bool operator>=(const Iterator& aRhs) const {
      MOZ_ASSERT(mModuloBuffer == aRhs.mModuloBuffer);
      return mIndex >= aRhs.mIndex;
    }

    // Movement in the `Index` range.
    Iterator& operator++() {
      ++mIndex;
      return *this;
    }
    Iterator operator++(int) {
      Iterator here(*mModuloBuffer, mIndex);
      ++mIndex;
      return here;
    }
    Iterator& operator--() {
      --mIndex;
      return *this;
    }
    Iterator operator--(int) {
      Iterator here(*mModuloBuffer, mIndex);
      --mIndex;
      return here;
    }
    Iterator& operator+=(Length aLength) {
      mIndex += aLength;
      return *this;
    }
    Iterator operator+(Length aLength) const {
      return Iterator(*mModuloBuffer, mIndex + aLength);
    }
    friend Iterator operator+(Length aLength, const Iterator& aIt) {
      return aIt + aLength;
    }
    Iterator& operator-=(Length aLength) {
      mIndex -= aLength;
      return *this;
    }
    Iterator operator-(Length aLength) const {
      return Iterator(*mModuloBuffer, mIndex - aLength);
    }

    // Distance from `aRef` to here in the `Index` range.
    // May be negative (as 2's complement) if `aRef > *this`.
    Index operator-(const Iterator& aRef) const {
      MOZ_ASSERT(mModuloBuffer == aRef.mModuloBuffer);
      return mIndex - aRef.mIndex;
    }

    // Dereference a single byte (read-only if `IsBufferConst` is true).
    reference operator*() const {
      return mModuloBuffer->mBuffer[OffsetInBuffer()];
    }

    // Random-access dereference.
    reference operator[](Length aLength) const { return *(*this + aLength); }

    // Write data (if `IsBufferConst` is false) but don't move iterator.
    template <bool NotIsBufferConst = !IsBufferConst>
    std::enable_if_t<NotIsBufferConst> Poke(const void* aSrc,
                                            Length aLength) const {
      // Don't allow data larger than the buffer.
      MOZ_ASSERT(aLength <= mModuloBuffer->BufferLength().Value());
      // Offset inside the buffer (corresponding to our Index).
      Offset offset = OffsetInBuffer();
      // Compute remaining bytes between this offset and the end of the buffer.
      Length remaining = mModuloBuffer->BufferLength().Value() - offset;
      if (MOZ_LIKELY(remaining >= aLength)) {
        // Enough space to write everything before the end.
        memcpy(&mModuloBuffer->mBuffer[offset], aSrc, aLength);
      } else {
        // Not enough space. Write as much as possible before the end.
        memcpy(&mModuloBuffer->mBuffer[offset], aSrc, remaining);
        // And then continue from the beginning of the buffer.
        memcpy(&mModuloBuffer->mBuffer[0],
               static_cast<const Byte*>(aSrc) + remaining,
               (aLength - remaining));
      }
    }

    // Write object data (if `IsBufferConst` is false) but don't move iterator.
    // Note that this copies bytes from the object, with the intent to read them
    // back later. Restricted to trivially-copyable types, which support this
    // without Undefined Behavior!
    template <typename T, bool NotIsBufferConst = !IsBufferConst>
    std::enable_if_t<NotIsBufferConst> PokeObject(const T& aObject) const {
      static_assert(std::is_trivially_copyable<T>::value,
                    "PokeObject<T> - T must be trivially copyable");
      return Poke(&aObject, sizeof(T));
    }

    // Write data (if `IsBufferConst` is false) and move iterator ahead.
    template <bool NotIsBufferConst = !IsBufferConst>
    std::enable_if_t<NotIsBufferConst> Write(const void* aSrc, Length aLength) {
      Poke(aSrc, aLength);
      mIndex += aLength;
    }

    // Write object data (if `IsBufferConst` is false) and move iterator ahead.
    // Note that this copies bytes from the object, with the intent to read them
    // back later. Restricted to trivially-copyable types, which support this
    // without Undefined Behavior!
    template <typename T, bool NotIsBufferConst = !IsBufferConst>
    std::enable_if_t<NotIsBufferConst> WriteObject(const T& aObject) {
      static_assert(std::is_trivially_copyable<T>::value,
                    "WriteObject<T> - T must be trivially copyable");
      return Write(&aObject, sizeof(T));
    }

    // Number of bytes needed to represent `aValue` in unsigned LEB128.
    template <typename T>
    static unsigned ULEB128Size(T aValue) {
      return ::mozilla::ULEB128Size(aValue);
    }

    // Write number as unsigned LEB128 (if `IsBufferConst` is false) and move
    // iterator ahead.
    template <typename T, bool NotIsBufferConst = !IsBufferConst>
    std::enable_if_t<NotIsBufferConst> WriteULEB128(T aValue) {
      ::mozilla::WriteULEB128(aValue, *this);
    }

    // Read data but don't move iterator.
    void Peek(void* aDst, Length aLength) const {
      // Don't allow data larger than the buffer.
      MOZ_ASSERT(aLength <= mModuloBuffer->BufferLength().Value());
      // Offset inside the buffer (corresponding to our Index).
      Offset offset = OffsetInBuffer();
      // Compute remaining bytes between this offset and the end of the buffer.
      Length remaining = mModuloBuffer->BufferLength().Value() - offset;
      if (MOZ_LIKELY(remaining >= aLength)) {
        // Can read everything we need before the end of the buffer.
        memcpy(aDst, &mModuloBuffer->mBuffer[offset], aLength);
      } else {
        // Read as much as possible before the end of the buffer.
        memcpy(aDst, &mModuloBuffer->mBuffer[offset], remaining);
        // And then continue from the beginning of the buffer.
        memcpy(static_cast<Byte*>(aDst) + remaining, &mModuloBuffer->mBuffer[0],
               (aLength - remaining));
      }
    }

    // Read data into an object but don't move iterator.
    // Note that this overwrites `aObject` with bytes from the buffer.
    // Restricted to trivially-copyable types, which support this without
    // Undefined Behavior!
    template <typename T>
    void PeekIntoObject(T& aObject) const {
      static_assert(std::is_trivially_copyable<T>::value,
                    "PeekIntoObject<T> - T must be trivially copyable");
      Peek(&aObject, sizeof(T));
    }

    // Read data as an object but don't move iterator.
    // Note that this creates an default `T` first, and then overwrites it with
    // bytes from the buffer. Restricted to trivially-copyable types, which
    // support this without Undefined Behavior!
    template <typename T>
    T PeekObject() const {
      static_assert(std::is_trivially_copyable<T>::value,
                    "PeekObject<T> - T must be trivially copyable");
      T object;
      PeekIntoObject(object);
      return object;
    }

    // Read data and move iterator ahead.
    void Read(void* aDst, Length aLength) {
      Peek(aDst, aLength);
      mIndex += aLength;
    }

    // Read data into a mutable iterator and move both iterators ahead.
    void ReadInto(Iterator</* IsBufferConst */ false>& aDst, Length aLength) {
      // Don't allow data larger than the buffer.
      MOZ_ASSERT(aLength <= mModuloBuffer->BufferLength().Value());
      MOZ_ASSERT(aLength <= aDst.mModuloBuffer->BufferLength().Value());
      // Offset inside the buffer (corresponding to our Index).
      Offset offset = OffsetInBuffer();
      // Compute remaining bytes between this offset and the end of the buffer.
      Length remaining = mModuloBuffer->BufferLength().Value() - offset;
      if (MOZ_LIKELY(remaining >= aLength)) {
        // Can read everything we need before the end of the buffer.
        // (`aDst.Write` advances `aDst` as it copies.)
        aDst.Write(&mModuloBuffer->mBuffer[offset], aLength);
      } else {
        // Read as much as possible before the end of the buffer.
        aDst.Write(&mModuloBuffer->mBuffer[offset], remaining);
        // And then continue from the beginning of the buffer.
        aDst.Write(&mModuloBuffer->mBuffer[0], (aLength - remaining));
      }
      mIndex += aLength;
    }

    // Read data into an object and move iterator ahead.
    // Note that this overwrites `aObject` with bytes from the buffer.
    // Restricted to trivially-copyable types, which support this without
    // Undefined Behavior!
    template <typename T>
    void ReadIntoObject(T& aObject) {
      static_assert(std::is_trivially_copyable<T>::value,
                    "ReadIntoObject<T> - T must be trivially copyable");
      Read(&aObject, sizeof(T));
    }

    // Read data as an object and move iterator ahead.
    // Note that this creates an default `T` first, and then overwrites it with
    // bytes from the buffer. Restricted to trivially-copyable types, which
    // support this without Undefined Behavior!
    template <typename T>
    T ReadObject() {
      static_assert(std::is_trivially_copyable<T>::value,
                    "ReadObject<T> - T must be trivially copyable");
      T object;
      ReadIntoObject(object);
      return object;
    }

    // Read an unsigned LEB128 number and move iterator ahead.
    template <typename T>
    T ReadULEB128() {
      return ::mozilla::ReadULEB128<T>(*this);
    }

   private:
    // Only a ModuloBuffer can instantiate its iterator.
    friend class ModuloBuffer;

    Iterator(ConstOrMutableBuffer& aBuffer, Index aIndex)
        : mModuloBuffer(WrapNotNull(&aBuffer)), mIndex(aIndex) {}

    // Convert the Iterator's mIndex into an offset inside the byte buffer.
    Offset OffsetInBuffer() const {
      return static_cast<Offset>(mIndex) & mModuloBuffer->mMask;
    }

    // ModuloBuffer that this Iterator operates on.
    // Using a non-null pointer instead of a reference, to allow re-assignment
    // of an Iterator variable.
    NotNull<ConstOrMutableBuffer*> mModuloBuffer;

    // Position of this iterator in the wider `Index` range. (Will be wrapped
    // around as needed when actually accessing bytes from the buffer.)
    Index mIndex;
  };

  // Shortcut to iterator to const (read-only) data.
  using Reader = Iterator<true>;
  // Shortcut to iterator to non-const (read/write) data.
  using Writer = Iterator<false>;

  // Create an iterator to const data at the given index.
  Reader ReaderAt(Index aIndex) const { return Reader(*this, aIndex); }

  // Create an iterator to non-const data at the given index.
  Writer WriterAt(Index aIndex) { return Writer(*this, aIndex); }

#ifdef DEBUG
  // Debugging aid: print the buffer contents (at most the first 128 bytes) to
  // stdout as hex.
  void Dump() const {
    Length len = BufferLength().Value();
    if (len > 128) {
      len = 128;
    }
    for (Length i = 0; i < len; ++i) {
      printf("%02x ", mBuffer[i]);
    }
    printf("\n");
  }
#endif  // DEBUG

 private:
  // Mask used to convert an index to an offset in `mBuffer`
  const PowerOfTwoMask<Offset> mMask;

  // Buffer data. `const NotNull<...>` shows that `mBuffer` is `const`, and
  // `Byte* const` shows that the pointer cannot be changed to point at
  // something else, but the pointed-at `Byte`s are writable.
  const NotNull<Byte* const> mBuffer;

  // Function used to release the buffer resource (if needed).
  // Empty (nullptr) when the buffer is externally owned, or after this object
  // has been moved from.
  std::function<void(Byte*)> mBufferDeleter;
};

}  // namespace mozilla

#endif  // ModuloBuffer_h