tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ISurfaceAllocator.cpp (7811B)


      1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
      2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
      3 /* This Source Code Form is subject to the terms of the Mozilla Public
      4 * License, v. 2.0. If a copy of the MPL was not distributed with this
      5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      6 
      7 #include "ISurfaceAllocator.h"
      8 
      9 #include "mozilla/layers/ImageBridgeParent.h"  // for ImageBridgeParent
     10 #include "mozilla/layers/TextureHost.h"        // for TextureHost
     11 #include "mozilla/layers/TextureForwarder.h"
     12 #include "mozilla/layers/CompositableForwarder.h"
     13 
     14 namespace mozilla {
     15 namespace layers {
     16 
// XPCOM boilerplate: expose GfxMemoryImageReporter via nsIMemoryReporter.
NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter)

// Running byte count reported by GfxMemoryImageReporter; atomic, so
// presumably updated from more than one thread — confirm against callers.
mozilla::Atomic<ptrdiff_t> GfxMemoryImageReporter::sAmount(0);
     20 
     21 void HostIPCAllocator::SendPendingAsyncMessages() {
     22  if (mPendingAsyncMessage.empty()) {
     23    return;
     24  }
     25 
     26  // Some type of AsyncParentMessageData message could have
     27  // one file descriptor (e.g. OpDeliverFence).
     28  // A number of file descriptors per gecko ipc message have a limitation
     29  // on XP_UNIX (MACOSX or LINUX).
     30  static const uint32_t kMaxMessageNumber =
     31      IPC::Message::MAX_DESCRIPTORS_PER_MESSAGE;
     32 
     33  nsTArray<AsyncParentMessageData> messages;
     34  messages.SetCapacity(mPendingAsyncMessage.size());
     35  for (size_t i = 0; i < mPendingAsyncMessage.size(); i++) {
     36    messages.AppendElement(mPendingAsyncMessage[i]);
     37    // Limit maximum number of messages.
     38    if (messages.Length() >= kMaxMessageNumber) {
     39      SendAsyncMessage(messages);
     40      // Initialize Messages.
     41      messages.Clear();
     42    }
     43  }
     44 
     45  if (messages.Length() > 0) {
     46    SendAsyncMessage(messages);
     47  }
     48  mPendingAsyncMessage.clear();
     49 }
     50 
// XXX - We should actually figure out the minimum shmem allocation size on
// a certain platform and use that.
// Size of each pooled shmem "page" from which fixed-size sections are carved.
const uint32_t sShmemPageSize = 4096;

#ifdef DEBUG
// The allocator only supports 4-byte sections; this constant backs the
// MOZ_ASSERTs below and is only needed in debug builds.
const uint32_t sSupportedBlockSize = 4;
#endif
     58 
     59 FixedSizeSmallShmemSectionAllocator::FixedSizeSmallShmemSectionAllocator(
     60    LayersIPCChannel* aShmProvider)
     61    : mShmProvider(aShmProvider) {
     62  MOZ_ASSERT(mShmProvider);
     63 }
     64 
// Releases any shmem pages that no longer contain live blocks; pages with
// outstanding allocations are intentionally left alone.
FixedSizeSmallShmemSectionAllocator::~FixedSizeSmallShmemSectionAllocator() {
  ShrinkShmemSectionHeap();
}
     68 
// Returns whether the underlying IPC channel is still usable; allocation
// and deallocation are refused after it closes.
bool FixedSizeSmallShmemSectionAllocator::IPCOpen() const {
  return mShmProvider->IPCOpen();
}
     72 
     73 bool FixedSizeSmallShmemSectionAllocator::AllocShmemSection(
     74    uint32_t aSize, ShmemSection* aShmemSection) {
     75  // For now we only support sizes of 4. If we want to support different sizes
     76  // some more complicated bookkeeping should be added.
     77  NS_ASSERT_OWNINGTHREAD(FixedSizeSmallShmemSectionAllocator);
     78  MOZ_ASSERT(aSize == sSupportedBlockSize);
     79  MOZ_ASSERT(aShmemSection);
     80 
     81  if (!IPCOpen()) {
     82    gfxCriticalError() << "Attempt to allocate a ShmemSection after shutdown.";
     83    return false;
     84  }
     85 
     86  uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));
     87 
     88  ipc::Shmem shmem;
     89 
     90  for (size_t i = 0; i < mUsedShmems.size(); i++) {
     91    ShmemSectionHeapHeader* header =
     92        mUsedShmems[i].get<ShmemSectionHeapHeader>();
     93    if ((header->mAllocatedBlocks + 1) * allocationSize +
     94            sizeof(ShmemSectionHeapHeader) <
     95        sShmemPageSize) {
     96      shmem = mUsedShmems[i];
     97      MOZ_ASSERT(mUsedShmems[i].IsWritable());
     98      break;
     99    }
    100  }
    101 
    102  if (!shmem.IsWritable()) {
    103    ipc::Shmem tmp;
    104    if (!mShmProvider->AllocUnsafeShmem(sShmemPageSize, &tmp)) {
    105      return false;
    106    }
    107 
    108    ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
    109    header->mTotalBlocks = 0;
    110    header->mAllocatedBlocks = 0;
    111 
    112    mUsedShmems.push_back(tmp);
    113    shmem = tmp;
    114  }
    115 
    116  MOZ_ASSERT(shmem.IsWritable());
    117 
    118  ShmemSectionHeapHeader* header = shmem.get<ShmemSectionHeapHeader>();
    119  uint8_t* heap = shmem.get<uint8_t>() + sizeof(ShmemSectionHeapHeader);
    120 
    121  ShmemSectionHeapAllocation* allocHeader = nullptr;
    122 
    123  if (header->mTotalBlocks > header->mAllocatedBlocks) {
    124    // Search for the first available block.
    125    for (size_t i = 0; i < header->mTotalBlocks; i++) {
    126      allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
    127 
    128      if (allocHeader->mStatus == STATUS_FREED) {
    129        break;
    130      }
    131      heap += allocationSize;
    132    }
    133    MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
    134    MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
    135  } else {
    136    heap += header->mTotalBlocks * allocationSize;
    137 
    138    header->mTotalBlocks++;
    139    allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
    140    allocHeader->mSize = aSize;
    141  }
    142 
    143  MOZ_ASSERT(allocHeader);
    144  header->mAllocatedBlocks++;
    145  allocHeader->mStatus = STATUS_ALLOCATED;
    146 
    147  size_t offset =
    148      (heap + sizeof(ShmemSectionHeapAllocation)) - shmem.get<uint8_t>();
    149  if (offset > (size_t)std::numeric_limits<uint32_t>::max()) {
    150    return false;
    151  }
    152  if (!aShmemSection->Init(shmem, offset, aSize)) {
    153    return false;
    154  }
    155 
    156  ShrinkShmemSectionHeap();
    157  return true;
    158 }
    159 
    160 void FixedSizeSmallShmemSectionAllocator::FreeShmemSection(
    161    mozilla::layers::ShmemSection& aShmemSection) {
    162  MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
    163  MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);
    164 
    165  if (!aShmemSection.shmem().IsWritable()) {
    166    return;
    167  }
    168 
    169  ShmemSectionHeapAllocation* allocHeader =
    170      reinterpret_cast<ShmemSectionHeapAllocation*>(
    171          aShmemSection.shmem().get<char>() + aShmemSection.offset() -
    172          sizeof(ShmemSectionHeapAllocation));
    173 
    174  MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());
    175 
    176  DebugOnly<bool> success =
    177      allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
    178  // If this fails something really weird is going on.
    179  MOZ_ASSERT(success);
    180 
    181  ShmemSectionHeapHeader* header =
    182      aShmemSection.shmem().get<ShmemSectionHeapHeader>();
    183  header->mAllocatedBlocks--;
    184 }
    185 
    186 void FixedSizeSmallShmemSectionAllocator::DeallocShmemSection(
    187    mozilla::layers::ShmemSection& aShmemSection) {
    188  NS_ASSERT_OWNINGTHREAD(FixedSizeSmallShmemSectionAllocator);
    189 
    190  if (!IPCOpen()) {
    191    gfxCriticalNote << "Attempt to dealloc a ShmemSections after shutdown.";
    192    return;
    193  }
    194 
    195  FreeShmemSection(aShmemSection);
    196  ShrinkShmemSectionHeap();
    197 }
    198 
    199 void FixedSizeSmallShmemSectionAllocator::ShrinkShmemSectionHeap() {
    200  NS_ASSERT_OWNINGTHREAD(FixedSizeSmallShmemSectionAllocator);
    201 
    202  if (!IPCOpen()) {
    203    mUsedShmems.clear();
    204    return;
    205  }
    206 
    207  // The loop will terminate as we either increase i, or decrease size
    208  // every time through.
    209  size_t i = 0;
    210  while (i < mUsedShmems.size()) {
    211    ShmemSectionHeapHeader* header =
    212        mUsedShmems[i].get<ShmemSectionHeapHeader>();
    213    if (header->mAllocatedBlocks == 0) {
    214      mShmProvider->DeallocShmem(mUsedShmems[i]);
    215      // We don't particularly care about order, move the last one in the array
    216      // to this position.
    217      if (i < mUsedShmems.size() - 1) {
    218        mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
    219      }
    220      mUsedShmems.pop_back();
    221    } else {
    222      i++;
    223    }
    224  }
    225 }
    226 
    227 Maybe<ShmemSection> ShmemSection::FromUntrusted(
    228    const UntrustedShmemSection& aUntrusted) {
    229  ShmemSection section;
    230  if (!section.Init(aUntrusted.shmem(), aUntrusted.offset(),
    231                    aUntrusted.size())) {
    232    return Nothing();
    233  }
    234 
    235  return Some(section);
    236 }
    237 
    238 bool ShmemSection::Init(const mozilla::ipc::Shmem& aShmem, uint32_t aOffset,
    239                        uint32_t aSize) {
    240  if (!aShmem.IsReadable()) {
    241    return false;
    242  }
    243 
    244  size_t shmSize = aShmem.Size<uint8_t>();
    245  CheckedInt<size_t> end = CheckedInt<size_t>(aOffset) + aSize;
    246 
    247  if (!end.isValid() || end.value() > shmSize) {
    248    return false;
    249  }
    250 
    251  mShmem = aShmem;
    252  mOffset = aOffset;
    253  mSize = aSize;
    254 
    255  return true;
    256 }
    257 
// Converts this validated section back to the wire representation used for
// IPC; the receiver must re-validate it via FromUntrusted.
UntrustedShmemSection ShmemSection::AsUntrusted() {
  return UntrustedShmemSection(mShmem, mOffset, mSize);
}
    261 
    262 }  // namespace layers
    263 }  // namespace mozilla