tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

ringbuffer.h (5967B)


      1 /* Copyright 2013 Google Inc. All Rights Reserved.
      2 
      3   Distributed under MIT license.
      4   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
      5 */
      6 
      7 /* Sliding window over the input data. */
      8 
      9 #ifndef BROTLI_ENC_RINGBUFFER_H_
     10 #define BROTLI_ENC_RINGBUFFER_H_
     11 
     12 #include "../common/platform.h"
     13 #include "memory.h"
     14 #include "params.h"
     15 #include "quality.h"
     16 
     17 #if defined(__cplusplus) || defined(c_plusplus)
     18 extern "C" {
     19 #endif
     20 
     21 /* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
     22   data in a circular manner: writing a byte writes it to:
     23     `position() % (1 << window_bits)'.
     24   For convenience, the RingBuffer array contains another copy of the
     25   first `1 << tail_bits' bytes:
     26     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
     27   and another copy of the last two bytes:
     28     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     29     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Window size: 1 << window_bits. The allocation additionally holds the
     tail copy and slack bytes (see total_size_ and RingBufferInitBuffer). */
  const uint32_t size_;
  /* size_ - 1; size_ is a power of two, so ANDing with mask_ maps a
     position into [0, size_). */
  const uint32_t mask_;
  /* Length of the duplicated prefix kept after the window: 1 << tail_bits. */
  const uint32_t tail_size_;
  /* size_ + tail_size_: window plus tail copy, excluding the two-byte
     prefix and the hashing slack. */
  const uint32_t total_size_;

  /* Bytes currently allocated for the window; grows lazily on write
     (see RingBufferWrite). */
  uint32_t cur_size_;
  /* Position to write in the ring buffer. Bit 31 is reserved as a
     "not first lap" flag by RingBufferWrite; the low 31 bits carry the
     running position. */
  uint32_t pos_;
  /* The actual ring buffer containing the copy of the last two bytes, the data,
     and the copy of the beginning as a tail. */
  uint8_t* data_;
  /* The start of the ring-buffer proper: data_ + 2, so that buffer_[-1]
     and buffer_[-2] are valid reads. */
  uint8_t* buffer_;
} RingBuffer;
     46 
     47 static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
     48  rb->cur_size_ = 0;
     49  rb->pos_ = 0;
     50  rb->data_ = 0;
     51  rb->buffer_ = 0;
     52 }
     53 
/* Derives the geometry fields (size_, mask_, tail_size_, total_size_) of
   |rb| from the encoder params. Does not allocate; must run before any
   write. */
static BROTLI_INLINE void RingBufferSetup(
    const BrotliEncoderParams* params, RingBuffer* rb) {
  int window_bits = ComputeRbBits(params);
  int tail_bits = params->lgblock;
  /* The geometry fields are declared const so the rest of the code cannot
     mutate them; they are initialized here by casting the const away.
     NOTE(review): writing a const-qualified member through such a cast is
     formally undefined behavior (C11 6.7.3 / CERT EXP40-C) — kept as-is
     to preserve upstream behavior. */
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}
     63 
/* Releases the backing allocation of |rb|. The struct itself is not freed
   and data_/buffer_ are left dangling — re-run RingBufferInit before any
   reuse. */
static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}
     67 
     68 /* Allocates or re-allocates data_ to the given length + plus some slack
     69   region before and after. Fills the slack regions with zeros. */
     70 static BROTLI_INLINE void RingBufferInitBuffer(
     71    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
     72  static const size_t kSlackForEightByteHashingEverywhere = 7;
     73  uint8_t* new_data = BROTLI_ALLOC(
     74      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
     75  size_t i;
     76  if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_data)) return;
     77  if (rb->data_) {
     78    memcpy(new_data, rb->data_,
     79        2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
     80    BROTLI_FREE(m, rb->data_);
     81  }
     82  rb->data_ = new_data;
     83  rb->cur_size_ = buflen;
     84  rb->buffer_ = rb->data_ + 2;
     85  rb->buffer_[-2] = rb->buffer_[-1] = 0;
     86  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
     87    rb->buffer_[rb->cur_size_ + i] = 0;
     88  }
     89 }
     90 
     91 static BROTLI_INLINE void RingBufferWriteTail(
     92    const uint8_t* bytes, size_t n, RingBuffer* rb) {
     93  const size_t masked_pos = rb->pos_ & rb->mask_;
     94  if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {
     95    /* Just fill the tail buffer with the beginning data. */
     96    const size_t p = rb->size_ + masked_pos;
     97    memcpy(&rb->buffer_[p], bytes,
     98        BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
     99  }
    100 }
    101 
/* Push |n| bytes into the ring buffer, allocating (or growing) the backing
   store via |m| as needed. On OOM the write is abandoned; callers must
   check BROTLI_IS_OOM. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ring-buffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size, otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer (window + tail copy). */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we copy the last two bytes to the first two positions
       (buffer_[-2]/buffer_[-1] mirroring below). */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
    /* Initialize tail; might be touched by "best_len++" optimization when
       ring buffer is "full". */
    rb->buffer_[rb->size_] = 241;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* First refresh the duplicated tail copy of the window's first bytes,
       then write the data itself, splitting at the physical end of the
       window when the write wraps around. NOTE(review): this assumes
       callers bound n so that at most one wrap occurs per call — confirm
       against the encoder's block-size limits. */
    RingBufferWriteTail(bytes, n, rb);
    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
             BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer. */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
             n - (rb->size_ - masked_pos));
    }
  }
  {
    /* Bit 31 of pos_ flags that the buffer has wrapped at least once; the
       low 31 bits carry the running position. */
    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
    uint32_t rb_pos_mask = (1u << 31) - 1;
    /* Mirror the last two window bytes in front of the buffer so that
       reads at positions -1/-2 are valid. */
    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
    /* Advance pos_ within the low 31 bits (masking n keeps the sum from
       spilling into the flag bit). */
    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
    if (not_first_lap) {
      /* Wrap, but preserve not-a-first-lap feature. */
      rb->pos_ |= 1u << 31;
    }
  }
}
    160 
    161 #if defined(__cplusplus) || defined(c_plusplus)
    162 }  /* extern "C" */
    163 #endif
    164 
    165 #endif  /* BROTLI_ENC_RINGBUFFER_H_ */