tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

hb-serialize.hh (23593B)


      1 /*
      2 * Copyright © 2007,2008,2009,2010  Red Hat, Inc.
      3 * Copyright © 2012,2018  Google, Inc.
      4 * Copyright © 2019  Facebook, Inc.
      5 *
      6 *  This is part of HarfBuzz, a text shaping library.
      7 *
      8 * Permission is hereby granted, without written agreement and without
      9 * license or royalty fees, to use, copy, modify, and distribute this
     10 * software and its documentation for any purpose, provided that the
     11 * above copyright notice and the following two paragraphs appear in
     12 * all copies of this software.
     13 *
     14 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
     15 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
     16 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
     17 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
     18 * DAMAGE.
     19 *
     20 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
     21 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
     22 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
     23 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
     24 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
     25 *
     26 * Red Hat Author(s): Behdad Esfahbod
     27 * Google Author(s): Behdad Esfahbod
     28 * Facebook Author(s): Behdad Esfahbod
     29 */
     30 
     31 #ifndef HB_SERIALIZE_HH
     32 #define HB_SERIALIZE_HH
     33 
     34 #include "hb.hh"
     35 #include "hb-blob.hh"
     36 #include "hb-map.hh"
     37 #include "hb-free-pool.hh"
     38 
     39 #include "hb-subset-serialize.h"
     40 
     41 /*
     42 * Serialize
     43 */
     44 
/* Bit-flag error codes accumulated by hb_serialize_context_t.
 * Multiple flags can be OR'ed together in the context's `errors` field. */
enum hb_serialize_error_t {
  HB_SERIALIZE_ERROR_NONE =            0x00000000u, /* No error recorded. */
  HB_SERIALIZE_ERROR_OTHER =           0x00000001u, /* Generic / unrecoverable failure. */
  HB_SERIALIZE_ERROR_OFFSET_OVERFLOW = 0x00000002u, /* An offset field couldn't hold its value. */
  HB_SERIALIZE_ERROR_OUT_OF_ROOM =     0x00000004u, /* Serialization buffer exhausted. */
  HB_SERIALIZE_ERROR_INT_OVERFLOW =    0x00000008u, /* An integer field couldn't hold its value. */
  HB_SERIALIZE_ERROR_ARRAY_OVERFLOW =  0x00000010u  /* An array-length field couldn't hold its value. */
};
/* Enables bitwise operators on the flag enum. */
HB_MARK_AS_FLAG_T (hb_serialize_error_t);
     54 
/* Serialization context: manages a fixed-size caller-owned buffer.  The
 * object currently being written grows at the head (front) side, while
 * finished ("packed") objects are moved to the tail (back) side.  Links
 * between objects are recorded and resolved into offsets at the end. */
struct hb_serialize_context_t
{
  /* Index of a packed object in the `packed` vector; 0 is the nil object
   * and means "no object". */
  typedef unsigned objidx_t;

  /* Base position that a link's offset is measured from. */
  enum whence_t {
     Head,	/* Relative to the current object head (default). */
     Tail,	/* Relative to the current object tail after packed. */
     Absolute	/* Absolute: from the start of the serialize buffer. */
   };
     66 
     67  struct object_t
     68  {
     69    void fini () {
     70      real_links.fini ();
     71      virtual_links.fini ();
     72    }
     73 
     74    object_t () = default;
     75 
     76    object_t (const hb_subset_serialize_object_t &o)
     77    {
     78      head = o.head;
     79      tail = o.tail;
     80      next = nullptr;
     81      real_links.alloc_exact (o.num_real_links);
     82      for (unsigned i = 0 ; i < o.num_real_links; i++)
     83        real_links.push (o.real_links[i]);
     84 
     85      virtual_links.alloc_exact (o.num_virtual_links);
     86      for (unsigned i = 0; i < o.num_virtual_links; i++)
     87        virtual_links.push (o.virtual_links[i]);
     88    }
     89 
     90    bool add_virtual_link (objidx_t objidx)
     91    {
     92      if (!objidx)
     93        return false;
     94 
     95      auto& link = *virtual_links.push ();
     96      if (virtual_links.in_error ())
     97        return false;
     98 
     99      link.objidx = objidx;
    100      // Remaining fields were previously zero'd by push():
    101      // link.width = 0;
    102      // link.is_signed = 0;
    103      // link.whence = 0;
    104      // link.position = 0;
    105      // link.bias = 0;
    106 
    107      return true;
    108    }
    109 
    110    friend void swap (object_t& a, object_t& b) noexcept
    111    {
    112      hb_swap (a.head, b.head);
    113      hb_swap (a.tail, b.tail);
    114      hb_swap (a.next, b.next);
    115      hb_swap (a.real_links, b.real_links);
    116      hb_swap (a.virtual_links, b.virtual_links);
    117    }
    118 
    119    bool operator == (const object_t &o) const
    120    {
    121      // Virtual links aren't considered for equality since they don't affect the functionality
    122      // of the object.
    123      return (tail - head == o.tail - o.head)
    124   && (real_links.length == o.real_links.length)
    125   && 0 == hb_memcmp (head, o.head, tail - head)
    126   && real_links.as_bytes () == o.real_links.as_bytes ();
    127    }
    128    uint32_t hash () const
    129    {
    130      // Virtual links aren't considered for equality since they don't affect the functionality
    131      // of the object.
    132      return hb_bytes_t (head, hb_min (128, tail - head)).hash () ^
    133          real_links.as_bytes ().hash ();
    134    }
    135 
    136    struct link_t
    137    {
    138      unsigned width: 3;
    139      unsigned is_signed: 1;
    140      unsigned whence: 2;
    141      unsigned bias : 26;
    142      unsigned position;
    143      objidx_t objidx;
    144 
    145      link_t () = default;
    146 
    147      link_t (const hb_subset_serialize_link_t &o)
    148      {
    149        width = o.width;
    150        is_signed = 0;
    151        whence = 0;
    152        position = o.position;
    153        bias = 0;
    154        objidx = o.objidx;
    155      }
    156 
    157      HB_INTERNAL static int cmp (const void* a, const void* b)
    158      {
    159        int cmp = ((const link_t*)a)->position - ((const link_t*)b)->position;
    160        if (cmp) return cmp;
    161 
    162        return ((const link_t*)a)->objidx - ((const link_t*)b)->objidx;
    163      }
    164    };
    165 
    166    char *head;
    167    char *tail;
    168    hb_vector_t<link_t> real_links;
    169    hb_vector_t<link_t> virtual_links;
    170    object_t *next;
    171 
    172    auto all_links () const HB_AUTO_RETURN
    173        (( hb_concat (real_links, virtual_links) ));
    174    auto all_links_writer () HB_AUTO_RETURN
    175        (( hb_concat (real_links.writer (), virtual_links.writer ()) ));
    176  };
    177 
  /* Saved serializer state, used by revert() to roll back a
   * partially-written object. */
  struct snapshot_t
  {
    char *head;
    char *tail;
    object_t *current; // Just for sanity check
    unsigned num_real_links;
    unsigned num_virtual_links;
    hb_serialize_error_t errors;
  };

  /* Captures head/tail, the current object's link counts, and the error
   * state so they can be restored later via revert(snapshot_t). */
  snapshot_t snapshot ()
  {
    return snapshot_t {
      head, tail, current,
      current ? current->real_links.length : 0,
      current ? current->virtual_links.length : 0,
      errors
     };
  }
    197 
  /* Constructs a context over the caller-owned buffer [start_, start_+size). */
  hb_serialize_context_t (void *start_, unsigned int size) :
    start ((char *) start_),
    end (start + size),
    current (nullptr)
  { reset (); }
  ~hb_serialize_context_t () { fini (); }

  /* Releases every packed object and every object still on the
   * under-construction stack. */
  void fini ()
  {
    /* ++hb_iter skips slot 0, which holds the nil-object placeholder. */
    for (object_t *_ : ++hb_iter (packed)) _->fini ();
    packed.fini ();
    this->packed_map.fini ();

    while (current)
    {
      auto *_ = current;
      current = current->next;
      _->fini ();
    }
  }
    218 
  /* True once any error flag has been recorded. */
  bool in_error () const { return bool (errors); }

  bool successful () const { return !bool (errors); }

  HB_NODISCARD bool ran_out_of_room () const { return errors & HB_SERIALIZE_ERROR_OUT_OF_ROOM; }
  HB_NODISCARD bool offset_overflow () const { return errors & HB_SERIALIZE_ERROR_OFFSET_OVERFLOW; }
  HB_NODISCARD bool only_offset_overflow () const { return errors == HB_SERIALIZE_ERROR_OFFSET_OVERFLOW; }
  /* True when exactly one overflow-type flag (and nothing else) is set.
   * Such errors don't corrupt serializer state and can be reverted. */
  HB_NODISCARD bool only_overflow () const
  {
    return errors == HB_SERIALIZE_ERROR_OFFSET_OVERFLOW
        || errors == HB_SERIALIZE_ERROR_INT_OVERFLOW
        || errors == HB_SERIALIZE_ERROR_ARRAY_OVERFLOW;
  }
    232 
  /* Re-targets the context at a new buffer, then clears all state. */
  void reset (void *start_, unsigned int size)
  {
    start = (char*) start_;
    end = start + size;
    reset ();
    current = nullptr;	/* Belt-and-braces: reset()'s fini() already drained the stack. */
  }

  /* Clears errors, rewinds head/tail to the buffer bounds, and drops all
   * packed objects, re-seeding `packed` with the nil object. */
  void reset ()
  {
    this->errors = HB_SERIALIZE_ERROR_NONE;
    this->head = this->start;
    this->tail = this->end;
    this->zerocopy = nullptr;
    this->debug_depth = 0;

    fini ();
    this->packed.push (nullptr);	/* Slot 0: nil object, so objidx 0 means "none". */
    this->packed_map.init ();
  }
    253 
  /* Records err_type when success is false; returns whether the context
   * is still error-free afterwards. */
  bool check_success (bool success,
                      hb_serialize_error_t err_type = HB_SERIALIZE_ERROR_OTHER)
  {
    return successful ()
        && (success || err (err_type));
  }

  /* Flags err_type when v1 != v2 (compared as long long); used to detect
   * truncation after a narrowing assignment. */
  template <typename T1, typename T2>
  bool check_equal (T1 &&v1, T2 &&v2, hb_serialize_error_t err_type)
  {
    if ((long long) v1 != (long long) v2)
    {
      return err (err_type);
    }
    return true;
  }

  /* Assigns v2 to v1, then errors out if the value did not survive the
   * (possibly narrowing) assignment intact. */
  template <typename T1, typename T2>
  bool check_assign (T1 &v1, T2 &&v2, hb_serialize_error_t err_type)
  { return check_equal (v1 = v2, v2, err_type); }

  /* Folds the error state of an hb container (vector/map/...) into this
   * context's error flags. */
  template <typename T> bool propagate_error (T &&obj)
  { return check_success (!hb_deref (obj).in_error ()); }

  template <typename T1, typename... Ts> bool propagate_error (T1 &&o1, Ts&&... os)
  { return propagate_error (std::forward<T1> (o1)) &&
    propagate_error (std::forward<Ts> (os)...); }
    281 
  /* To be called around main operation. */
  /* Opens the root object and returns a typed pointer at the buffer head. */
  template <typename Type=char>
  __attribute__((returns_nonnull))
  Type *start_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));

    assert (!current);
    return push<Type> ();
  }
  /* Closes the root object, packs it, and resolves all offset links. */
  void end_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
		     "end [%p..%p] serialized %u bytes; %s",
		     this->start, this->end,
		     (unsigned) (this->head - this->start),
		     successful () ? "successful" : "UNSUCCESSFUL");

    propagate_error (packed, packed_map);

    if (unlikely (!current)) return;
    if (unlikely (in_error()))
    {
      // Offset overflows that occur before link resolution cannot be handled
      // by repacking, so set a more general error.
      if (offset_overflow ()) err (HB_SERIALIZE_ERROR_OTHER);
      return;
    }

    assert (!current->next);	/* Root must be the only object left on the stack. */

    /* Only "pack" if there exist other objects... Otherwise, don't bother.
     * Saves a move. */
    if (packed.length <= 1)
      return;

    pop_pack (false);

    resolve_links ();
  }
    325 
  /* Begins a new object at the current head, pushing it onto the
   * under-construction stack.  Returns a typed pointer to its start. */
  template <typename Type = void>
  __attribute__((returns_nonnull))
  Type *push ()
  {
    if (unlikely (in_error ())) return start_embed<Type> ();

    object_t *obj = object_pool.alloc ();
    if (unlikely (!obj))
      check_success (false);	/* Pool allocation failed: record generic error. */
    else
    {
      obj->head = head;
      obj->tail = tail;
      obj->next = current;
      current = obj;
    }
    return start_embed<Type> ();
  }
  /* Abandons the top in-progress object, rewinding the buffer to where
   * that object started. */
  void pop_discard ()
  {
    object_t *obj = current;
    if (unlikely (!obj)) return;
    // Allow cleanup when we've error'd out on int overflows which don't compromise
    // the serializer state.
    if (unlikely (in_error() && !only_overflow ())) return;

    current = current->next;
    revert (zerocopy ? zerocopy : obj->head, obj->tail);
    zerocopy = nullptr;
    obj->fini ();
    object_pool.release (obj);
  }
    358 
  /* Set share to false when an object is unlikely shareable with others
   * so not worth an attempt, or a contiguous table is serialized as
   * multiple consecutive objects in the reverse order so can't be shared.
   */
  /* Finishes the top in-progress object: moves its bytes from the head
   * side to the tail side of the buffer and registers it in `packed`,
   * deduplicating against previously-packed identical objects when
   * share is true.  Returns its object index, or 0 for an empty object
   * or on error. */
  objidx_t pop_pack (bool share=true)
  {
    object_t *obj = current;
    if (unlikely (!obj)) return 0;
    // Allow cleanup when we've error'd out on int overflows which don't compromise
    // the serializer state.
    if (unlikely (in_error()  && !only_overflow ())) return 0;

    current = current->next;
    obj->tail = head;
    obj->next = nullptr;
    assert (obj->head <= obj->tail);
    unsigned len = obj->tail - obj->head;
    head = zerocopy ? zerocopy : obj->head; /* Rewind head. */
    bool was_zerocopy = zerocopy;
    zerocopy = nullptr;

    if (!len)
    {
      assert (!obj->real_links.length);
      assert (!obj->virtual_links.length);
      return 0;
    }

    objidx_t objidx;
    uint32_t hash = 0;
    if (share)
    {
      /* Reuse an identical already-packed object if one exists; carry the
       * new object's ordering constraints over to it. */
      hash = hb_hash (obj);
      objidx = packed_map.get_with_hash (obj, hash);
      if (objidx)
      {
	merge_virtual_links (obj, objidx);
	obj->fini ();
	object_pool.release (obj);
	return objidx;
      }
    }

    tail -= len;
    if (was_zerocopy)
      assert (tail == obj->head);	/* Zero-copy bytes were already written in place. */
    else
      memmove (tail, obj->head, len);

    obj->head = tail;
    obj->tail = tail + len;

    packed.push (obj);

    if (unlikely (!propagate_error (packed)))
    {
      /* Obj wasn't successfully added to packed, so clean it up otherwise its
       * links will be leaked. When we use constructor/destructors properly, we
       * can remove these. */
      obj->fini ();
      return 0;
    }

    objidx = packed.length - 1;

    if (share) packed_map.set_with_hash (obj, hash, objidx);
    propagate_error (packed_map);

    return objidx;
  }
    429 
  /* Rolls the serializer back to a previously-taken snapshot, discarding
   * bytes, links, and overflow-class errors recorded since then. */
  void revert (snapshot_t snap)
  {
    // Overflows that happened after the snapshot will be erased by the revert.
    if (unlikely (in_error () && !only_overflow ())) return;
    assert (snap.current == current);
    if (current)
    {
      current->real_links.shrink (snap.num_real_links);
      current->virtual_links.shrink (snap.num_virtual_links);
    }
    errors = snap.errors;
    revert (snap.head, snap.tail);
  }

  /* Restores head/tail to the given positions and drops any packed
   * objects whose bytes are no longer inside the tail-side region. */
  void revert (char *snap_head,
	       char *snap_tail)
  {
    if (unlikely (in_error ())) return;
    assert (snap_head <= head);
    assert (tail <= snap_tail);
    head = snap_head;
    tail = snap_tail;
    discard_stale_objects ();
  }

  /* Pops and releases packed objects that were reverted away (their head
   * now lies before the current tail). */
  void discard_stale_objects ()
  {
    if (unlikely (in_error ())) return;
    while (packed.length > 1 &&
	   packed.tail ()->head < tail)
    {
      object_t *obj = packed.tail ();
      packed_map.del (obj);
      assert (!obj->next);
      obj->fini ();
      object_pool.release (obj);
      packed.pop ();
    }
    if (packed.length > 1)
      assert (packed.tail ()->head == tail);	/* Remaining objects must abut the tail. */
  }
    471 
  // Adds a virtual link from the current object to objidx. A virtual link is not associated with
  // an actual offset field. They are solely used to enforce ordering constraints between objects.
  // Adding a virtual link from object a to object b will ensure that object b is always packed after
  // object a in the final serialized order.
  //
  // This is useful in certain situations where there needs to be a specific ordering in the
  // final serialization. Such as when platform bugs require certain orderings, or to provide
  //  guidance to the repacker for better offset overflow resolution.
  void add_virtual_link (objidx_t objidx)
  {
    if (unlikely (in_error ())) return;

    if (!objidx)
      return;

    assert (current);

    if (!current->add_virtual_link(objidx))
      err (HB_SERIALIZE_ERROR_OTHER);
  }

  /* Index of the most recently added real link's target in the current
   * object, or (objidx_t) -1 when there is none or the context errored. */
  objidx_t last_added_child_index() const {
    if (unlikely (in_error ())) return (objidx_t) -1;

    assert (current);
    if (!bool(current->real_links)) {
      return (objidx_t) -1;
    }

    return current->real_links[current->real_links.length - 1].objidx;
  }

  // For the current object ensure that the sub-table bytes for child objidx are always placed
  // after the subtable bytes for any other existing children. This only ensures that the
  // repacker will not move the target subtable before the other children
  // (by adding virtual links). It is up to the caller to ensure the initial serialization
  // order is correct.
  void repack_last(objidx_t objidx) {
    if (unlikely (in_error ())) return;

    if (!objidx)
      return;

    assert (current);
    for (auto& l : current->real_links) {
      if (l.objidx == objidx) {
        continue;	/* Don't constrain the target against itself. */
      }

      // NOTE(review): add_virtual_link's bool result is ignored here, so on
      // allocation failure the ordering constraint is silently best-effort.
      packed[l.objidx]->add_virtual_link(objidx);
    }
  }
    524 
  /* Records a real link: the offset field `ofs` inside the current object
   * will later be resolved to point at object objidx, measured from
   * `whence` and reduced by `bias`. */
  template <typename T>
  void add_link (T &ofs, objidx_t objidx,
		 whence_t whence = Head,
		 unsigned bias = 0)
  {
    if (unlikely (in_error ())) return;

    if (!objidx)
      return;

    assert (current);
    assert (current->head <= (const char *) &ofs);	/* Field must live inside current object. */

    auto& link = *current->real_links.push ();
    if (current->real_links.in_error ())
      err (HB_SERIALIZE_ERROR_OTHER);

    link.width = sizeof (T);
    link.objidx = objidx;
    if (unlikely (!sizeof (T)))	/* Zero-width offset types produce ordering-only links. */
    {
      // This link is not associated with an actual offset and exists merely to enforce
      // an ordering constraint.
      link.is_signed = 0;
      link.whence = 0;
      link.position = 0;
      link.bias = 0;
      return;
    }

    link.is_signed = std::is_signed<hb_unwrap_type (T)>::value;
    link.whence = (unsigned) whence;
    link.position = (const char *) &ofs - current->head;
    link.bias = bias;
  }

  /* Converts a pointer inside the current object into a bias (byte offset
   * from the object's head) suitable for add_link(). */
  unsigned to_bias (const void *base) const
  {
    if (unlikely (in_error ())) return 0;
    if (!base) return 0;
    assert (current);
    assert (current->head <= (const char *) base);
    return (const char *) base - current->head;
  }
    569 
  /* After all objects are packed, walks every real link and writes the
   * final offset value into the parent's offset field, choosing the
   * integer width/signedness recorded in the link. */
  void resolve_links ()
  {
    if (unlikely (in_error ())) return;

    assert (!current);		/* All objects must have been popped. */
    assert (packed.length > 1);

    for (const object_t* parent : ++hb_iter (packed))
      for (const object_t::link_t &link : parent->real_links)
      {
	const object_t* child = packed[link.objidx];
	if (unlikely (!child)) { err (HB_SERIALIZE_ERROR_OTHER); return; }
	unsigned offset = 0;
	switch ((whence_t) link.whence) {
	case Head:     offset = child->head - parent->head; break;
	case Tail:     offset = child->head - parent->tail; break;
	/* Absolute: child position in final output = head-side bytes plus
	 * the child's distance into the tail-side region. */
	case Absolute: offset = (head - start) + (child->head - tail); break;
	}

	assert (offset >= link.bias);
	offset -= link.bias;
	if (link.is_signed)
	{
	  assert (link.width == 2 || link.width == 4);
	  if (link.width == 4)
	    assign_offset<int32_t> (parent, link, offset);
	  else
	    assign_offset<int16_t> (parent, link, offset);
	}
	else
	{
	  assert (link.width == 2 || link.width == 3 || link.width == 4);
	  if (link.width == 4)
	    assign_offset<uint32_t> (parent, link, offset);
	  else if (link.width == 3)
	    assign_offset<uint32_t, 3> (parent, link, offset);	/* 24-bit offset. */
	  else
	    assign_offset<uint16_t> (parent, link, offset);
	}
      }
  }
    611 
  /* Bytes written so far into the current object. */
  unsigned int length () const
  {
    if (unlikely (!current)) return 0;
    return this->head - current->head;
  }

  /* Pads with zero bytes until the current object's length is a multiple
   * of alignment. */
  void align (unsigned int alignment)
  {
    unsigned int l = length () % alignment;
    if (l)
      (void) allocate_size<void> (alignment - l);
  }

  /* Returns the head as a typed pointer without consuming any space. */
  template <typename Type = void>
  __attribute__((returns_nonnull))
  Type *start_embed (const Type *obj HB_UNUSED = nullptr) const
  { return reinterpret_cast<Type *> (this->head); }
  template <typename Type>
  __attribute__((returns_nonnull))
  Type *start_embed (const Type &obj) const
  { return start_embed (std::addressof (obj)); }

  /* ORs err_type into the error flags; returns false whenever any error
   * is now set (so always false for a nonzero err_type). */
  bool err (hb_serialize_error_t err_type)
  {
    return !bool ((errors = (errors | err_type)));
  }
    638 
  /* Claims `size` bytes at the tail for zero-copy serialization: the
   * current object is relocated so its bytes are written directly at
   * their final (tail-side) position, avoiding the memmove in pop_pack. */
  bool start_zerocopy (size_t size)
  {
    if (unlikely (in_error ())) return false;

    if (unlikely (size > INT_MAX || this->tail - this->head < ptrdiff_t (size)))
    {
      err (HB_SERIALIZE_ERROR_OUT_OF_ROOM);
      return false;
    }

    assert (!this->zerocopy);
    this->zerocopy = this->head;	/* Remember original head; pop_pack rewinds to it. */

    assert (this->current->head == this->head);
    this->current->head = this->current->tail = this->head = this->tail - size;
    return true;
  }

  /* Reserves `size` bytes at the head (zeroed unless clear is false);
   * returns a typed pointer, or nullptr when out of room or in error. */
  template <typename Type>
  HB_NODISCARD
  Type *allocate_size (size_t size, bool clear = true)
  {
    if (unlikely (in_error ())) return nullptr;

    if (unlikely (size > INT_MAX || this->tail - this->head < ptrdiff_t (size)))
    {
      err (HB_SERIALIZE_ERROR_OUT_OF_ROOM);
      return nullptr;
    }
    if (clear)
      hb_memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  /* Reserves Type::min_size bytes. */
  template <typename Type>
  Type *allocate_min ()
  { return this->allocate_size<Type> (Type::min_size); }

  /* Appends a byte-for-byte copy of obj (obj->get_size () bytes). */
  template <typename Type>
  HB_NODISCARD
  Type *embed (const Type *obj)
  {
    unsigned int size = obj->get_size ();
    Type *ret = this->allocate_size<Type> (size, false);
    if (unlikely (!ret)) return nullptr;
    hb_memcpy (ret, obj, size);
    return ret;
  }
  template <typename Type>
  HB_NODISCARD
  Type *embed (const Type &obj)
  { return embed (std::addressof (obj)); }
  /* Raw-bytes variant. */
  char *embed (const char *obj, unsigned size)
  {
    char *ret = this->allocate_size<char> (size, false);
    if (unlikely (!ret)) return nullptr;
    hb_memcpy (ret, obj, size);
    return ret;
  }
    700 
  /* Overload chosen (higher priority) when Type provides a
   * copy(serializer, ...) method. */
  template <typename Type, typename ...Ts> auto
  _copy (const Type &src, hb_priority<1>, Ts&&... ds) HB_RETURN
  (Type *, src.copy (this, std::forward<Ts> (ds)...))

  /* Fallback: allocate sizeof(Type) and use the assignment operator. */
  template <typename Type> auto
  _copy (const Type &src, hb_priority<0>) -> decltype (&(hb_declval<Type> () = src))
  {
    Type *ret = this->allocate_size<Type> (sizeof (Type));
    if (unlikely (!ret)) return nullptr;
    *ret = src;
    return ret;
  }

  /* Like embed, but active: calls obj.operator=() or obj.copy() to transfer data
   * instead of hb_memcpy(). */
  template <typename Type, typename ...Ts>
  Type *copy (const Type &src, Ts&&... ds)
  { return _copy (src, hb_prioritize, std::forward<Ts> (ds)...); }
  template <typename Type, typename ...Ts>
  Type *copy (const Type *src, Ts&&... ds)
  { return copy (*src, std::forward<Ts> (ds)...); }

  /* Copies every element produced by the iterator, in order. */
  template<typename Iterator,
	   hb_requires (hb_is_iterator (Iterator)),
	   typename ...Ts>
  void copy_all (Iterator it, Ts&&... ds)
  { for (decltype (*it) _ : it) copy (_, ds...); }
    728 
  /* Stream-style append via embed(). */
  template <typename Type>
  hb_serialize_context_t& operator << (const Type &obj) & { embed (obj); return *this; }

  /* Grows the current object so that obj spans `size` bytes in total,
   * allocating only the bytes beyond the current head. */
  template <typename Type>
  Type *extend_size (Type *obj, size_t size, bool clear = true)
  {
    if (unlikely (in_error ())) return nullptr;

    assert (this->start <= (char *) obj);
    assert ((char *) obj <= this->head);
    assert ((size_t) (this->head - (char *) obj) <= size);
    /* First condition guards against pointer-arithmetic wraparound. */
    if (unlikely (((char *) obj + size < (char *) obj) ||
		  !this->allocate_size<Type> (((char *) obj) + size - this->head, clear))) return nullptr;
    return reinterpret_cast<Type *> (obj);
  }
  template <typename Type>
  Type *extend_size (Type &obj, size_t size, bool clear = true)
  { return extend_size (std::addressof (obj), size, clear); }

  /* Grows obj to its type's minimum size. */
  template <typename Type>
  Type *extend_min (Type *obj) { return extend_size (obj, obj->min_size); }
  template <typename Type>
  Type *extend_min (Type &obj) { return extend_min (std::addressof (obj)); }

  /* Grows obj to its full get_size(...). */
  template <typename Type, typename ...Ts>
  Type *extend (Type *obj, Ts&&... ds)
  { return extend_size (obj, obj->get_size (std::forward<Ts> (ds)...)); }
  template <typename Type, typename ...Ts>
  Type *extend (Type &obj, Ts&&... ds)
  { return extend (std::addressof (obj), std::forward<Ts> (ds)...); }
    759 
  /* Output routines. */
  /* Returns a freshly hb_malloc'ed buffer holding the head-side bytes
   * immediately followed by the tail-side (packed) bytes; the caller
   * owns the memory.  Returns an empty hb_bytes_t on OOM or zero length. */
  hb_bytes_t copy_bytes () const
  {
    assert (successful ());
    /* Copy both items from head side and tail side... */
    unsigned int len = (this->head - this->start)
		     + (this->end  - this->tail);

    // If len is zero don't hb_malloc as the memory won't get properly
    // cleaned up later.
    if (!len) return hb_bytes_t ();

    char *p = (char *) hb_malloc (len);
    if (unlikely (!p)) return hb_bytes_t ();

    hb_memcpy (p, this->start, this->head - this->start);
    hb_memcpy (p + (this->head - this->start), this->tail, this->end - this->tail);
    return hb_bytes_t (p, len);
  }
  template <typename Type>
  Type *copy () const
  { return reinterpret_cast<Type *> ((char *) copy_bytes ().arrayZ); }
  /* Like copy_bytes(), but wrapped in a writable blob that owns (and
   * frees) the memory. */
  hb_blob_t *copy_blob () const
  {
    hb_bytes_t b = copy_bytes ();
    return hb_blob_create (b.arrayZ, b.length,
			   HB_MEMORY_MODE_WRITABLE,
			   (char *) b.arrayZ, hb_free);
  }

  /* Read-only view of all packed objects (index 0 is the nil object). */
  const hb_vector_t<object_t *>& object_graph() const
  { return packed; }
    792 
  private:
  /* Writes `offset` into the big-endian offset field described by link,
   * flagging HB_SERIALIZE_ERROR_OFFSET_OVERFLOW if the value doesn't fit. */
  template <typename T, unsigned Size = sizeof (T)>
  void assign_offset (const object_t* parent, const object_t::link_t &link, unsigned offset)
  {
    // XXX We should stop assuming big-endian!
    auto &off = * ((HBInt<true, T, Size> *) (parent->head + link.position));
    assert (0 == off);	/* Field must still hold its zero-initialized value. */
    check_assign (off, offset, HB_SERIALIZE_ERROR_OFFSET_OVERFLOW);
  }

  public:
  /* Buffer bounds and cursors; zerocopy remembers the pre-relocation head
   * while a zero-copy object is open. */
  char *start, *head, *tail, *end, *zerocopy;
  unsigned int debug_depth;
  hb_serialize_error_t errors;

  private:

  /* Copies from's virtual (ordering) links onto the already-packed object
   * at to_idx; used when pop_pack dedups onto a shared object. */
  void merge_virtual_links (const object_t* from, objidx_t to_idx) {
    object_t* to = packed[to_idx];
    for (const auto& l : from->virtual_links) {
      to->virtual_links.push (l);
    }
  }

  /* Object memory pool. */
  hb_free_pool_t<object_t> object_pool;

  /* Stack of currently under construction objects. */
  object_t *current;

  /* Stack of packed objects.  Object 0 is always nil object. */
  hb_vector_t<object_t *> packed;

  /* Map view of packed objects. */
  hb_hashmap_t<const object_t *, objidx_t> packed_map;
};
    829 
    830 #endif /* HB_SERIALIZE_HH */