neovim

Neovim text editor
git clone https://git.dasho.dev/neovim.git
Log | Files | Refs | README

marktree.c (75583B)


      1 // Tree data structure for storing marks at (row, col) positions and updating
      2 // them to arbitrary text changes. Derivative work of kbtree in klib, whose
      3 // copyright notice is reproduced below. Also inspired by the design of the
      4 // marker tree data structure of the Atom editor, regarding efficient updates
      5 // to text changes.
      6 //
      7 // Marks are inserted using marktree_put. Text changes are processed using
      8 // marktree_splice. All read and delete operations use the iterator.
      9 // use marktree_itr_get to put an iterator at a given position or
     10 // marktree_lookup to lookup a mark by its id (iterator optional in this case).
     11 // Use marktree_itr_current and marktree_itr_next/prev to read marks in a loop.
     12 // marktree_del_itr deletes the current mark of the iterator and implicitly
     13 // moves the iterator to the next mark.
     14 
     15 // Copyright notice for kbtree (included in heavily modified form):
     16 //
     17 // Copyright 1997-1999, 2001, John-Mark Gurney.
     18 //           2008-2009, Attractive Chaos <attractor@live.co.uk>
     19 //
     20 // Redistribution and use in source and binary forms, with or without
     21 // modification, are permitted provided that the following conditions
     22 // are met:
     23 //
     24 // 1. Redistributions of source code must retain the above copyright
     25 //    notice, this list of conditions and the following disclaimer.
     26 // 2. Redistributions in binary form must reproduce the above copyright
     27 //    notice, this list of conditions and the following disclaimer in the
     28 //    documentation and/or other materials provided with the distribution.
     29 //
     30 // THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     31 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     32 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     33 // ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     34 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     35 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     36 // OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     37 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     38 // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     39 // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     40 // SUCH DAMAGE.
     41 //
     42 // Changes done by by the neovim project follow the Apache v2 license available
     43 // at the repo root.
     44 
     45 #include <assert.h>
     46 #include <inttypes.h>
     47 #include <stdio.h>
     48 #include <stdlib.h>
     49 #include <string.h>
     50 #include <uv.h>
     51 
     52 #include "klib/kvec.h"
     53 #include "nvim/macros_defs.h"
     54 #include "nvim/map_defs.h"
     55 #include "nvim/marktree.h"
     56 #include "nvim/memory.h"
     57 #include "nvim/pos_defs.h"
     58 // only for debug functions
     59 #include "nvim/api/private/defs.h"
     60 #include "nvim/api/private/helpers.h"
     61 #include "nvim/garray.h"
     62 #include "nvim/garray_defs.h"
     63 
     64 #define T MT_BRANCH_FACTOR
     65 #define ILEN (sizeof(MTNode) + sizeof(struct mtnode_inner_s))
     66 
     67 #define ID_INCR (((uint64_t)1) << 2)
     68 
     69 #define rawkey(itr) ((itr)->x->key[(itr)->i])
     70 
     71 static bool pos_leq(MTPos a, MTPos b)
     72 {
     73  return a.row < b.row || (a.row == b.row && a.col <= b.col);
     74 }
     75 
     76 static bool pos_less(MTPos a, MTPos b)
     77 {
     78  return !pos_leq(b, a);
     79 }
     80 
     81 static void relative(MTPos base, MTPos *val)
     82 {
     83  assert(pos_leq(base, *val));
     84  if (val->row == base.row) {
     85    val->row = 0;
     86    val->col -= base.col;
     87  } else {
     88    val->row -= base.row;
     89  }
     90 }
     91 
     92 static void unrelative(MTPos base, MTPos *val)
     93 {
     94  if (val->row == 0) {
     95    val->row = base.row;
     96    val->col += base.col;
     97  } else {
     98    val->row += base.row;
     99  }
    100 }
    101 
    102 static void compose(MTPos *base, MTPos val)
    103 {
    104  if (val.row == 0) {
    105    base->col += val.col;
    106  } else {
    107    base->row += val.row;
    108    base->col = val.col;
    109  }
    110 }
    111 
// Used by `marktree_splice`. Need to keep track of marks which moved
// in order to repair intersections.
typedef struct {
  uint64_t id;        // lookup id of the moved mark
  MTNode *old, *new;  // presumably the node holding the mark before/after the move — TODO confirm against marktree_splice
  int old_i, new_i;   // key index within the old/new node, presumably — verify at use site
} Damage;
typedef kvec_withinit_t(Damage, 8) DamageList;
    120 
    121 #include "marktree.c.generated.h"
    122 
    123 #define mt_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
    124 static int key_cmp(MTKey a, MTKey b)
    125 {
    126  int cmp = mt_generic_cmp(a.pos.row, b.pos.row);
    127  if (cmp != 0) {
    128    return cmp;
    129  }
    130  cmp = mt_generic_cmp(a.pos.col, b.pos.col);
    131  if (cmp != 0) {
    132    return cmp;
    133  }
    134 
    135  // TODO(bfredl): MT_FLAG_REAL could go away if we fix marktree_getp_aux for real
    136  const uint16_t cmp_mask = MT_FLAG_RIGHT_GRAVITY | MT_FLAG_END | MT_FLAG_REAL | MT_FLAG_LAST;
    137  return mt_generic_cmp(a.flags & cmp_mask, b.flags & cmp_mask);
    138 }
    139 
/// @return position of k if it exists in the node, otherwise the position
/// it should be inserted, which ranges from 0 to x->n _inclusively_
/// @param match (optional) set to TRUE if match (pos, gravity) was found
static inline int marktree_getp_aux(const MTNode *x, MTKey k, bool *match)
{
  bool dummy_match;
  bool *m = match ? match : &dummy_match;

  int begin = 0;
  int end = x->n;
  // empty node: no match, "insert before index 0"
  if (x->n == 0) {
    *m = false;
    return -1;
  }
  // binary search for the first key that does not compare less than k
  while (begin < end) {
    int mid = (begin + end) >> 1;
    if (key_cmp(x->key[mid], k) < 0) {
      begin = mid + 1;
    } else {
      end = mid;
    }
  }
  // every key is less than k: k sorts after the last key
  if (begin == x->n) {
    *m = false;
    return x->n - 1;
  }
  // on an exact match return its index; otherwise step back so the
  // result is the index of the key _before_ the insertion point
  if (!(*m = (key_cmp(k, x->key[begin]) == 0))) {
    begin--;
  }
  return begin;
}
    171 
    172 static inline void refkey(MarkTree *b, MTNode *x, int i)
    173 {
    174  pmap_put(uint64_t)(b->id2node, mt_lookup_key(x->key[i]), x);
    175 }
    176 
    177 static MTNode *id2node(MarkTree *b, uint64_t id)
    178 {
    179  return pmap_get(uint64_t)(b->id2node, id);
    180 }
    181 
    182 #define ptr s->i_ptr
    183 #define meta s->i_meta
    184 // put functions
    185 
// x must be an internal node, which is not full
// x->ptr[i] should be a full node, i e x->ptr[i]->n == 2*T-1
//
// Standard B-tree split: y = x->ptr[i] keeps its lower T-1 keys, a new
// right sibling z receives the upper T-1 keys, and the median key
// y->key[T-1] moves up into x at position i. On top of that, the
// marktree-specific intersect sets and cached meta counts are repaired.
static inline void split_node(MarkTree *b, MTNode *x, const int i, MTKey next)
{
  MTNode *y = x->ptr[i];
  MTNode *z = marktree_alloc_node(b, y->level);
  z->level = y->level;
  z->n = T - 1;

  // tricky: we might split a node in between inserting the start node and the end
  // node of the same pair. Then we must not intersect this id yet (done later
  // in marktree_intersect_pair).
  uint64_t last_start = mt_end(next) ? mt_lookup_id(next.ns, next.id, false) : MARKTREE_END_FLAG;

  // no alloc in the common case (less than 4 intersects)
  kvi_copy(z->intersect, y->intersect);

  if (!y->level) {
    // leaf split: recompute which pair ranges fully cover each half so the
    // intersect sets of y and z stay exact
    uint64_t pi = pseudo_index(y, 0);  // note: sloppy pseudo-index
    for (int j = 0; j < T; j++) {
      MTKey k = y->key[j];
      uint64_t pi_end = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, true), true);
      // a start key in the lower half whose end lies past the split point:
      // the pair's range covers all of z
      if (mt_start(k) && pi_end > pi && mt_lookup_key(k) != last_start) {
        intersect_node(b, z, mt_lookup_id(k.ns, k.id, false));
      }
    }

    // note: y->key[T-1] is moved up and thus checked for both
    for (int j = T - 1; j < (T * 2) - 1; j++) {
      MTKey k = y->key[j];
      uint64_t pi_start = pseudo_index_for_id(b, mt_lookup_id(k.ns, k.id, false), true);
      // an end key in the upper half whose start lies before the split
      // point: the pair's range covers all of y
      if (mt_end(k) && pi_start > 0 && pi_start < pi) {
        intersect_node(b, y, mt_lookup_id(k.ns, k.id, false));
      }
    }
  }

  // move the upper T-1 keys of y into z (and re-register their id mappings)
  memcpy(z->key, &y->key[T], sizeof(MTKey) * (T - 1));
  for (int j = 0; j < T - 1; j++) {
    refkey(b, z, j);
  }
  if (y->level) {
    // internal node: move the upper T child pointers and their cached
    // meta counts as well, fixing up the children's parent links
    memcpy(z->ptr, &y->ptr[T], sizeof(MTNode *) * T);
    memcpy(z->meta, &y->meta[T], sizeof(z->meta[0]) * T);
    for (int j = 0; j < T; j++) {
      z->ptr[j]->parent = z;
      z->ptr[j]->p_idx = (int16_t)j;
    }
  }
  y->n = T - 1;
  // make room in x for z as the child at position i+1
  memmove(&x->ptr[i + 2], &x->ptr[i + 1], sizeof(MTNode *) * (size_t)(x->n - i));
  memmove(&x->meta[i + 2], &x->meta[i + 1], sizeof(x->meta[0]) * (size_t)(x->n - i));
  x->ptr[i + 1] = z;
  meta_describe_node(x->meta[i + 1], z);
  z->parent = x;  // == y->parent
  for (int j = i + 1; j < x->n + 2; j++) {
    x->ptr[j]->p_idx = (int16_t)j;
  }
  memmove(&x->key[i + 1], &x->key[i], sizeof(MTKey) * (size_t)(x->n - i));

  // move key to internal layer:
  x->key[i] = y->key[T - 1];
  refkey(b, x, i);
  x->n++;

  uint32_t meta_inc[kMTMetaCount];
  meta_describe_key(meta_inc, x->key[i]);
  for (int m = 0; m < kMTMetaCount; m++) {
    // y used to contain all of z and x->key[i], discount those
    x->meta[i][m] -= (x->meta[i + 1][m] + meta_inc[m]);
  }

  // keys in z were relative to y's origin; rebase them on the moved-up key
  for (int j = 0; j < T - 1; j++) {
    relative(x->key[i].pos, &z->key[j].pos);
  }
  if (i > 0) {
    unrelative(x->key[i - 1].pos, &x->key[i].pos);
  }

  if (y->level) {
    // internal split: recompute y's and z's intersect sets from their children
    bubble_up(y);
    bubble_up(z);
  } else {
    // code above goes here
  }
}
    272 
// x must not be a full node (even if there might be internal space)
//
// Recursive B-tree insertion: insert key k into the subtree rooted at x,
// splitting any full child encountered on the way down, and propagate
// `meta_inc` (the key's decor counts) into the cached per-child totals
// along the descent path.
static inline void marktree_putp_aux(MarkTree *b, MTNode *x, MTKey k, uint32_t *meta_inc)
{
  // TODO(bfredl): ugh, make sure this is the _last_ valid (pos, gravity) position,
  // to minimize movement
  int i = marktree_getp_aux(x, k, NULL) + 1;
  if (x->level == 0) {
    // leaf: shift the tail right and insert in place
    if (i != x->n) {
      memmove(&x->key[i + 1], &x->key[i],
              (size_t)(x->n - i) * sizeof(MTKey));
    }
    x->key[i] = k;
    refkey(b, x, i);
    x->n++;
  } else {
    // internal: split a full child before descending into it
    if (x->ptr[i]->n == 2 * T - 1) {
      split_node(b, x, i, k);
      // the split moved a key up into x->key[i]; descend to its right
      // if k sorts after it
      if (key_cmp(k, x->key[i]) > 0) {
        i++;
      }
    }
    // positions inside a child are stored relative to the key before it
    if (i > 0) {
      relative(x->key[i - 1].pos, &k.pos);
    }
    marktree_putp_aux(b, x->ptr[i], k, meta_inc);
    // maintain the cached meta counts for the child we descended into
    for (int m = 0; m < kMTMetaCount; m++) {
      x->meta[i][m] += meta_inc[m];
    }
  }
}
    303 
/// Insert a mark into the tree; for a paired mark also insert its end key
/// and record the pair's intersections.
///
/// @param b the tree
/// @param key the start key; may only carry external flags and right gravity
/// @param end_row row of the end position, or a negative value for an
///                unpaired mark
/// @param end_col col of the end position (ignored when end_row < 0)
/// @param end_right true if the end key should have right gravity
void marktree_put(MarkTree *b, MTKey key, int end_row, int end_col, bool end_right)
{
  assert(!(key.flags & ~(MT_FLAG_EXTERNAL_MASK | MT_FLAG_RIGHT_GRAVITY)));
  if (end_row >= 0) {
    key.flags |= MT_FLAG_PAIRED;
  }

  marktree_put_key(b, key);

  if (end_row >= 0) {
    // the end key shares ns/id with the start key but carries MT_FLAG_END
    // and its own gravity flag
    MTKey end_key = key;
    end_key.flags = (uint16_t)((uint16_t)(key.flags & ~MT_FLAG_RIGHT_GRAVITY)
                               |(uint16_t)MT_FLAG_END
                               |(uint16_t)(end_right ? MT_FLAG_RIGHT_GRAVITY : 0));
    end_key.pos = (MTPos){ end_row, end_col };
    marktree_put_key(b, end_key);
    MarkTreeIter itr[1] = { 0 };
    MarkTreeIter end_itr[1] = { 0 };
    // look both keys up again: inserting the end key may have moved the
    // start key (node splits)
    marktree_lookup(b, mt_lookup_key(key), itr);
    marktree_lookup(b, mt_lookup_key(end_key), end_itr);

    // record the pair id on every node fully contained between the two keys
    marktree_intersect_pair(b, mt_lookup_key(key), itr, end_itr, false);
  }
}
    328 
    329 // this is currently not used very often, but if it was it should use binary search
    330 static bool intersection_has(Intersection *x, uint64_t id)
    331 {
    332  for (size_t i = 0; i < kv_size(*x); i++) {
    333    if (kv_A(*x, i) == id) {
    334      return true;
    335    } else if (kv_A(*x, i) >= id) {
    336      return false;
    337    }
    338  }
    339  return false;
    340 }
    341 
    342 static void intersect_node(MarkTree *b, MTNode *x, uint64_t id)
    343 {
    344  assert(!(id & MARKTREE_END_FLAG));
    345  kvi_pushp(x->intersect);
    346  // optimized for the common case: new key is always in the end
    347  for (ssize_t i = (ssize_t)kv_size(x->intersect) - 1; i >= 0; i--) {
    348    if (i > 0 && kv_A(x->intersect, i - 1) > id) {
    349      kv_A(x->intersect, i) = kv_A(x->intersect, i - 1);
    350    } else {
    351      kv_A(x->intersect, i) = id;
    352      break;
    353    }
    354  }
    355 }
    356 
/// Remove pair id `id` from node x's sorted intersect list.
///
/// @param strict when true, assert (in Debug builds) that the id was
///               actually present
static void unintersect_node(MarkTree *b, MTNode *x, uint64_t id, bool strict)
{
  assert(!(id & MARKTREE_END_FLAG));
  bool seen = false;
  size_t i;
  // linear scan of the sorted list; stop at the first element >= id
  for (i = 0; i < kv_size(x->intersect); i++) {
    if (kv_A(x->intersect, i) < id) {
      continue;
    } else if (kv_A(x->intersect, i) == id) {
      seen = true;
      break;
    } else {  // (kv_A(x->intersect, i) > id)
      break;
    }
  }
  if (strict) {
#ifndef RELDEBUG
    // TODO(bfredl): This assert has been seen to fail for end users
    // using RelWithDebInfo builds. While indicating an invalid state for
    // the marktree, this error doesn't need to be fatal. The assert still
    // needs to present in Debug builds to be able to detect regressions in tests.
    assert(seen);
#endif
  }

  if (seen) {
    // close the gap left by the removed element
    if (i < kv_size(x->intersect) - 1) {
      memmove(&kv_A(x->intersect, i), &kv_A(x->intersect, i + 1), (kv_size(x->intersect) - i - 1) *
              sizeof(kv_A(x->intersect, i)));
    }
    kv_size(x->intersect)--;
  }
}
    390 
/// Add (or, with `delete`, remove) pair id `id` on every node whose subtree
/// is fully contained strictly between the positions of `itr` and `end_itr`.
///
/// @param itr mutated
/// @param end_itr not mutated
/// @param delete true to remove the intersect markers instead of adding them
void marktree_intersect_pair(MarkTree *b, uint64_t id, MarkTreeIter *itr, MarkTreeIter *end_itr,
                             bool delete)
{
  int lvl = 0, maxlvl = MIN(itr->lvl, end_itr->lvl);
// child index of `itr` at level l; q adjusts when l is the iterator's own level
#define iat(itr, l, q) ((l == itr->lvl) ? itr->i + q : itr->s[l].i)
  // find the topmost level where the two iterator paths diverge
  for (; lvl < maxlvl; lvl++) {
    if (itr->s[lvl].i > end_itr->s[lvl].i) {
      return;  // empty range
    } else if (itr->s[lvl].i < end_itr->s[lvl].i) {
      break;  // work to do
    }
  }
  if (lvl == maxlvl && iat(itr, lvl, 1) > iat(end_itr, lvl, 0)) {
    return;  // empty range
  }

  while (itr->x) {
    bool skip = false;
    if (itr->x == end_itr->x) {
      // reached the node containing the end key: done if it is a leaf,
      // otherwise keep skipping over subtrees before the end index
      if (itr->x->level == 0 || itr->i >= end_itr->i) {
        break;
      } else {
        skip = true;
      }
    } else if (itr->lvl > lvl) {
      skip = true;
    } else {
      if (iat(itr, lvl, 1) < iat(end_itr, lvl, 1)) {
        skip = true;
      } else {
        lvl++;
      }
    }

    if (skip) {
      if (itr->x->level) {
        // the whole child subtree lies inside the range: mark it wholesale
        // instead of descending into it
        MTNode *x = itr->x->ptr[itr->i + 1];
        if (delete) {
          unintersect_node(b, x, id, true);
        } else {
          intersect_node(b, x, id);
        }
      }
    }
    marktree_itr_next_skip(b, itr, skip, true, NULL, NULL);
  }
#undef iat
}
    441 
    442 static MTNode *marktree_alloc_node(MarkTree *b, bool internal)
    443 {
    444  MTNode *x = xcalloc(1, internal ? ILEN : sizeof(MTNode));
    445  kvi_init(x->intersect);
    446  b->n_nodes++;
    447  return x;
    448 }
    449 
    450 // really meta_inc[kMTMetaCount]
    451 static void meta_describe_key_inc(uint32_t *meta_inc, MTKey *k)
    452 {
    453  if (!mt_end(*k) && !mt_invalid(*k)) {
    454    meta_inc[kMTMetaInline] += (k->flags & MT_FLAG_DECOR_VIRT_TEXT_INLINE) ? 1 : 0;
    455    meta_inc[kMTMetaLines] += (k->flags & MT_FLAG_DECOR_VIRT_LINES) ? 1 : 0;
    456    meta_inc[kMTMetaSignHL] += (k->flags & MT_FLAG_DECOR_SIGNHL) ? 1 : 0;
    457    meta_inc[kMTMetaSignText] += (k->flags & MT_FLAG_DECOR_SIGNTEXT) ? 1 : 0;
    458    meta_inc[kMTMetaConcealLines] += (k->flags & MT_FLAG_DECOR_CONCEAL_LINES) ? 1 : 0;
    459  }
    460 }
    461 
    462 static void meta_describe_key(uint32_t *meta_inc, MTKey k)
    463 {
    464  memset(meta_inc, 0, kMTMetaCount * sizeof(*meta_inc));
    465  meta_describe_key_inc(meta_inc, &k);
    466 }
    467 
    468 // if x is internal, assumes x->meta[..] of children are correct
    469 static void meta_describe_node(uint32_t *meta_node, MTNode *x)
    470 {
    471  memset(meta_node, 0, kMTMetaCount * sizeof(meta_node[0]));
    472  for (int i = 0; i < x->n; i++) {
    473    meta_describe_key_inc(meta_node, &x->key[i]);
    474  }
    475  if (x->level) {
    476    for (int i = 0; i < x->n + 1; i++) {
    477      for (int m = 0; m < kMTMetaCount; m++) {
    478        meta_node[m] += x->meta[i][m];
    479      }
    480    }
    481  }
    482 }
    483 
    484 static bool meta_has(const uint32_t *meta_count, MetaFilter meta_filter)
    485 {
    486  uint32_t count = 0;
    487  for (int m = 0; m < kMTMetaCount; m++) {
    488    count += meta_count[m] & meta_filter[m];
    489  }
    490  return count > 0;
    491 }
    492 
/// Insert a single key into the tree, growing the tree by one level first
/// when the root is full.
void marktree_put_key(MarkTree *b, MTKey k)
{
  k.flags |= MT_FLAG_REAL;  // let's be real.
  if (!b->root) {
    b->root = marktree_alloc_node(b, true);
  }
  MTNode *r = b->root;
  if (r->n == 2 * T - 1) {
    // full root: make a new empty root above it and split the old root,
    // so the descent below never meets a full node
    MTNode *s = marktree_alloc_node(b, true);
    b->root = s; s->level = r->level + 1; s->n = 0;
    s->ptr[0] = r;
    // the old root contained the whole tree, so it inherits the root totals
    for (int m = 0; m < kMTMetaCount; m++) {
      s->meta[0][m] = b->meta_root[m];
    }
    r->parent = s;
    r->p_idx = 0;
    split_node(b, s, 0, k);
    r = s;
  }

  uint32_t meta_inc[kMTMetaCount];
  meta_describe_key(meta_inc, k);
  marktree_putp_aux(b, r, k, meta_inc);
  // account for the new key in the tree-wide totals
  for (int m = 0; m < kMTMetaCount; m++) {
    b->meta_root[m] += meta_inc[m];
  }
  b->n_keys++;
}
    521 
/// INITIATING DELETION PROTOCOL:
///
/// 1. Construct a valid iterator to the node to delete (argument)
/// 2. If an "internal" key. Iterate one step to the left or right,
///     which gives an internal key "auxiliary key".
/// 3. Now delete this internal key (intended or auxiliary).
///    The leaf node X might become undersized.
/// 4. If step two was done: now replace the key that _should_ be
///    deleted with the auxiliary key. Adjust relative
/// 5. Now "repair" the tree as needed. We always start at a leaf node X.
///     - if the node is big enough, terminate
///     - if we can steal from the left, steal
///     - if we can steal from the right, steal
///     - otherwise merge this node with a neighbour. This might make our
///       parent undersized. So repeat 5 for the parent.
/// 6. If 4 went all the way to the root node. The root node
///    might have ended up with size 0. Delete it then.
///
/// The iterator remains valid, and now points at the key _after_ the deleted
/// one.
///
/// @param rev should be true if we plan to iterate _backwards_ and delete
///            stuff before this key. Most of the time this is false (the
///            recommended strategy is to always iterate forward)
/// @return the lookup id of the other end of the pair (0 for unpaired marks)
uint64_t marktree_del_itr(MarkTree *b, MarkTreeIter *itr, bool rev)
{
  int adjustment = 0;

  MTNode *cur = itr->x;
  int curi = itr->i;
  uint64_t id = mt_lookup_key(cur->key[curi]);

  MTKey raw = rawkey(itr);
  uint64_t other = 0;
  if (mt_paired(raw) && !(raw.flags & MT_FLAG_ORPHANED)) {
    // mark the surviving end of the pair as orphaned and drop the pair's
    // intersect markers while both keys are still in place
    other = mt_lookup_key_side(raw, !mt_end(raw));

    MarkTreeIter other_itr[1];
    marktree_lookup(b, other, other_itr);
    rawkey(other_itr).flags |= MT_FLAG_ORPHANED;
    // Remove intersect markers. NB: must match exactly!
    if (mt_start(raw)) {
      MarkTreeIter this_itr[1] = { *itr };  // mutated copy
      marktree_intersect_pair(b, id, this_itr, other_itr, true);
    } else {
      marktree_intersect_pair(b, other, other_itr, itr, true);
    }
  }

  // 2.
  if (itr->x->level) {
    if (rev) {
      abort();
    } else {
      // steal previous node
      marktree_itr_prev(b, itr);
      adjustment = -1;
    }
  }

  // 3.
  MTNode *x = itr->x;
  assert(x->level == 0);
  MTKey intkey = x->key[itr->i];

  uint32_t meta_inc[kMTMetaCount];
  meta_describe_key(meta_inc, intkey);
  // remove the key from the leaf, shifting the tail left
  if (x->n > itr->i + 1) {
    memmove(&x->key[itr->i], &x->key[itr->i + 1],
            sizeof(MTKey) * (size_t)(x->n - itr->i - 1));
  }
  x->n--;

  b->n_keys--;
  pmap_del(uint64_t)(b->id2node, id, NULL);

  // 4.
  // if (adjustment == 1) {
  //   abort();
  // }
  if (adjustment == -1) {
    int ilvl = itr->lvl - 1;
    MTNode *lnode = x;
    uint64_t start_id = 0;
    bool did_bubble = false;
    if (mt_end(intkey)) {
      start_id = mt_lookup_key_side(intkey, false);
    }
    // walk up from the leaf to the internal node `cur`, making intkey's
    // position absolute and repairing the pair's intersections on the way
    do {
      MTNode *p = lnode->parent;
      if (ilvl < 0) {
        abort();
      }
      int i = itr->s[ilvl].i;
      assert(p->ptr[i] == lnode);
      if (i > 0) {
        unrelative(p->key[i - 1].pos, &intkey.pos);
      }

      if (p != cur && start_id) {
        if (intersection_has(&p->ptr[0]->intersect, start_id)) {
          // if not the first time, we need to undo the addition in the
          // previous step (`intersect_node` just below)
          int last = (lnode != x) ? 1 : 0;
          for (int k = 0; k < p->n + last; k++) {  // one less as p->ptr[n] is the last
            unintersect_node(b, p->ptr[k], start_id, true);
          }
          intersect_node(b, p, start_id);
          did_bubble = true;
        }
      }

      // discount the auxiliary key from the cached counts along its old path
      for (int m = 0; m < kMTMetaCount; m++) {
        p->meta[lnode->p_idx][m] -= meta_inc[m];
      }

      lnode = p;
      ilvl--;
    } while (lnode != cur);

    MTKey deleted = cur->key[curi];
    meta_describe_key(meta_inc, deleted);
    cur->key[curi] = intkey;
    refkey(b, cur, curi);
    // if `did_bubble` then we already added `start_id` to some parent
    if (mt_end(cur->key[curi]) && !did_bubble) {
      uint64_t pi = pseudo_index(x, 0);  // note: sloppy pseudo-index
      uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
      if (pi_start > 0 && pi_start < pi) {
        intersect_node(b, x, start_id);
      }
    }

    // keys in the right subtree were relative to the deleted internal key;
    // shift them by the delta between it and its replacement
    relative(intkey.pos, &deleted.pos);
    MTNode *y = cur->ptr[curi + 1];
    if (deleted.pos.row || deleted.pos.col) {
      while (y) {
        for (int k = 0; k < y->n; k++) {
          unrelative(deleted.pos, &y->key[k].pos);
        }
        y = y->level ? y->ptr[0] : NULL;
      }
    }
    itr->i--;
  }

  // discount the deleted key from the cached meta counts up to the root
  MTNode *lnode = cur;
  while (lnode->parent) {
    uint32_t *meta_p = lnode->parent->meta[lnode->p_idx];
    for (int m = 0; m < kMTMetaCount; m++) {
      meta_p[m] -= meta_inc[m];
    }

    lnode = lnode->parent;
  }
  for (int m = 0; m < kMTMetaCount; m++) {
    assert(b->meta_root[m] >= meta_inc[m]);
    b->meta_root[m] -= meta_inc[m];
  }

  // 5.
  bool itr_dirty = false;
  int rlvl = itr->lvl - 1;
  int *lasti = &itr->i;
  MTPos ppos = itr->pos;
  while (x != b->root) {
    assert(rlvl >= 0);
    MTNode *p = x->parent;
    if (x->n >= T - 1) {
      // we are done, if this node is fine the rest of the tree will be
      break;
    }
    int pi = itr->s[rlvl].i;
    assert(p->ptr[pi] == x);
    if (pi > 0) {
      ppos.row -= p->key[pi - 1].pos.row;
      ppos.col = itr->s[rlvl].oldcol;
    }
    // ppos is now the pos of p

    if (pi > 0 && p->ptr[pi - 1]->n > T - 1) {
      *lasti += 1;
      itr_dirty = true;
      // steal one key from the left neighbour
      pivot_right(b, ppos, p, pi - 1);
      break;
    } else if (pi < p->n && p->ptr[pi + 1]->n > T - 1) {
      // steal one key from right neighbour
      pivot_left(b, ppos, p, pi);
      break;
    } else if (pi > 0) {
      assert(p->ptr[pi - 1]->n == T - 1);
      // merge with left neighbour
      *lasti += T;
      x = merge_node(b, p, pi - 1);
      if (lasti == &itr->i) {
        // TRICKY: we merged the node the iterator was on
        itr->x = x;
      }
      itr->s[rlvl].i--;
      itr_dirty = true;
    } else {
      assert(pi < p->n && p->ptr[pi + 1]->n == T - 1);
      merge_node(b, p, pi);
      // no iter adjustment needed
    }
    // a merge may have made the parent undersized: repeat one level up
    lasti = &itr->s[rlvl].i;
    rlvl--;
    x = p;
  }

  // 6.
  if (b->root->n == 0) {
    if (itr->lvl > 0) {
      memmove(itr->s, itr->s + 1, (size_t)(itr->lvl - 1) * sizeof(*itr->s));
      itr->lvl--;
    }
    if (b->root->level) {
      // the tree shrinks by one level: the single child becomes the new root
      MTNode *oldroot = b->root;
      b->root = b->root->ptr[0];
      for (int m = 0; m < kMTMetaCount; m++) {
        assert(b->meta_root[m] == oldroot->meta[0][m]);
      }

      b->root->parent = NULL;
      marktree_free_node(b, oldroot);
    } else {
      // no items, nothing for iterator to point to
      // not strictly needed, should handle delete right-most mark anyway
      itr->x = NULL;
    }
  }

  if (itr->x && itr_dirty) {
    marktree_itr_fix_pos(b, itr);
  }

  // BONUS STEP: fix the iterator, so that it points to the key afterwards
  // TODO(bfredl): with "rev" should point before
  // if (adjustment == 1) {
  //   abort();
  // }
  if (adjustment == -1) {
    // tricky: we stand at the deleted space in the previous leaf node.
    // But the inner key is now the previous key we stole, so we need
    // to skip that one as well.
    marktree_itr_next(b, itr);
    marktree_itr_next(b, itr);
  } else {
    if (itr->x && itr->i >= itr->x->n) {
      // we deleted the last key of a leaf node
      // go to the inner key after that.
      assert(itr->x->level == 0);
      marktree_itr_next(b, itr);
    }
  }

  return other;
}
    781 
    782 void marktree_revise_meta(MarkTree *b, MarkTreeIter *itr, MTKey old_key)
    783 {
    784  uint32_t meta_old[kMTMetaCount], meta_new[kMTMetaCount];
    785  meta_describe_key(meta_old, old_key);
    786  meta_describe_key(meta_new, rawkey(itr));
    787 
    788  if (!memcmp(meta_old, meta_new, sizeof(meta_old))) {
    789    return;
    790  }
    791 
    792  MTNode *lnode = itr->x;
    793  while (lnode->parent) {
    794    uint32_t *meta_p = lnode->parent->meta[lnode->p_idx];
    795    for (int m = 0; m < kMTMetaCount; m++) {
    796      meta_p[m] += meta_new[m] - meta_old[m];
    797    }
    798 
    799    lnode = lnode->parent;
    800  }
    801 
    802  for (int m = 0; m < kMTMetaCount; m++) {
    803    b->meta_root[m] += meta_new[m] - meta_old[m];
    804  }
    805 }
    806 
/// similar to intersect_common but modify x and y in place to retain
/// only the items which are NOT in common
///
/// All three lists are sorted; common items are appended to `m`.
static void intersect_merge(Intersection *restrict m, Intersection *restrict x,
                            Intersection *restrict y)
{
  size_t xi = 0;
  size_t yi = 0;
  // write cursors for the in-place compaction of x and y
  size_t xn = 0;
  size_t yn = 0;
  while (xi < kv_size(*x) && yi < kv_size(*y)) {
    if (kv_A(*x, xi) == kv_A(*y, yi)) {
      // common item: append to m, drop from both x and y
      // TODO(bfredl): kvi_pushp is actually quite complex, break out kvi_resize() to a function?
      kvi_push(*m, kv_A(*x, xi));
      xi++;
      yi++;
    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
      kv_A(*x, xn++) = kv_A(*x, xi++);
    } else {
      kv_A(*y, yn++) = kv_A(*y, yi++);
    }
  }

  // whatever remains in either list is unique to it: compact it down
  if (xi < kv_size(*x)) {
    memmove(&kv_A(*x, xn), &kv_A(*x, xi), sizeof(kv_A(*x, xn)) * (kv_size(*x) - xi));
    xn += kv_size(*x) - xi;
  }
  if (yi < kv_size(*y)) {
    memmove(&kv_A(*y, yn), &kv_A(*y, yi), sizeof(kv_A(*y, yn)) * (kv_size(*y) - yi));
    yn += kv_size(*y) - yi;
  }

  kv_size(*x) = xn;
  kv_size(*y) = yn;
}
    841 
// w used to be a child of x but it is now a child of y, adjust intersections accordingly
// @param[out] d are intersections which should be added to the old children of y
//
// All lists are sorted; w and y are rewritten in place (wn/yn are their
// write cursors), d only grows.
static void intersect_mov(Intersection *restrict x, Intersection *restrict y,
                          Intersection *restrict w, Intersection *restrict d)
{
  size_t wi = 0;
  size_t yi = 0;
  size_t wn = 0;
  size_t yn = 0;
  size_t xi = 0;
  while (wi < kv_size(*w) || xi < kv_size(*x)) {
    if (wi < kv_size(*w) && (xi >= kv_size(*x) || kv_A(*x, xi) >= kv_A(*w, wi))) {
      if (xi < kv_size(*x) && kv_A(*x, xi) == kv_A(*w, wi)) {
        xi++;
      }
      // now w < x strictly
      // items of y smaller than the current w item are no longer shared: to d
      while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*w, wi)) {
        kvi_push(*d, kv_A(*y, yi));
        yi++;
      }
      if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*w, wi)) {
        // shared with y: the item stays on y and is dropped from w
        kv_A(*y, yn++) = kv_A(*y, yi++);
        wi++;
      } else {
        kv_A(*w, wn++) = kv_A(*w, wi++);
      }
    } else {
      // x < w strictly
      while (yi < kv_size(*y) && kv_A(*y, yi) < kv_A(*x, xi)) {
        kvi_push(*d, kv_A(*y, yi));
        yi++;
      }
      if (yi < kv_size(*y) && kv_A(*y, yi) == kv_A(*x, xi)) {
        kv_A(*y, yn++) = kv_A(*y, yi++);
        xi++;
      } else {
        // add kv_A(x, xi) at kv_A(w, wn), pushing up wi if wi == wn
        if (wi == wn) {
          // no gap between read and write cursor: grow w and shift the
          // unread tail right by one to open a slot
          size_t n = kv_size(*w) - wn;
          kvi_pushp(*w);
          if (n > 0) {
            memmove(&kv_A(*w, wn + 1), &kv_A(*w, wn), n * sizeof(kv_A(*w, 0)));
          }
          kv_A(*w, wi) = kv_A(*x, xi);
          wn++;
          wi++;  // no need to consider the added element again
        } else {
          assert(wn < wi);
          kv_A(*w, wn++) = kv_A(*x, xi);
        }
        xi++;
      }
    }
  }
  if (yi < kv_size(*y)) {
    // move remaining items to d
    size_t n = kv_size(*y) - yi;  // at least one
    kvi_ensure_more_space(*d, n);
    memcpy(&kv_A(*d, kv_size(*d)), &kv_A(*y, yi), n * sizeof(kv_A(*d, 0)));
    kv_size(*d) += n;
  }
  kv_size(*w) = wn;
  kv_size(*y) = yn;
}
    906 
    907 bool intersect_mov_test(const uint64_t *x, size_t nx, const uint64_t *y, size_t ny,
    908                        const uint64_t *win, size_t nwin, uint64_t *wout, size_t *nwout,
    909                        uint64_t *dout, size_t *ndout)
    910 {
    911  // x is immutable in the context of intersect_mov. y might shrink, but we
    912  // don't care about it (we get it the deleted ones in d)
    913  Intersection xi = { .items = (uint64_t *)x, .size = nx };
    914  Intersection yi = { .items = (uint64_t *)y, .size = ny };
    915 
    916  Intersection w;
    917  kvi_init(w);
    918  for (size_t i = 0; i < nwin; i++) {
    919    kvi_push(w, win[i]);
    920  }
    921  Intersection d;
    922  kvi_init(d);
    923 
    924  intersect_mov(&xi, &yi, &w, &d);
    925 
    926  if (w.size > *nwout || d.size > *ndout) {
    927    return false;
    928  }
    929 
    930  memcpy(wout, w.items, sizeof(w.items[0]) * w.size);
    931  *nwout = w.size;
    932 
    933  memcpy(dout, d.items, sizeof(d.items[0]) * d.size);
    934  *ndout = d.size;
    935 
    936  return true;
    937 }
    938 
    939 /// intersection: i = x & y
    940 static void intersect_common(Intersection *i, Intersection *x, Intersection *y)
    941 {
    942  size_t xi = 0;
    943  size_t yi = 0;
    944  while (xi < kv_size(*x) && yi < kv_size(*y)) {
    945    if (kv_A(*x, xi) == kv_A(*y, yi)) {
    946      kvi_push(*i, kv_A(*x, xi));
    947      xi++;
    948      yi++;
    949    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
    950      xi++;
    951    } else {
    952      yi++;
    953    }
    954  }
    955 }
    956 
// inplace union: x |= y
//
// Both inputs are sorted; the result stays sorted. Elements of y missing
// from x are spliced in at their sorted position (middle case) or appended
// in bulk once x is exhausted (tail case).
static void intersect_add(Intersection *x, Intersection *y)
{
  size_t xi = 0;
  size_t yi = 0;
  while (xi < kv_size(*x) && yi < kv_size(*y)) {
    if (kv_A(*x, xi) == kv_A(*y, yi)) {
      // already present in x
      xi++;
      yi++;
    } else if (kv_A(*y, yi) < kv_A(*x, xi)) {
      // y-element missing from x: insert it before position xi
      size_t n = kv_size(*x) - xi;  // at least one
      kvi_pushp(*x);  // grow by one first; may reallocate x->items
      memmove(&kv_A(*x, xi + 1), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
      kv_A(*x, xi) = kv_A(*y, yi);
      xi++;  // newly added element
      yi++;
    } else {
      xi++;
    }
  }
  if (yi < kv_size(*y)) {
    // remaining y-elements are larger than everything in x: bulk append
    size_t n = kv_size(*y) - yi;  // at least one
    kvi_ensure_more_space(*x, n);
    memcpy(&kv_A(*x, kv_size(*x)), &kv_A(*y, yi), n * sizeof(kv_A(*x, 0)));
    kv_size(*x) += n;
  }
}
    984 
    985 // inplace asymmetric difference: x &= ~y
    986 static void intersect_sub(Intersection *restrict x, Intersection *restrict y)
    987 {
    988  size_t xi = 0;
    989  size_t yi = 0;
    990  size_t xn = 0;
    991  while (xi < kv_size(*x) && yi < kv_size(*y)) {
    992    if (kv_A(*x, xi) == kv_A(*y, yi)) {
    993      xi++;
    994      yi++;
    995    } else if (kv_A(*x, xi) < kv_A(*y, yi)) {
    996      kv_A(*x, xn++) = kv_A(*x, xi++);
    997    } else {
    998      yi++;
    999    }
   1000  }
   1001  if (xi < kv_size(*x)) {
   1002    size_t n = kv_size(*x) - xi;
   1003    if (xn < xi) {  // otherwise xn == xi
   1004      memmove(&kv_A(*x, xn), &kv_A(*x, xi), n * sizeof(kv_A(*x, 0)));
   1005    }
   1006    xn += n;
   1007  }
   1008  kv_size(*x) = xn;
   1009 }
   1010 
   1011 /// x is a node which shrunk, or the half of a split
   1012 ///
   1013 /// this means that intervals which previously intersected all the (current)
   1014 /// child nodes, now instead intersects `x` itself.
   1015 static void bubble_up(MTNode *x)
   1016 {
   1017  Intersection xi;
   1018  kvi_init(xi);
   1019  // due to invariants, the largest subset of _all_ subnodes is the intersection
   1020  // between the first and the last
   1021  intersect_common(&xi, &x->ptr[0]->intersect, &x->ptr[x->n]->intersect);
   1022  if (kv_size(xi)) {
   1023    for (int i = 0; i < x->n + 1; i++) {
   1024      intersect_sub(&x->ptr[i]->intersect, &xi);
   1025    }
   1026    intersect_add(&x->intersect, &xi);
   1027  }
   1028  kvi_destroy(xi);
   1029 }
   1030 
/// Merge child p->ptr[i+1] (y) into p->ptr[i] (x), pulling down the
/// separator key p->key[i] in between. y is freed; x ends up with
/// x->n + y->n + 1 keys. Intersections common to both x and y stay on the
/// merged node; one-sided intersections are pushed down to that side's
/// children.
///
/// @return the merged node (x)
static MTNode *merge_node(MarkTree *b, MTNode *p, int i)
{
  MTNode *x = p->ptr[i];
  MTNode *y = p->ptr[i + 1];
  Intersection mi;
  kvi_init(mi);

  // mi = x->intersect & y->intersect; x and y are left holding only their
  // non-common remainders (which must be pushed down to children below)
  intersect_merge(&mi, &x->intersect, &y->intersect);

  // pull the separator key down into x
  x->key[x->n] = p->key[i];
  refkey(b, x, x->n);
  if (i > 0) {
    // rebase the separator relative to the key just before x
    relative(p->key[i - 1].pos, &x->key[x->n].pos);
  }

  uint32_t meta_inc[kMTMetaCount];
  meta_describe_key(meta_inc, x->key[x->n]);

  // append all of y's keys after the pulled-down separator
  memmove(&x->key[x->n + 1], y->key, (size_t)y->n * sizeof(MTKey));
  for (int k = 0; k < y->n; k++) {
    refkey(b, x, x->n + 1 + k);
    // y's keys become relative to the separator's position
    unrelative(x->key[x->n].pos, &x->key[x->n + 1 + k].pos);
  }
  if (x->level) {
    // bubble down: ranges that intersected old-x but not old-y or vice versa
    // must be moved to their respective children
    memmove(&x->ptr[x->n + 1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
    memmove(&x->meta[x->n + 1], y->meta, ((size_t)y->n + 1) * sizeof(y->meta[0]));
    for (int k = 0; k < x->n + 1; k++) {
      // old children of x inherit the x-only remainder
      // TODO(bfredl): dedicated impl for "Z |= Y"
      for (size_t idx = 0; idx < kv_size(x->intersect); idx++) {
        intersect_node(b, x->ptr[k], kv_A(x->intersect, idx));
      }
    }
    for (int ky = 0; ky < y->n + 1; ky++) {
      int k = x->n + ky + 1;
      // nodes that used to be in y, now the second half of x
      x->ptr[k]->parent = x;
      x->ptr[k]->p_idx = (int16_t)k;
      // old children of y inherit the y-only remainder
      // TODO(bfredl): dedicated impl for "Z |= X"
      for (size_t idx = 0; idx < kv_size(y->intersect); idx++) {
        intersect_node(b, x->ptr[k], kv_A(y->intersect, idx));
      }
    }
  }
  x->n += y->n + 1;
  for (int m = 0; m < kMTMetaCount; m++) {
    // x now contains everything of y plus old p->key[i]
    p->meta[i][m] += (p->meta[i + 1][m] + meta_inc[m]);
  }

  // close the gap in the parent: key i and child i+1 are gone
  memmove(&p->key[i], &p->key[i + 1], (size_t)(p->n - i - 1) * sizeof(MTKey));
  memmove(&p->ptr[i + 1], &p->ptr[i + 2], (size_t)(p->n - i - 1) * sizeof(MTKey *));
  memmove(&p->meta[i + 1], &p->meta[i + 2], (size_t)(p->n - i - 1) * sizeof(p->meta[0]));
  for (int j = i + 1; j < p->n; j++) {  // note: one has been deleted
    p->ptr[j]->p_idx = (int16_t)j;
  }
  p->n--;
  marktree_free_node(b, y);

  kvi_destroy(x->intersect);

  // move of a kvec_withinit_t, messy!
  // TODO(bfredl): special case version of intersect_merge(x_out, x_in_m_out, y) to avoid this
  kvi_move(&x->intersect, &mi);

  return x;
}
   1099 
   1100 /// @param dest is overwritten (assumed to already been freed/moved)
   1101 /// @param src consumed (don't free or use)
   1102 void kvi_move(Intersection *dest, Intersection *src)
   1103 {
   1104  dest->size = src->size;
   1105  dest->capacity = src->capacity;
   1106  if (src->items == src->init_array) {
   1107    memcpy(dest->init_array, src->init_array, src->size * sizeof(*src->init_array));
   1108    dest->items = dest->init_array;
   1109  } else {
   1110    dest->items = src->items;
   1111  }
   1112 }
   1113 
// TODO(bfredl): as a potential "micro" optimization, pivoting should balance
// the two nodes instead of stealing just one key
// x_pos is the absolute position of the key just before x (or a dummy key strictly less than any
// key inside x, if x is the first leaf)
//
// Rotate one key to the right: x = p->ptr[i] loses its last key, which
// replaces the separator p->key[i]; the old separator becomes the first key
// of y = p->ptr[i+1]. For internal nodes, x's last child moves along to y.
static void pivot_right(MarkTree *b, MTPos p_pos, MTNode *p, const int i)
{
  MTNode *x = p->ptr[i];
  MTNode *y = p->ptr[i + 1];
  // make room at the front of y
  memmove(&y->key[1], y->key, (size_t)y->n * sizeof(MTKey));
  if (y->level) {
    memmove(&y->ptr[1], y->ptr, ((size_t)y->n + 1) * sizeof(MTNode *));
    memmove(&y->meta[1], y->meta, ((size_t)y->n + 1) * sizeof(y->meta[0]));
    for (int j = 1; j < y->n + 2; j++) {
      y->ptr[j]->p_idx = (int16_t)j;
    }
  }

  // rotate: separator -> front of y, last key of x -> new separator
  y->key[0] = p->key[i];
  refkey(b, y, 0);
  p->key[i] = x->key[x->n - 1];
  refkey(b, p, i);

  // account for the keys that changed subtree in the parent's meta counts
  uint32_t meta_inc_y[kMTMetaCount];
  meta_describe_key(meta_inc_y, y->key[0]);
  uint32_t meta_inc_x[kMTMetaCount];
  meta_describe_key(meta_inc_x, p->key[i]);

  for (int m = 0; m < kMTMetaCount; m++) {
    p->meta[i + 1][m] += meta_inc_y[m];
    p->meta[i][m] -= meta_inc_x[m];
  }

  if (x->level) {
    // x's last child becomes y's first child
    y->ptr[0] = x->ptr[x->n];
    memcpy(y->meta[0], x->meta[x->n], sizeof(y->meta[0]));
    for (int m = 0; m < kMTMetaCount; m++) {
      p->meta[i + 1][m] += y->meta[0][m];
      p->meta[i][m] -= y->meta[0][m];
    }
    y->ptr[0]->parent = y;
    y->ptr[0]->p_idx = 0;
  }
  x->n--;
  y->n++;
  // rebase positions: keys are stored relative to the preceding ancestor key,
  // and those ancestors just changed
  if (i > 0) {
    unrelative(p->key[i - 1].pos, &p->key[i].pos);
  }
  relative(p->key[i].pos, &y->key[0].pos);
  for (int k = 1; k < y->n; k++) {
    unrelative(y->key[0].pos, &y->key[k].pos);
  }

  // repair intersections of x
  if (x->level) {
    // handle y and first new y->ptr[0]
    Intersection d;
    kvi_init(d);
    // y->ptr[0] was moved from x to y
    // adjust y->ptr[0] for a difference between the parents
    // in addition, this might cause some intersection of the old y
    // to bubble down to the old children of y (if y->ptr[0] wasn't intersected)
    intersect_mov(&x->intersect, &y->intersect, &y->ptr[0]->intersect, &d);
    if (kv_size(d)) {
      for (int yi = 1; yi < y->n + 1; yi++) {
        intersect_add(&y->ptr[yi]->intersect, &d);
      }
    }
    kvi_destroy(d);

    bubble_up(x);
  } else {
    // if the last element of x used to be an end node, check if it now covers all of x
    if (mt_end(p->key[i])) {
      uint64_t pi = pseudo_index(x, 0);  // note: sloppy pseudo-index
      uint64_t start_id = mt_lookup_key_side(p->key[i], false);
      uint64_t pi_start = pseudo_index_for_id(b, start_id, true);
      if (pi_start > 0 && pi_start < pi) {
        intersect_node(b, x, start_id);
      }
    }

    if (mt_start(y->key[0])) {
      // no need for a check, just delete it if it was there
      unintersect_node(b, y, mt_lookup_key(y->key[0]), false);
    }
  }
}
   1201 
// Rotate one key to the left: y = p->ptr[i+1] loses its first key, which
// replaces the separator p->key[i]; the old separator becomes the last key
// of x = p->ptr[i]. For internal nodes, y's first child moves along to x.
// Exact inverse of pivot_right().
static void pivot_left(MarkTree *b, MTPos p_pos, MTNode *p, int i)
{
  MTNode *x = p->ptr[i];
  MTNode *y = p->ptr[i + 1];

  // reverse from how we "always" do it. but pivot_left
  // is just the inverse of pivot_right, so reverse it literally.
  for (int k = 1; k < y->n; k++) {
    relative(y->key[0].pos, &y->key[k].pos);
  }
  unrelative(p->key[i].pos, &y->key[0].pos);
  if (i > 0) {
    relative(p->key[i - 1].pos, &p->key[i].pos);
  }

  // rotate: separator -> end of x, first key of y -> new separator
  x->key[x->n] = p->key[i];
  refkey(b, x, x->n);
  p->key[i] = y->key[0];
  refkey(b, p, i);

  // account for the keys that changed subtree in the parent's meta counts
  uint32_t meta_inc_x[kMTMetaCount];
  meta_describe_key(meta_inc_x, x->key[x->n]);
  uint32_t meta_inc_y[kMTMetaCount];
  meta_describe_key(meta_inc_y, p->key[i]);
  for (int m = 0; m < kMTMetaCount; m++) {
    p->meta[i][m] += meta_inc_x[m];
    p->meta[i + 1][m] -= meta_inc_y[m];
  }

  if (x->level) {
    // y's first child becomes x's last child
    x->ptr[x->n + 1] = y->ptr[0];
    memcpy(x->meta[x->n + 1], y->meta[0], sizeof(y->meta[0]));
    for (int m = 0; m < kMTMetaCount; m++) {
      p->meta[i + 1][m] -= y->meta[0][m];
      p->meta[i][m] += y->meta[0][m];
    }
    x->ptr[x->n + 1]->parent = x;
    x->ptr[x->n + 1]->p_idx = (int16_t)(x->n + 1);
  }
  // close the gap at the front of y
  memmove(y->key, &y->key[1], (size_t)(y->n - 1) * sizeof(MTKey));
  if (y->level) {
    memmove(y->ptr, &y->ptr[1], (size_t)y->n * sizeof(MTNode *));
    memmove(y->meta, &y->meta[1], (size_t)y->n * sizeof(y->meta[0]));
    for (int j = 0; j < y->n; j++) {  // note: last item deleted
      y->ptr[j]->p_idx = (int16_t)j;
    }
  }
  x->n++;
  y->n--;

  // repair intersections of x,y
  if (x->level) {
    // handle y and first new y->ptr[0]
    Intersection d;
    kvi_init(d);
    // x->ptr[x->n] was moved from y to x
    // adjust x->ptr[x->n] for a difference between the parents
    // in addition, this might cause some intersection of the old x
    // to bubble down to the old children of x (if x->ptr[n] wasn't intersected)
    intersect_mov(&y->intersect, &x->intersect, &x->ptr[x->n]->intersect, &d);
    if (kv_size(d)) {
      for (int xi = 0; xi < x->n; xi++) {  // ptr[x->n] deliberately skipped
        intersect_add(&x->ptr[xi]->intersect, &d);
      }
    }
    kvi_destroy(d);

    bubble_up(y);
  } else {
    // if the first element of y used to be a start node, check if it now covers all of y
    if (mt_start(p->key[i])) {
      uint64_t pi = pseudo_index(y, 0);  // note: sloppy pseudo-index

      uint64_t end_id = mt_lookup_key_side(p->key[i], true);
      uint64_t pi_end = pseudo_index_for_id(b, end_id, true);

      if (pi_end > pi) {
        intersect_node(b, y, mt_lookup_key(p->key[i]));
      }
    }

    if (mt_end(x->key[x->n - 1])) {
      // no need for a check, just delete it if it was there
      unintersect_node(b, x, mt_lookup_key_side(x->key[x->n - 1], false), false);
    }
  }
}
   1289 
   1290 /// frees all mem, resets tree to valid empty state
   1291 void marktree_clear(MarkTree *b)
   1292 {
   1293  if (b->root) {
   1294    marktree_free_subtree(b, b->root);
   1295    b->root = NULL;
   1296  }
   1297  map_destroy(uint64_t, b->id2node);
   1298  b->n_keys = 0;
   1299  memset(b->meta_root, 0, kMTMetaCount * sizeof(b->meta_root[0]));
   1300  assert(b->n_nodes == 0);
   1301 }
   1302 
   1303 void marktree_free_subtree(MarkTree *b, MTNode *x)
   1304 {
   1305  if (x->level) {
   1306    for (int i = 0; i < x->n + 1; i++) {
   1307      marktree_free_subtree(b, x->ptr[i]);
   1308    }
   1309  }
   1310  marktree_free_node(b, x);
   1311 }
   1312 
   1313 static void marktree_free_node(MarkTree *b, MTNode *x)
   1314 {
   1315  kvi_destroy(x->intersect);
   1316  xfree(x);
   1317  b->n_nodes--;
   1318 }
   1319 
/// Move the mark under the iterator to a new (row, col) position.
///
/// Fast path: when the new position stays within the same leaf node, the key
/// is repositioned in-place (shifting neighboring keys as needed). Otherwise
/// the mark is deleted and re-inserted, re-linking its pair mark if any.
///
/// @param itr iterator is invalid after call
void marktree_move(MarkTree *b, MarkTreeIter *itr, int row, int col)
{
  MTKey key = rawkey(itr);
  MTNode *x = itr->x;
  if (!x->level) {
    bool internal = false;
    MTPos newpos = MTPos(row, col);
    if (x->parent != NULL) {
      // strictly _after_ key before `x`
      // (not optimal when x is very first leaf of the entire tree, but that's fine)
      if (pos_less(itr->pos, newpos)) {
        relative(itr->pos, &newpos);

        // strictly before the end of x. (this could be made sharper by
        // finding the internal key just after x, but meh)
        if (pos_less(newpos, x->key[x->n - 1].pos)) {
          internal = true;
        }
      }
    } else {
      // tree is one node. newpos thus is already "relative" itr->pos
      internal = true;
    }

    if (internal) {
      if (key.pos.row == newpos.row && key.pos.col == newpos.col) {
        // position unchanged, nothing to do
        return;
      }
      key.pos = newpos;
      bool match;
      // tricky: could minimize movement in either direction better
      int new_i = marktree_getp_aux(x, key, &match);
      if (!match) {
        new_i++;
      }
      if (new_i == itr->i) {
        // key keeps its slot: just store the new position
        x->key[itr->i].pos = newpos;
      } else if (new_i < itr->i) {
        // moving left: shift the keys in between one slot rightwards
        memmove(&x->key[new_i + 1], &x->key[new_i], sizeof(MTKey) * (size_t)(itr->i - new_i));
        x->key[new_i] = key;
      } else if (new_i > itr->i) {
        // moving right: shift the keys in between one slot leftwards
        memmove(&x->key[itr->i], &x->key[itr->i + 1], sizeof(MTKey) * (size_t)(new_i - itr->i - 1));
        x->key[new_i - 1] = key;
      }
      return;
    }
  }
  // slow path: delete here and re-insert at the new position
  uint64_t other = marktree_del_itr(b, itr, false);
  key.pos = (MTPos){ row, col };

  marktree_put_key(b, key);

  if (other) {
    // mark was one half of a pair: restore the pair bookkeeping
    marktree_restore_pair(b, key);
  }
  itr->x = NULL;  // itr might become invalid by put
}
   1378 
/// Re-establish the bookkeeping of a paired (start/end) mark.
///
/// Looks up both sides of the pair by id; if either side is missing the call
/// is a no-op (it will run again when the other side is restored). Otherwise
/// both keys lose their ORPHANED flag and the range intersection is rebuilt.
void marktree_restore_pair(MarkTree *b, MTKey key)
{
  MarkTreeIter itr[1];
  MarkTreeIter end_itr[1];
  // locate the start mark and the end mark of the pair
  marktree_lookup(b, mt_lookup_key_side(key, false), itr);
  marktree_lookup(b, mt_lookup_key_side(key, true), end_itr);
  if (!itr->x || !end_itr->x) {
    // this could happen if the other end is waiting to be restored later
    // this function will be called again for the other end.
    return;
  }
  rawkey(itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;
  rawkey(end_itr).flags &= (uint16_t) ~MT_FLAG_ORPHANED;

  marktree_intersect_pair(b, mt_lookup_key_side(key, false), itr, end_itr, false);
}
   1395 
   1396 // itr functions
   1397 
   1398 bool marktree_itr_get(MarkTree *b, int32_t row, int col, MarkTreeIter *itr)
   1399 {
   1400  return marktree_itr_get_ext(b, MTPos(row, col), itr, false, false, NULL, NULL);
   1401 }
   1402 
/// Position the iterator at position `p` in the tree.
///
/// @param last     if true, finish with a marktree_itr_prev() step, i.e.
///                 yield the key preceding the found position
/// @param gravity  probe with MT_FLAG_RIGHT_GRAVITY set, which affects how
///                 keys at exactly position `p` are ordered in the search
/// @param oldbase  if non-NULL, filled with the absolute base position of
///                 each tree level descended through (indexed by itr->lvl)
/// @param meta_filter if non-NULL, stop descending at the first child
///                 subtree containing no keys matching the filter
/// @return true if the iterator points at a valid key
bool marktree_itr_get_ext(MarkTree *b, MTPos p, MarkTreeIter *itr, bool last, bool gravity,
                          MTPos *oldbase, MetaFilter meta_filter)
{
  if (b->n_keys == 0) {
    itr->x = NULL;
    return false;
  }

  // probe key used for the binary search at each node
  MTKey k = { .pos = p, .flags = gravity ? MT_FLAG_RIGHT_GRAVITY : 0 };
  if (last && !gravity) {
    k.flags = MT_FLAG_LAST;
  }
  itr->pos = (MTPos){ 0, 0 };
  itr->x = b->root;
  itr->lvl = 0;
  if (oldbase) {
    oldbase[itr->lvl] = itr->pos;
  }
  while (true) {
    itr->i = marktree_getp_aux(itr->x, k, 0) + 1;

    if (itr->x->level == 0) {
      break;
    }
    if (meta_filter) {
      if (!meta_has(itr->x->meta[itr->i], meta_filter)) {
        // this takes us to the internal position after the first rejected node
        break;
      }
    }

    // remember the path taken, so the iterator can step back up later
    itr->s[itr->lvl].i = itr->i;
    itr->s[itr->lvl].oldcol = itr->pos.col;

    if (itr->i > 0) {
      // positions in the child are relative to the key just before it
      compose(&itr->pos, itr->x->key[itr->i - 1].pos);
      relative(itr->x->key[itr->i - 1].pos, &k.pos);
    }
    itr->x = itr->x->ptr[itr->i];
    itr->lvl++;
    if (oldbase) {
      oldbase[itr->lvl] = itr->pos;
    }
  }

  if (last) {
    return marktree_itr_prev(b, itr);
  } else if (itr->i >= itr->x->n) {
    // no need for "meta_filter" here, this just goes up one step
    return marktree_itr_next_skip(b, itr, true, false, NULL, NULL);
  }
  return true;
}
   1456 
   1457 bool marktree_itr_first(MarkTree *b, MarkTreeIter *itr)
   1458 {
   1459  if (b->n_keys == 0) {
   1460    itr->x = NULL;
   1461    return false;
   1462  }
   1463 
   1464  itr->x = b->root;
   1465  itr->i = 0;
   1466  itr->lvl = 0;
   1467  itr->pos = MTPos(0, 0);
   1468  while (itr->x->level > 0) {
   1469    itr->s[itr->lvl].i = 0;
   1470    itr->s[itr->lvl].oldcol = 0;
   1471    itr->lvl++;
   1472    itr->x = itr->x->ptr[0];
   1473  }
   1474  return true;
   1475 }
   1476 
// Put the iterator at the very last key in the tree.
// (returns true/false like a bool, despite the int return type)
int marktree_itr_last(MarkTree *b, MarkTreeIter *itr)
{
  if (b->n_keys == 0) {
    itr->x = NULL;
    return false;
  }
  itr->pos = MTPos(0, 0);
  itr->x = b->root;
  itr->lvl = 0;
  while (true) {
    // always descend into the rightmost child
    itr->i = itr->x->n;

    if (itr->x->level == 0) {
      break;
    }

    itr->s[itr->lvl].i = itr->i;
    itr->s[itr->lvl].oldcol = itr->pos.col;

    assert(itr->i > 0);
    compose(&itr->pos, itr->x->key[itr->i - 1].pos);

    itr->x = itr->x->ptr[itr->i];
    itr->lvl++;
  }
  // step from "one past the end" onto the last key of the leaf
  itr->i--;
  return true;
}
   1506 
/// Advance the iterator to the next mark in document order.
///
/// @return false when iteration is exhausted (itr->x becomes NULL)
bool marktree_itr_next(MarkTree *b, MarkTreeIter *itr)
{
  return marktree_itr_next_skip(b, itr, false, false, NULL, NULL);
}
   1511 
/// Advance the iterator one step in document order.
///
/// @param skip     if true, do not descend into the child subtree following
///                 the current key (steps over it, or out of the node)
/// @param preload  stop early inside internal nodes with itr->i == -1, and
///                 skip the rest of a leaf when skipping (matches the
///                 itr->i == -1 convention used by marktree_itr_step_overlap())
/// @param oldbase  if non-NULL, per-level base positions propagated while
///                 descending (mirrors marktree_itr_get_ext())
/// @param meta_filter if non-NULL, subtrees containing no matching keys are
///                 stepped over as if `skip` was set
/// @return false when iteration is exhausted (itr->x set to NULL)
static bool marktree_itr_next_skip(MarkTree *b, MarkTreeIter *itr, bool skip, bool preload,
                                   MTPos oldbase[], MetaFilter meta_filter)
{
  if (!itr->x) {
    return false;
  }
  itr->i++;
  if (meta_filter && itr->x->level > 0) {
    if (!meta_has(itr->x->meta[itr->i], meta_filter)) {
      // the child after the current key has no matching keys: step over it
      skip = true;
    }
  }
  if (itr->x->level == 0 || skip) {
    if (preload && itr->x->level == 0 && skip) {
      // skip rest of this leaf node
      itr->i = itr->x->n;
    } else if (itr->i < itr->x->n) {
      // TODO(bfredl): this is the common case,
      // and could be handled by inline wrapper
      return true;
    }
    // we ran out of non-internal keys. Go up until we find an internal key
    while (itr->i >= itr->x->n) {
      itr->x = itr->x->parent;
      if (itr->x == NULL) {
        return false;
      }
      itr->lvl--;
      itr->i = itr->s[itr->lvl].i;
      if (itr->i > 0) {
        // undo the base-position composition done when we descended here
        itr->pos.row -= itr->x->key[itr->i - 1].pos.row;
        itr->pos.col = itr->s[itr->lvl].oldcol;
      }
    }
  } else {
    // we stood at an "internal" key. Go down to the first non-internal
    // key after it.
    while (itr->x->level > 0) {
      // internal key, there is always a child after
      if (itr->i > 0) {
        itr->s[itr->lvl].oldcol = itr->pos.col;
        compose(&itr->pos, itr->x->key[itr->i - 1].pos);
      }
      if (oldbase && itr->i == 0) {
        oldbase[itr->lvl + 1] = oldbase[itr->lvl];
      }
      itr->s[itr->lvl].i = itr->i;
      assert(itr->x->ptr[itr->i]->parent == itr->x);
      itr->lvl++;
      itr->x = itr->x->ptr[itr->i];
      if (preload && itr->x->level) {
        itr->i = -1;
        break;
      }
      itr->i = 0;
      if (meta_filter && itr->x->level) {
        if (!meta_has(itr->x->meta[0], meta_filter)) {
          // itr->x has filtered keys but x->ptr[0] does not, don't enter the latter
          break;
        }
      }
    }
  }
  return true;
}
   1577 
   1578 bool marktree_itr_get_filter(MarkTree *b, int32_t row, int col, int stop_row, int stop_col,
   1579                             MetaFilter meta_filter, MarkTreeIter *itr)
   1580 {
   1581  if (!meta_has(b->meta_root, meta_filter)) {
   1582    return false;
   1583  }
   1584  if (!marktree_itr_get_ext(b, MTPos(row, col), itr, false, false, NULL, meta_filter)) {
   1585    return false;
   1586  }
   1587 
   1588  return marktree_itr_check_filter(b, itr, stop_row, stop_col, meta_filter);
   1589 }
   1590 
   1591 /// use after marktree_itr_get_overlap() to continue in a filtered fashion
   1592 ///
   1593 /// not strictly needed but steps out to the right parent node where there
   1594 /// might be "start" keys matching the filter ("end" keys are properly handled
   1595 /// by marktree_itr_step_overlap() already)
   1596 bool marktree_itr_step_out_filter(MarkTree *b, MarkTreeIter *itr, MetaFilter meta_filter)
   1597 {
   1598  if (!meta_has(b->meta_root, meta_filter)) {
   1599    itr->x = NULL;
   1600    return false;
   1601  }
   1602 
   1603  while (itr->x && itr->x->parent) {
   1604    if (meta_has(itr->x->parent->meta[itr->x->p_idx], meta_filter)) {
   1605      return true;
   1606    }
   1607 
   1608    itr->i = itr->x->n;
   1609 
   1610    // no filter needed, just reuse the code path for step to parent
   1611    marktree_itr_next_skip(b, itr, true, false, NULL, NULL);
   1612  }
   1613 
   1614  return itr->x;
   1615 }
   1616 
   1617 bool marktree_itr_next_filter(MarkTree *b, MarkTreeIter *itr, int stop_row, int stop_col,
   1618                              MetaFilter meta_filter)
   1619 {
   1620  if (!marktree_itr_next_skip(b, itr, false, false, NULL, meta_filter)) {
   1621    return false;
   1622  }
   1623 
   1624  return marktree_itr_check_filter(b, itr, stop_row, stop_col, meta_filter);
   1625 }
   1626 
// Per-slot flag masks: meta_map[m] is the MT_FLAG_DECOR_* key flag bit that
// corresponds to metadata slot m (0 <= m < kMTMetaCount), used to translate
// a MetaFilter into a mask over MTKey.flags.
const uint32_t meta_map[kMTMetaCount] = {
  MT_FLAG_DECOR_VIRT_TEXT_INLINE,
  MT_FLAG_DECOR_VIRT_LINES,
  MT_FLAG_DECOR_SIGNHL,
  MT_FLAG_DECOR_SIGNTEXT,
  MT_FLAG_DECOR_CONCEAL_LINES
};
   1634 static bool marktree_itr_check_filter(MarkTree *b, MarkTreeIter *itr, int stop_row, int stop_col,
   1635                                      MetaFilter meta_filter)
   1636 {
   1637  MTPos stop_pos = MTPos(stop_row, stop_col);
   1638 
   1639  uint32_t key_filter = 0;
   1640  for (int m = 0; m < kMTMetaCount; m++) {
   1641    key_filter |= meta_map[m]&meta_filter[m];
   1642  }
   1643 
   1644  while (true) {
   1645    if (pos_leq(stop_pos, marktree_itr_pos(itr))) {
   1646      itr->x = NULL;
   1647      return false;
   1648    }
   1649 
   1650    MTKey k = rawkey(itr);
   1651    if (!mt_end(k) && (k.flags & key_filter)) {
   1652      return true;
   1653    }
   1654 
   1655    // this skips subtrees, but not keys, thus the outer loop
   1656    if (!marktree_itr_next_skip(b, itr, false, false, NULL, meta_filter)) {
   1657      return false;
   1658    }
   1659  }
   1660 }
   1661 
/// Step the iterator one mark backwards in document order.
///
/// @return false if there is no previous mark (itr->x set to NULL)
bool marktree_itr_prev(MarkTree *b, MarkTreeIter *itr)
{
  if (!itr->x) {
    return false;
  }
  if (itr->x->level == 0) {
    itr->i--;
    if (itr->i >= 0) {
      // TODO(bfredl): this is the common case,
      // and could be handled by inline wrapper
      return true;
    }
    // we ran out of non-internal keys. Go up until we find a non-internal key
    while (itr->i < 0) {
      itr->x = itr->x->parent;
      if (itr->x == NULL) {
        return false;
      }
      itr->lvl--;
      itr->i = itr->s[itr->lvl].i - 1;
      if (itr->i >= 0) {
        // undo the base-position composition done when we descended here
        itr->pos.row -= itr->x->key[itr->i].pos.row;
        itr->pos.col = itr->s[itr->lvl].oldcol;
      }
    }
  } else {
    // we stood at an "internal" key. Go down to the last non-internal
    // key before it.
    while (itr->x->level > 0) {
      // internal key, there is always a child before
      if (itr->i > 0) {
        itr->s[itr->lvl].oldcol = itr->pos.col;
        compose(&itr->pos, itr->x->key[itr->i - 1].pos);
      }
      itr->s[itr->lvl].i = itr->i;
      assert(itr->x->ptr[itr->i]->parent == itr->x);
      itr->x = itr->x->ptr[itr->i];
      itr->i = itr->x->n;
      itr->lvl++;
    }
    itr->i--;
  }
  return true;
}
   1706 
   1707 bool marktree_itr_node_done(MarkTreeIter *itr)
   1708 {
   1709  return !itr->x || itr->i == itr->x->n - 1;
   1710 }
   1711 
   1712 MTPos marktree_itr_pos(MarkTreeIter *itr)
   1713 {
   1714  MTPos pos = rawkey(itr).pos;
   1715  unrelative(itr->pos, &pos);
   1716  return pos;
   1717 }
   1718 
   1719 MTKey marktree_itr_current(MarkTreeIter *itr)
   1720 {
   1721  if (itr->x) {
   1722    MTKey key = rawkey(itr);
   1723    key.pos = marktree_itr_pos(itr);
   1724    return key;
   1725  }
   1726  return MT_INVALID_KEY;
   1727 }
   1728 
   1729 static bool itr_eq(MarkTreeIter *itr1, MarkTreeIter *itr2)
   1730 {
   1731  return (&rawkey(itr1) == &rawkey(itr2));
   1732 }
   1733 
   1734 /// Get all marks which overlaps the position (row,col)
   1735 ///
   1736 /// After calling this function, use marktree_itr_step_overlap to step through
   1737 /// one overlapping mark at a time, until it returns false
   1738 ///
   1739 /// NOTE: It's possible to get all marks which overlaps a region (row,col) to (row_end,col_end)
   1740 /// To do this, first call marktree_itr_get_overlap with the start position and
   1741 /// keep calling marktree_itr_step_overlap until it returns false.
   1742 /// After this, as a second loop, keep calling the marktree_itr_next() until
   1743 /// the iterator is invalid or reaches past (row_end, col_end). In this loop,
   1744 /// consider all "start" marks (and unpaired marks if relevant), but skip over
   1745 /// all "end" marks, using mt_end(mark).
   1746 ///
   1747 /// @return false if we already know no marks can be found
   1748 ///               even if "true" the first call to marktree_itr_step_overlap
   1749 ///               could return false
   1750 bool marktree_itr_get_overlap(MarkTree *b, int row, int col, MarkTreeIter *itr)
   1751 {
   1752  if (b->n_keys == 0) {
   1753    itr->x = NULL;
   1754    return false;
   1755  }
   1756 
   1757  itr->x = b->root;
   1758  itr->i = -1;
   1759  itr->lvl = 0;
   1760  itr->pos = MTPos(0, 0);
   1761  itr->intersect_pos = MTPos(row, col);
   1762  // intersect_pos but will be adjusted relative itr->x
   1763  itr->intersect_pos_x = MTPos(row, col);
   1764  itr->intersect_idx = 0;
   1765  return true;
   1766 }
   1767 
/// Step through all overlapping pairs at a position.
///
/// This function must only be used with an iterator prepared by
/// |marktree_itr_get_overlap|.
///
/// @return true if a valid pair was found (returned as `pair`)
/// When all overlapping mark pairs have been found, false will be returned. `itr`
/// is then valid as an ordinary iterator at the (row, col) position specified in
/// marktree_itr_get_overlap
bool marktree_itr_step_overlap(MarkTree *b, MarkTreeIter *itr, MTPair *pair)
{
  // phase one: we start at the root node and step inwards towards itr->intersect_pos
  // (the position queried in marktree_itr_get_overlap)
  //
  // For each node (ancestor node to the node containing the sought position)
  // we return all intersecting intervals, one at a time
  while (itr->i == -1) {
    if (itr->intersect_idx < kv_size(itr->x->intersect)) {
      // report the next cached intersection of this ancestor node
      uint64_t id = kv_A(itr->x->intersect, itr->intersect_idx++);
      *pair = mtpair_from(marktree_lookup(b, id, NULL),
                          marktree_lookup(b, id|MARKTREE_END_FLAG, NULL));
      return true;
    }

    if (itr->x->level == 0) {
      // reached the leaf containing the queried position: go to phase two
      itr->s[itr->lvl].i = itr->i = 0;
      break;
    }

    // descend into the child covering the queried position
    MTKey k = { .pos = itr->intersect_pos_x, .flags = 0 };
    itr->i = marktree_getp_aux(itr->x, k, 0) + 1;

    itr->s[itr->lvl].i = itr->i;
    itr->s[itr->lvl].oldcol = itr->pos.col;

    if (itr->i > 0) {
      // positions inside the child are relative to the key left of it
      compose(&itr->pos, itr->x->key[itr->i - 1].pos);
      relative(itr->x->key[itr->i - 1].pos, &itr->intersect_pos_x);
    }
    itr->x = itr->x->ptr[itr->i];
    itr->lvl++;
    itr->i = -1;
    itr->intersect_idx = 0;
  }

  // phase two: we now need to handle the node found at itr->intersect_pos
  // first consider all start nodes in the node before this position.
  while (itr->i < itr->x->n && pos_less(rawkey(itr).pos, itr->intersect_pos_x)) {
    MTKey k = itr->x->key[itr->i++];
    itr->s[itr->lvl].i = itr->i;
    if (mt_start(k)) {
      MTKey end = marktree_lookup(b, mt_lookup_id(k.ns, k.id, true), NULL);
      if (pos_less(end.pos, itr->intersect_pos)) {
        // pair ends before the queried position: no overlap
        continue;
      }

      unrelative(itr->pos, &k.pos);
      *pair = mtpair_from(k, end);
      return true;  // it's a start!
    }
  }

  // phase 2B: We also need to step to the end of this node and consider all end marks, which
  // might end an interval overlapping itr->intersect_pos
  while (itr->i < itr->x->n) {
    MTKey k = itr->x->key[itr->i++];
    if (mt_end(k)) {
      uint64_t id = mt_lookup_id(k.ns, k.id, false);
      if (id2node(b, id) == itr->x) {
        // start lives in this same node, so the pair is handled by the
        // start-loop above rather than here
        continue;
      }
      unrelative(itr->pos, &k.pos);
      MTKey start = marktree_lookup(b, id, NULL);
      if (pos_leq(itr->intersect_pos, start.pos)) {
        // starts at or after the queried position: no overlap
        continue;
      }
      *pair = mtpair_from(start, k);
      return true;  // end of a range which began before us!
    }
  }

  // when returning false, get back to the queried position, to ensure the caller
  // can keep using it as an ordinary iterator at the queried position. The docstring
  // for marktree_itr_get_overlap explains how this is useful.
  itr->i = itr->s[itr->lvl].i;
  assert(itr->i >= 0);
  if (itr->i >= itr->x->n) {
    marktree_itr_next(b, itr);
  }

  // either on or after the intersected position, bail out
  return false;
}
   1860 
/// Swap the keys pointed to by two iterators, keeping each key's position.
///
/// Used by marktree_splice when gravity forces marks to be reordered. Any
/// paired mark involved is recorded in `damage` so its intersection
/// bookkeeping can be repaired after the splice.
static void swap_keys(MarkTree *b, MarkTreeIter *itr1, MarkTreeIter *itr2, DamageList *damage)
{
  // TODO(bfredl): redactor is planned, see TODO comment next to qsort in marktree_splice
  if (mt_paired(rawkey(itr1))) {
    kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr1)), itr1->x, itr2->x,
                                 itr1->i, itr2->i }));
  }
  if (mt_paired(rawkey(itr2))) {
    kvi_push(*damage, ((Damage){ mt_lookup_key(rawkey(itr2)), itr2->x, itr1->x,
                                 itr2->i, itr1->i }));
  }

  if (itr1->x != itr2->x) {
    // keys move between different nodes: if their meta contributions differ,
    // patch the cached meta counts along both paths up to the common ancestor
    uint32_t meta_inc_1[kMTMetaCount];
    meta_describe_key(meta_inc_1, rawkey(itr1));
    uint32_t meta_inc_2[kMTMetaCount];
    meta_describe_key(meta_inc_2, rawkey(itr2));

    if (memcmp(meta_inc_1, meta_inc_2, sizeof(meta_inc_1)) != 0) {
      MTNode *x1 = itr1->x;
      MTNode *x2 = itr2->x;
      while (x1 != x2) {
        if (x1->level <= x2->level) {
          // as the root node uniquely has the highest level, x1 cannot be it
          uint32_t *meta_node = x1->parent->meta[x1->p_idx];
          for (int m = 0; m < kMTMetaCount; m++) {
            meta_node[m] += meta_inc_2[m] - meta_inc_1[m];
          }
          x1 = x1->parent;
        }
        if (x2->level < x1->level) {
          uint32_t *meta_node = x2->parent->meta[x2->p_idx];
          for (int m = 0; m < kMTMetaCount; m++) {
            meta_node[m] += meta_inc_1[m] - meta_inc_2[m];
          }
          x2 = x2->parent;
        }
      }
    }
  }

  // exchange the key payloads but keep the (node-relative) positions in place
  MTKey key1 = rawkey(itr1);
  MTKey key2 = rawkey(itr2);
  rawkey(itr1) = key2;
  rawkey(itr1).pos = key1.pos;
  rawkey(itr2) = key1;
  rawkey(itr2).pos = key2.pos;
  // refkey() presumably refreshes the id->node bookkeeping for the moved keys
  refkey(b, itr1->x, itr1->i);
  refkey(b, itr2->x, itr2->i);
}
   1911 
   1912 static int damage_cmp(const void *s1, const void *s2)
   1913 {
   1914  Damage *d1 = (Damage *)s1;
   1915  Damage *d2 = (Damage *)s2;
   1916  assert(d1->id != d2->id);
   1917  return d1->id > d2->id ? 1 : -1;
   1918 }
   1919 
/// Adjust the tree for a text change: the region starting at
/// (start_line, start_col) with extent (old_extent_line, old_extent_col) is
/// replaced by text with extent (new_extent_line, new_extent_col).
///
/// Marks inside the removed region are pulled to the start position
/// (respecting gravity), and marks after it are shifted by the net change.
/// Intersection bookkeeping of paired marks that moved between nodes is
/// repaired at the end via the "damage" list.
///
/// @return true if any mark was moved
bool marktree_splice(MarkTree *b, int32_t start_line, int start_col, int old_extent_line,
                     int old_extent_col, int new_extent_line, int new_extent_col)
{
  MTPos start = { start_line, start_col };
  MTPos old_extent = { old_extent_line, old_extent_col };
  MTPos new_extent = { new_extent_line, new_extent_col };

  // only a non-empty old region can pull marks into the start position
  bool may_delete = (old_extent.row != 0 || old_extent.col != 0);
  bool same_line = old_extent.row == 0 && new_extent.row == 0;
  unrelative(start, &old_extent);  // now the absolute end of the old region
  unrelative(start, &new_extent);  // now the absolute end of the new region
  MarkTreeIter itr[1] = { 0 };
  MarkTreeIter enditr[1] = { 0 };

  MTPos oldbase[MT_MAX_DEPTH] = { 0 };

  marktree_itr_get_ext(b, start, itr, false, true, oldbase, NULL);
  if (!itr->x) {
    // no marks at or after the start position: we are DONE
    return false;
  }
  MTPos delta = { new_extent.row - old_extent.row,
                  new_extent.col - old_extent.col };

  if (may_delete) {
    MTPos ipos = marktree_itr_pos(itr);
    if (!pos_leq(old_extent, ipos)
        || (old_extent.row == ipos.row && old_extent.col == ipos.col
            && !mt_right(rawkey(itr)))) {
      marktree_itr_get_ext(b, old_extent, enditr, true, true, NULL, NULL);
      assert(enditr->x);
      // "assert" (itr <= enditr)
    } else {
      // nothing actually inside the deleted region
      may_delete = false;
    }
  }

  bool past_right = false;
  bool moved = false;
  DamageList damage;
  kvi_init(damage);

  // Follow the general strategy of messing things up and fix them later
  // "oldbase" carries the information needed to calculate old position of
  // children.
  if (may_delete) {
    // phase 1: pull left-gravity marks inside the deleted region to `start`,
    // swapping with right-gravity marks (taken from the back via enditr)
    // when ordering demands it
    while (itr->x && !past_right) {
      MTPos loc_start = start;
      MTPos loc_old = old_extent;
      relative(itr->pos, &loc_start);

      relative(oldbase[itr->lvl], &loc_old);

continue_same_node:
      // NB: strictly should be less than the right gravity of loc_old, but
      // the iter comparison below will already break on that.
      if (!pos_leq(rawkey(itr).pos, loc_old)) {
        break;
      }

      if (mt_right(rawkey(itr))) {
        // find a left-gravity mark from the back to swap with
        while (!itr_eq(itr, enditr)
               && mt_right(rawkey(enditr))) {
          marktree_itr_prev(b, enditr);
        }
        if (!mt_right(rawkey(enditr))) {
          swap_keys(b, itr, enditr, &damage);
        } else {
          past_right = true;  // NOLINT
          (void)past_right;
          break;
        }
      }

      if (itr_eq(itr, enditr)) {
        // actually, will be past_right after this key
        past_right = true;
      }

      moved = true;
      if (itr->x->level) {
        // remember the old (absolute) position so children can be fixed up
        oldbase[itr->lvl + 1] = rawkey(itr).pos;
        unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);
        rawkey(itr).pos = loc_start;
        marktree_itr_next_skip(b, itr, false, false, oldbase, NULL);
      } else {
        rawkey(itr).pos = loc_start;
        if (itr->i < itr->x->n - 1) {
          itr->i++;
          if (!past_right) {
            goto continue_same_node;
          }
        } else {
          marktree_itr_next(b, itr);
        }
      }
    }
    // phase 1B: move the remaining (right-gravity) marks in the old region
    // to the end of the new region
    while (itr->x) {
      MTPos loc_new = new_extent;
      relative(itr->pos, &loc_new);
      MTPos limit = old_extent;

      relative(oldbase[itr->lvl], &limit);

past_continue_same_node:

      if (pos_leq(limit, rawkey(itr).pos)) {
        break;
      }

      MTPos oldpos = rawkey(itr).pos;
      rawkey(itr).pos = loc_new;
      moved = true;
      if (itr->x->level) {
        oldbase[itr->lvl + 1] = oldpos;
        unrelative(oldbase[itr->lvl], &oldbase[itr->lvl + 1]);

        marktree_itr_next_skip(b, itr, false, false, oldbase, NULL);
      } else {
        if (itr->i < itr->x->n - 1) {
          itr->i++;
          goto past_continue_same_node;
        } else {
          marktree_itr_next(b, itr);
        }
      }
    }
  }

  // phase 2: shift all marks after the old region by the net size change
  while (itr->x) {
    unrelative(oldbase[itr->lvl], &rawkey(itr).pos);
    int realrow = rawkey(itr).pos.row;
    assert(realrow >= old_extent.row);
    bool done = false;
    if (realrow == old_extent.row) {
      if (delta.col) {
        rawkey(itr).pos.col += delta.col;
      }
    } else {
      if (same_line) {
        // optimization: column only adjustment can skip remaining rows
        done = true;
      }
    }
    if (delta.row) {
      rawkey(itr).pos.row += delta.row;
      moved = true;
    }
    relative(itr->pos, &rawkey(itr).pos);
    if (done) {
      break;
    }
    marktree_itr_next_skip(b, itr, true, false, NULL, NULL);
  }

  // phase 3: repair intersection data for paired marks whose keys were
  // swapped between nodes above
  if (kv_size(damage)) {
    // TODO(bfredl): a full sort is not really needed. we just need a "start" node to find
    // its corresponding "end" node. Set up some dedicated hash for this later (c.f. the
    // "grow only" variant of khash_t branch)
    qsort((void *)&kv_A(damage, 0), kv_size(damage), sizeof(kv_A(damage, 0)),
          damage_cmp);

    for (size_t i = 0; i < kv_size(damage); i++) {
      Damage d = kv_A(damage, i);
      assert(i == 0 || d.id > kv_A(damage, i - 1).id);
      if (!(d.id & MARKTREE_END_FLAG)) {  // start
        if (i + 1 < kv_size(damage) && kv_A(damage, i + 1).id == (d.id | MARKTREE_END_FLAG)) {
          Damage d2 = kv_A(damage, i + 1);

          // pair: un-intersect the old span, re-intersect the new one
          marktree_itr_set_node(b, itr, d.old, d.old_i);
          marktree_itr_set_node(b, enditr, d2.old, d2.old_i);
          marktree_intersect_pair(b, d.id, itr, enditr, true);
          marktree_itr_set_node(b, itr, d.new, d.new_i);
          marktree_itr_set_node(b, enditr, d2.new, d2.new_i);
          marktree_intersect_pair(b, d.id, itr, enditr, false);

          i++;  // consume two items
          continue;
        }

        // d is lone start, end didn't move
        MarkTreeIter endpos[1];
        marktree_lookup(b, d.id | MARKTREE_END_FLAG, endpos);
        if (endpos->x) {
          marktree_itr_set_node(b, itr, d.old, d.old_i);
          *enditr = *endpos;
          marktree_intersect_pair(b, d.id, itr, enditr, true);
          marktree_itr_set_node(b, itr, d.new, d.new_i);
          *enditr = *endpos;
          marktree_intersect_pair(b, d.id, itr, enditr, false);
        }
      } else {
        // d is lone end, start didn't move
        MarkTreeIter startpos[1];
        uint64_t start_id = d.id & ~MARKTREE_END_FLAG;

        marktree_lookup(b, start_id, startpos);
        if (startpos->x) {
          *itr = *startpos;
          marktree_itr_set_node(b, enditr, d.old, d.old_i);
          marktree_intersect_pair(b, start_id, itr, enditr, true);
          *itr = *startpos;
          marktree_itr_set_node(b, enditr, d.new, d.new_i);
          marktree_intersect_pair(b, start_id, itr, enditr, false);
        }
      }
    }
  }
  kvi_destroy(damage);

  return moved;
}
   2133 
/// Move the region of extent (extent_row, extent_col) starting at
/// (start_row, start_col) to (new_row, new_col), taking its marks along.
///
/// Implemented as: save and delete the marks inside the region, splice the
/// old region out, splice an equally-sized gap in at the target, then
/// re-insert the saved marks relative to the new position.
void marktree_move_region(MarkTree *b, int start_row, colnr_T start_col, int extent_row,
                          colnr_T extent_col, int new_row, colnr_T new_col)
{
  MTPos start = { start_row, start_col };
  MTPos size = { extent_row, extent_col };
  MTPos end = size;
  unrelative(start, &end);
  MarkTreeIter itr[1] = { 0 };
  marktree_itr_get_ext(b, start, itr, false, true, NULL, NULL);
  kvec_t(MTKey) saved = KV_INITIAL_VALUE;
  // collect and delete every mark inside the region; a right-gravity mark
  // exactly at the end position stays behind
  while (itr->x) {
    MTKey k = marktree_itr_current(itr);
    if (!pos_leq(k.pos, end) || (k.pos.row == end.row && k.pos.col == end.col
                                 && mt_right(k))) {
      break;
    }
    relative(start, &k.pos);  // save positions relative to the region start
    kv_push(saved, k);
    marktree_del_itr(b, itr, false);
  }

  // remove the old region, then open a same-sized region at the target
  marktree_splice(b, start.row, start.col, size.row, size.col, 0, 0);
  MTPos new = { new_row, new_col };
  marktree_splice(b, new.row, new.col,
                  0, 0, size.row, size.col);

  // re-insert the saved marks at their new absolute positions
  for (size_t i = 0; i < kv_size(saved); i++) {
    MTKey item = kv_A(saved, i);
    unrelative(new, &item.pos);
    marktree_put_key(b, item);
    if (mt_paired(item)) {
      // other end might be later in `saved`, this will safely bail out then
      marktree_restore_pair(b, item);
    }
  }
  kv_destroy(saved);
}
   2171 
   2172 /// @param itr OPTIONAL. set itr to pos.
   2173 MTKey marktree_lookup_ns(MarkTree *b, uint32_t ns, uint32_t id, bool end, MarkTreeIter *itr)
   2174 {
   2175  return marktree_lookup(b, mt_lookup_id(ns, id, end), itr);
   2176 }
   2177 
   2178 static uint64_t pseudo_index(MTNode *x, int i)
   2179 {
   2180  int off = MT_LOG2_BRANCH * x->level;
   2181  uint64_t index = 0;
   2182 
   2183  while (x) {
   2184    index |= (uint64_t)(i + 1) << off;
   2185    off += MT_LOG2_BRANCH;
   2186    i = x->p_idx;
   2187    x = x->parent;
   2188  }
   2189 
   2190  return index;
   2191 }
   2192 
/// Compute the pseudo-index of the mark with internal id `id`.
///
/// if sloppy, two keys at the same _leaf_ node has the same index
///
/// @return the pseudo-index, or 0 if the id is not in the tree
static uint64_t pseudo_index_for_id(MarkTree *b, uint64_t id, bool sloppy)
{
  MTNode *n = id2node(b, id);
  if (n == NULL) {
    return 0;  // a valid pseudo-index is never zero!
  }

  int i = 0;
  if (n->level || !sloppy) {
    // locate the exact key slot within the node
    for (i = 0; i < n->n; i++) {
      if (mt_lookup_key(n->key[i]) == id) {
        break;
      }
    }
    assert(i < n->n);
    if (n->level) {
      i += 1;  // internal key i comes after ptr[i]
    }
  }

  return pseudo_index(n, i);
}
   2217 
   2218 /// @param itr OPTIONAL. set itr to pos.
   2219 MTKey marktree_lookup(MarkTree *b, uint64_t id, MarkTreeIter *itr)
   2220 {
   2221  MTNode *n = id2node(b, id);
   2222  if (n == NULL) {
   2223    if (itr) {
   2224      itr->x = NULL;
   2225    }
   2226    return MT_INVALID_KEY;
   2227  }
   2228  int i = 0;
   2229  for (i = 0; i < n->n; i++) {
   2230    if (mt_lookup_key(n->key[i]) == id) {
   2231      return marktree_itr_set_node(b, itr, n, i);
   2232    }
   2233  }
   2234 
   2235  abort();
   2236 }
   2237 
/// Position an iterator directly at key `i` of node `n`.
///
/// @param itr OPTIONAL. when given, it is initialized to point at the key.
/// @return the key with its tree-absolute position filled in
MTKey marktree_itr_set_node(MarkTree *b, MarkTreeIter *itr, MTNode *n, int i)
{
  MTKey key = n->key[i];
  if (itr) {
    itr->i = i;
    itr->x = n;
    itr->lvl = b->root->level - n->level;
  }
  // walk up to the root: record the path indices in itr->s and accumulate
  // the absolute position of the key
  while (n->parent != NULL) {
    MTNode *p = n->parent;
    i = n->p_idx;
    assert(p->ptr[i] == n);

    if (itr) {
      itr->s[b->root->level - p->level].i = i;
    }
    if (i > 0) {
      // child positions are relative to the key left of the child pointer
      unrelative(p->key[i - 1].pos, &key.pos);
    }
    n = p;
  }
  if (itr) {
    // recompute itr->pos (and oldcol bookkeeping) from the recorded path
    marktree_itr_fix_pos(b, itr);
  }
  return key;
}
   2264 
   2265 MTPos marktree_get_altpos(MarkTree *b, MTKey mark, MarkTreeIter *itr)
   2266 {
   2267  return marktree_get_alt(b, mark, itr).pos;
   2268 }
   2269 
   2270 /// @return alt mark for a paired mark or mark itself for unpaired mark
   2271 MTKey marktree_get_alt(MarkTree *b, MTKey mark, MarkTreeIter *itr)
   2272 {
   2273  return mt_paired(mark) ? marktree_lookup_ns(b, mark.ns, mark.id, !mt_end(mark), itr) : mark;
   2274 }
   2275 
/// Recompute itr->pos (the accumulated path offset) from the per-level
/// indices stored in itr->s, which — together with itr->lvl and itr->x —
/// must already be valid.
static void marktree_itr_fix_pos(MarkTree *b, MarkTreeIter *itr)
{
  itr->pos = (MTPos){ 0, 0 };
  MTNode *x = b->root;
  for (int lvl = 0; lvl < itr->lvl; lvl++) {
    itr->s[lvl].oldcol = itr->pos.col;
    int i = itr->s[lvl].i;
    if (i > 0) {
      // descending right of key i-1: fold its position into the offset
      compose(&itr->pos, x->key[i - 1].pos);
    }
    assert(x->level);
    x = x->ptr[i];
  }
  assert(x == itr->x);
}
   2291 
   2292 // for unit test
   2293 void marktree_put_test(MarkTree *b, uint32_t ns, uint32_t id, int row, int col, bool right_gravity,
   2294                       int end_row, int end_col, bool end_right, bool meta_inline)
   2295 {
   2296  uint16_t flags = mt_flags(right_gravity, false, false, false);
   2297  // The specific choice is irrelevant here, we pick one counted decor
   2298  // type to test the counting and filtering logic.
   2299  flags |= meta_inline ? MT_FLAG_DECOR_VIRT_TEXT_INLINE : 0;
   2300  MTKey key = { { row, col }, ns, id, flags, { .hl = DECOR_HIGHLIGHT_INLINE_INIT } };
   2301  marktree_put(b, key, end_row, end_col, end_right);
   2302 }
   2303 
   2304 // for unit test
   2305 bool mt_right_test(MTKey key)
   2306 {
   2307  return mt_right(key);
   2308 }
   2309 
// for unit test: delete a paired mark (identified by ns/id), removing both
// ends of the pair.
void marktree_del_pair_test(MarkTree *b, uint32_t ns, uint32_t id)
{
  MarkTreeIter itr[1];
  marktree_lookup_ns(b, ns, id, false, itr);

  // deleting one end of a pair yields the lookup id of the other end
  uint64_t other = marktree_del_itr(b, itr, false);
  assert(other);
  marktree_lookup(b, other, itr);
  marktree_del_itr(b, itr, false);
}
   2321 
/// Debug self-check: validate tree invariants and key/node counts.
/// No-op in release (NDEBUG) builds, as the checks are all assertions.
void marktree_check(MarkTree *b)
{
#ifndef NDEBUG
  if (b->root == NULL) {
    // an empty tree must carry no keys, nodes, or id mappings
    assert(b->n_keys == 0);
    assert(b->n_nodes == 0);
    assert(b->id2node == NULL || map_size(b->id2node) == 0);
    return;
  }

  MTPos dummy;
  bool last_right = false;

  size_t nkeys = marktree_check_node(b, b->root, &dummy, &last_right, b->meta_root);
  assert(b->n_keys == nkeys);
  assert(b->n_keys == map_size(b->id2node));
#else
  // Do nothing, as assertions are required
  (void)b;
#endif
}
   2343 
/// Recursively validate one node: branching factor, key ordering, gravity
/// ordering among equal positions, parent/child links, id2node consistency
/// and cached meta counts.
///
/// @param last [in/out] absolute position of the previously visited key
/// @param last_right [in/out] gravity of the previously visited key
/// @param meta_node_ref expected meta counts for this subtree
/// @return number of keys in the subtree rooted at `x`
size_t marktree_check_node(MarkTree *b, MTNode *x, MTPos *last, bool *last_right,
                           const uint32_t *meta_node_ref)
{
  assert(x->n <= 2 * T - 1);
  // TODO(bfredl): too strict if checking "in repair" post-delete tree.
  assert(x->n >= (x != b->root ? T - 1 : 0));
  size_t n_keys = (size_t)x->n;

  for (int i = 0; i < x->n; i++) {
    if (x->level) {
      n_keys += marktree_check_node(b, x->ptr[i], last, last_right, x->meta[i]);
    } else {
      *last = (MTPos) { 0, 0 };
    }
    if (i > 0) {
      // make *last comparable in this node's coordinate system
      unrelative(x->key[i - 1].pos, last);
    }
    assert(pos_leq(*last, x->key[i].pos));
    if (last->row == x->key[i].pos.row && last->col == x->key[i].pos.col) {
      // keys at the same position must come left-gravity first
      assert(!*last_right || mt_right(x->key[i]));
    }
    *last_right = mt_right(x->key[i]);
    assert(x->key[i].pos.col >= 0);
    // every key must be registered in the id->node map for this node
    assert(pmap_get(uint64_t)(b->id2node, mt_lookup_key(x->key[i])) == x);
  }

  if (x->level) {
    n_keys += marktree_check_node(b, x->ptr[x->n], last, last_right, x->meta[x->n]);
    unrelative(x->key[x->n - 1].pos, last);

    for (int i = 0; i < x->n + 1; i++) {
      assert(x->ptr[i]->parent == x);
      assert(x->ptr[i]->p_idx == i);
      assert(x->ptr[i]->level == x->level - 1);
      // PARANOIA: check no double node ref
      for (int j = 0; j < i; j++) {
        assert(x->ptr[i] != x->ptr[j]);
      }
    }
  } else if (x->n > 0) {
    *last = x->key[x->n - 1].pos;
  }

  // recompute the meta counts and compare against the cached reference
  uint32_t meta_node[kMTMetaCount];
  meta_describe_node(meta_node, x);
  for (int m = 0; m < kMTMetaCount; m++) {
    assert(meta_node_ref[m] == meta_node[m]);
  }

  return n_keys;
}
   2395 
/// Verify that the cached per-node intersection sets match what a full
/// recomputation from all mark pairs produces.
///
/// @return true if every node's intersection set matched
bool marktree_check_intersections(MarkTree *b)
{
  if (!b->root) {
    return true;
  }
  PMap(ptr_t) checked = MAP_INIT;

  // 1. move x->intersect to checked[x] and reinit x->intersect
  mt_recurse_nodes(b->root, &checked);

  // 2. iterate over all marks. for each START mark of a pair,
  // intersect the nodes between the pair
  MarkTreeIter itr[1];
  marktree_itr_first(b, itr);
  while (true) {
    MTKey mark = marktree_itr_current(itr);
    if (mark.pos.row < 0) {
      break;  // invalid key: iteration finished
    }

    if (mt_start(mark)) {
      MarkTreeIter start_itr[1];
      MarkTreeIter end_itr[1];
      uint64_t end_id = mt_lookup_id(mark.ns, mark.id, true);
      MTKey k = marktree_lookup(b, end_id, end_itr);
      if (k.pos.row >= 0) {
        *start_itr = *itr;
        marktree_intersect_pair(b, mt_lookup_key(mark), start_itr, end_itr, false);
      }
    }

    marktree_itr_next(b, itr);
  }

  // 3. for each node check if the recreated intersection
  // matches the old checked[x] intersection.
  bool status = mt_recurse_nodes_compare(b->root, &checked);

  // free the saved copies and the map itself
  uint64_t *val;
  map_foreach_value(&checked, val, {
    xfree(val);
  });
  map_destroy(ptr_t, &checked);

  return status;
}
   2442 
/// Helper for marktree_check_intersections: move each node's intersect set
/// into `checked` (keyed by node pointer) and reset the node's own set.
///
/// Each saved copy is terminated with a UINT64_MAX sentinel.
void mt_recurse_nodes(MTNode *x, PMap(ptr_t) *checked)
{
  if (kv_size(x->intersect)) {
    kvi_push(x->intersect, (uint64_t)-1);  // sentinel
    uint64_t *val;
    if (x->intersect.items == x->intersect.init_array) {
      // data still lives in the kvec's inline array: copy it to the heap
      val = xmemdup(x->intersect.items, x->intersect.size * sizeof(*x->intersect.items));
    } else {
      val = x->intersect.items;  // heap-allocated: steal the buffer
    }
    pmap_put(ptr_t)(checked, x, val);
    kvi_init(x->intersect);
  }

  if (x->level) {
    for (int i = 0; i < x->n + 1; i++) {
      mt_recurse_nodes(x->ptr[i], checked);
    }
  }
}
   2463 
/// Helper for marktree_check_intersections: compare each node's recomputed
/// intersect set against the sentinel-terminated copy saved in `checked`.
///
/// @return true if all nodes in the subtree match
bool mt_recurse_nodes_compare(MTNode *x, PMap(ptr_t) *checked)
{
  uint64_t *ref = pmap_get(ptr_t)(checked, x);
  if (ref != NULL) {
    for (size_t i = 0;; i++) {
      if (ref[i] == (uint64_t)-1) {
        // sentinel reached: the lengths must agree exactly
        if (i != kv_size(x->intersect)) {
          return false;
        }

        break;
      } else {
        if (kv_size(x->intersect) <= i || ref[i] != kv_A(x->intersect, i)) {
          return false;
        }
      }
    }
  } else {
    // nothing was saved for this node: the recomputed set must be empty too
    if (kv_size(x->intersect)) {
      return false;
    }
  }

  if (x->level) {
    for (int i = 0; i < x->n + 1; i++) {
      if (!mt_recurse_nodes_compare(x->ptr[i], checked)) {
        return false;
      }
    }
  }

  return true;
}
   2497 
   2498 // TODO(bfredl): kv_print
   2499 #define GA_PUT(x) ga_concat(ga, (char *)(x))
   2500 #define GA_PRINT(fmt, ...) snprintf(buf, sizeof(buf), fmt, __VA_ARGS__); \
   2501  GA_PUT(buf);
   2502 
   2503 String mt_inspect(MarkTree *b, bool keys, bool dot)
   2504 {
   2505  garray_T ga[1];
   2506  ga_init(ga, (int)sizeof(char), 80);
   2507  MTPos p = { 0, 0 };
   2508  if (b->root) {
   2509    if (dot) {
   2510      GA_PUT("digraph D {\n\n");
   2511      mt_inspect_dotfile_node(b, ga, b->root, p, NULL);
   2512      GA_PUT("\n}");
   2513    } else {
   2514      mt_inspect_node(b, ga, keys, b->root, p);
   2515    }
   2516  }
   2517  return ga_take_string(ga);
   2518 }
   2519 
   2520 static inline uint64_t mt_dbg_id(uint64_t id)
   2521 {
   2522  return (id >> 1) & 0xffffffff;
   2523 }
   2524 
/// Append a bracket-syntax dump of the subtree at `n` to `ga`.
///
/// @param keys also print intersection sets and mark ids
/// @param off  absolute position of this subtree (keys are stored relative)
static void mt_inspect_node(MarkTree *b, garray_T *ga, bool keys, MTNode *n, MTPos off)
{
  static char buf[1024];  // scratch for GA_PRINT; debug-only, not reentrant
  GA_PUT("[");
  if (keys && kv_size(n->intersect)) {
    for (size_t i = 0; i < kv_size(n->intersect); i++) {
      GA_PUT(i == 0 ? "{" : ";");
      // GA_PRINT("%"PRIu64, kv_A(n->intersect, i));
      GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i)));
    }
    GA_PUT("},");
  }
  if (n->level) {
    // leftmost child first, so output follows tree order
    mt_inspect_node(b, ga, keys, n->ptr[0], off);
  }
  for (int i = 0; i < n->n; i++) {
    MTPos p = n->key[i].pos;
    unrelative(off, &p);  // print tree-absolute coordinates
    GA_PRINT("%d/%d", p.row, p.col);
    if (keys) {
      MTKey key = n->key[i];
      GA_PUT(":");
      if (mt_start(key)) {
        GA_PUT("<");
      }
      // GA_PRINT("%"PRIu64, mt_lookup_id(key.ns, key.id, false));
      GA_PRINT("%" PRIu32, key.id);
      if (mt_end(key)) {
        GA_PUT(">");
      }
    }
    if (n->level) {
      mt_inspect_node(b, ga, keys, n->ptr[i + 1], p);
    } else {
      ga_concat(ga, ",");
    }
  }
  ga_concat(ga, "]");
}
   2564 
   2565 static void mt_inspect_dotfile_node(MarkTree *b, garray_T *ga, MTNode *n, MTPos off, char *parent)
   2566 {
   2567  static char buf[1024];
   2568  char namebuf[64];
   2569  if (parent != NULL) {
   2570    snprintf(namebuf, sizeof namebuf, "%s_%c%d", parent, 'a' + n->level, n->p_idx);
   2571  } else {
   2572    snprintf(namebuf, sizeof namebuf, "MTNode");
   2573  }
   2574 
   2575  GA_PRINT("  %s[shape=plaintext, label=<\n", namebuf);
   2576  GA_PUT("    <table border='0' cellborder='1' cellspacing='0'>\n");
   2577  if (kv_size(n->intersect)) {
   2578    GA_PUT("    <tr><td>");
   2579    for (size_t i = 0; i < kv_size(n->intersect); i++) {
   2580      if (i > 0) {
   2581        GA_PUT(", ");
   2582      }
   2583      GA_PRINT("%" PRIu64, mt_dbg_id(kv_A(n->intersect, i)));
   2584    }
   2585    GA_PUT("</td></tr>\n");
   2586  }
   2587 
   2588  GA_PUT("    <tr><td>");
   2589  for (int i = 0; i < n->n; i++) {
   2590    MTKey k = n->key[i];
   2591    if (i > 0) {
   2592      GA_PUT(", ");
   2593    }
   2594    GA_PRINT("%d", k.id);
   2595    if (mt_paired(k)) {
   2596      GA_PUT(mt_end(k) ? "e" : "s");
   2597    }
   2598  }
   2599  GA_PUT("</td></tr>\n");
   2600  GA_PUT("    </table>\n");
   2601  GA_PUT(">];\n");
   2602  if (parent) {
   2603    GA_PRINT("  %s -> %s\n", parent, namebuf);
   2604  }
   2605  if (n->level) {
   2606    mt_inspect_dotfile_node(b, ga, n->ptr[0], off, namebuf);
   2607  }
   2608  for (int i = 0; i < n->n; i++) {
   2609    MTPos p = n->key[i].pos;
   2610    unrelative(off, &p);
   2611    if (n->level) {
   2612      mt_inspect_dotfile_node(b, ga, n->ptr[i + 1], p, namebuf);
   2613    }
   2614  }
   2615 }