neovim

Neovim text editor
git clone https://git.dasho.dev/neovim.git
Log | Files | Refs | README

extmark.c (24576B)


      1 // Implements extended marks for plugins. Marks sit in a MarkTree
// data structure which provides both efficient mark insertions/lookups
      3 // and adjustment to text changes. See marktree.c for more details.
      4 //
      5 // A map of pointers to the marks is used for fast lookup by mark id.
      6 //
      7 // Marks are moved by calls to extmark_splice. Some standard interfaces
      8 // mark_adjust and inserted_bytes already adjust marks, check if these are
      9 // being used before adding extmark_splice calls!
     10 //
     11 // Undo/Redo of marks is implemented by storing the call arguments to
     12 // extmark_splice. The list of arguments is applied in extmark_apply_undo.
     13 // We have to copy extmark positions when the extmarks are within a
     14 // deleted/changed region.
     15 //
     16 // Marks live in namespaces that allow plugins/users to segregate marks
     17 // from other users.
     18 //
     19 // Deleting marks only happens when explicitly calling extmark_del, deleting
     20 // over a range of marks will only move the marks. Deleting on a mark will
     21 // leave it in same position unless it is on the EOL of a line.
     22 //
     23 // Extmarks are used to implement buffer decoration. Decoration is mostly
     24 // regarded as an application of extmarks, however for practical reasons code
     25 // that deletes an extmark with decoration will call back into the decoration
     26 // code for redrawing the line with the deleted decoration.
     27 
     28 #include <assert.h>
     29 #include <stddef.h>
     30 
     31 #include "nvim/api/private/defs.h"
     32 #include "nvim/buffer_defs.h"
     33 #include "nvim/buffer_updates.h"
     34 #include "nvim/decoration.h"
     35 #include "nvim/decoration_defs.h"
     36 #include "nvim/extmark.h"
     37 #include "nvim/extmark_defs.h"
     38 #include "nvim/globals.h"
     39 #include "nvim/map_defs.h"
     40 #include "nvim/marktree.h"
     41 #include "nvim/memline.h"
     42 #include "nvim/memory.h"
     43 #include "nvim/pos_defs.h"
     44 #include "nvim/types_defs.h"
     45 #include "nvim/undo.h"
     46 #include "nvim/undo_defs.h"
     47 
     48 #include "extmark.c.generated.h"
     49 
/// Create or update an extmark
///
/// must not be used during iteration!
///
/// @param buf          buffer the mark lives in
/// @param ns_id        namespace the mark belongs to
/// @param idp          in/out mark id: NULL or *idp == 0 allocates a fresh id;
///                     the resulting id is written back when idp is non-NULL
/// @param row,col      start position of the mark
/// @param end_row      end row for a paired mark, or -1 for an unpaired mark
/// @param end_col      end col for a paired mark
/// @param decor        decoration payload (inline data or external pointer)
/// @param decor_flags  precomputed decoration flag bits OR-ed into the mark flags
/// @param right_gravity      start position moves right when text is inserted at it
/// @param end_right_gravity  same, for the end position
/// @param no_undo      do not restore this mark on undo
/// @param invalidate   invalidate the mark when its entire range is deleted
/// @param err          error out-parameter (not written by the code visible here)
void extmark_set(buf_T *buf, uint32_t ns_id, uint32_t *idp, int row, colnr_T col, int end_row,
                 colnr_T end_col, DecorInline decor, uint16_t decor_flags, bool right_gravity,
                 bool end_right_gravity, bool no_undo, bool invalidate, Error *err)
{
  // "ns" tracks the highest id ever used in this namespace, for allocating new ids.
  uint32_t *ns = map_put_ref(uint32_t, uint32_t)(buf->b_extmark_ns, ns_id, NULL, NULL);
  uint32_t id = idp ? *idp : 0;

  uint16_t flags = mt_flags(right_gravity, no_undo, invalidate, decor.ext) | decor_flags;
  if (id == 0) {
    // Allocate a fresh id from the namespace counter.
    id = ++*ns;
  } else {
    MarkTreeIter itr[1] = { 0 };
    MTKey old_mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
    if (old_mark.id) {
      // Updating an existing mark with the same id.
      if (mt_paired(old_mark) || end_row > -1) {
        // Paired marks (old or new) cannot be revised in place: delete and re-add.
        extmark_del_id(buf, ns_id, id);
      } else {
        assert(marktree_itr_valid(itr));
        if (old_mark.pos.row == row && old_mark.pos.col == col) {
          // not paired: we can revise in place
          if (!mt_invalid(old_mark) && mt_decor_any(old_mark)) {
            // Drop the old decoration before overwriting flags and payload.
            mt_itr_rawkey(itr).flags &= (uint16_t) ~MT_FLAG_EXTERNAL_MASK;
            buf_decor_remove(buf, row, row, col, mt_decor(old_mark), true);
          }
          mt_itr_rawkey(itr).flags |= flags;
          mt_itr_rawkey(itr).decor_data = decor.data;
          marktree_revise_meta(buf->b_marktree, itr, old_mark);
          goto revised;
        }
        // Position changed: remove the old mark (and its live decoration).
        marktree_del_itr(buf->b_marktree, itr, false);
        if (!mt_invalid(old_mark)) {
          buf_decor_remove(buf, old_mark.pos.row, old_mark.pos.row, old_mark.pos.col,
                           mt_decor(old_mark), true);
        }
      }
    } else {
      // Caller supplied an unused id: keep the allocator ahead of it.
      *ns = MAX(*ns, id);
    }
  }

  MTKey mark = { { row, col }, ns_id, id, flags, decor.data };

  marktree_put(buf->b_marktree, mark, end_row, end_col, end_right_gravity);
  decor_state_invalidate(buf);

revised:
  if (decor_flags || decor.ext) {
    buf_put_decor(buf, decor, row, end_row > -1 ? end_row : row);
    decor_redraw(buf, row, end_row > -1 ? end_row : row, col, decor);
  }

  if (idp) {
    *idp = id;
  }
}
    108 
    109 static void extmark_setraw(buf_T *buf, uint64_t mark, int row, colnr_T col, bool invalid)
    110 {
    111  MarkTreeIter itr[1] = { 0 };
    112  MTKey key = marktree_lookup(buf->b_marktree, mark, itr);
    113  bool move = key.pos.row != row || key.pos.col != col;
    114  if (key.pos.row < 0 || (!move && !invalid)) {
    115    return;  // Mark was deleted or no change needed
    116  }
    117 
    118  // Only the position before undo needs to be redrawn here,
    119  // as the position after undo should be marked as changed.
    120  if (!invalid && mt_decor_any(key) && key.pos.row != row) {
    121    decor_redraw(buf, key.pos.row, key.pos.row, key.pos.col, mt_decor(key));
    122  }
    123 
    124  int row1 = 0;
    125  int row2 = 0;
    126  MarkTreeIter altitr[1] = { *itr };
    127  MTKey alt = marktree_get_alt(buf->b_marktree, key, altitr);
    128 
    129  if (invalid) {
    130    mt_itr_rawkey(itr).flags &= (uint16_t) ~MT_FLAG_INVALID;
    131    mt_itr_rawkey(altitr).flags &= (uint16_t) ~MT_FLAG_INVALID;
    132    marktree_revise_meta(buf->b_marktree, mt_end(key) ? altitr : itr, mt_end(key) ? alt : key);
    133  } else if (!mt_invalid(key) && key.flags & MT_FLAG_DECOR_SIGNTEXT && buf->b_signcols.autom) {
    134    row1 = MIN(alt.pos.row, MIN(key.pos.row, row));
    135    row2 = MAX(alt.pos.row, MAX(key.pos.row, row));
    136    buf_signcols_count_range(buf, row1, MIN(curbuf->b_ml.ml_line_count - 1, row2), 0, kTrue);
    137  }
    138 
    139  if (move) {
    140    marktree_move(buf->b_marktree, itr, row, col);
    141  }
    142 
    143  if (invalid) {
    144    buf_put_decor(buf, mt_decor(key), MIN(row, alt.pos.row), MAX(row, alt.pos.row));
    145  } else if (!mt_invalid(key) && key.flags & MT_FLAG_DECOR_SIGNTEXT && buf->b_signcols.autom) {
    146    buf_signcols_count_range(buf, row1, MIN(curbuf->b_ml.ml_line_count - 1, row2), 0, kNone);
    147  }
    148 }
    149 
    150 /// Remove an extmark in "ns_id" by "id"
    151 ///
    152 /// @return false on missing id
    153 bool extmark_del_id(buf_T *buf, uint32_t ns_id, uint32_t id)
    154 {
    155  MarkTreeIter itr[1] = { 0 };
    156  MTKey key = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, itr);
    157  if (key.id) {
    158    extmark_del(buf, itr, key, false);
    159  }
    160 
    161  return key.id > 0;
    162 }
    163 
/// Remove a (paired) extmark "key" pointed to by "itr"
///
/// @param buf      buffer the mark lives in
/// @param itr      iterator positioned at "key"; repositioned by the deletion
/// @param key      mark to delete (may be either half of a pair)
/// @param restore  reposition "itr" back to key's original position afterwards,
///                 so callers iterating a range can continue safely
void extmark_del(buf_T *buf, MarkTreeIter *itr, MTKey key, bool restore)
{
  assert(key.pos.row >= 0);

  MTKey key2 = key;
  // Delete the mark under the iterator; for a paired mark this returns the
  // raw key of the other half, which must be deleted too.
  uint64_t other = marktree_del_itr(buf->b_marktree, itr, false);
  if (other) {
    key2 = marktree_lookup(buf->b_marktree, other, itr);
    assert(key2.pos.row >= 0);
    marktree_del_itr(buf->b_marktree, itr, false);
    if (restore) {
      marktree_itr_get(buf->b_marktree, key.pos.row, key.pos.col, itr);
    }
  }

  if (mt_decor_any(key)) {
    if (mt_invalid(key)) {
      // Invalidated marks have no active decoration: just free the payload.
      decor_free(mt_decor(key));
    } else {
      if (mt_end(key)) {
        // Swap so "key" is the start and "key2" the end of the range.
        MTKey k = key;
        key = key2;
        key2 = k;
      }
      buf_decor_remove(buf, key.pos.row, key2.pos.row, key.pos.col, mt_decor(key), true);
    }
  }

  decor_state_invalidate(buf);

  // TODO(bfredl): delete it from current undo header, opportunistically?
}
    197 
/// Free extmarks in a ns between lines
/// if ns = 0, it means clear all namespaces
///
/// @param buf          buffer to clear marks in
/// @param ns_id        namespace to clear, or 0 for all namespaces
/// @param l_row,l_col  start of the range (inclusive)
/// @param u_row,u_col  end of the range (inclusive)
/// @return true if any marks were cleared
bool extmark_clear(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_row, colnr_T u_col)
{
  if (!map_size(buf->b_extmark_ns)) {
    return false;  // buffer has never had extmarks
  }

  bool all_ns = (ns_id == 0);
  uint32_t *ns = NULL;
  if (!all_ns) {
    ns = map_ref(uint32_t, uint32_t)(buf->b_extmark_ns, ns_id, NULL);
    if (!ns) {
      // nothing to do
      return false;
    }
  }

  bool marks_cleared_any = false;
  // Tracks whether every mark in the buffer got cleared (range started at the
  // top and reached past the last mark); then the id maps can be reset too.
  bool marks_cleared_all = l_row == 0 && l_col == 0;

  MarkTreeIter itr[1] = { 0 };
  marktree_itr_get(buf->b_marktree, l_row, l_col, itr);
  while (true) {
    MTKey mark = marktree_itr_current(itr);
    if (mark.pos.row < 0
        || mark.pos.row > u_row
        || (mark.pos.row == u_row && mark.pos.col > u_col)) {
      if (mark.pos.row >= 0) {
        // A mark survives past the range: not everything was cleared.
        marks_cleared_all = false;
      }
      break;
    }
    if (mark.ns == ns_id || all_ns) {
      marks_cleared_any = true;
      // restore=true makes extmark_del reposition the iterator, so no
      // explicit marktree_itr_next() is needed on this path.
      extmark_del(buf, itr, mark, true);
    } else {
      marktree_itr_next(buf->b_marktree, itr);
    }
  }

  if (marks_cleared_all) {
    if (all_ns) {
      map_destroy(uint32_t, buf->b_extmark_ns);
      *buf->b_extmark_ns = (Map(uint32_t, uint32_t)) MAP_INIT;
    } else {
      map_del(uint32_t, uint32_t)(buf->b_extmark_ns, ns_id, NULL);
    }
  }

  if (marks_cleared_any) {
    decor_state_invalidate(buf);
  }

  return marks_cleared_any;
}
    254 
/// @return  the position of marks between a range,
///          marks found at the start or end index will be included.
///
/// if upper_lnum or upper_col are negative the buffer
/// will be searched to the start, or end
/// amount = amount of marks to find or INT64_MAX for all
///
/// @param overlap  also include (paired) marks that start before l_row/l_col
///                 but overlap the start position
ExtmarkInfoArray extmark_get(buf_T *buf, uint32_t ns_id, int l_row, colnr_T l_col, int u_row,
                             colnr_T u_col, int64_t amount, ExtmarkType type_filter, bool overlap)
{
  ExtmarkInfoArray array = KV_INITIAL_VALUE;
  MarkTreeIter itr[1];

  if (overlap) {
    // Find all the marks overlapping the start position
    if (!marktree_itr_get_overlap(buf->b_marktree, l_row, l_col, itr)) {
      return array;
    }

    // Step through overlapping pairs until the overlap scan is exhausted;
    // the iterator is then positioned for the forward scan below.
    while ((int64_t)kv_size(array) < amount) {
      MTPair pair;
      if (!marktree_itr_step_overlap(buf->b_marktree, itr, &pair)) {
        break;
      }
      push_mark(&array, ns_id, type_filter, pair);
    }
  } else {
    // Find all the marks beginning with the start position
    marktree_itr_get_ext(buf->b_marktree, MTPos(l_row, l_col),
                         itr, false, false, NULL, NULL);
  }

  while ((int64_t)kv_size(array) < amount) {
    MTKey mark = marktree_itr_current(itr);
    if (mark.pos.row < 0
        || (mark.pos.row > u_row)
        || (mark.pos.row == u_row && mark.pos.col > u_col)) {
      break;
    }
    // Skip end keys: each pair is reported once, from its start key.
    if (!mt_end(mark)) {
      MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
      push_mark(&array, ns_id, type_filter, mtpair_from(mark, end));
    }
    marktree_itr_next(buf->b_marktree, itr);
  }
  return array;
}
    301 
    302 static void push_mark(ExtmarkInfoArray *array, uint32_t ns_id, ExtmarkType type_filter, MTPair mark)
    303 {
    304  if (!(ns_id == UINT32_MAX || mark.start.ns == ns_id)) {
    305    return;
    306  }
    307  if (type_filter != kExtmarkNone) {
    308    if (!mt_decor_any(mark.start)) {
    309      return;
    310    }
    311    uint16_t type_flags = decor_type_flags(mt_decor(mark.start));
    312 
    313    if (!(type_flags & type_filter)) {
    314      return;
    315    }
    316  }
    317 
    318  kv_push(*array, mark);
    319 }
    320 
    321 /// Lookup an extmark by id
    322 MTPair extmark_from_id(buf_T *buf, uint32_t ns_id, uint32_t id)
    323 {
    324  MTKey mark = marktree_lookup_ns(buf->b_marktree, ns_id, id, false, NULL);
    325  if (!mark.id) {
    326    return mtpair_from(mark, mark);  // invalid
    327  }
    328  assert(mark.pos.row >= 0);
    329  MTKey end = marktree_get_alt(buf->b_marktree, mark, NULL);
    330 
    331  return mtpair_from(mark, end);
    332 }
    333 
    334 /// free extmarks from the buffer
    335 void extmark_free_all(buf_T *buf)
    336 {
    337  MarkTreeIter itr[1] = { 0 };
    338  marktree_itr_get(buf->b_marktree, 0, 0, itr);
    339  while (true) {
    340    MTKey mark = marktree_itr_current(itr);
    341    if (mark.pos.row < 0) {
    342      break;
    343    }
    344 
    345    // don't free mark.decor twice for a paired mark.
    346    if (!(mt_paired(mark) && mt_end(mark))) {
    347      decor_free(mt_decor(mark));
    348    }
    349 
    350    marktree_itr_next(buf->b_marktree, itr);
    351  }
    352 
    353  marktree_clear(buf->b_marktree);
    354 
    355  buf->b_signcols.max = 0;
    356  CLEAR_FIELD(buf->b_signcols.count);
    357 
    358  map_destroy(uint32_t, buf->b_extmark_ns);
    359  *buf->b_extmark_ns = (Map(uint32_t, uint32_t)) MAP_INIT;
    360 }
    361 
/// invalidate extmarks between range and copy to undo header
///
/// copying is useful when we cannot simply reverse the operation. This will do
/// nothing on redo, enforces correct position when undo.
///
/// @param buf          buffer being edited
/// @param l_row,l_col  start of the deleted/changed region
/// @param u_row,u_col  end of the deleted/changed region
/// @param uvp          undo vector to push saved positions onto (may be NULL)
/// @param only_copy    only record positions; do not invalidate/delete marks
/// @param op           undo behavior; positions are saved only for kExtmarkUndo
void extmark_splice_delete(buf_T *buf, int l_row, colnr_T l_col, int u_row, colnr_T u_col,
                           extmark_undo_vec_t *uvp, bool only_copy, ExtmarkOp op)
{
  MarkTreeIter itr[1] = { 0 };
  ExtmarkUndoObject undo;

  marktree_itr_get(buf->b_marktree, (int32_t)l_row, l_col, itr);
  while (true) {
    MTKey mark = marktree_itr_current(itr);
    if (mark.pos.row < 0 || mark.pos.row > u_row) {
      break;
    }

    bool copy = true;
    // No need to copy left gravity marks at the beginning of the range,
    // and right gravity marks at the end of the range, unless invalidated.
    if (mark.pos.row == l_row && mark.pos.col - !mt_right(mark) < l_col) {
      copy = false;
    } else if (mark.pos.row == u_row) {
      if (mark.pos.col > u_col + 1) {
        break;
      } else if (mark.pos.col + mt_right(mark) > u_col) {
        copy = false;
      }
    }

    bool invalidated = false;
    // Invalidate/delete mark
    if (!only_copy && !mt_invalid(mark) && mt_invalidate(mark) && !mt_end(mark)) {
      MarkTreeIter enditr[1] = { *itr };
      MTPos endpos = marktree_get_altpos(buf->b_marktree, mark, enditr);
      // Invalidate unpaired marks in deleted lines and paired marks whose entire
      // range has been deleted.
      if ((!mt_paired(mark) && mark.pos.row < u_row)
          || (mt_paired(mark)
              && (mark.pos.row > l_row || (mark.pos.row == l_row && mark.pos.col >= l_col))
              && (endpos.row < u_row || (endpos.row == u_row && endpos.col <= u_col)))) {
        if (mt_no_undo(mark)) {
          // Not restorable on undo: remove it outright. extmark_del with
          // restore=true repositions the iterator, so skip itr_next.
          extmark_del(buf, itr, mark, true);
          continue;
        } else {
          // Mark both halves invalid but keep them in the tree so undo can
          // revive them; its position must be copied in that case.
          copy = true;
          invalidated = true;
          mt_itr_rawkey(itr).flags |= MT_FLAG_INVALID;
          mt_itr_rawkey(enditr).flags |= MT_FLAG_INVALID;
          marktree_revise_meta(buf->b_marktree, itr, mark);
          buf_decor_remove(buf, mark.pos.row, endpos.row, mark.pos.col, mt_decor(mark), false);
        }
      }
    }

    // Push mark to undo header
    if (copy && (only_copy || (uvp != NULL && op == kExtmarkUndo && !mt_no_undo(mark)))) {
      ExtmarkSavePos pos = {
        .mark = mt_lookup_key(mark),
        .invalidated = invalidated,
        .old_row = mark.pos.row,
        .old_col = mark.pos.col
      };
      undo.data.savepos = pos;
      undo.type = kExtmarkSavePos;
      kv_push(*uvp, undo);
    }

    marktree_itr_next(buf->b_marktree, itr);
  }
}
    433 
    434 /// undo or redo an extmark operation
    435 void extmark_apply_undo(ExtmarkUndoObject undo_info, bool undo)
    436 {
    437  // splice: any text operation changing position (except :move)
    438  if (undo_info.type == kExtmarkSplice) {
    439    // Undo
    440    ExtmarkSplice splice = undo_info.data.splice;
    441    if (undo) {
    442      extmark_splice_impl(curbuf,
    443                          splice.start_row, splice.start_col, splice.start_byte,
    444                          splice.new_row, splice.new_col, splice.new_byte,
    445                          splice.old_row, splice.old_col, splice.old_byte,
    446                          kExtmarkNoUndo);
    447    } else {
    448      extmark_splice_impl(curbuf,
    449                          splice.start_row, splice.start_col, splice.start_byte,
    450                          splice.old_row, splice.old_col, splice.old_byte,
    451                          splice.new_row, splice.new_col, splice.new_byte,
    452                          kExtmarkNoUndo);
    453    }
    454    // kExtmarkSavePos
    455  } else if (undo_info.type == kExtmarkSavePos) {
    456    ExtmarkSavePos pos = undo_info.data.savepos;
    457    if (undo && pos.old_row >= 0) {
    458      extmark_setraw(curbuf, pos.mark, pos.old_row, pos.old_col, pos.invalidated);
    459    }
    460    // No Redo since kExtmarkSplice will move marks back
    461  } else if (undo_info.type == kExtmarkMove) {
    462    ExtmarkMove move = undo_info.data.move;
    463    if (undo) {
    464      extmark_move_region(curbuf,
    465                          move.new_row, move.new_col, move.new_byte,
    466                          move.extent_row, move.extent_col, move.extent_byte,
    467                          move.start_row, move.start_col, move.start_byte,
    468                          kExtmarkNoUndo);
    469    } else {
    470      extmark_move_region(curbuf,
    471                          move.start_row, move.start_col, move.start_byte,
    472                          move.extent_row, move.extent_col, move.extent_byte,
    473                          move.new_row, move.new_col, move.new_byte,
    474                          kExtmarkNoUndo);
    475    }
    476  }
    477 }
    478 
/// Adjust extmark row for inserted/deleted rows (columns stay fixed).
///
/// @param buf           buffer being edited
/// @param line1,line2   1-based range of affected lines
/// @param amount        row delta, or MAXLNUM when the range was deleted
/// @param amount_after  row delta for lines after the range
/// @param undo          whether to record the splice for undo
void extmark_adjust(buf_T *buf, linenr_T line1, linenr_T line2, linenr_T amount,
                    linenr_T amount_after, ExtmarkOp undo)
{
  if (curbuf_splice_pending) {
    // A nvim_buf_set_text-style splice is in progress; it will emit its own
    // extmark_splice, so skip this legacy adjustment.
    return;
  }
  bcount_t start_byte = ml_find_line_or_offset(buf, line1, NULL, true);
  bcount_t old_byte = 0;
  bcount_t new_byte = 0;
  int old_row;
  int new_row;
  if (amount == MAXLNUM) {
    // Deletion of line1..line2.
    old_row = line2 - line1 + 1;
    // TODO(bfredl): ej kasta?
    old_byte = (bcount_t)buf->deleted_bytes2;
    new_row = amount_after + old_row;
  } else {
    // A region is either deleted (amount == MAXLNUM) or
    // added (line2 == MAXLNUM). The only other case is :move
    // which is handled by a separate entry point extmark_move_region.
    assert(line2 == MAXLNUM);
    old_row = 0;
    new_row = (int)amount;
  }
  if (new_row > 0) {
    // Byte extent of the new region, measured in the post-change buffer.
    new_byte = ml_find_line_or_offset(buf, line1 + new_row, NULL, true)
               - start_byte;
  }
  extmark_splice_impl(buf,
                      (int)line1 - 1, 0, start_byte,
                      old_row, 0, old_byte,
                      new_row, 0, new_byte, undo);
}
    513 
// Adjusts extmarks after a text edit, and emits the `on_bytes` event (`:h api-buffer-updates`).
//
// @param buf
// @param start_row   Start row of the region to be changed
// @param start_col   Start col of the region to be changed
// @param old_row     End row of the region to be changed.
//                      Encoded as an offset to start_row.
// @param old_col     End col of the region to be changed. Encodes
//                      an offset from start_col if old_row = 0; otherwise,
//                      encodes the end column of the old region.
// @param old_byte    Byte extent of the region to be changed.
// @param new_row     Row offset of the new region.
// @param new_col     Col offset of the new region. Encodes an offset from
//                      start_col if new_row = 0; otherwise, encodes
//                      the end column of the new region.
// @param new_byte    Byte extent of the new region.
// @param undo
void extmark_splice(buf_T *buf, int start_row, colnr_T start_col, int old_row, colnr_T old_col,
                    bcount_t old_byte, int new_row, colnr_T new_col, bcount_t new_byte,
                    ExtmarkOp undo)
{
  // Byte offset of the start line; start_col is added below to get the
  // absolute start byte.
  int offset = ml_find_line_or_offset(buf, start_row + 1, NULL, true);

  // On empty buffers, when editing the first line, the line is buffered,
  // causing offset to be < 0. While the buffer is not actually empty, the
  // buffered line has not been flushed (and should not be) yet, so the call is
  // valid but an edge case.
  //
  // TODO(vigoux): maybe there is a better way of testing that ?
  if (offset < 0 && buf->b_ml.ml_chunksize == NULL) {
    offset = 0;
  }
  extmark_splice_impl(buf, start_row, start_col, offset + start_col,
                      old_row, old_col, old_byte, new_row, new_col, new_byte,
                      undo);
}
    550 
/// Core splice: notify buffer-update callbacks, save/invalidate marks in the
/// deleted region, shift marks in the tree, and record the splice for undo.
///
/// Parameter encoding matches extmark_splice(), except start_byte is the
/// absolute byte offset of (start_row, start_col).
void extmark_splice_impl(buf_T *buf, int start_row, colnr_T start_col, bcount_t start_byte,
                         int old_row, colnr_T old_col, bcount_t old_byte, int new_row,
                         colnr_T new_col, bcount_t new_byte, ExtmarkOp undo)
{
  // deleted_bytes2 was consumed by extmark_adjust (if applicable); reset it.
  buf->deleted_bytes2 = 0;
  buf_updates_send_splice(buf, start_row, start_col, start_byte,
                          old_row, old_col, old_byte,
                          new_row, new_col, new_byte);

  if (old_row > 0 || old_col > 0) {
    // Copy and invalidate marks that would be effected by delete
    // TODO(bfredl): Be smart about marks that already have been
    // saved (important for merge!)
    int end_row = start_row + old_row;
    int end_col = (old_row ? 0 : start_col) + old_col;
    u_header_T *uhp = u_force_get_undo_header(buf);
    extmark_undo_vec_t *uvp = uhp ? &uhp->uh_extmark : NULL;
    extmark_splice_delete(buf, start_row, start_col, end_row, end_col, uvp, false, undo);
  }

  // Remove signs inside edited region from "b_signcols.count", add after splicing.
  if (old_row > 0 || new_row > 0) {
    // b_prev_line_count holds the pre-change line count when set; fall back
    // to the current count otherwise.
    int count = buf->b_prev_line_count > 0 ? buf->b_prev_line_count : buf->b_ml.ml_line_count;
    buf_signcols_count_range(buf, start_row, MIN(count - 1, start_row + old_row), 0, kTrue);
    buf->b_prev_line_count = 0;
  }

  marktree_splice(buf->b_marktree, (int32_t)start_row, start_col,
                  old_row, old_col,
                  new_row, new_col);

  if (old_row > 0 || new_row > 0) {
    int row2 = MIN(buf->b_ml.ml_line_count - 1, start_row + new_row);
    buf_signcols_count_range(buf, start_row, row2, 0, kNone);
  }

  if (undo == kExtmarkUndo) {
    u_header_T *uhp = u_force_get_undo_header(buf);
    if (!uhp) {
      return;
    }

    bool merged = false;
    // TODO(bfredl): this is quite rudimentary. We merge small (within line)
    // inserts with each other and small deletes with each other. Add full
    // merge algorithm later.
    if (old_row == 0 && new_row == 0 && kv_size(uhp->uh_extmark)) {
      ExtmarkUndoObject *item = &kv_A(uhp->uh_extmark,
                                      kv_size(uhp->uh_extmark) - 1);
      if (item->type == kExtmarkSplice) {
        ExtmarkSplice *splice = &item->data.splice;
        if (splice->start_row == start_row && splice->old_row == 0
            && splice->new_row == 0) {
          if (old_col == 0 && start_col >= splice->start_col
              && start_col <= splice->start_col + splice->new_col) {
            // Insert at/inside the previous insert: grow its new extent.
            splice->new_col += new_col;
            splice->new_byte += new_byte;
            merged = true;
          } else if (new_col == 0
                     && start_col == splice->start_col + splice->new_col) {
            // Delete immediately after the previous splice: grow its old extent.
            splice->old_col += old_col;
            splice->old_byte += old_byte;
            merged = true;
          } else if (new_col == 0
                     && start_col + old_col == splice->start_col) {
            // Delete immediately before the previous splice: extend it backwards.
            splice->start_col = start_col;
            splice->start_byte = start_byte;
            splice->old_col += old_col;
            splice->old_byte += old_byte;
            merged = true;
          }
        }
      }
    }

    if (!merged) {
      ExtmarkSplice splice;
      splice.start_row = start_row;
      splice.start_col = start_col;
      splice.start_byte = start_byte;
      splice.old_row = old_row;
      splice.old_col = old_col;
      splice.old_byte = old_byte;
      splice.new_row = new_row;
      splice.new_col = new_col;
      splice.new_byte = new_byte;

      kv_push(uhp->uh_extmark,
              ((ExtmarkUndoObject){ .type = kExtmarkSplice,
                                    .data.splice = splice }));
    }
  }
}
    644 
    645 void extmark_splice_cols(buf_T *buf, int start_row, colnr_T start_col, colnr_T old_col,
    646                         colnr_T new_col, ExtmarkOp undo)
    647 {
    648  extmark_splice(buf, start_row, start_col,
    649                 0, old_col, old_col,
    650                 0, new_col, new_col, undo);
    651 }
    652 
/// Move a region of marks (used by :move), emitting a delete-splice at the
/// source and an insert-splice at the destination for buffer-update callbacks.
///
/// @param buf                    buffer being edited
/// @param start_row,start_col    start of the moved region (pre-move)
/// @param start_byte             byte offset of the start position
/// @param extent_row,extent_col  size of the region (row/col extent encoding
///                               as in extmark_splice)
/// @param extent_byte            byte extent of the region
/// @param new_row,new_col        destination position
/// @param new_byte               byte offset of the destination
/// @param undo                   whether to record the move for undo
void extmark_move_region(buf_T *buf, int start_row, colnr_T start_col, bcount_t start_byte,
                         int extent_row, colnr_T extent_col, bcount_t extent_byte, int new_row,
                         colnr_T new_col, bcount_t new_byte, ExtmarkOp undo)
{
  buf->deleted_bytes2 = 0;
  // TODO(bfredl): this is not synced to the buffer state inside the callback.
  // But unless we make the undo implementation smarter, this is not ensured
  // anyway.
  // First splice: region deleted at its old position.
  buf_updates_send_splice(buf, start_row, start_col, start_byte,
                          extent_row, extent_col, extent_byte,
                          0, 0, 0);

  // Remove sign counts over the whole affected span, re-add after the move.
  int row1 = MIN(start_row, new_row);
  int row2 = MAX(start_row, new_row) + extent_row;
  buf_signcols_count_range(buf, row1, row2, 0, kTrue);

  marktree_move_region(buf->b_marktree, start_row, start_col,
                       extent_row, extent_col,
                       new_row, new_col);

  buf_signcols_count_range(buf, row1, row2, 0, kNone);

  // Second splice: region inserted at its new position.
  buf_updates_send_splice(buf, new_row, new_col, new_byte,
                          0, 0, 0,
                          extent_row, extent_col, extent_byte);

  if (undo == kExtmarkUndo) {
    u_header_T *uhp = u_force_get_undo_header(buf);
    if (!uhp) {
      return;
    }

    ExtmarkMove move;
    move.start_row = start_row;
    move.start_col = start_col;
    move.start_byte = start_byte;
    move.extent_row = extent_row;
    move.extent_col = extent_col;
    move.extent_byte = extent_byte;
    move.new_row = new_row;
    move.new_col = new_col;
    move.new_byte = new_byte;

    kv_push(uhp->uh_extmark,
            ((ExtmarkUndoObject){ .type = kExtmarkMove,
                                  .data.move = move }));
  }
}