neovim

Neovim text editor
git clone https://git.dasho.dev/neovim.git
Log | Files | Refs | README

proc.c (14536B)


      1 #include <assert.h>
      2 #include <inttypes.h>
      3 #include <signal.h>
      4 #include <string.h>
      5 #include <uv.h>
      6 
      7 #include "klib/kvec.h"
      8 #include "nvim/channel.h"
      9 #include "nvim/event/libuv_proc.h"
     10 #include "nvim/event/loop.h"
     11 #include "nvim/event/multiqueue.h"
     12 #include "nvim/event/proc.h"
     13 #include "nvim/event/rstream.h"
     14 #include "nvim/event/stream.h"
     15 #include "nvim/event/wstream.h"
     16 #include "nvim/globals.h"
     17 #include "nvim/log.h"
     18 #include "nvim/main.h"
     19 #include "nvim/memory_defs.h"
     20 #include "nvim/os/proc.h"
     21 #include "nvim/os/pty_proc.h"
     22 #include "nvim/os/shell.h"
     23 #include "nvim/os/time.h"
     24 #include "nvim/ui_client.h"
     25 
     26 #include "event/proc.c.generated.h"
     27 
     28 // Time for a process to exit cleanly before we send KILL.
     29 // For PTY processes SIGTERM is sent first (in case SIGHUP was not enough).
     30 #define KILL_TIMEOUT_MS 2000
     31 
     32 /// Externally defined with gcov.
     33 #ifdef USE_GCOV
     34 void __gcov_flush(void);
     35 #endif
     36 
     37 static bool proc_is_tearing_down = false;
     38 
     39 // Delay exit until handles are closed, to avoid deadlocks
     40 static int exit_need_delay = 0;
     41 
/// Spawns `proc` (libuv- or PTY-backed, per `proc->type`) and initializes the
/// requested stdio streams. On failure, all partially-initialized handles are
/// closed again and `proc->status` is set to -1.
///
/// @param proc  Process instance to spawn.
/// @param in    true to open a pipe for the child's stdin.
/// @param out   true to open a stream for the child's stdout.
/// @param err   true to open a pipe for the child's stderr.
/// @returns zero on success, or negative error code
int proc_spawn(Proc *proc, bool in, bool out, bool err)
  FUNC_ATTR_NONNULL_ALL
{
#ifdef MSWIN
  // Windows always reads stdout through a uv_pipe_t.
  const bool out_use_poll = false;
#else
  // Using uv_pipe_t to read from PTY master may drop data if the PTY process exits
  // immediately after output, as libuv treats a partial read after POLLHUP as EOF,
  // which isn't true for PTY master on Linux. Therefore use uv_poll_t instead. #3030
  // Ref: https://github.com/libuv/libuv/issues/4992
  const bool out_use_poll = proc->type == kProcTypePty;
#endif

  // Initialize the uv pipe for each requested stream; mark unused streams as
  // closed so the rest of the code skips them.
  if (in) {
    uv_pipe_init(&proc->loop->uv, &proc->in.uv.pipe, 0);
  } else {
    proc->in.closed = true;
  }

  if (out) {
    if (!out_use_poll) {
      // When polling, the stream is attached to the PTY fd later instead.
      uv_pipe_init(&proc->loop->uv, &proc->out.s.uv.pipe, 0);
    }
  } else {
    proc->out.s.closed = true;
  }

  if (err) {
    uv_pipe_init(&proc->loop->uv, &proc->err.s.uv.pipe, 0);
  } else {
    proc->err.s.closed = true;
  }

#ifdef USE_GCOV
  // Flush coverage data before forking, to avoid "Merge mismatch" errors.
  __gcov_flush();
#endif

  int status;
  switch (proc->type) {
  case kProcTypeUv:
    status = libuv_proc_spawn((LibuvProc *)proc);
    break;
  case kProcTypePty:
    status = pty_proc_spawn((PtyProc *)proc);
    break;
  }

  if (status) {
    // Spawn failed: undo the uv_pipe_init() calls above.
    if (in) {
      uv_close((uv_handle_t *)&proc->in.uv.pipe, NULL);
    }
    if (out && !out_use_poll) {
      uv_close((uv_handle_t *)&proc->out.s.uv.pipe, NULL);
    }
    if (err) {
      uv_close((uv_handle_t *)&proc->err.s.uv.pipe, NULL);
    }

    if (proc->type == kProcTypeUv) {
      uv_close((uv_handle_t *)&(((LibuvProc *)proc)->uv), NULL);
    } else {
      proc_close(proc);
    }
    // proc_free() only releases proc->argv; `proc` itself remains valid, so
    // setting proc->status afterwards is fine.
    proc_free(proc);
    proc->status = -1;
    return status;
  }

  // Attach the streams to the proc; each open stream holds one reference so
  // the proc is only released after all of its streams closed (see decref()).
  if (in) {
    stream_init(NULL, &proc->in, -1, false, (uv_stream_t *)&proc->in.uv.pipe);
    proc->in.internal_data = proc;
    proc->in.internal_close_cb = on_proc_stream_close;
    proc->refcount++;
  }

  if (out) {
    if (out_use_poll) {
#ifdef MSWIN
      // out_use_poll is constant false on Windows; this branch is unreachable.
      abort();
#else
      // Poll the PTY master fd directly (see comment at out_use_poll above).
      stream_init(proc->loop, &proc->out.s, ((PtyProc *)proc)->tty_fd, true, NULL);
#endif
    } else {
      stream_init(NULL, &proc->out.s, -1, false, (uv_stream_t *)&proc->out.s.uv.pipe);
    }
    proc->out.s.internal_data = proc;
    proc->out.s.internal_close_cb = on_proc_stream_close;
    proc->refcount++;
  }

  if (err) {
    stream_init(NULL, &proc->err.s, -1, false, (uv_stream_t *)&proc->err.s.uv.pipe);
    proc->err.s.internal_data = proc;
    proc->err.s.internal_close_cb = on_proc_stream_close;
    proc->refcount++;
  }

  // One more reference for the process handle itself, dropped by decref()
  // via internal_close_cb once the proc is fully closed.
  proc->internal_exit_cb = on_proc_exit;
  proc->internal_close_cb = decref;
  proc->refcount++;
  kv_push(proc->loop->children, proc);
  DLOG("new: pid=%d exepath=[%s]", proc->pid, proc_get_exepath(proc));
  return 0;
}
    148 
/// Stops or detaches all child processes of `loop`, then blocks until every
/// child has exited and all pending close events were processed.
void proc_teardown(Loop *loop) FUNC_ATTR_NONNULL_ALL
{
  // From here on proc_close() tolerates a second close of detached/pty procs.
  proc_is_tearing_down = true;
  for (size_t i = 0; i < kv_size(loop->children); i++) {
    Proc *proc = kv_A(loop->children, i);
    if (proc->detach || proc->type == kProcTypePty) {
      // Close handles to process without killing it.
      CREATE_EVENT(loop->events, proc_close_handles, proc);
    } else {
      proc_stop(proc);
    }
  }

  // Wait until all children exit and all close events are processed.
  LOOP_PROCESS_EVENTS_UNTIL(loop, loop->events, -1,
                            kv_size(loop->children) == 0 && multiqueue_empty(loop->events));
  pty_proc_teardown(loop);
}
    167 
    168 void proc_close_streams(Proc *proc) FUNC_ATTR_NONNULL_ALL
    169 {
    170  stream_may_close(&proc->in);
    171  rstream_may_close(&proc->out);
    172  rstream_may_close(&proc->err);
    173 }
    174 
/// Synchronously wait for a process to finish
///
/// @param process  Process instance
/// @param ms       Time in milliseconds to wait for the process.
///                 0 for no wait. -1 to wait until the process quits.
/// @param events   Queue to pump while waiting; falls back to proc->events
///                 when NULL.
/// @return Exit code of the process. proc->status will have the same value.
///         -1 if the timeout expired while the process is still running.
///         -2 if the user interrupted the wait.
int proc_wait(Proc *proc, int ms, MultiQueue *events)
  FUNC_ATTR_NONNULL_ARG(1)
{
  if (!proc->refcount) {
    // Process already fully closed; just drain pending events and return the
    // recorded status. Read status first: event processing may free `proc`.
    int status = proc->status;
    LOOP_PROCESS_EVENTS(proc->loop, proc->events, 0);
    return status;
  }

  if (!events) {
    events = proc->events;
  }

  // Increase refcount to stop the exit callback from being called (and possibly
  // freed) before we have a chance to get the status.
  proc->refcount++;
  LOOP_PROCESS_EVENTS_UNTIL(proc->loop, events, ms,
                            // Until...
                            got_int                   // interrupted by the user
                            || proc->refcount == 1);  // job exited

  // Assume that a user hitting CTRL-C does not like the current job.  Kill it.
  if (got_int) {
    got_int = false;
    proc_stop(proc);
    if (ms == -1) {
      // We can only return if all streams/handles are closed and the job
      // exited.
      LOOP_PROCESS_EVENTS_UNTIL(proc->loop, events, -1,
                                proc->refcount == 1);
    } else {
      LOOP_PROCESS_EVENTS(proc->loop, events, 0);
    }

    proc->status = -2;
  }

  if (proc->refcount == 1) {
    // Job exited, free its resources.
    decref(proc);
    if (proc->events) {
      // decref() created an exit event, process it now.
      multiqueue_process_events(proc->events);
    }
  } else {
    // Still running (timeout expired): just drop our temporary reference.
    proc->refcount--;
  }

  return proc->status;
}
    233 
/// Ask a process to terminate and eventually kill if it doesn't respond
void proc_stop(Proc *proc) FUNC_ATTR_NONNULL_ALL
{
  bool exited = (proc->status >= 0);
  if (exited || proc->stopped_time) {
    // Already exited, or a stop was already requested earlier.
    return;
  }
  // Non-zero stopped_time doubles as the "stop requested" flag checked above
  // and in children_kill_cb().
  proc->stopped_time = os_hrtime();
  proc->exit_signal = SIGTERM;

  switch (proc->type) {
  case kProcTypeUv:
    os_proc_tree_kill(proc->pid, SIGTERM);
    break;
  case kProcTypePty:
    // close all streams for pty processes to send SIGHUP to the process
    proc_close_streams(proc);
    pty_proc_close_master((PtyProc *)proc);
    break;
  }

  // (Re)start timer to verify that stopped process(es) died.
  uv_timer_start(&proc->loop->children_kill_timer, children_kill_cb,
                 KILL_TIMEOUT_MS, 0);
}
    259 
    260 /// Frees process-owned resources.
    261 void proc_free(Proc *proc) FUNC_ATTR_NONNULL_ALL
    262 {
    263  if (proc->argv != NULL) {
    264    shell_free_argv(proc->argv);
    265    proc->argv = NULL;
    266  }
    267 }
    268 
    269 /// Sends SIGKILL (or SIGTERM..SIGKILL for PTY jobs) to processes that did
    270 /// not terminate after proc_stop().
    271 static void children_kill_cb(uv_timer_t *handle)
    272 {
    273  Loop *loop = handle->loop->data;
    274 
    275  for (size_t i = 0; i < kv_size(loop->children); i++) {
    276    Proc *proc = kv_A(loop->children, i);
    277    bool exited = (proc->status >= 0);
    278    if (exited || !proc->stopped_time) {
    279      continue;
    280    }
    281    uint64_t term_sent = UINT64_MAX == proc->stopped_time;
    282    if (kProcTypePty != proc->type || term_sent) {
    283      proc->exit_signal = SIGKILL;
    284      os_proc_tree_kill(proc->pid, SIGKILL);
    285    } else {
    286      proc->exit_signal = SIGTERM;
    287      os_proc_tree_kill(proc->pid, SIGTERM);
    288      proc->stopped_time = UINT64_MAX;  // Flag: SIGTERM was sent.
    289      // Restart timer.
    290      uv_timer_start(&proc->loop->children_kill_timer, children_kill_cb,
    291                     KILL_TIMEOUT_MS, 0);
    292    }
    293  }
    294 }
    295 
    296 static void proc_close_event(void **argv)
    297 {
    298  Proc *proc = argv[0];
    299  if (proc->cb) {
    300    // User (hint: channel_job_start) is responsible for calling
    301    // proc_free().
    302    proc->cb(proc, proc->status, proc->data);
    303  } else {
    304    proc_free(proc);
    305  }
    306 }
    307 
    308 static void decref(Proc *proc)
    309 {
    310  if (--proc->refcount != 0) {
    311    return;
    312  }
    313 
    314  Loop *loop = proc->loop;
    315  size_t i;
    316  for (i = 0; i < kv_size(loop->children); i++) {
    317    Proc *current = kv_A(loop->children, i);
    318    if (current == proc) {
    319      break;
    320    }
    321  }
    322  assert(i < kv_size(loop->children));  // element found
    323  if (i < kv_size(loop->children) - 1) {
    324    memmove(&kv_A(loop->children, i), &kv_A(loop->children, i + 1),
    325            sizeof(&kv_A(loop->children, i)) * (kv_size(loop->children) - (i + 1)));
    326  }
    327  kv_size(loop->children)--;
    328  CREATE_EVENT(proc->events, proc_close_event, proc);
    329 }
    330 
/// Closes the underlying process handle (libuv or PTY backend).
/// Must be called at most once per proc, except for detached/pty procs during
/// teardown, where a second call is silently ignored.
static void proc_close(Proc *proc)
  FUNC_ATTR_NONNULL_ARG(1)
{
  if (proc_is_tearing_down && proc->closed && (proc->detach || proc->type == kProcTypePty)) {
    // If a detached/pty process dies while tearing down it might get closed twice.
    return;
  }
  assert(!proc->closed);
  proc->closed = true;

  if (proc->detach) {
    if (proc->type == kProcTypeUv) {
      // Detached child should not keep the event loop alive.
      uv_unref((uv_handle_t *)&(((LibuvProc *)proc)->uv));
    }
  }

  switch (proc->type) {
  case kProcTypeUv:
    libuv_proc_close((LibuvProc *)proc);
    break;
  case kProcTypePty:
    pty_proc_close((PtyProc *)proc);
    break;
  }
}
    356 
/// Flush output stream.
///
/// Polls the event loop until the stream is closed, stops producing data, or
/// a bounded amount of data has been read (see max_bytes below).
///
/// @param proc     Process, for which an output stream should be flushed.
/// @param stream   Stream to flush.
static void flush_stream(Proc *proc, RStream *stream)
  FUNC_ATTR_NONNULL_ARG(1)
{
  if (!stream || stream->s.closed) {
    return;
  }

  size_t max_bytes = SIZE_MAX;
#ifdef MSWIN
  // On Windows the limit below always applies.
  if (true) {
#else
  // Don't limit remaining data size of PTY master unless when tearing down, as it may
  // have more remaining data than system buffer size (at least on Linux). #3030
  if (proc->type != kProcTypePty || proc_is_tearing_down) {
#endif
    // Maximal remaining data size of terminated process is system buffer size.
    // Also helps with a child process that keeps the output streams open. If it
    // keeps sending data, we only accept as much data as the system buffer size.
    // Otherwise this would block cleanup/teardown.
    int system_buffer_size = 0;
    // All members of the stream->s.uv union share the same address.
    int err = uv_recv_buffer_size((uv_handle_t *)&stream->s.uv, &system_buffer_size);
    if (err != 0) {
      // Could not query the OS buffer size; fall back to a fixed bound.
      system_buffer_size = ARENA_BLOCK_SIZE;
    }
    max_bytes = stream->num_bytes + (size_t)system_buffer_size;
  }

  // Read remaining data.
  while (!stream->s.closed && stream->num_bytes < max_bytes) {
    // Remember number of bytes before polling.
    size_t num_bytes = stream->num_bytes;

    if (proc->type == kProcTypePty && !stream->did_eof) {
      pty_proc_flush_master((PtyProc *)proc);
    }
    // Poll for data and process the generated events.
    loop_poll_events(proc->loop, 0);
    if (stream->s.events) {
      multiqueue_process_events(stream->s.events);
    }

    // Stream can be closed if it is empty.
    if (num_bytes == stream->num_bytes) {
      // No progress this iteration: the stream is drained.
      if (stream->read_cb && !stream->did_eof) {
        // Stream callback could miss EOF handling if a child keeps the stream
        // open. But only send EOF if we haven't already.
        stream->read_cb(stream, stream->buffer, 0, stream->s.cb_data, true);
      }
      break;
    }
  }
}
    414 
/// Event handler: drains a terminated process' output streams, then closes
/// all of its streams and its process handle.
static void proc_close_handles(void **argv)
{
  Proc *proc = argv[0];

  // Block exit_event() while flushing/closing: it re-schedules itself (via
  // exit_delay_cb) as long as this counter is non-zero.
  exit_need_delay++;
  flush_stream(proc, &proc->out);
  flush_stream(proc, &proc->err);

  proc_close_streams(proc);
  proc_close(proc);
  exit_need_delay--;
}
    427 
    428 static void exit_delay_cb(uv_timer_t *handle)
    429 {
    430  uv_timer_stop(&main_loop.exit_delay_timer);
    431  multiqueue_put(main_loop.fast_events, exit_event, main_loop.exit_delay_timer.data);
    432 }
    433 
/// Event handler that performs the actual exit requested by
/// exit_on_closed_chan(). argv[0] carries the exit status as an intptr_t.
static void exit_event(void **argv)
{
  int status = (int)(intptr_t)argv[0];
  if (exit_need_delay) {
    // Handles are still being closed (see proc_close_handles); retry via a
    // zero-timeout timer to avoid deadlocks.
    main_loop.exit_delay_timer.data = argv[0];
    uv_timer_start(&main_loop.exit_delay_timer, exit_delay_cb, 0, 0);
    return;
  }

  if (!exiting) {
    if (ui_client_channel_id) {
      ui_client_exit_status = status;
      os_exit(status);
    } else {
      assert(status == 0);  // Called from rpc_close(), which passes 0 as status.
      preserve_exit(NULL);
    }
  }
}
    453 
    454 /// Performs self-exit because the primary RPC channel was closed.
    455 void exit_on_closed_chan(int status)
    456 {
    457  DLOG("self-exit triggered by closed RPC channel...");
    458  multiqueue_put(main_loop.fast_events, exit_event, (void *)(intptr_t)status);
    459 }
    460 
    461 static void on_proc_exit(Proc *proc)
    462 {
    463  Loop *loop = proc->loop;
    464  ILOG("child exited: pid=%d status=%d" PRIu64, proc->pid, proc->status);
    465 
    466  // TODO(justinmk): figure out why rpc_close sometimes(??) isn't called.
    467  // Theories:
    468  // - EOF not received in receive_msgpack, then doesn't call chan_close_on_err().
    469  // - proc_close_handles not tickled by ui_client.c's LOOP_PROCESS_EVENTS?
    470  if (ui_client_channel_id) {
    471    uint64_t server_chan_id = ui_client_channel_id;
    472    Channel *server_chan = find_channel(server_chan_id);
    473    if (server_chan != NULL && server_chan->streamtype == kChannelStreamProc
    474        && proc == &server_chan->stream.proc) {
    475      // Need to call ui_client_may_restart_server() here as well, as sometimes
    476      // rpc_close_event() hasn't been called yet (also see comments above).
    477      ui_client_may_restart_server();
    478      if (ui_client_channel_id == server_chan_id) {
    479        // If the current embedded server has exited and no new server is started,
    480        // the client should exit with the same status.
    481        exit_on_closed_chan(proc->status);
    482      }
    483    }
    484  }
    485 
    486  // Process has terminated, but there could still be data to be read from the
    487  // OS. We are still in the libuv loop, so we cannot call code that polls for
    488  // more data directly. Instead delay the reading after the libuv loop by
    489  // queueing proc_close_handles() as an event.
    490  MultiQueue *queue = proc->events ? proc->events : loop->events;
    491  CREATE_EVENT(queue, proc_close_handles, proc);
    492 }
    493 
    494 static void on_proc_stream_close(Stream *stream, void *data)
    495 {
    496  Proc *proc = data;
    497  decref(proc);
    498 }