tor

The Tor anonymity network
git clone https://git.dasho.dev/tor.git
Log | Files | Refs | README | LICENSE

channelpadding.c (30636B)


      1 /* Copyright (c) 2001 Matej Pfajfar.
      2 * Copyright (c) 2001-2004, Roger Dingledine.
      3 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
      4 * Copyright (c) 2007-2025, The Tor Project, Inc. */
      5 /* See LICENSE for licensing information */
      6 
      7 /**
      8 * @file channelpadding.c
      9 * @brief Link-level padding code.
     10 **/
     11 
     12 /* CHANNEL_OBJECT_PRIVATE define needed for an O(1) implementation of
     13 * channelpadding_channel_to_channelinfo() */
     14 #define CHANNEL_OBJECT_PRIVATE
     15 
     16 #include "core/or/or.h"
     17 #include "core/or/channel.h"
     18 #include "core/or/channelpadding.h"
     19 #include "core/or/channeltls.h"
     20 #include "app/config/config.h"
     21 #include "feature/nodelist/networkstatus.h"
     22 #include "core/mainloop/connection.h"
     23 #include "core/or/connection_or.h"
     24 #include "lib/crypt_ops/crypto_rand.h"
     25 #include "core/mainloop/mainloop.h"
     26 #include "feature/stats/rephist.h"
     27 #include "feature/relay/router.h"
     28 #include "feature/relay/routermode.h"
     29 #include "lib/time/compat_time.h"
     30 #include "lib/evloop/timers.h"
     31 #include "feature/hs/hs_service.h"
     32 
     33 #include "core/or/cell_st.h"
     34 #include "core/or/or_connection_st.h"
     35 
/* Forward declarations of file-local helpers (STATIC so unit tests can
 * reach them). */
STATIC int32_t channelpadding_get_netflow_inactive_timeout_ms(
                                                          const channel_t *);
STATIC int channelpadding_send_disable_command(channel_t *);
STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);

/** Total milliseconds of channel padding delay, summed over delays that
 *  exceeded the allowed time window since the last heartbeat or, if no
 *  heartbeat yet, since startup. */
static uint64_t channel_padding_delayed_ms = 0;
/** Number of padding delays that exceeded the allowed time window since
 *  the last heartbeat or, if no heartbeat yet, since startup. */
static uint64_t channel_padding_delayed_count = 0;

/** The total number of pending channelpadding timers */
static uint64_t total_timers_pending;

/** These are cached consensus parameters for netflow */
/** The timeout lower bound (ms) that is allowed before sending padding */
static int consensus_nf_ito_low;
/** The timeout upper bound (ms) that is allowed before sending padding */
static int consensus_nf_ito_high;
/** The timeout lower bound (ms) allowed before sending reduced padding */
static int consensus_nf_ito_low_reduced;
/** The timeout upper bound (ms) allowed before sending reduced padding */
static int consensus_nf_ito_high_reduced;
/** The connection timeout between relays, in seconds */
static int consensus_nf_conntimeout_relays;
/** The connection timeout for client connections, in seconds */
static int consensus_nf_conntimeout_clients;
/** Should we pad before circuits are actually used for client data? */
static int consensus_nf_pad_before_usage;
/** Should we pad relay-to-relay connections? */
static int consensus_nf_pad_relays;
/** Should we pad single onion service connections? ("rosos" presumably
 *  refers to single onion services; cf. CHANNELPADDING_SOS_PARAM — TODO
 *  confirm the acronym) */
static int consensus_nf_pad_single_onion;

/* Unit-conversion constants: milliseconds per second, and microseconds
 * per millisecond. */
#define TOR_MSEC_PER_SEC 1000
#define TOR_USEC_PER_MSEC 1000

/**
 * How often do we get called by the connection housekeeping (ie: once
 * per second) */
#define TOR_HOUSEKEEPING_CALLBACK_MSEC 1000
/**
 * Additional extra time buffer on the housekeeping callback, since
 * it can be delayed. This extra slack is used to decide if we should
 * schedule a timer or wait for the next callback. */
#define TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC 100

/**
 * This macro tells us if either end of the channel is connected to a client.
 * (If we're not a server, we're definitely a client. If the channel thinks
 *  it's a client, use that. Then finally verify in the consensus).
 */
#define CHANNEL_IS_CLIENT(chan, options) \
 (!public_server_mode((options)) || channel_is_client(chan) || \
     !connection_or_digest_is_known_relay((chan)->identity_digest))
/**
 * This function is called to update cached consensus parameters every time
 * there is a consensus update. This allows us to move the consensus param
 * search off of the critical path, so it does not need to be evaluated
 * for every single connection, every second.
 *
 * Note the ordering dependencies below: nf_ito_high is clamped so it can
 * never be below the freshly-fetched nf_ito_low (and likewise for the
 * reduced-padding pair), guaranteeing low <= high for each range.
 */
void
channelpadding_new_consensus_params(const networkstatus_t *ns)
{
  /* Normal keepalive range: 1.5s..9.5s by default, clamped to 0..60s. */
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
  consensus_nf_ito_low = networkstatus_get_param(ns, "nf_ito_low",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  /* The lower clamp here is the value fetched just above, so the high
   * bound can never fall below the low bound. */
  consensus_nf_ito_high = networkstatus_get_param(ns, "nf_ito_high",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
      consensus_nf_ito_low,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);

  /* Reduced-padding keepalive range: 9s..14s by default, clamped 0..60s. */
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
  consensus_nf_ito_low_reduced =
    networkstatus_get_param(ns, "nf_ito_low_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

  /* Same low-clamp trick as above for the reduced range. */
  consensus_nf_ito_high_reduced =
    networkstatus_get_param(ns, "nf_ito_high_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
        consensus_nf_ito_low_reduced,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

#define CONNTIMEOUT_RELAYS_DFLT (60*60) // 1 hour
#define CONNTIMEOUT_RELAYS_MIN 60
#define CONNTIMEOUT_RELAYS_MAX (7*24*60*60) // 1 week
  consensus_nf_conntimeout_relays =
    networkstatus_get_param(ns, "nf_conntimeout_relays",
        CONNTIMEOUT_RELAYS_DFLT,
        CONNTIMEOUT_RELAYS_MIN,
        CONNTIMEOUT_RELAYS_MAX);

#define CIRCTIMEOUT_CLIENTS_DFLT (30*60) // 30 minutes
#define CIRCTIMEOUT_CLIENTS_MIN 60
#define CIRCTIMEOUT_CLIENTS_MAX (24*60*60) // 24 hours
  consensus_nf_conntimeout_clients =
    networkstatus_get_param(ns, "nf_conntimeout_clients",
        CIRCTIMEOUT_CLIENTS_DFLT,
        CIRCTIMEOUT_CLIENTS_MIN,
        CIRCTIMEOUT_CLIENTS_MAX);

  /* Boolean params (0 or 1). */
  consensus_nf_pad_before_usage =
    networkstatus_get_param(ns, "nf_pad_before_usage", 1, 0, 1);

  consensus_nf_pad_relays =
    networkstatus_get_param(ns, "nf_pad_relays", 0, 0, 1);

  consensus_nf_pad_single_onion =
    networkstatus_get_param(ns,
                            CHANNELPADDING_SOS_PARAM,
                            CHANNELPADDING_SOS_DEFAULT, 0, 1);
}
    160 
    161 /**
    162 * Get a random netflow inactive timeout keepalive period in milliseconds,
    163 * the range for which is determined by consensus parameters, negotiation,
    164 * configuration, or default values. The consensus parameters enforce the
    165 * minimum possible value, to avoid excessively frequent padding.
    166 *
    167 * The ranges for this value were chosen to be low enough to ensure that
    168 * routers do not emit a new netflow record for a connection due to it
    169 * being idle.
    170 *
    171 * Specific timeout values for major routers are listed in Proposal 251.
    172 * No major router appeared capable of setting an inactive timeout below 10
    173 * seconds, so we set the defaults below that value, since we can always
    174 * scale back if it ends up being too much padding.
    175 *
    176 * Returns the next timeout period (in milliseconds) after which we should
    177 * send a padding packet, or 0 if padding is disabled.
    178 */
    179 STATIC int32_t
    180 channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
    181 {
    182  int low_timeout = consensus_nf_ito_low;
    183  int high_timeout = consensus_nf_ito_high;
    184  int X1, X2;
    185 
    186  if (low_timeout == 0 && low_timeout == high_timeout)
    187    return 0; // No padding
    188 
    189  /* If we have negotiated different timeout values, use those, but
    190   * don't allow them to be lower than the consensus ones */
    191  if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
    192    low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
    193    high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
    194  }
    195 
    196  if (low_timeout >= high_timeout)
    197    return low_timeout; // No randomization
    198 
    199  /*
    200   * This MAX() hack is here because we apply the timeout on both the client
    201   * and the server. This creates the situation where the total time before
    202   * sending a packet in either direction is actually
    203   * min(client_timeout,server_timeout).
    204   *
    205   * If X is a random variable uniform from 0..R-1 (where R=high-low),
    206   * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
    207   *
    208   * If we create a third random variable Z=min(Y,Y), then it turns out that
    209   * Exp[Z] ~= Exp[X]. Here's a table:
    210   *
    211   *    R     Exp[X]    Exp[Z]    Exp[min(X,X)]   Exp[max(X,X)]
    212   *  2000     999.5    1066        666.2           1332.8
    213   *  3000    1499.5    1599.5      999.5           1999.5
    214   *  5000    2499.5    2666       1666.2           3332.8
    215   *  6000    2999.5    3199.5     1999.5           3999.5
    216   *  7000    3499.5    3732.8     2332.8           4666.2
    217   *  8000    3999.5    4266.2     2666.2           5332.8
    218   *  10000   4999.5    5328       3332.8           6666.2
    219   *  15000   7499.5    7995       4999.5           9999.5
    220   *  20000   9900.5    10661      6666.2           13332.8
    221   *
    222   * In other words, this hack makes it so that when both the client and
    223   * the guard are sending this padding, then the averages work out closer
    224   * to the midpoint of the range, making the overhead easier to tune.
    225   * If only one endpoint is padding (for example: if the relay does not
    226   * support padding, but the client has set ConnectionPadding 1; or
    227   * if the relay does support padding, but the client has set
    228   * ReducedConnectionPadding 1), then the defense will still prevent
    229   * record splitting, but with less overhead than the midpoint
    230   * (as seen by the Exp[max(X,X)] column).
    231   *
    232   * To calculate average padding packet frequency (and thus overhead),
    233   * index into the table by picking a row based on R = high-low. Then,
    234   * use the appropriate column (Exp[Z] for two-sided padding, and
    235   * Exp[max(X,X)] for one-sided padding). Finally, take this value
    236   * and add it to the low timeout value. This value is the average
    237   * frequency which padding packets will be sent.
    238   */
    239 
    240  X1 = crypto_rand_int(high_timeout - low_timeout);
    241  X2 = crypto_rand_int(high_timeout - low_timeout);
    242  return low_timeout + MAX(X1, X2);
    243 }
    244 
    245 /**
    246 * Update this channel's padding settings based on the PADDING_NEGOTIATE
    247 * contents.
    248 *
    249 * Returns -1 on error; 1 on success.
    250 */
    251 int
    252 channelpadding_update_padding_for_channel(channel_t *chan,
    253                const channelpadding_negotiate_t *pad_vars)
    254 {
    255  if (pad_vars->version != 0) {
    256    static ratelim_t version_limit = RATELIM_INIT(600);
    257 
    258    log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
    259           "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
    260    return -1;
    261  }
    262 
    263  // We should not allow malicious relays to disable or reduce padding for
    264  // us as clients. In fact, we should only accept this cell at all if we're
    265  // operating as a relay. Bridges should not accept it from relays, either
    266  // (only from their clients).
    267  if ((get_options()->BridgeRelay &&
    268       connection_or_digest_is_known_relay(chan->identity_digest)) ||
    269      !get_options()->ORPort_set) {
    270    static ratelim_t relay_limit = RATELIM_INIT(600);
    271 
    272    log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
    273           "Got a PADDING_NEGOTIATE from relay at %s (%s). "
    274           "This should not happen.",
    275           channel_describe_peer(chan),
    276           hex_str(chan->identity_digest, DIGEST_LEN));
    277    return -1;
    278  }
    279 
    280  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);
    281 
    282  /* Min must not be lower than the current consensus parameter
    283     nf_ito_low. */
    284  chan->padding_timeout_low_ms = MAX(consensus_nf_ito_low,
    285                                     pad_vars->ito_low_ms);
    286 
    287  /* Max must not be lower than ito_low_ms */
    288  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
    289                                      pad_vars->ito_high_ms);
    290 
    291  log_fn(LOG_INFO,LD_OR,
    292         "Negotiated padding=%d, lo=%d, hi=%d on %"PRIu64,
    293         chan->padding_enabled, chan->padding_timeout_low_ms,
    294         chan->padding_timeout_high_ms,
    295         (chan->global_identifier));
    296 
    297  return 1;
    298 }
    299 
    300 /**
    301 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
    302 * to send padding.
    303 *
    304 * Returns -1 on error, 0 on success.
    305 */
    306 STATIC int
    307 channelpadding_send_disable_command(channel_t *chan)
    308 {
    309  channelpadding_negotiate_t disable;
    310  cell_t cell;
    311 
    312  tor_assert(chan);
    313  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
    314             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
    315 
    316  memset(&cell, 0, sizeof(cell_t));
    317  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
    318  cell.command = CELL_PADDING_NEGOTIATE;
    319 
    320  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);
    321 
    322  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
    323                                      &disable) < 0)
    324    return -1;
    325 
    326  if (chan->write_cell(chan, &cell) == 1)
    327    return 0;
    328  else
    329    return -1;
    330 }
    331 
    332 /**
    333 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
    334 * resume sending padding at some rate.
    335 *
    336 * Returns -1 on error, 0 on success.
    337 */
    338 int
    339 channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
    340                                   uint16_t high_timeout)
    341 {
    342  channelpadding_negotiate_t enable;
    343  cell_t cell;
    344 
    345  tor_assert(chan);
    346  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
    347             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
    348 
    349  memset(&cell, 0, sizeof(cell_t));
    350  memset(&enable, 0, sizeof(channelpadding_negotiate_t));
    351  cell.command = CELL_PADDING_NEGOTIATE;
    352 
    353  channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
    354  channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
    355  channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);
    356 
    357  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
    358                                      &enable) < 0)
    359    return -1;
    360 
    361  if (chan->write_cell(chan, &cell) == 1)
    362    return 0;
    363  else
    364    return -1;
    365 }
    366 
/**
 * Sends a CELL_PADDING cell on a channel if it has been idle since
 * our callback was scheduled.
 *
 * This function also clears the pending padding timer and the callback
 * flags.
 */
static void
channelpadding_send_padding_cell_for_callback(channel_t *chan)
{
  cell_t cell;

  /* Check that the channel is still valid and open */
  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
    if (chan) chan->pending_padding_callback = 0;
    log_fn(LOG_INFO,LD_OR,
           "Scheduled a netflow padding cell, but connection already closed.");
    return;
  }

  /* We should have a pending callback flag set. */
  if (BUG(chan->pending_padding_callback == 0))
    return;

  chan->pending_padding_callback = 0;

  /* Skip the padding cell if the channel saw activity after the timer was
   * armed: the deadline was zeroed, there are queued writes, or the
   * circuitmux still holds cells waiting to be sent. */
  if (monotime_coarse_is_zero(&chan->next_padding_time) ||
      chan->has_queued_writes(chan) ||
      (chan->cmux && circuitmux_num_cells(chan->cmux))) {
    /* We must have been active before the timer fired */
    monotime_coarse_zero(&chan->next_padding_time);
    return;
  }

  {
    monotime_coarse_t now;
    monotime_coarse_get(&now);

    /* Log how long the connection has been idle (since timestamp_xfer)
     * and how far off the scheduled padding time we actually fired. */
    log_fn(LOG_INFO,LD_OR,
        "Sending netflow keepalive on %"PRIu64" to %s (%s) after "
        "%"PRId64" ms. Delta %"PRId64"ms",
        (chan->global_identifier),
        safe_str_client(channel_describe_peer(chan)),
        safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
        (monotime_coarse_diff_msec(&chan->timestamp_xfer,&now)),
        (
                   monotime_coarse_diff_msec(&chan->next_padding_time,&now)));
  }

  /* Clear the timer */
  monotime_coarse_zero(&chan->next_padding_time);

  /* Send the padding cell. This will cause the channel to get a
   * fresh timestamp_active */
  memset(&cell, 0, sizeof(cell));
  cell.command = CELL_PADDING;
  chan->write_cell(chan, &cell);
}
    425 
    426 /**
    427 * tor_timer callback function for us to send padding on an idle channel.
    428 *
    429 * This function just obtains the channel from the callback handle, ensures
    430 * it is still valid, and then hands it off to
    431 * channelpadding_send_padding_cell_for_callback(), which checks if
    432 * the channel is still idle before sending padding.
    433 */
    434 static void
    435 channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
    436                                     const struct monotime_t *when)
    437 {
    438  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
    439  (void)timer; (void)when;
    440 
    441  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
    442    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
    443     * for channels. Then we could get rid of the channeltls dependency */
    444    tor_assert(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->magic ==
    445               OR_CONNECTION_MAGIC);
    446    assert_connection_ok(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), approx_time());
    447 
    448    channelpadding_send_padding_cell_for_callback(chan);
    449  } else {
    450     log_fn(LOG_INFO,LD_OR,
    451            "Channel closed while waiting for timer.");
    452  }
    453 
    454  total_timers_pending--;
    455 }
    456 
    457 /**
    458 * Schedules a callback to send padding on a channel in_ms milliseconds from
    459 * now.
    460 *
    461 * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
    462 * sent the packet immediately without a timer, and
    463 * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
    464 */
    465 static channelpadding_decision_t
    466 channelpadding_schedule_padding(channel_t *chan, int in_ms)
    467 {
    468  struct timeval timeout;
    469  tor_assert(!chan->pending_padding_callback);
    470 
    471  if (in_ms <= 0) {
    472    chan->pending_padding_callback = 1;
    473    channelpadding_send_padding_cell_for_callback(chan);
    474    return CHANNELPADDING_PADDING_SENT;
    475  }
    476 
    477  timeout.tv_sec = in_ms/TOR_MSEC_PER_SEC;
    478  timeout.tv_usec = (in_ms%TOR_USEC_PER_MSEC)*TOR_USEC_PER_MSEC;
    479 
    480  if (!chan->timer_handle) {
    481    chan->timer_handle = channel_handle_new(chan);
    482  }
    483 
    484  if (chan->padding_timer) {
    485    timer_set_cb(chan->padding_timer,
    486                 channelpadding_send_padding_callback,
    487                 chan->timer_handle);
    488  } else {
    489    chan->padding_timer = timer_new(channelpadding_send_padding_callback,
    490                                    chan->timer_handle);
    491  }
    492  timer_schedule(chan->padding_timer, &timeout);
    493 
    494  rep_hist_padding_count_timers(++total_timers_pending);
    495 
    496  chan->pending_padding_callback = 1;
    497  return CHANNELPADDING_PADDING_SCHEDULED;
    498 }
    499 
    500 /**
    501 * Calculates the number of milliseconds from now to schedule a padding cell.
    502 *
    503 * Returns the number of milliseconds from now (relative) to schedule the
    504 * padding callback. If the padding timer is more than 1.1 seconds in the
    505 * future, we return -1, to avoid scheduling excessive callbacks. If padding
    506 * is disabled in the consensus, we return -2.
    507 *
    508 * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute, not
    509 * relative) millisecond representation of when we should send padding, unless
    510 * other activity happens first. This side-effect allows us to avoid
    511 * scheduling a libevent callback until we're within 1.1 seconds of the padding
    512 * time.
    513 */
    514 #define CHANNELPADDING_TIME_LATER -1
    515 #define CHANNELPADDING_TIME_DISABLED -2
    516 STATIC int64_t
    517 channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
    518 {
    519  monotime_coarse_t now;
    520  monotime_coarse_get(&now);
    521 
    522  if (monotime_coarse_is_zero(&chan->next_padding_time)) {
    523    /* If the below line or crypto_rand_int() shows up on a profile,
    524     * we can avoid getting a timeout until we're at least nf_ito_lo
    525     * from a timeout window. That will prevent us from setting timers
    526     * on connections that were active up to 1.5 seconds ago.
    527     * Idle connections should only call this once every 5.5s on average
    528     * though, so that might be a micro-optimization for little gain. */
    529    int32_t padding_timeout =
    530        channelpadding_get_netflow_inactive_timeout_ms(chan);
    531 
    532    if (!padding_timeout)
    533      return CHANNELPADDING_TIME_DISABLED;
    534 
    535    monotime_coarse_add_msec(&chan->next_padding_time,
    536                             &chan->timestamp_xfer,
    537                             padding_timeout);
    538  }
    539 
    540  const int64_t ms_till_pad =
    541    monotime_coarse_diff_msec(&now, &chan->next_padding_time);
    542 
    543  /* If the next padding time is beyond the maximum possible consensus value,
    544   * then this indicates a clock jump, so just send padding now. This is
    545   * better than using monotonic time because we want to avoid the situation
    546   * where we wait around forever for monotonic time to move forward after
    547   * a clock jump far into the past.
    548   */
    549  if (ms_till_pad > DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
    550    tor_fragile_assert();
    551    log_warn(LD_BUG,
    552        "Channel padding timeout scheduled %"PRId64"ms in the future. "
    553        "Did the monotonic clock just jump?",
    554        (ms_till_pad));
    555    return 0; /* Clock jumped: Send padding now */
    556  }
    557 
    558  /* If the timeout will expire before the next time we're called (1000ms
    559     from now, plus some slack), then calculate the number of milliseconds
    560     from now which we should send padding, so we can schedule a callback
    561     then.
    562   */
    563  if (ms_till_pad < (TOR_HOUSEKEEPING_CALLBACK_MSEC +
    564                       TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC)) {
    565    /* If the padding time is in the past, that means that libevent delayed
    566     * calling the once-per-second callback due to other work taking too long.
    567     * See https://bugs.torproject.org/22212 and
    568     * https://bugs.torproject.org/16585. This is a systemic problem
    569     * with being single-threaded, but let's emit a notice if this
    570     * is long enough in the past that we might have missed a netflow window,
    571     * and allowed a router to emit a netflow frame, just so we don't forget
    572     * about it entirely.. */
    573 #define NETFLOW_MISSED_WINDOW (150000 - DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH)
    574    if (ms_till_pad < 0) {
    575      if (ms_till_pad < -NETFLOW_MISSED_WINDOW) {
    576        log_info(LD_OR,
    577                "Channel padding delay of %"PRIu64"ms occurred in the past "
    578                "that exceeded the allowed time window.",
    579               (-ms_till_pad));
    580        channel_padding_delayed_ms -= ms_till_pad;
    581        channel_padding_delayed_count += 1;
    582      }
    583      return 0; /* Clock jumped: Send padding now */
    584    }
    585 
    586    return ms_till_pad;
    587  }
    588  return CHANNELPADDING_TIME_LATER;
    589 }
    590 
    591 /**
    592 * Returns a randomized value for channel idle timeout in seconds.
    593 * The channel idle timeout governs how quickly we close a channel
    594 * after its last circuit has disappeared.
    595 *
    596 * There are three classes of channels:
    597 *  1. Client+non-canonical. These live for 3-4.5 minutes
    598 *  2. relay to relay. These live for 45-75 min by default
    599 *  3. Reduced padding clients. These live for 1.5-2.25 minutes.
    600 *
    601 * Also allows the default relay-to-relay value to be controlled by the
    602 * consensus.
    603 */
    604 unsigned int
    605 channelpadding_get_channel_idle_timeout(const channel_t *chan,
    606                                        int is_canonical)
    607 {
    608  const or_options_t *options = get_options();
    609  unsigned int timeout;
    610 
    611  /* Non-canonical and client channels only last for 3-4.5 min when idle */
    612  if (!is_canonical || CHANNEL_IS_CLIENT(chan, options)) {
    613 #define CONNTIMEOUT_CLIENTS_BASE 180 // 3 to 4.5 min
    614    timeout = CONNTIMEOUT_CLIENTS_BASE
    615        + crypto_rand_int(CONNTIMEOUT_CLIENTS_BASE/2);
    616  } else { // Canonical relay-to-relay channels
    617    // 45..75min or consensus +/- 25%
    618    timeout = consensus_nf_conntimeout_relays;
    619    timeout = 3*timeout/4 + crypto_rand_int(timeout/2);
    620  }
    621 
    622  /* If ReducedConnectionPadding is set, we want to halve the duration of
    623   * the channel idle timeout, since reducing the additional time that
    624   * a channel stays open will reduce the total overhead for making
    625   * new channels. This reduction in overhead/channel expense
    626   * is important for mobile users. The option cannot be set by relays.
    627   *
    628   * We also don't reduce any values for timeout that the user explicitly
    629   * set.
    630   */
    631  if (options->ReducedConnectionPadding
    632      && !options->CircuitsAvailableTimeout) {
    633    timeout /= 2;
    634  }
    635 
    636  return timeout;
    637 }
    638 
    639 /**
    640 * This function controls how long we keep idle circuits open,
    641 * and how long we build predicted circuits. This behavior is under
    642 * the control of channelpadding because circuit availability is the
    643 * dominant factor in channel lifespan, which influences total padding
    644 * overhead.
    645 *
    646 * Returns a randomized number of seconds in a range from
    647 * CircuitsAvailableTimeout to 2*CircuitsAvailableTimeout. This value is halved
    648 * if ReducedConnectionPadding is set. The default value of
    649 * CircuitsAvailableTimeout can be controlled by the consensus.
    650 */
    651 int
    652 channelpadding_get_circuits_available_timeout(void)
    653 {
    654  const or_options_t *options = get_options();
    655  int timeout = options->CircuitsAvailableTimeout;
    656 
    657  if (!timeout) {
    658    timeout = consensus_nf_conntimeout_clients;
    659 
    660    /* If ReducedConnectionPadding is set, we want to halve the duration of
    661     * the channel idle timeout, since reducing the additional time that
    662     * a channel stays open will reduce the total overhead for making
    663     * new connections. This reduction in overhead/connection expense
    664     * is important for mobile users. The option cannot be set by relays.
    665     *
    666     * We also don't reduce any values for timeout that the user explicitly
    667     * set.
    668     */
    669    if (options->ReducedConnectionPadding) {
    670      // half the value to 15..30min by default
    671      timeout /= 2;
    672    }
    673  }
    674 
    675  // 30..60min by default
    676  timeout = timeout + crypto_rand_int(timeout);
    677 
    678  tor_assert(timeout >= 0);
    679 
    680  return timeout;
    681 }
    682 
    683 /**
    684 * Calling this function on a channel causes it to tell the other side
    685 * not to send padding, and disables sending padding from this side as well.
    686 */
    687 void
    688 channelpadding_disable_padding_on_channel(channel_t *chan)
    689 {
    690  chan->padding_enabled = 0;
    691 
    692  // Send cell to disable padding on the other end
    693  channelpadding_send_disable_command(chan);
    694 }
    695 
    696 /**
    697 * Calling this function on a channel causes it to tell the other side
    698 * not to send padding, and reduces the rate that padding is sent from
    699 * this side.
    700 */
    701 void
    702 channelpadding_reduce_padding_on_channel(channel_t *chan)
    703 {
    704  /* Padding can be forced and reduced by clients, regardless of if
    705   * the channel supports it. So we check for support here before
    706   * sending any commands. */
    707  if (chan->padding_enabled) {
    708    channelpadding_send_disable_command(chan);
    709  }
    710 
    711  chan->padding_timeout_low_ms = consensus_nf_ito_low_reduced;
    712  chan->padding_timeout_high_ms = consensus_nf_ito_high_reduced;
    713 
    714  log_fn(LOG_INFO,LD_OR,
    715         "Reduced padding on channel %"PRIu64": lo=%d, hi=%d",
    716         (chan->global_identifier),
    717         chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
    718 }
    719 
    720 /**
    721 * This function is called once per second by run_connection_housekeeping(),
    722 * but only if the channel is still open, valid, and non-wedged.
    723 *
    724 * It decides if and when we should send a padding cell, and if needed,
    725 * schedules a callback to send that cell at the appropriate time.
    726 *
    727 * Returns an enum that represents the current padding decision state.
    728 * Return value is currently used only by unit tests.
    729 */
    730 channelpadding_decision_t
    731 channelpadding_decide_to_pad_channel(channel_t *chan)
    732 {
    733  const or_options_t *options = get_options();
    734 
    735  /* Only pad open channels */
    736  if (chan->state != CHANNEL_STATE_OPEN)
    737    return CHANNELPADDING_WONTPAD;
    738 
    739  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
    740    if (!consensus_nf_pad_before_usage)
    741      return CHANNELPADDING_WONTPAD;
    742  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
    743    return CHANNELPADDING_WONTPAD;
    744  }
    745 
    746  if (chan->pending_padding_callback)
    747    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;
    748 
    749  /* Don't pad the channel if we didn't negotiate it, but still
    750   * allow clients to force padding if options->ChannelPadding is
    751   * explicitly set to 1.
    752   */
    753  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
    754    return CHANNELPADDING_WONTPAD;
    755  }
    756 
    757  if (hs_service_allow_non_anonymous_connection(options) &&
    758      !consensus_nf_pad_single_onion) {
    759    /* If the consensus just changed values, this channel may still
    760     * think padding is enabled. Negotiate it off. */
    761    if (chan->padding_enabled)
    762      channelpadding_disable_padding_on_channel(chan);
    763 
    764    return CHANNELPADDING_WONTPAD;
    765  }
    766 
    767  /* There should always be a cmux on the circuit. After that,
    768   * only schedule padding if there are no queued writes and no
    769   * queued cells in circuitmux queues. */
    770  if (chan->cmux && !chan->has_queued_writes(chan) &&
    771      !circuitmux_num_cells(chan->cmux)) {
    772    int is_client_channel = 0;
    773 
    774    if (CHANNEL_IS_CLIENT(chan, options)) {
    775       is_client_channel = 1;
    776    }
    777 
    778    /* If nf_pad_relays=1 is set in the consensus, we pad
    779     * on *all* idle connections, relay-relay or relay-client.
    780     * Otherwise pad only for client+bridge cons */
    781    if (is_client_channel || consensus_nf_pad_relays) {
    782      int64_t pad_time_ms =
    783          channelpadding_compute_time_until_pad_for_netflow(chan);
    784 
    785      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
    786        return CHANNELPADDING_WONTPAD;
    787      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
    788        chan->currently_padding = 1;
    789        return CHANNELPADDING_PADLATER;
    790      } else {
    791        if (BUG(pad_time_ms > INT_MAX)) {
    792          pad_time_ms = INT_MAX;
    793        }
    794       /* We have to schedule a callback because we're called exactly once per
    795        * second, but we don't want padding packets to go out exactly on an
    796        * integer multiple of seconds. This callback will only be scheduled
    797        * if we're within 1.1 seconds of the padding time.
    798        */
    799        chan->currently_padding = 1;
    800        return channelpadding_schedule_padding(chan, (int)pad_time_ms);
    801      }
    802    } else {
    803      chan->currently_padding = 0;
    804      return CHANNELPADDING_WONTPAD;
    805    }
    806  } else {
    807    return CHANNELPADDING_PADLATER;
    808  }
    809 }
    810 
    811 /* Log a heartbeat message with the average channel padding delay and
    812 * the number of occurred delays (that exceeded the allowed time window)
    813 * since the previous heartbeat or, if we didn't have a heartbeat yet,
    814 * since startup. */
    815 void
    816 channelpadding_log_heartbeat(void)
    817 {
    818  /* Whether we had a heartbeat since startup */
    819  static uint8_t heartbeat = 0;
    820 
    821  if (channel_padding_delayed_count > 0) {
    822    log_notice(LD_OR,
    823               "Average channel padding delay of delays that exceeded "
    824               "the allowed time window since %s: %"PRIu64"ms "
    825               "(Number of delays: %"PRIu64")",
    826               heartbeat ? "previous heartbeat" : "startup",
    827               (uint64_t)((double)channel_padding_delayed_ms /
    828                          channel_padding_delayed_count),
    829               channel_padding_delayed_count);
    830    channel_padding_delayed_count = 0;
    831    channel_padding_delayed_ms = 0;
    832  }
    833  heartbeat = 1;
    834 }