circuitpadding.c (110857B)
/* Copyright (c) 2017 The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file circuitpadding.c
 * \brief Circuit-level padding implementation
 *
 * \details
 *
 * This file implements Tor proposal 254 "Padding Negotiation" which is heavily
 * inspired by the paper "Toward an Efficient Website Fingerprinting Defense"
 * by M. Juarez, M. Imani, M. Perry, C. Diaz, M. Wright.
 *
 * In particular the code in this file describes mechanisms for clients to
 * negotiate various types of circuit-level padding from relays.
 *
 * Each padding type is described by a state machine (circpad_machine_spec_t),
 * which is also referred to as a "padding machine" in this file. Currently,
 * these state machines are hardcoded in the source code (e.g. see
 * circpad_machines_init()), but in the future we will be able to
 * serialize them in the torrc or the consensus.
 *
 * As specified by prop#254, clients can negotiate padding with relays by using
 * PADDING_NEGOTIATE cells. After successful padding negotiation, padding
 * machines are assigned to the circuit in their mutable form as a
 * circpad_machine_runtime_t.
 *
 * Each state of a padding state machine can be either:
 * - A histogram that specifies inter-arrival padding delays.
 * - Or a parametrized probability distribution that specifies inter-arrival
 *   delays (see circpad_distribution_type_t).
 *
 * Padding machines start from the START state and finish with the END
 * state. They can transition between states using the events in
 * circpad_event_t.
 *
 * When a padding machine reaches the END state, it gets wiped from the circuit
 * so that other padding machines can take over if needed (see
 * circpad_machine_spec_transitioned_to_end()).
40 * 41 **************************** 42 * General notes: 43 * 44 * All used machines should be heap allocated and placed into 45 * origin_padding_machines/relay_padding_machines so that they get correctly 46 * cleaned up by the circpad_free_all() function. 47 **/ 48 49 #define CIRCUITPADDING_PRIVATE 50 51 #include <math.h> 52 #include "lib/math/fp.h" 53 #include "lib/math/prob_distr.h" 54 #include "core/or/or.h" 55 #include "core/or/circuitpadding.h" 56 #include "core/or/circuitpadding_machines.h" 57 #include "core/or/circuitlist.h" 58 #include "core/or/circuituse.h" 59 #include "core/mainloop/netstatus.h" 60 #include "core/or/relay.h" 61 #include "feature/stats/rephist.h" 62 #include "feature/nodelist/networkstatus.h" 63 64 #include "core/or/channel.h" 65 66 #include "lib/time/compat_time.h" 67 #include "lib/defs/time.h" 68 #include "lib/crypt_ops/crypto_rand.h" 69 70 #include "core/or/crypt_path_st.h" 71 #include "core/or/circuit_st.h" 72 #include "core/or/origin_circuit_st.h" 73 #include "core/or/or_circuit_st.h" 74 #include "feature/nodelist/routerstatus_st.h" 75 #include "feature/nodelist/node_st.h" 76 #include "core/or/cell_st.h" 77 #include "core/or/extend_info_st.h" 78 #include "core/crypto/relay_crypto.h" 79 #include "feature/nodelist/nodelist.h" 80 81 #include "src/core/or/conflux_util.h" 82 83 #include "app/config/config.h" 84 85 static inline circpad_circuit_state_t circpad_circuit_state( 86 origin_circuit_t *circ); 87 static void circpad_setup_machine_on_circ(circuit_t *on_circ, 88 const circpad_machine_spec_t *machine); 89 static double circpad_distribution_sample(circpad_distribution_t dist); 90 91 static inline void circpad_machine_update_state_length_for_nonpadding( 92 circpad_machine_runtime_t *mi); 93 94 /** Cached consensus params */ 95 static uint8_t circpad_padding_disabled; 96 static uint8_t circpad_padding_reduced; 97 static uint8_t circpad_global_max_padding_percent; 98 static uint16_t circpad_global_allowed_cells; 99 static uint16_t 
circpad_max_circ_queued_cells; 100 101 /** Global cell counts, for rate limiting */ 102 static uint64_t circpad_global_padding_sent; 103 static uint64_t circpad_global_nonpadding_sent; 104 105 /** This is the list of circpad_machine_spec_t's parsed from consensus and 106 * torrc that have origin_side == 1 (ie: are for client side). 107 * 108 * The machines in this smartlist are considered immutable and they are used 109 * as-is by circuits so they should not change or get deallocated in Tor's 110 * runtime and as long as circuits are alive. */ 111 STATIC smartlist_t *origin_padding_machines = NULL; 112 113 /** This is the list of circpad_machine_spec_t's parsed from consensus and 114 * torrc that have origin_side == 0 (ie: are for relay side). 115 * 116 * The machines in this smartlist are considered immutable and they are used 117 * as-is by circuits so they should not change or get deallocated in Tor's 118 * runtime and as long as circuits are alive. */ 119 STATIC smartlist_t *relay_padding_machines = NULL; 120 121 #ifndef COCCI 122 /** Loop over the current padding state machines using <b>loop_var</b> as the 123 * loop variable. */ 124 #define FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \ 125 STMT_BEGIN \ 126 for (int loop_var = 0; loop_var < CIRCPAD_MAX_MACHINES; loop_var++) { 127 #define FOR_EACH_CIRCUIT_MACHINE_END } STMT_END ; 128 129 /** Loop over the current active padding state machines using <b>loop_var</b> 130 * as the loop variable. If a machine is not active, skip it. 
 */
#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(loop_var, circ) \
  FOR_EACH_CIRCUIT_MACHINE_BEGIN(loop_var) \
  if (!(circ)->padding_info[loop_var]) \
    continue;
#define FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END } STMT_END ;
#endif /* !defined(COCCI) */

/**
 * Free the mutable machineinfo (runtime state) at index <b>idx</b> of
 * <b>circ</b>'s padding_info array, if one is present.
 *
 * Releases the runtime histogram, the padding timer, and the
 * circpad_machine_runtime_t itself. The immutable machine spec in
 * circ->padding_machine[idx] is NOT touched here.
 */
static void
circpad_circuit_machineinfo_free_idx(circuit_t *circ, int idx)
{
  if (circ->padding_info[idx]) {
    log_fn(LOG_INFO,LD_CIRC, "Freeing padding info idx %d on circuit %u (%d)",
           idx, CIRCUIT_IS_ORIGIN(circ) ?
             TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0,
           circ->purpose);

    /* tor_free() and timer_free() tolerate NULL members, so no extra
     * per-field checks are needed. */
    tor_free(circ->padding_info[idx]->histogram);
    timer_free(circ->padding_info[idx]->padding_timer);
    tor_free(circ->padding_info[idx]);
  }
}

/**
 * Return true if circpad has decided to hold the circuit open for additional
 * padding. This function is used to take and retain ownership of certain
 * types of circuits that have padding machines on them, that have been passed
 * to circuit_mark_for_close().
 *
 * circuit_mark_for_close() calls this function to ask circpad if any padding
 * machines want to keep the circuit open longer to pad.
 *
 * Any non-measurement circuit that was closed for a normal, non-error reason
 * code may be held open for up to CIRCPAD_DELAY_INFINITE microseconds between
 * network-driven cell events.
 *
 * After CIRCPAD_DELAY_INFINITE microseconds of silence on a circuit, this
 * function will no longer hold it open (it will return 0 regardless of
 * what the machines ask for, and thus circuit_expire_old_circuits_clientside()
 * will close the circuit after roughly 1.25hr of idle time, maximum,
 * regardless of the padding machine state.
174 */ 175 int 176 circpad_marked_circuit_for_padding(circuit_t *circ, int reason) 177 { 178 /* If the circuit purpose is measurement or path bias, don't 179 * hold it open */ 180 if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING || 181 circ->purpose == CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT) { 182 return 0; 183 } 184 185 /* If the circuit is closed for any reason other than these three valid, 186 * client-side close reasons, do not try to keep it open. It is probably 187 * damaged or unusable. Note this is OK with vanguards because 188 * controller-closed circuits have REASON=REQUESTED, so vanguards-closed 189 * circuits will not be held open (we want them to close ASAP). */ 190 if (!(reason == END_CIRC_REASON_NONE || 191 reason == END_CIRC_REASON_FINISHED || 192 reason == END_CIRC_REASON_IP_NOW_REDUNDANT)) { 193 return 0; 194 } 195 196 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) { 197 circpad_machine_runtime_t *mi = circ->padding_info[i]; 198 if (!mi) { 199 continue; // No padding runtime info; check next machine 200 } 201 202 const circpad_state_t *state = circpad_machine_current_state(mi); 203 204 /* If we're in END state (NULL here), then check next machine */ 205 if (!state) { 206 continue; // check next machine 207 } 208 209 /* If the machine does not want to control the circuit close itself, then 210 * check the next machine */ 211 if (!circ->padding_machine[i]->manage_circ_lifetime) { 212 continue; // check next machine 213 } 214 215 /* If the machine has reached the END state, we can close. Check next 216 * machine. */ 217 if (mi->current_state == CIRCPAD_STATE_END) { 218 continue; // check next machine 219 } 220 221 log_info(LD_CIRC, "Circuit %d is not marked for close because of a " 222 "pending padding machine in index %d.", 223 CIRCUIT_IS_ORIGIN(circ) ? 224 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, i); 225 226 /* If the machine has had no network events at all within the 227 * last circpad_delay_t timespan, it's in some deadlock state. 
228 * Tell circuit_mark_for_close() that we don't own it anymore. 229 * This will allow circuit_expire_old_circuits_clientside() to 230 * close it. 231 */ 232 if (circ->padding_info[i]->last_cell_time_sec + 233 (time_t)CIRCPAD_DELAY_MAX_SECS < approx_time()) { 234 log_notice(LD_BUG, "Circuit %d was not marked for close because of a " 235 "pending padding machine in index %d for over an hour. " 236 "Circuit is a %s", 237 CIRCUIT_IS_ORIGIN(circ) ? 238 TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0, 239 i, circuit_purpose_to_string(circ->purpose)); 240 241 return 0; // abort timer reached; mark the circuit for close now 242 } 243 244 /* If we weren't marked dirty yet, let's pretend we're dirty now. 245 * ("Dirty" means that a circuit has been used for application traffic 246 * by Tor.. Dirty circuits have different expiry times, and are not 247 * considered in counts of built circuits, etc. By claiming that we're 248 * dirty, the rest of Tor will make decisions as if we were actually 249 * used by application data. 250 * 251 * This is most important for circuit_expire_old_circuits_clientside(), 252 * where we want that function to expire us after the padding machine 253 * has shut down, but using the MaxCircuitDirtiness timer instead of 254 * the idle circuit timer (again, we want this because we're not 255 * supposed to look idle to Guard nodes that can see our lifespan). */ 256 if (!circ->timestamp_dirty) { 257 circ->timestamp_dirty = approx_time(); 258 if (circ->conflux && CIRCUIT_IS_ORIGIN(circ)) 259 conflux_sync_circ_fields(circ->conflux, TO_ORIGIN_CIRCUIT(circ)); 260 } 261 262 /* Take ownership of the circuit */ 263 circuit_change_purpose(circ, CIRCUIT_PURPOSE_C_CIRCUIT_PADDING); 264 265 return 1; 266 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END; 267 268 return 0; // No machine wanted to keep the circuit open; mark for close 269 } 270 271 /** 272 * Free all the machineinfos in <b>circ</b> that match <b>machine_num</b>. 
273 * 274 * If machine_ctr is non-zero, also make sure it matches the padding_info's 275 * machine counter before freeing. 276 * 277 * Returns true if any machineinfos with that number were freed. 278 * False otherwise. */ 279 static int 280 free_circ_machineinfos_with_machine_num(circuit_t *circ, int machine_num, 281 uint32_t machine_ctr) 282 { 283 int found = 0; 284 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) { 285 if (circ->padding_machine[i] && 286 circ->padding_machine[i]->machine_num == machine_num) { 287 /* If machine_ctr is non-zero, make sure it matches too. This 288 * is to ensure that old STOP messages don't shutdown newer machines. */ 289 if (machine_ctr && circ->padding_info[i] && 290 circ->padding_info[i]->machine_ctr != machine_ctr) { 291 log_info(LD_CIRC, 292 "Padding shutdown for wrong (old?) machine ctr: %u vs %u", 293 machine_ctr, circ->padding_info[i]->machine_ctr); 294 } else { 295 circpad_circuit_machineinfo_free_idx(circ, i); 296 circ->padding_machine[i] = NULL; 297 found = 1; 298 } 299 } 300 } FOR_EACH_CIRCUIT_MACHINE_END; 301 302 return found; 303 } 304 305 /** 306 * Free all padding machines and mutable info associated with circuit 307 */ 308 void 309 circpad_circuit_free_all_machineinfos(circuit_t *circ) 310 { 311 FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) { 312 circpad_circuit_machineinfo_free_idx(circ, i); 313 } FOR_EACH_CIRCUIT_MACHINE_END; 314 } 315 316 /** 317 * Allocate a new mutable machineinfo structure. 318 */ 319 STATIC circpad_machine_runtime_t * 320 circpad_circuit_machineinfo_new(circuit_t *on_circ, int machine_index) 321 { 322 circpad_machine_runtime_t *mi = 323 tor_malloc_zero(sizeof(circpad_machine_runtime_t)); 324 mi->machine_index = machine_index; 325 mi->on_circ = on_circ; 326 mi->last_cell_time_sec = approx_time(); 327 mi->machine_ctr = on_circ->padding_machine_ctr; 328 329 return mi; 330 } 331 332 /** 333 * Return the circpad_state_t for the current state based on the 334 * mutable info. 
335 * 336 * This function returns NULL when the machine is in the end state or in an 337 * invalid state. 338 */ 339 STATIC const circpad_state_t * 340 circpad_machine_current_state(const circpad_machine_runtime_t *mi) 341 { 342 const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi); 343 344 if (mi->current_state == CIRCPAD_STATE_END) { 345 return NULL; 346 } else if (BUG(mi->current_state >= machine->num_states)) { 347 log_fn(LOG_WARN,LD_CIRC, 348 "Invalid circuit padding state %d", 349 mi->current_state); 350 351 return NULL; 352 } 353 354 return &machine->states[mi->current_state]; 355 } 356 357 /** 358 * Get the lower bound of a histogram bin. 359 * 360 * You can obtain the upper bound using histogram_get_bin_upper_bound(). 361 * 362 * This function can also be called with 'bin' set to a value equal or greater 363 * than histogram_len in which case the infinity bin is chosen and 364 * CIRCPAD_DELAY_INFINITE is returned. 365 */ 366 STATIC circpad_delay_t 367 circpad_histogram_bin_to_usec(const circpad_machine_runtime_t *mi, 368 circpad_hist_index_t bin) 369 { 370 const circpad_state_t *state = circpad_machine_current_state(mi); 371 circpad_delay_t rtt_add_usec = 0; 372 373 /* Our state should have been checked to be non-null by the caller 374 * (circpad_machine_remove_token()) */ 375 if (BUG(state == NULL)) { 376 return CIRCPAD_DELAY_INFINITE; 377 } 378 379 /* The infinity bin has an upper bound of infinity, so make sure we return 380 * that if they ask for it. */ 381 if (bin > CIRCPAD_INFINITY_BIN(state)) { 382 return CIRCPAD_DELAY_INFINITE; 383 } 384 385 /* If we are using an RTT estimate, consider it as well. */ 386 if (state->use_rtt_estimate) { 387 rtt_add_usec = mi->rtt_estimate_usec; 388 } 389 390 return state->histogram_edges[bin] + rtt_add_usec; 391 } 392 393 /** 394 * Like circpad_histogram_bin_to_usec() but return the upper bound of bin. 395 * (The upper bound is included in the bin.) 
 */
STATIC circpad_delay_t
histogram_get_bin_upper_bound(const circpad_machine_runtime_t *mi,
                              circpad_hist_index_t bin)
{
  /* The next bin's lower bound, minus one, is this bin's inclusive upper
   * bound. */
  return circpad_histogram_bin_to_usec(mi, bin+1) - 1;
}

/** Return the midpoint of the histogram bin <b>bin_index</b>. */
static circpad_delay_t
circpad_get_histogram_bin_midpoint(const circpad_machine_runtime_t *mi,
                                   int bin_index)
{
  circpad_delay_t left_bound = circpad_histogram_bin_to_usec(mi, bin_index);
  circpad_delay_t right_bound = histogram_get_bin_upper_bound(mi, bin_index);

  return left_bound + (right_bound - left_bound)/2;
}

/**
 * Return the bin that contains the usec argument.
 * "Contains" is defined as us in [lower, upper).
 *
 * This function will never return the infinity bin (histogram_len-1), in order
 * to simplify the rest of the code, so if a usec is provided that falls above
 * the highest non-infinity bin, that bin index will be returned.
 */
STATIC circpad_hist_index_t
circpad_histogram_usec_to_bin(const circpad_machine_runtime_t *mi,
                              circpad_delay_t usec)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  circpad_delay_t rtt_add_usec = 0;
  circpad_hist_index_t bin;

  /* Our state should have been checked to be non-null by the caller
   * (circpad_machine_remove_token()) */
  if (BUG(state == NULL)) {
    return 0;
  }

  /* If we are using an RTT estimate, consider it as well. */
  if (state->use_rtt_estimate) {
    rtt_add_usec = mi->rtt_estimate_usec;
  }

  /* Walk through the bins and check the upper bound of each bin, if 'usec' is
   * less-or-equal to that, return that bin. If rtt_estimate is enabled then
   * add that to the upper bound of each bin.
   *
   * We don't want to return the infinity bin here, so don't go there. */
  for (bin = 0 ; bin < CIRCPAD_INFINITY_BIN(state) ; bin++) {
    if (usec <= histogram_get_bin_upper_bound(mi, bin) + rtt_add_usec) {
      return bin;
    }
  }

  /* We don't want to return the infinity bin here, so if we still didn't find
   * the right bin, return the highest non-infinity bin */
  return CIRCPAD_INFINITY_BIN(state)-1;
}

/**
 * Return true if the machine supports token removal.
 *
 * Token removal is equivalent to having a mutable histogram in the
 * circpad_machine_runtime_t mutable info. So while we're at it,
 * let's assert that everything is consistent between the mutable
 * runtime and the readonly machine spec.
 */
static inline int
circpad_is_token_removal_supported(circpad_machine_runtime_t *mi)
{
  /* No runtime histogram == no token removal */
  if (mi->histogram == NULL) {
    /* Machines that don't want token removal are trying to avoid
     * potentially expensive mallocs, extra memory accesses, and/or
     * potentially expensive monotime calls. Let's minimize checks
     * and keep this path fast. */
    tor_assert_nonfatal(mi->histogram_len == 0);
    return 0;
  } else {
    /* Machines that do want token removal are less sensitive to performance.
     * Let's spend some time to check that our state is consistent and sane */
    const circpad_state_t *state = circpad_machine_current_state(mi);
    if (BUG(!state)) {
      return 1;
    }
    tor_assert_nonfatal(state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE);
    tor_assert_nonfatal(state->histogram_len == mi->histogram_len);
    tor_assert_nonfatal(mi->histogram_len != 0);
    return 1;
  }

  /* Both branches above return; this tail is unreachable by construction. */
  tor_assert_nonfatal_unreached();
  return 0;
}

/**
 * This function frees any token bins allocated from a previous state
 *
 * Called after a state transition, or if the bins are empty.
 */
STATIC void
circpad_machine_setup_tokens(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);

  /* If this state doesn't exist, or doesn't have token removal,
   * free any previous state's runtime histogram, and bail.
   *
   * If we don't have a token removal strategy, we also don't need a runtime
   * histogram and we rely on the immutable one in machine_spec_t. */
  if (!state || state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE) {
    if (mi->histogram) {
      tor_free(mi->histogram);
      mi->histogram = NULL;
      mi->histogram_len = 0;
    }
    return;
  }

  /* Try to avoid re-mallocing if we don't really need to */
  if (!mi->histogram || (mi->histogram
      && mi->histogram_len != state->histogram_len)) {
    tor_free(mi->histogram); // null ok
    mi->histogram = tor_malloc_zero(sizeof(circpad_hist_token_t)
                                    *state->histogram_len);
  }
  mi->histogram_len = state->histogram_len;

  /* Refill the runtime histogram with the state's full token counts. */
  memcpy(mi->histogram, state->histogram,
         sizeof(circpad_hist_token_t)*state->histogram_len);
}

/**
 * Choose a length for this state (in cells), if specified.
 */
static void
circpad_choose_state_length(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  double length;

  /* No length distribution configured: this state is unbounded. */
  if (!state || state->length_dist.type == CIRCPAD_DIST_NONE) {
    mi->state_length = CIRCPAD_STATE_LENGTH_INFINITE;
    return;
  }

  /* Sample, clamp to non-negative, and add the configured base length. */
  length = circpad_distribution_sample(state->length_dist);
  length = MAX(0, length);
  length += state->start_length;

  /* A max_length of 0 means "no maximum". */
  if (state->max_length) {
    length = MIN(length, state->max_length);
  }

  mi->state_length = clamp_double_to_int64(length);

  log_info(LD_CIRC, "State length sampled to %"PRIu64" for circuit %u",
           mi->state_length, CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
}

/**
 * Sample a value from our iat_dist, and clamp it safely
 * to circpad_delay_t.
 *
 * Before returning, add <b>delay_shift</b> (can be zero) to the sampled value.
 */
static circpad_delay_t
circpad_distribution_sample_iat_delay(const circpad_state_t *state,
                                      circpad_delay_t delay_shift)
{
  double val = circpad_distribution_sample(state->iat_dist);
  /* These comparisons are safe, because the output is in the range
   * [0, 2**32), and double has a precision of 53 bits. */
  /* We want a positive sample value */
  val = MAX(0, val);
  /* Respect the maximum sample setting */
  val = MIN(val, state->dist_max_sample_usec);

  /* Now apply the shift:
   * This addition is exact: val is at most 2**32-1, delay_shift is at most
   * 2**32-1, and doubles have a precision of 53 bits. */
  val += delay_shift;

  /* Clamp the distribution at infinite delay val */
  return (circpad_delay_t)MIN(tor_llround(val), CIRCPAD_DELAY_INFINITE);
}

/**
 * Sample an expected time-until-next-packet delay from the histogram or
 * probability distribution.
 *
 * A bin of the histogram is chosen with probability proportional to the number
 * of tokens in each bin, and then a time value is chosen uniformly from that
 * bin's [start,end) time range.
 */
STATIC circpad_delay_t
circpad_machine_sample_delay(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = circpad_machine_current_state(mi);
  const circpad_hist_token_t *histogram = NULL;
  circpad_hist_index_t curr_bin = 0;
  circpad_delay_t bin_start, bin_end;
  /* These three must all be larger than circpad_hist_token_t, because
   * we sum several circpad_hist_token_t values across the histogram */
  uint64_t curr_weight = 0;
  uint64_t histogram_total_tokens = 0;
  uint64_t bin_choice;

  tor_assert(state);

  if (state->iat_dist.type != CIRCPAD_DIST_NONE) {
    /* Sample from a fixed IAT distribution and return */
    circpad_delay_t iat_delay_shift = state->use_rtt_estimate ?
        mi->rtt_estimate_usec + state->dist_added_shift_usec :
        state->dist_added_shift_usec;
    return circpad_distribution_sample_iat_delay(state, iat_delay_shift);
  } else if (circpad_is_token_removal_supported(mi)) {
    /* Token removal is in use: total up the runtime (mutable) histogram. */
    histogram = mi->histogram;
    for (circpad_hist_index_t b = 0; b < state->histogram_len; b++)
      histogram_total_tokens += histogram[b];
  } else {
    /* We have a histogram, but it's immutable */
    histogram = state->histogram;
    histogram_total_tokens = state->histogram_total_tokens;
  }

  /* If we are out of tokens, don't schedule padding. */
  if (!histogram_total_tokens) {
    return CIRCPAD_DELAY_INFINITE;
  }

  bin_choice = crypto_fast_rng_get_uint64(get_thread_fast_rng(),
                                          histogram_total_tokens);

  /* Skip all the initial zero bins */
  while (!histogram[curr_bin]) {
    curr_bin++;
  }
  curr_weight = histogram[curr_bin];

  // TODO: This is not constant-time. Pretty sure we don't
  // really need it to be, though.
  while (curr_weight < bin_choice) {
    curr_bin++;
    /* It should be impossible to run past the end of the histogram */
    if (BUG(curr_bin >= state->histogram_len)) {
      return CIRCPAD_DELAY_INFINITE;
    }
    curr_weight += histogram[curr_bin];
  }

  /* Do some basic checking of the current bin we are in */
  if (BUG(curr_bin >= state->histogram_len) ||
      BUG(histogram[curr_bin] == 0)) {
    return CIRCPAD_DELAY_INFINITE;
  }

  // Store this index to remove the token upon callback.
  if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE) {
    mi->chosen_bin = curr_bin;
  }

  if (curr_bin >= CIRCPAD_INFINITY_BIN(state)) {
    /* The infinity bin was chosen: consume its token immediately (there is
     * no padding callback coming that would otherwise do it). */
    if (state->token_removal != CIRCPAD_TOKEN_REMOVAL_NONE &&
        mi->histogram[curr_bin] > 0) {
      mi->histogram[curr_bin]--;
    }

    // Infinity: Don't send a padding packet. Wait for a real packet
    // and then see if our bins are empty or what else we should do.
    return CIRCPAD_DELAY_INFINITE;
  }

  tor_assert(curr_bin < CIRCPAD_INFINITY_BIN(state));

  bin_start = circpad_histogram_bin_to_usec(mi, curr_bin);
  /* We don't need to subtract 1 from the upper bound because the random range
   * function below samples from [bin_start, bin_end) */
  bin_end = circpad_histogram_bin_to_usec(mi, curr_bin+1);

  /* Bin edges are monotonically increasing so this is a bug. Handle it. */
  if (BUG(bin_start >= bin_end)) {
    return bin_start;
  }

  return (circpad_delay_t)crypto_fast_rng_uint64_range(get_thread_fast_rng(),
                                                       bin_start, bin_end);
}

/**
 * Sample a value from the specified probability distribution.
 *
 * Uses functions from src/lib/math/prob_distr.c .
 */
static double
circpad_distribution_sample(circpad_distribution_t dist)
{
  log_fn(LOG_DEBUG,LD_CIRC, "Sampling delay with distribution %d",
         dist.type);

  /* Each case builds a stack-allocated distribution object with the two
   * generic parameters mapped onto that distribution's fields, then samples
   * from it via the prob_distr dist_sample() interface. */
  switch (dist.type) {
    case CIRCPAD_DIST_NONE:
      {
        /* We should not get in here like this */
        tor_assert_nonfatal_unreached();
        return 0;
      }
    case CIRCPAD_DIST_UNIFORM:
      {
        // param2 is upper bound, param1 is lower
        const struct uniform_t my_uniform = {
          .base = UNIFORM(my_uniform),
          .a = dist.param1,
          .b = dist.param2,
        };
        return dist_sample(&my_uniform.base);
      }
    case CIRCPAD_DIST_LOGISTIC:
      {
        /* param1 is Mu, param2 is sigma. */
        const struct logistic_t my_logistic = {
          .base = LOGISTIC(my_logistic),
          .mu = dist.param1,
          .sigma = dist.param2,
        };
        return dist_sample(&my_logistic.base);
      }
    case CIRCPAD_DIST_LOG_LOGISTIC:
      {
        /* param1 is Alpha, param2 is 1.0/Beta */
        const struct log_logistic_t my_log_logistic = {
          .base = LOG_LOGISTIC(my_log_logistic),
          .alpha = dist.param1,
          .beta = dist.param2,
        };
        return dist_sample(&my_log_logistic.base);
      }
    case CIRCPAD_DIST_GEOMETRIC:
      {
        /* param1 is 'p' (success probability) */
        const struct geometric_t my_geometric = {
          .base = GEOMETRIC(my_geometric),
          .p = dist.param1,
        };
        return dist_sample(&my_geometric.base);
      }
    case CIRCPAD_DIST_WEIBULL:
      {
        /* param1 is k, param2 is Lambda */
        const struct weibull_t my_weibull = {
          .base = WEIBULL(my_weibull),
          .k = dist.param1,
          .lambda = dist.param2,
        };
        return dist_sample(&my_weibull.base);
      }
    case CIRCPAD_DIST_PARETO:
      {
        /* param1 is sigma, param2 is xi, no more params for mu so we use 0 */
        const struct genpareto_t my_genpareto = {
          .base = GENPARETO(my_genpareto),
          .mu = 0,
          .sigma = dist.param1,
          .xi = dist.param2,
        };
        return dist_sample(&my_genpareto.base);
      }
  }

  /* Unknown distribution type: treat as a non-fatal bug and sample 0. */
  tor_assert_nonfatal_unreached();
  return 0;
}

/**
 * Find the index of the first bin whose upper bound is
 * greater than the target, and that has tokens remaining.
 *
 * Used for histograms with token removal.
 */
static circpad_hist_index_t
circpad_machine_first_higher_index(const circpad_machine_runtime_t *mi,
                                   circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  /* Don't remove from the infinity bin */
  for (; bin < CIRCPAD_INFINITY_BIN(mi); bin++) {
    if (mi->histogram[bin] &&
        histogram_get_bin_upper_bound(mi, bin) >= target_bin_usec) {
      return bin;
    }
  }

  /* histogram_len is the sentinel for "no higher bin with tokens". */
  return mi->histogram_len;
}

/**
 * Find the index of the first bin whose lower bound is lower or equal to
 * <b>target_bin_usec</b>, and that still has tokens remaining.
 *
 * Used for histograms with token removal.
 */
static circpad_hist_index_t
circpad_machine_first_lower_index(const circpad_machine_runtime_t *mi,
                                  circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  for (; bin >= 0; bin--) {
    if (mi->histogram[bin] &&
        circpad_histogram_bin_to_usec(mi, bin) <= target_bin_usec) {
      return bin;
    }
  }

  /* -1 is the sentinel for "no lower bin with tokens". */
  return -1;
}

/**
 * Remove a token from the first non-empty bin whose upper bound is
 * greater than the target.
 *
 * Used for histograms with token removal.
 */
STATIC void
circpad_machine_remove_higher_token(circpad_machine_runtime_t *mi,
                                    circpad_delay_t target_bin_usec)
{
  /* We need to remove the token from the first bin
   * whose upper bound is greater than the target, and that
   * has tokens remaining. */
  circpad_hist_index_t bin = circpad_machine_first_higher_index(mi,
                                                 target_bin_usec);

  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
    if (!BUG(mi->histogram[bin] == 0)) {
      mi->histogram[bin]--;
    }
  }
}

/**
 * Remove a token from the first non-empty bin whose upper bound is
 * lower than the target.
 *
 * Used for histograms with token removal.
 */
STATIC void
circpad_machine_remove_lower_token(circpad_machine_runtime_t *mi,
                                   circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_machine_first_lower_index(mi,
                                                 target_bin_usec);

  if (bin >= 0 && bin < CIRCPAD_INFINITY_BIN(mi)) {
    if (!BUG(mi->histogram[bin] == 0)) {
      mi->histogram[bin]--;
    }
  }
}

/* Helper macro: Ensure that the bin has tokens available, and BUG out of the
 * function if it's not the case. */
#define ENSURE_BIN_CAPACITY(bin_index) \
  if (BUG(mi->histogram[bin_index] == 0)) { \
    return; \
  }

/**
 * Remove a token from the closest non-empty bin to the target.
 *
 * If use_usec is true, measure "closest" in terms of the next closest bin
 * midpoint.
 *
 * If it is false, use bin index distance only.
 *
 * Used for histograms with token removal.
 */
STATIC void
circpad_machine_remove_closest_token(circpad_machine_runtime_t *mi,
                                     circpad_delay_t target_bin_usec,
                                     bool use_usec)
{
  circpad_hist_index_t lower, higher, current;
  circpad_hist_index_t bin_to_remove = -1;

  /* Candidate bins on either side of the target, plus the target's own bin.
   * lower == -1 / higher == histogram_len are the helpers' "none found"
   * sentinels. */
  lower = circpad_machine_first_lower_index(mi, target_bin_usec);
  higher = circpad_machine_first_higher_index(mi, target_bin_usec);
  current = circpad_histogram_usec_to_bin(mi, target_bin_usec);

  /* Sanity check the results */
  if (BUG(lower > current) || BUG(higher < current)) {
    return;
  }

  /* Take care of edge cases first */
  if (higher == mi->histogram_len && lower == -1) {
    /* All bins are empty */
    return;
  } else if (higher == mi->histogram_len) {
    /* All higher bins are empty */
    ENSURE_BIN_CAPACITY(lower);
    mi->histogram[lower]--;
    return;
  } else if (lower == -1) {
    /* All lower bins are empty */
    ENSURE_BIN_CAPACITY(higher);
    mi->histogram[higher]--;
    return;
  }

  /* Now handle the intermediate cases */
  if (use_usec) {
    /* Find the closest bin midpoint to the target */
    circpad_delay_t lower_usec = circpad_get_histogram_bin_midpoint(mi, lower);
    circpad_delay_t higher_usec =
        circpad_get_histogram_bin_midpoint(mi, higher);

    if (target_bin_usec < lower_usec) {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      bin_to_remove = lower;
    } else if (target_bin_usec > higher_usec) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      bin_to_remove = higher;
    } else if (target_bin_usec-lower_usec > higher_usec-target_bin_usec) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      bin_to_remove = higher;
    } else {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      bin_to_remove = lower;
    }
    mi->histogram[bin_to_remove]--;
    log_debug(LD_CIRC, "Removing token from bin %d", bin_to_remove);
    return;
  } else {
    if (current - lower > higher - current) {
      // Higher bin is closer
      ENSURE_BIN_CAPACITY(higher);
      mi->histogram[higher]--;
      return;
    } else {
      // Lower bin is closer
      ENSURE_BIN_CAPACITY(lower);
      mi->histogram[lower]--;
      return;
    }
  }
}

#undef ENSURE_BIN_CAPACITY

/**
 * Remove a token from the exact bin corresponding to the target.
 *
 * If it is empty, do nothing.
 *
 * Used for histograms with token removal.
 */
static void
circpad_machine_remove_exact(circpad_machine_runtime_t *mi,
                             circpad_delay_t target_bin_usec)
{
  circpad_hist_index_t bin = circpad_histogram_usec_to_bin(mi,
                                                           target_bin_usec);

  if (mi->histogram[bin] > 0)
    mi->histogram[bin]--;
}

/**
 * Check our state's cell limit count and tokens.
 *
 * Returns 1 if either limits are hit and we decide to change states,
 * otherwise returns 0.
 */
static circpad_decision_t
check_machine_token_supply(circpad_machine_runtime_t *mi)
{
  uint32_t histogram_total_tokens = 0;

  /* Check if bins empty. This requires summing up the current mutable
   * machineinfo histogram token total and checking if it is zero.
   * Machineinfo does not keep a running token count. We're assuming the
   * extra space is not worth this short loop iteration.
   *
   * We also do not count infinity bin in histogram totals.
   */
  if (circpad_is_token_removal_supported(mi)) {
    for (circpad_hist_index_t b = 0; b < CIRCPAD_INFINITY_BIN(mi); b++)
      histogram_total_tokens += mi->histogram[b];

    /* If we change state, we're done */
    if (histogram_total_tokens == 0) {
      if (circpad_internal_event_bins_empty(mi) == CIRCPAD_STATE_CHANGED)
        return CIRCPAD_STATE_CHANGED;
    }
  }

  /* The state length hitting zero can also trigger a transition. */
  if (mi->state_length == 0) {
    return circpad_internal_event_state_length_up(mi);
  }

  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * Count that a padding packet was sent.
 *
 * This updates our state length count, our machine rate limit counts,
 * and if token removal is used, decrements the histogram.
 */
static inline void
circpad_machine_count_padding_sent(circpad_machine_runtime_t *mi)
{
  /* If we have a valid state length bound, consider it */
  if (mi->state_length != CIRCPAD_STATE_LENGTH_INFINITE &&
      !BUG(mi->state_length <= 0)) {
    mi->state_length--;
  }

  /*
   * Update padding counts for rate limiting: We scale at UINT16_MAX
   * because we only use this for a percentile limit of 2 sig figs, and
   * space is scarce in the machineinfo struct.
   */
  mi->padding_sent++;
  if (mi->padding_sent == UINT16_MAX) {
    /* Halve both counters together so the padding/nonpadding ratio
     * is preserved while avoiding overflow. */
    mi->padding_sent /= 2;
    mi->nonpadding_sent /= 2;
  }

  circpad_global_padding_sent++;

  /* If we have a mutable histogram, reduce the token count from
   * the chosen padding bin (this assumes we always send padding
   * when we intended to). */
  if (circpad_is_token_removal_supported(mi)) {
    /* Check array bounds and token count before removing */
    if (!BUG(mi->chosen_bin >= mi->histogram_len) &&
        !BUG(mi->histogram[mi->chosen_bin] == 0)) {
      mi->histogram[mi->chosen_bin]--;
    }
  }
}

/**
 * Count a nonpadding packet as being sent.
 *
 * This function updates our overhead accounting variables, as well
 * as decrements the state limit packet counter, if the latter was
 * flagged as applying to non-padding as well.
 */
static inline void
circpad_machine_count_nonpadding_sent(circpad_machine_runtime_t *mi)
{
  /* Update non-padding counts for rate limiting: We scale at UINT16_MAX
   * because we only use this for a percentile limit of 2 sig figs, and
   * space is scarce in the machineinfo struct. */
  mi->nonpadding_sent++;
  if (mi->nonpadding_sent == UINT16_MAX) {
    /* Halve both counters together to preserve the ratio. */
    mi->padding_sent /= 2;
    mi->nonpadding_sent /= 2;
  }

  /* Update any state packet length limits that apply */
  circpad_machine_update_state_length_for_nonpadding(mi);

  /* Remove a token from the histogram, if applicable */
  circpad_machine_remove_token(mi);
}

/**
 * Decrement the state length counter for a non-padding packet.
 *
 * Only updates the state length if we're using that feature, we
 * have a state, and the machine wants to count non-padding packets
 * towards the state length.
 */
static inline void
circpad_machine_update_state_length_for_nonpadding(
        circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = NULL;

  if (mi->state_length == CIRCPAD_STATE_LENGTH_INFINITE)
    return;

  state = circpad_machine_current_state(mi);

  /* If we are not in a padding state (like start or end), we're done */
  if (!state)
    return;

  /* If we're enforcing a state length on non-padding packets,
   * decrement it */
  if (state->length_includes_nonpadding &&
      mi->state_length > 0) {
    mi->state_length--;
  }
}

/**
 * When a non-padding packet arrives, remove a token from the bin
 * corresponding to the delta since last sent packet. If that bin
 * is empty, choose a token based on the specified removal strategy
 * in the state machine.
 */
STATIC void
circpad_machine_remove_token(circpad_machine_runtime_t *mi)
{
  const circpad_state_t *state = NULL;
  circpad_time_t current_time;
  circpad_delay_t target_bin_usec;

  /* Don't remove any tokens if there was no padding scheduled */
  if (!mi->padding_scheduled_at_usec) {
    return;
  }

  state = circpad_machine_current_state(mi);

  /* If we are not in a padding state (like start or end), we're done */
  if (!state)
    return;
  /* Don't remove any tokens if we're not doing token removal */
  if (state->token_removal == CIRCPAD_TOKEN_REMOVAL_NONE)
    return;

  current_time = monotime_absolute_usec();

  /* If we have scheduled padding some time in the future, we want to see what
     bin we are in at the current time. Clamp to just below the infinity
     delay so the delta always maps to a real bin. */
  target_bin_usec = (circpad_delay_t)
                  MIN((current_time - mi->padding_scheduled_at_usec),
                      CIRCPAD_DELAY_INFINITE-1);

  /* We are treating this non-padding cell as a padding cell, so we cancel
     padding timer, if present. */
  mi->padding_scheduled_at_usec = 0;
  if (mi->is_padding_timer_scheduled) {
    mi->is_padding_timer_scheduled = 0;
    timer_disable(mi->padding_timer);
  }

  /* Perform the specified token removal strategy */
  switch (state->token_removal) {
    case CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC:
      circpad_machine_remove_closest_token(mi, target_bin_usec, 1);
      break;
    case CIRCPAD_TOKEN_REMOVAL_CLOSEST:
      circpad_machine_remove_closest_token(mi, target_bin_usec, 0);
      break;
    case CIRCPAD_TOKEN_REMOVAL_LOWER:
      circpad_machine_remove_lower_token(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_HIGHER:
      circpad_machine_remove_higher_token(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_EXACT:
      circpad_machine_remove_exact(mi, target_bin_usec);
      break;
    case CIRCPAD_TOKEN_REMOVAL_NONE:
    default:
      /* NONE was already handled by the early return above; reaching
       * here indicates a corrupted or unknown strategy value. */
      tor_assert_nonfatal_unreached();
      log_warn(LD_BUG, "Circpad: Unknown token removal strategy %d",
               state->token_removal);
      break;
  }
}

/**
 * Send a relay command with a relay cell payload on a circuit to
 * the particular hopnum.
 *
 * Hopnum starts at 1 (1=guard, 2=middle, 3=exit, etc).
 *
 * Payload may be null.
 *
 * Returns negative on error, 0 on success.
1185 */ 1186 MOCK_IMPL(STATIC signed_error_t, 1187 circpad_send_command_to_hop,(origin_circuit_t *circ, uint8_t hopnum, 1188 uint8_t relay_command, const uint8_t *payload, 1189 ssize_t payload_len)) 1190 { 1191 crypt_path_t *target_hop = circuit_get_cpath_hop(circ, hopnum); 1192 signed_error_t ret; 1193 1194 /* Check that the cpath has the target hop */ 1195 if (!target_hop) { 1196 log_fn(LOG_WARN, LD_BUG, "Padding circuit %u has %d hops, not %d", 1197 circ->global_identifier, circuit_get_cpath_len(circ), hopnum); 1198 return -1; 1199 } 1200 1201 /* Check that the target hop is opened */ 1202 if (target_hop->state != CPATH_STATE_OPEN) { 1203 log_fn(LOG_WARN,LD_CIRC, 1204 "Padding circuit %u has %d hops, not %d", 1205 circ->global_identifier, 1206 circuit_get_cpath_opened_len(circ), hopnum); 1207 return -1; 1208 } 1209 1210 /* Send the drop command to the second hop */ 1211 ret = relay_send_command_from_edge(0, TO_CIRCUIT(circ), relay_command, 1212 (const char*)payload, payload_len, 1213 target_hop); 1214 return ret; 1215 } 1216 1217 /** 1218 * Callback helper to send a padding cell. 1219 * 1220 * This helper is called after our histogram-sampled delay period passes 1221 * without another packet being sent first. If a packet is sent before this 1222 * callback happens, it is canceled. So when we're called here, send padding 1223 * right away. 1224 * 1225 * If sending this padding cell forced us to transition states return 1226 * CIRCPAD_STATE_CHANGED. Otherwise return CIRCPAD_STATE_UNCHANGED. 
 */
circpad_decision_t
circpad_send_padding_cell_for_callback(circpad_machine_runtime_t *mi)
{
  circuit_t *circ = mi->on_circ;
  int machine_idx = mi->machine_index;
  /* The timer has fired (or we are sending immediately), so clear the
   * scheduling bookkeeping before doing anything else. */
  mi->padding_scheduled_at_usec = 0;
  mi->is_padding_timer_scheduled = 0;
  /* Remember the state we started in, so we can detect a transition
   * caused by the padding_sent event below. */
  circpad_statenum_t state = mi->current_state;

  /* Make sure circuit didn't close on us */
  if (mi->on_circ->marked_for_close) {
    log_fn(LOG_INFO,LD_CIRC,
           "Padding callback on circuit marked for close (%u). Ignoring.",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
    return CIRCPAD_STATE_CHANGED;
  }

  circpad_machine_count_padding_sent(mi);

  if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
    /* Clients send the drop cell to the machine's target hop. */
    circpad_send_command_to_hop(TO_ORIGIN_CIRCUIT(mi->on_circ),
                                CIRCPAD_GET_MACHINE(mi)->target_hopnum,
                                RELAY_COMMAND_DROP, NULL, 0);
    log_info(LD_CIRC, "Callback: Sending padding to origin circuit %u"
             " (%d) [length: %"PRIu64"]",
             TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier,
             mi->on_circ->purpose, mi->state_length);
  } else {
    // If we're a non-origin circ, we can just send from here as if we're the
    // edge.
    if (TO_OR_CIRCUIT(circ)->p_chan_cells.n <= circpad_max_circ_queued_cells) {
      log_info(LD_CIRC, "Callback: Sending padding to circuit (%d)"
               " [length: %"PRIu64"]", mi->on_circ->purpose, mi->state_length);
      relay_send_command_from_edge(0, mi->on_circ, RELAY_COMMAND_DROP, NULL,
                                   0, NULL);
      rep_hist_padding_count_write(PADDING_TYPE_DROP);
    } else {
      /* Queue-pressure backoff: skip the padding cell rather than add to
       * an already-congested circuit queue. */
      static ratelim_t cell_lim = RATELIM_INIT(600);
      log_fn_ratelim(&cell_lim,LOG_NOTICE,LD_CIRC,
                     "Too many cells (%d) in circ queue to send padding.",
                     TO_OR_CIRCUIT(circ)->p_chan_cells.n);
    }
  }

  /* This is a padding cell sent from the client or from the middle node,
   * (because it's invoked from circuitpadding.c) */
  circpad_cell_event_padding_sent(circ);

  /* The circpad_cell_event_padding_sent() could cause us to transition.
   * Check that we still have a padding machineinfo, and then check our token
   * supply. (mi itself may have been freed by the event, so re-read the
   * machineinfo from the circuit via the saved index.) */
  if (circ->padding_info[machine_idx] != NULL) {
    if (state != circ->padding_info[machine_idx]->current_state)
      return CIRCPAD_STATE_CHANGED;
    else
      return check_machine_token_supply(circ->padding_info[machine_idx]);
  } else {
    return CIRCPAD_STATE_CHANGED;
  }
}

/**
 * Tor-timer compatible callback that tells us to send a padding cell.
 *
 * Timers are associated with circpad_machine_runtime_t's. When the machineinfo
 * is freed on a circuit, the timers are cancelled. Since the lifetime
 * of machineinfo is always longer than the timers, handles are not
 * needed.
1297 */ 1298 static void 1299 circpad_send_padding_callback(tor_timer_t *timer, void *args, 1300 const struct monotime_t *time) 1301 { 1302 circpad_machine_runtime_t *mi = ((circpad_machine_runtime_t*)args); 1303 (void)timer; (void)time; 1304 1305 if (mi && mi->on_circ) { 1306 assert_circuit_ok(mi->on_circ); 1307 circpad_send_padding_cell_for_callback(mi); 1308 } else { 1309 // This shouldn't happen (represents a timer leak) 1310 log_fn(LOG_WARN,LD_CIRC, 1311 "Circuit closed while waiting for padding timer."); 1312 tor_fragile_assert(); 1313 } 1314 1315 // TODO-MP-AP: Unify this counter with channelpadding for rephist stats 1316 //total_timers_pending--; 1317 } 1318 1319 /** 1320 * Cache our consensus parameters upon consensus update. 1321 */ 1322 void 1323 circpad_new_consensus_params(const networkstatus_t *ns) 1324 { 1325 circpad_padding_disabled = 1326 networkstatus_get_param(ns, "circpad_padding_disabled", 1327 0, 0, 1); 1328 1329 circpad_padding_reduced = 1330 networkstatus_get_param(ns, "circpad_padding_reduced", 1331 0, 0, 1); 1332 1333 circpad_global_allowed_cells = 1334 networkstatus_get_param(ns, "circpad_global_allowed_cells", 1335 0, 0, UINT16_MAX-1); 1336 1337 circpad_global_max_padding_percent = 1338 networkstatus_get_param(ns, "circpad_global_max_padding_pct", 1339 0, 0, 100); 1340 1341 circpad_max_circ_queued_cells = 1342 networkstatus_get_param(ns, "circpad_max_circ_queued_cells", 1343 CIRCWINDOW_START_MAX, 0, 50*CIRCWINDOW_START_MAX); 1344 } 1345 1346 /** 1347 * Return true if padding is allowed by torrc and consensus. 1348 */ 1349 static bool 1350 circpad_is_padding_allowed(void) 1351 { 1352 /* If padding has been disabled in the consensus, don't send any more 1353 * padding. Technically the machine should be shut down when the next 1354 * machine condition check happens, but machine checks only happen on 1355 * certain circuit events, and if padding is disabled due to some 1356 * network overload or DoS condition, we really want to stop ASAP. 
*/ 1357 if (circpad_padding_disabled || !get_options()->CircuitPadding) { 1358 return 0; 1359 } 1360 1361 return 1; 1362 } 1363 1364 /** 1365 * Check this machine against its padding limits, as well as global 1366 * consensus limits. 1367 * 1368 * We have two limits: a percent and a cell count. The cell count 1369 * limit must be reached before the percent is enforced (this is to 1370 * optionally allow very light padding of things like circuit setup 1371 * while there is no other traffic on the circuit). 1372 * 1373 * TODO: Don't apply limits to machines form torrc. 1374 * 1375 * Returns 1 if limits are set and we've hit them. Otherwise returns 0. 1376 */ 1377 STATIC bool 1378 circpad_machine_reached_padding_limit(circpad_machine_runtime_t *mi) 1379 { 1380 const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi); 1381 1382 /* If machine_padding_pct is non-zero, and we've sent more 1383 * than the allowed count of padding cells, then check our 1384 * percent limits for this machine. */ 1385 if (machine->max_padding_percent && 1386 mi->padding_sent >= machine->allowed_padding_count) { 1387 uint32_t total_cells = mi->padding_sent + mi->nonpadding_sent; 1388 1389 /* Check the percent */ 1390 if ((100*(uint32_t)mi->padding_sent) / total_cells > 1391 machine->max_padding_percent) { 1392 return 1; // limit is reached. Stop. 1393 } 1394 } 1395 1396 /* If circpad_max_global_padding_pct is non-zero, and we've 1397 * sent more than the global padding cell limit, then check our 1398 * global tor process percentage limit on padding. */ 1399 if (circpad_global_max_padding_percent && 1400 circpad_global_padding_sent >= circpad_global_allowed_cells) { 1401 uint64_t total_cells = circpad_global_padding_sent + 1402 circpad_global_nonpadding_sent; 1403 1404 /* Check the percent */ 1405 if ((100*circpad_global_padding_sent) / total_cells > 1406 circpad_global_max_padding_percent) { 1407 return 1; // global limit reached. Stop. 1408 } 1409 } 1410 1411 return 0; // All good! 
}

/**
 * Schedule the next padding time according to the machineinfo on a
 * circuit.
 *
 * The histograms represent inter-packet-delay. Whenever you get a packet
 * event you should be scheduling your next timer (after cancelling any old
 * ones and updating tokens accordingly).
 *
 * Returns 1 if we decide to transition states (due to infinity bin),
 * 0 otherwise.
 */
MOCK_IMPL(circpad_decision_t,
circpad_machine_schedule_padding,(circpad_machine_runtime_t *mi))
{
  circpad_delay_t in_usec = 0;
  struct timeval timeout;
  tor_assert(mi);

  /* Don't schedule padding if it is disabled */
  if (!circpad_is_padding_allowed()) {
    static ratelim_t padding_lim = RATELIM_INIT(600);
    log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
         "Padding has been disabled, but machine still on circuit %"PRIu64
         ", %d",
         mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
         mi->on_circ->n_circ_id);

    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Don't schedule padding if we are currently in dormant mode. */
  if (!is_participating_on_network()) {
    log_info(LD_CIRC, "Not scheduling padding because we are dormant.");
    return CIRCPAD_STATE_UNCHANGED;
  }

  // Don't pad in end (but also don't cancel any previously
  // scheduled padding either).
  if (mi->current_state == CIRCPAD_STATE_END) {
    log_fn(LOG_INFO, LD_CIRC, "Padding end state on circuit %u",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);
    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Check our padding limits */
  if (circpad_machine_reached_padding_limit(mi)) {
   if (CIRCUIT_IS_ORIGIN(mi->on_circ)) {
      log_fn(LOG_INFO, LD_CIRC,
           "Padding machine has reached padding limit on circuit %u",
             TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier);
    } else {
      static ratelim_t padding_lim = RATELIM_INIT(600);
      log_fn_ratelim(&padding_lim,LOG_INFO,LD_CIRC,
           "Padding machine has reached padding limit on circuit %"PRIu64
           ", %d",
           mi->on_circ->n_chan ? mi->on_circ->n_chan->global_identifier : 0,
           mi->on_circ->n_circ_id);
    }
    return CIRCPAD_STATE_UNCHANGED;
  }

  if (mi->is_padding_timer_scheduled) {
    /* Cancel current timer (if any) */
    timer_disable(mi->padding_timer);
    mi->is_padding_timer_scheduled = 0;
  }

  /* in_usec = in microseconds */
  in_usec = circpad_machine_sample_delay(mi);
  /* If we're using token removal, we need to know when the padding
   * was scheduled at, so we can remove the appropriate token if
   * a non-padding cell is sent before the padding timer expires.
   *
   * However, since monotime is unpredictably expensive, let's avoid
   * using it for machines that don't need token removal. (The sentinel
   * value 1 simply marks "padding was scheduled".) */
  if (circpad_is_token_removal_supported(mi)) {
    mi->padding_scheduled_at_usec = monotime_absolute_usec();
  } else {
    mi->padding_scheduled_at_usec = 1;
  }
  log_fn(LOG_INFO,LD_CIRC,"\tPadding in %u usec on circuit %u", in_usec,
         CIRCUIT_IS_ORIGIN(mi->on_circ) ?
             TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0);

  // Don't schedule if we have infinite delay.
  if (in_usec == CIRCPAD_DELAY_INFINITE) {
    return circpad_internal_event_infinity(mi);
  }

  if (mi->state_length == 0) {
    /* If we're at length 0, that means we hit 0 after sending
     * a cell earlier, and emitted an event for it, but
     * for whatever reason we did not decide to change states then.
     * So maybe the machine is waiting for bins empty, or for an
     * infinity event later? That would be a strange machine,
     * but there's no reason to make it impossible. */
    return CIRCPAD_STATE_UNCHANGED;
  }

  /* NOTE(review): circpad_delay_t appears unsigned, so this is
   * effectively an == 0 check ("send immediately"). */
  if (in_usec <= 0) {
    return circpad_send_padding_cell_for_callback(mi);
  }

  timeout.tv_sec = in_usec/TOR_USEC_PER_SEC;
  timeout.tv_usec = (in_usec%TOR_USEC_PER_SEC);

  log_fn(LOG_INFO, LD_CIRC, "\tPadding circuit %u in %u sec, %u usec",
     CIRCUIT_IS_ORIGIN(mi->on_circ) ?
         TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
     (unsigned)timeout.tv_sec, (unsigned)timeout.tv_usec);

  /* Reuse an existing timer if we have one; otherwise create one. */
  if (mi->padding_timer) {
    timer_set_cb(mi->padding_timer, circpad_send_padding_callback, mi);
  } else {
    mi->padding_timer =
        timer_new(circpad_send_padding_callback, mi);
  }
  timer_schedule(mi->padding_timer, &timeout);
  mi->is_padding_timer_scheduled = 1;

  // TODO-MP-AP: Unify with channelpadding counter
  //rep_hist_padding_count_timers(++total_timers_pending);

  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * If the machine transitioned to the END state, we need
 * to check to see if it wants us to shut it down immediately.
 * If it does, then we need to send the appropriate negotiation commands
 * depending on which side it is.
 *
 * After this function is called, mi may point to freed memory. Do
 * not access it.
 */
static void
circpad_machine_spec_transitioned_to_end(circpad_machine_runtime_t *mi)
{
  const circpad_machine_spec_t *machine = CIRCPAD_GET_MACHINE(mi);
  circuit_t *on_circ = mi->on_circ;

  log_fn(LOG_INFO,LD_CIRC, "Padding machine in end state on circuit %u (%d)",
         CIRCUIT_IS_ORIGIN(on_circ) ?
         TO_ORIGIN_CIRCUIT(on_circ)->global_identifier : 0,
         on_circ->purpose);

  /*
   * We allow machines to shut down and delete themselves as opposed
   * to just going back to START or waiting forever in END so that
   * we can handle the case where this machine started while it was
   * the only machine that matched conditions, but *since* then more
   * "higher ranking" machines now match the conditions, and would
   * be given a chance to take precedence over this one in
   * circpad_add_matching_machines().
   *
   * Returning to START or waiting forever in END would not give those
   * other machines a chance to be launched, whereas shutting down
   * here does.
   */
  if (machine->should_negotiate_end) {
    if (machine->is_origin_side) {
      /* We free the machine info here so that we can be replaced
       * by a different machine. But we must leave the padding_machine
       * in place to wait for the negotiated response.
       * (Save machine_ctr first: mi is freed by the free_idx call.) */
      uint32_t machine_ctr = mi->machine_ctr;
      circpad_circuit_machineinfo_free_idx(on_circ,
                                           machine->machine_index);
      circpad_negotiate_padding(TO_ORIGIN_CIRCUIT(on_circ),
                                machine->machine_num,
                                machine->target_hopnum,
                                CIRCPAD_COMMAND_STOP,
                                machine_ctr);
    } else {
      /* Relay side: acknowledge the stop and clear the machine slot. */
      uint32_t machine_ctr = mi->machine_ctr;
      circpad_circuit_machineinfo_free_idx(on_circ,
                                           machine->machine_index);
      circpad_padding_negotiated(on_circ,
                                 machine->machine_num,
                                 CIRCPAD_COMMAND_STOP,
                                 CIRCPAD_RESPONSE_OK,
                                 machine_ctr);
      on_circ->padding_machine[machine->machine_index] = NULL;
    }
  }
}

/**
 * Generic state transition function for padding state machines.
 *
 * Given an event and our mutable machine info, decide if/how to
 * transition to a different state, and perform actions accordingly.
 *
 * Returns 1 if we transition states, 0 otherwise.
 */
MOCK_IMPL(circpad_decision_t,
circpad_machine_spec_transition,(circpad_machine_runtime_t *mi,
                                 circpad_event_t event))
{
  const circpad_state_t *state =
      circpad_machine_current_state(mi);

  /* If state is null we are in the end state. */
  if (!state) {
    /* If we are in the end state we don't pad no matter what.
     */
    return CIRCPAD_STATE_UNCHANGED;
  }

  /* Check if this event is ignored or causes a cancel */
  if (state->next_state[event] == CIRCPAD_STATE_IGNORE) {
    return CIRCPAD_STATE_UNCHANGED;
  } else if (state->next_state[event] == CIRCPAD_STATE_CANCEL) {
    /* Check cancel events and cancel any pending padding */
    mi->padding_scheduled_at_usec = 0;
    if (mi->is_padding_timer_scheduled) {
      mi->is_padding_timer_scheduled = 0;
      /* Cancel current timer (if any) */
      timer_disable(mi->padding_timer);
    }
    return CIRCPAD_STATE_UNCHANGED;
  } else {
    circpad_statenum_t s = state->next_state[event];
    /* See if we need to transition to any other states based on this event.
     * Whenever a transition happens, even to our own state, we schedule
     * padding.
     *
     * So if a state only wants to schedule padding for an event, it specifies
     * a transition to itself. All non-specified events are ignored.
     */
    log_fn(LOG_INFO, LD_CIRC,
           "Circuit %u circpad machine %d transitioning from %u to %u",
           CIRCUIT_IS_ORIGIN(mi->on_circ) ?
           TO_ORIGIN_CIRCUIT(mi->on_circ)->global_identifier : 0,
           mi->machine_index, mi->current_state, s);

    /* If this is not the same state, switch and init tokens,
     * otherwise just reschedule padding. */
    if (mi->current_state != s) {
      mi->current_state = s;
      circpad_machine_setup_tokens(mi);
      circpad_choose_state_length(mi);

      /* If we transition to the end state, check to see
       * if this machine wants to be shut down at end */
      if (s == CIRCPAD_STATE_END) {
        circpad_machine_spec_transitioned_to_end(mi);
        /* We transitioned but we don't pad in end. Also, mi
         * may be freed. Returning STATE_CHANGED prevents us
         * from accessing it in any callers of this function. */
        return CIRCPAD_STATE_CHANGED;
      }

      /* We transitioned to a new state, schedule padding */
      circpad_machine_schedule_padding(mi);
      return CIRCPAD_STATE_CHANGED;
    }

    /* We transitioned back to the same state. Schedule padding,
     * and inform if that causes a state transition. */
    return circpad_machine_schedule_padding(mi);
  }

  /* NOTE(review): unreachable — every branch above returns. Kept for
   * defensive completeness. */
  return CIRCPAD_STATE_UNCHANGED;
}

/**
 * Estimate the circuit RTT from the current middle hop out to the
 * end of the circuit.
 *
 * We estimate RTT by calculating the time between "receive" and
 * "send" at a middle hop. This is because we "receive" a cell
 * from the origin, and then relay it towards the exit before a
 * response comes back. It is that response time from the exit side
 * that we want to measure, so that we can make use of it for synthetic
 * response delays.
 */
static void
circpad_estimate_circ_rtt_on_received(circuit_t *circ,
                                      circpad_machine_runtime_t *mi)
{
  /* Origin circuits don't estimate RTT. They could do it easily enough,
   * but they have no reason to use it in any delay calculations. */
  if (CIRCUIT_IS_ORIGIN(circ) || mi->stop_rtt_update)
    return;

  /* If we already have a last received packet time, that means we
   * did not get a response before this packet. The RTT estimate
   * only makes sense if we do not have multiple packets on the
   * wire, so stop estimating if this is the second packet
   * back to back. However, for the first set of back-to-back
   * packets, we can wait until the very first response comes back
   * to us, to measure that RTT (for the response to optimistic
   * data, for example). Hence stop_rtt_update is only checked
   * in this received side function, and not in send side below.
1708 */ 1709 if (mi->last_received_time_usec) { 1710 /* We also allow multiple back-to-back packets if the circuit is not 1711 * opened, to handle var cells. 1712 * XXX: Will this work with out var cell plans? Maybe not, 1713 * since we're opened at the middle hop as soon as we process 1714 * one var extend2 :/ */ 1715 if (circ->state == CIRCUIT_STATE_OPEN) { 1716 log_fn(LOG_INFO, LD_CIRC, 1717 "Stopping padding RTT estimation on circuit (%"PRIu64 1718 ", %d) after two back to back packets. Current RTT: %d", 1719 circ->n_chan ? circ->n_chan->global_identifier : 0, 1720 circ->n_circ_id, mi->rtt_estimate_usec); 1721 mi->stop_rtt_update = 1; 1722 1723 if (!mi->rtt_estimate_usec) { 1724 static ratelim_t rtt_lim = RATELIM_INIT(600); 1725 log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG, 1726 "Circuit got two cells back to back before estimating RTT."); 1727 } 1728 } 1729 } else { 1730 const circpad_state_t *state = circpad_machine_current_state(mi); 1731 if (BUG(!state)) { 1732 return; 1733 } 1734 1735 /* Since monotime is unpredictably expensive, only update this field 1736 * if rtt estimates are needed. Otherwise, stop the rtt update. */ 1737 if (state->use_rtt_estimate) { 1738 mi->last_received_time_usec = monotime_absolute_usec(); 1739 } else { 1740 /* Let's fast-path future decisions not to update rtt if the 1741 * feature is not in use. */ 1742 mi->stop_rtt_update = 1; 1743 } 1744 } 1745 } 1746 1747 /** 1748 * Handles the "send" side of RTT calculation at middle nodes. 1749 * 1750 * This function calculates the RTT from the middle to the end 1751 * of the circuit by subtracting the last received cell timestamp 1752 * from the current time. It allows back-to-back cells until 1753 * the circuit is opened, to allow for var cell handshakes. 1754 * XXX: Check our var cell plans to make sure this will work. 1755 */ 1756 static void 1757 circpad_estimate_circ_rtt_on_send(circuit_t *circ, 1758 circpad_machine_runtime_t *mi) 1759 { 1760 /* Origin circuits don't estimate RTT. 
They could do it easily enough, 1761 * but they have no reason to use it in any delay calculations. */ 1762 if (CIRCUIT_IS_ORIGIN(circ)) 1763 return; 1764 1765 /* If last_received_time_usec is non-zero, we are waiting for a response 1766 * from the exit side. Calculate the time delta and use it as RTT. */ 1767 if (mi->last_received_time_usec) { 1768 circpad_time_t rtt_time = monotime_absolute_usec() - 1769 mi->last_received_time_usec; 1770 1771 /* Reset the last RTT packet time, so we can tell if two cells 1772 * arrive back to back */ 1773 mi->last_received_time_usec = 0; 1774 1775 /* Use INT32_MAX to ensure the addition doesn't overflow */ 1776 if (rtt_time >= INT32_MAX) { 1777 log_fn(LOG_WARN,LD_CIRC, 1778 "Circuit padding RTT estimate overflowed: %"PRIu64 1779 " vs %"PRIu64, monotime_absolute_usec(), 1780 mi->last_received_time_usec); 1781 return; 1782 } 1783 1784 /* If the old RTT estimate is lower than this one, use this one, because 1785 * the circuit is getting longer. If this estimate is somehow 1786 * faster than the previous, then maybe that was network jitter, or a 1787 * bad monotonic clock source (so our ratchet returned a zero delta). 1788 * In that case, average them. */ 1789 if (mi->rtt_estimate_usec < (circpad_delay_t)rtt_time) { 1790 mi->rtt_estimate_usec = (circpad_delay_t)rtt_time; 1791 } else { 1792 mi->rtt_estimate_usec += (circpad_delay_t)rtt_time; 1793 mi->rtt_estimate_usec /= 2; 1794 } 1795 } else if (circ->state == CIRCUIT_STATE_OPEN) { 1796 /* If last_received_time_usec is zero, then we have gotten two cells back 1797 * to back. Stop estimating RTT in this case. Note that we only 1798 * stop RTT update if the circuit is opened, to allow for RTT estimates 1799 * of var cells during circ setup. 
*/ 1800 if (!mi->rtt_estimate_usec && !mi->stop_rtt_update) { 1801 static ratelim_t rtt_lim = RATELIM_INIT(600); 1802 log_fn_ratelim(&rtt_lim,LOG_NOTICE,LD_BUG, 1803 "Circuit sent two cells back to back before estimating RTT."); 1804 } 1805 mi->stop_rtt_update = 1; 1806 } 1807 } 1808 1809 /** 1810 * A "non-padding" cell has been sent from this endpoint. React 1811 * according to any padding state machines on the circuit. 1812 * 1813 * For origin circuits, this means we sent a cell into the network. 1814 * For middle relay circuits, this means we sent a cell towards the 1815 * origin. 1816 */ 1817 void 1818 circpad_cell_event_nonpadding_sent(circuit_t *on_circ) 1819 { 1820 /* Update global cell count */ 1821 circpad_global_nonpadding_sent++; 1822 1823 /* If there are no machines then this loop should not iterate */ 1824 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) { 1825 /* First, update any timestamps */ 1826 on_circ->padding_info[i]->last_cell_time_sec = approx_time(); 1827 circpad_estimate_circ_rtt_on_send(on_circ, on_circ->padding_info[i]); 1828 1829 /* Then, do accounting */ 1830 circpad_machine_count_nonpadding_sent(on_circ->padding_info[i]); 1831 1832 /* Check to see if we've run out of tokens for this state already, 1833 * and if not, check for other state transitions */ 1834 if (check_machine_token_supply(on_circ->padding_info[i]) 1835 == CIRCPAD_STATE_UNCHANGED) { 1836 /* If removing a token did not cause a transition, check if 1837 * non-padding sent event should */ 1838 circpad_machine_spec_transition(on_circ->padding_info[i], 1839 CIRCPAD_EVENT_NONPADDING_SENT); 1840 } 1841 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END; 1842 } 1843 1844 /** Check if this cell or circuit are related to circuit padding and handle 1845 * them if so. Return 0 if the cell was handled in this subsystem and does 1846 * not need any other consideration, otherwise return 1. 
 */
int
circpad_check_received_cell(const relay_msg_t *msg, circuit_t *circ,
                            crypt_path_t *layer_hint)
{
  /* First handle the padding commands, since we want to ignore any other
   * commands if this circuit is padding-specific. */
  switch (msg->command) {
    case RELAY_COMMAND_DROP:
      /* Already examined in circpad_deliver_recognized_relay_cell_events */
      return 0;
    case RELAY_COMMAND_PADDING_NEGOTIATE:
      circpad_handle_padding_negotiate(circ, msg);
      return 0;
    case RELAY_COMMAND_PADDING_NEGOTIATED:
      /* Only count the cell as valid data if the response parsed and was
       * accepted; otherwise vanguards-style accounting should not credit
       * it. */
      if (circpad_handle_padding_negotiated(circ, msg, layer_hint) == 0)
        circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), msg->length);
      return 0;
  }

  /* If this is a padding circuit we don't need to parse any other commands
   * than the padding ones. Just drop them to the floor.
   *
   * Note: we deliberately do not call circuit_read_valid_data() here. The
   * vanguards addon (specifically the 'bandguards' component's dropped cell
   * detection) will thus close this circuit, as it would for any other
   * unexpected cell. However, default tor will *not* close the circuit.
   *
   * This is intentional. We are not yet certain that it is optimal to keep
   * padding circuits open in cases like these, rather than closing them.
   * We suspect that continuing to pad is optimal against a passive classifier,
   * but as soon as the adversary is active (even as a client adversary) this
   * might change.
   *
   * So as a way forward, we log the cell command and circuit number, to
   * help us enumerate the most common instances of this in testing with
   * vanguards, to see which are common enough to verify and handle
   * properly.
   * - Mike
   */
  if (circ->purpose == CIRCUIT_PURPOSE_C_CIRCUIT_PADDING) {
    log_fn(LOG_PROTOCOL_WARN, LD_CIRC,
           "Ignored cell (%d) that arrived in padding circuit "
           " %u.", msg->command, CIRCUIT_IS_ORIGIN(circ) ?
           TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
    return 0;
  }

  return 1;
}

/**
 * A "non-padding" cell has been received by this endpoint. React
 * according to any padding state machines on the circuit.
 *
 * For origin circuits, this means we read a cell from the network.
 * For middle relay circuits, this means we received a cell from the
 * origin.
 */
void
circpad_cell_event_nonpadding_received(circuit_t *on_circ)
{
  FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) {
    /* First, update any timestamps */
    on_circ->padding_info[i]->last_cell_time_sec = approx_time();
    circpad_estimate_circ_rtt_on_received(on_circ, on_circ->padding_info[i]);

    circpad_machine_spec_transition(on_circ->padding_info[i],
                                    CIRCPAD_EVENT_NONPADDING_RECV);
  } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END;
}

/**
 * A padding cell has been sent from this endpoint. React
 * according to any padding state machines on the circuit.
 *
 * For origin circuits, this means we sent a cell into the network.
 * For middle relay circuits, this means we sent a cell towards the
 * origin.
1926 */ 1927 void 1928 circpad_cell_event_padding_sent(circuit_t *on_circ) 1929 { 1930 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) { 1931 /* Check to see if we've run out of tokens for this state already, 1932 * and if not, check for other state transitions */ 1933 if (check_machine_token_supply(on_circ->padding_info[i]) 1934 == CIRCPAD_STATE_UNCHANGED) { 1935 /* If removing a token did not cause a transition, check if 1936 * non-padding sent event should */ 1937 1938 on_circ->padding_info[i]->last_cell_time_sec = approx_time(); 1939 circpad_machine_spec_transition(on_circ->padding_info[i], 1940 CIRCPAD_EVENT_PADDING_SENT); 1941 } 1942 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END; 1943 } 1944 1945 /** 1946 * A padding cell has been received by this endpoint. React 1947 * according to any padding state machines on the circuit. 1948 * 1949 * For origin circuits, this means we read a cell from the network. 1950 * For middle relay circuits, this means we received a cell from the 1951 * origin. 1952 */ 1953 void 1954 circpad_cell_event_padding_received(circuit_t *on_circ) 1955 { 1956 /* identical to padding sent */ 1957 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, on_circ) { 1958 on_circ->padding_info[i]->last_cell_time_sec = approx_time(); 1959 circpad_machine_spec_transition(on_circ->padding_info[i], 1960 CIRCPAD_EVENT_PADDING_RECV); 1961 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END; 1962 } 1963 1964 /** 1965 * An "infinite" delay has ben chosen from one of our histograms. 1966 * 1967 * "Infinite" delays mean don't send padding -- but they can also 1968 * mean transition to another state depending on the state machine 1969 * definitions. Check the rules and react accordingly. 1970 * 1971 * Return 1 if we decide to transition, 0 otherwise. 
 */
circpad_decision_t
circpad_internal_event_infinity(circpad_machine_runtime_t *mi)
{
  return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_INFINITY);
}

/**
 * All of the bins of our current state's histogram are empty.
 *
 * Check to see if this means transition to another state, and if
 * not, refill the tokens.
 *
 * Return 1 if we decide to transition, 0 otherwise.
 */
circpad_decision_t
circpad_internal_event_bins_empty(circpad_machine_runtime_t *mi)
{
  if (circpad_machine_spec_transition(mi, CIRCPAD_EVENT_BINS_EMPTY)
      == CIRCPAD_STATE_CHANGED) {
    return CIRCPAD_STATE_CHANGED;
  } else {
    /* If we dont transition, then we refill the tokens */
    circpad_machine_setup_tokens(mi);
    return CIRCPAD_STATE_UNCHANGED;
  }
}

/**
 * This state has used up its cell count. Emit the event and
 * see if we transition.
 *
 * Return 1 if we decide to transition, 0 otherwise.
 */
circpad_decision_t
circpad_internal_event_state_length_up(circpad_machine_runtime_t *mi)
{
  return circpad_machine_spec_transition(mi, CIRCPAD_EVENT_LENGTH_COUNT);
}

/**
 * Returns true if the circuit matches the "apply" conditions of
 * <b>machine</b>: padding enabled, purpose mask, vanguards requirement,
 * circuit state mask, and minimum hop count.
 */
static inline bool
circpad_machine_conditions_apply(origin_circuit_t *circ,
                                 const circpad_machine_spec_t *machine)
{
  /* If padding is disabled, no machines should match/apply. This has
   * the effect of shutting down all machines, and not adding any more. */
  if (circpad_padding_disabled || !get_options()->CircuitPadding)
    return 0;

  /* If the consensus or our torrc has selected reduced connection padding,
   * then only allow this machine if it is flagged as acceptable under
   * reduced padding conditions */
  if (circpad_padding_reduced || get_options()->ReducedCircuitPadding) {
    if (!machine->conditions.reduced_padding_ok)
      return 0;
  }

  if (!(circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose)
      & machine->conditions.apply_purpose_mask))
    return 0;

  if (machine->conditions.requires_vanguards) {
    const or_options_t *options = get_options();

    /* Pinned middles are effectively vanguards */
    if (!(options->HSLayer2Nodes || options->HSLayer3Nodes))
      return 0;
  }

  /* We check for any bits set in the circuit state mask so that machines
   * can say any of the following through their state bitmask:
   * "I want to apply to circuits with either streams or no streams"; OR
   * "I only want to apply to circuits with streams"; OR
   * "I only want to apply to circuits without streams". */
  if (!(circpad_circuit_state(circ) & machine->conditions.apply_state_mask))
    return 0;

  if (circuit_get_cpath_opened_len(circ) < machine->conditions.min_hops)
    return 0;

  return 1;
}

/**
 * Check to see if any of the keep conditions still apply to this circuit.
 *
 * These conditions keep the machines active if they match, but do not
 * cause new machines to start up.
2063 */ 2064 static inline bool 2065 circpad_machine_conditions_keep(origin_circuit_t *circ, 2066 const circpad_machine_spec_t *machine) 2067 { 2068 if ((circpad_circ_purpose_to_mask(TO_CIRCUIT(circ)->purpose) 2069 & machine->conditions.keep_purpose_mask)) 2070 return 1; 2071 2072 if ((circpad_circuit_state(circ) & machine->conditions.keep_state_mask)) 2073 return 1; 2074 2075 return 0; 2076 } 2077 2078 /** 2079 * Returns a minimized representation of the circuit state. 2080 * 2081 * The padding code only cares if the circuit is building, 2082 * opened, used for streams, and/or still has relay early cells. 2083 * This returns a bitmask of all state properties that apply to 2084 * this circuit. 2085 */ 2086 static inline 2087 circpad_circuit_state_t 2088 circpad_circuit_state(origin_circuit_t *circ) 2089 { 2090 circpad_circuit_state_t retmask = 0; 2091 2092 if (circ->p_streams) 2093 retmask |= CIRCPAD_CIRC_STREAMS; 2094 else 2095 retmask |= CIRCPAD_CIRC_NO_STREAMS; 2096 2097 /* We use has_opened to prevent cannibialized circs from flapping. */ 2098 if (circ->has_opened) 2099 retmask |= CIRCPAD_CIRC_OPENED; 2100 else 2101 retmask |= CIRCPAD_CIRC_BUILDING; 2102 2103 if (circ->remaining_relay_early_cells > 0) 2104 retmask |= CIRCPAD_CIRC_HAS_RELAY_EARLY; 2105 else 2106 retmask |= CIRCPAD_CIRC_HAS_NO_RELAY_EARLY; 2107 2108 return retmask; 2109 } 2110 2111 /** 2112 * Convert a normal circuit purpose into a bitmask that we can 2113 * use for determining matching circuits. 2114 */ 2115 circpad_purpose_mask_t 2116 circpad_circ_purpose_to_mask(uint8_t circ_purpose) 2117 { 2118 /* Treat OR circ purposes as ignored. They should not be passed here*/ 2119 if (BUG(circ_purpose <= CIRCUIT_PURPOSE_OR_MAX_)) { 2120 return 0; 2121 } 2122 2123 /* Treat new client circuit purposes as "OMG ITS EVERYTHING". 
2124 * This also should not happen */ 2125 if (BUG(circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1 > 32)) { 2126 return CIRCPAD_PURPOSE_ALL; 2127 } 2128 2129 /* Convert the purpose to a bit position */ 2130 return 1 << (circ_purpose - CIRCUIT_PURPOSE_OR_MAX_ - 1); 2131 } 2132 2133 /** 2134 * Shut down any machines whose conditions no longer match 2135 * the current circuit. 2136 */ 2137 static void 2138 circpad_shutdown_old_machines(origin_circuit_t *on_circ) 2139 { 2140 circuit_t *circ = TO_CIRCUIT(on_circ); 2141 2142 FOR_EACH_ACTIVE_CIRCUIT_MACHINE_BEGIN(i, circ) { 2143 /* We shut down a machine if neither the apply conditions 2144 * nor the keep conditions match. If either set of conditions match, 2145 * keep it around. */ 2146 if (!circpad_machine_conditions_apply(on_circ, 2147 circ->padding_machine[i]) && 2148 !circpad_machine_conditions_keep(on_circ, 2149 circ->padding_machine[i])) { 2150 uint32_t machine_ctr = circ->padding_info[i]->machine_ctr; 2151 // Clear machineinfo (frees timers) 2152 circpad_circuit_machineinfo_free_idx(circ, i); 2153 // Send padding negotiate stop 2154 circpad_negotiate_padding(on_circ, 2155 circ->padding_machine[i]->machine_num, 2156 circ->padding_machine[i]->target_hopnum, 2157 CIRCPAD_COMMAND_STOP, 2158 machine_ctr); 2159 } 2160 } FOR_EACH_ACTIVE_CIRCUIT_MACHINE_END; 2161 } 2162 2163 /** 2164 * Negotiate new machines that would apply to this circuit, given the machines 2165 * inside <b>machines_sl</b>. 2166 * 2167 * This function checks to see if we have any free machine indexes, 2168 * and for each free machine index, it initializes the most recently 2169 * added origin-side padding machine that matches the target machine 2170 * index and circuit conditions, and negotiates it with the appropriate 2171 * middle relay. 
 */
STATIC void
circpad_add_matching_machines(origin_circuit_t *on_circ,
                              smartlist_t *machines_sl)
{
  circuit_t *circ = TO_CIRCUIT(on_circ);

#ifdef TOR_UNIT_TESTS
  /* Tests don't have to init our padding machines */
  if (!machines_sl)
    return;
#endif

  /* If padding negotiation failed before, do not try again */
  if (on_circ->padding_negotiation_failed)
    return;

  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    /* If there is a padding machine info, this index is occupied.
     * No need to check conditions for this index. */
    if (circ->padding_info[i])
      continue;

    /* We have a free machine index. Check the origin padding
     * machines in reverse order, so that more recently added
     * machines take priority over older ones. */
    SMARTLIST_FOREACH_REVERSE_BEGIN(machines_sl,
                                    circpad_machine_spec_t *,
                                    machine) {
      /* Machine definitions have a specific target machine index.
       * This is so event ordering is deterministic with respect
       * to which machine gets events first when there are two
       * machines installed on a circuit. Make sure we only
       * add this machine if its target machine index is free. */
      if (machine->machine_index == i &&
          circpad_machine_conditions_apply(on_circ, machine)) {

        // We can only replace this machine if the target hopnum
        // is the same, otherwise we'll get invalid data
        if (circ->padding_machine[i]) {
          if (circ->padding_machine[i]->target_hopnum !=
              machine->target_hopnum)
            continue;
          /* Replace it. (Don't free - is global). */
          circ->padding_machine[i] = NULL;
        }

        /* Set up the machine immediately so that the slot is occupied.
         * We will tear it down on error return, or if there is an error
         * response from the relay. */
        circpad_setup_machine_on_circ(circ, machine);
        if (circpad_negotiate_padding(on_circ, machine->machine_num,
                                      machine->target_hopnum,
                                      CIRCPAD_COMMAND_START,
                                      circ->padding_machine_ctr) < 0) {
          /* Negotiation failed: undo the setup and remember the failure
           * so we do not retry on this circuit. */
          log_info(LD_CIRC,
                   "Padding not negotiated. Cleaning machine from circuit %u",
                   CIRCUIT_IS_ORIGIN(circ) ?
                   TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
          circpad_circuit_machineinfo_free_idx(circ, i);
          circ->padding_machine[i] = NULL;
          on_circ->padding_negotiation_failed = 1;
        } else {
          /* Success. Don't try any more machines on this index */
          break;
        }
      }
    } SMARTLIST_FOREACH_END(machine);
  } FOR_EACH_CIRCUIT_MACHINE_END;
}

/**
 * Event that tells us we added a hop to an origin circuit.
 *
 * This event is used to decide if we should create a padding machine
 * on a circuit.
 */
void
circpad_machine_event_circ_added_hop(origin_circuit_t *on_circ)
{
  /* Since our padding conditions do not specify a max_hops,
   * all we can do is add machines here */
  circpad_add_matching_machines(on_circ, origin_padding_machines);
}

/**
 * Event that tells us that an origin circuit is now built.
 *
 * Shut down any machines that only applied to un-built circuits.
 * Activate any new ones.
 */
void
circpad_machine_event_circ_built(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Circpad purpose changed event.
 *
 * Shut down any machines that don't apply to our circ purpose.
 * Activate any new ones that do.
 */
void
circpad_machine_event_circ_purpose_changed(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Event that tells us that an origin circuit is out of RELAY_EARLY
 * cells.
 *
 * Shut down any machines that only applied to RELAY_EARLY circuits.
 * Activate any new ones.
 */
void
circpad_machine_event_circ_has_no_relay_early(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Streams attached event.
 *
 * Called from link_apconn_to_circ() and handle_hs_exit_conn()
 *
 * Shut down any machines that only applied to machines without
 * streams. Activate any new ones.
 */
void
circpad_machine_event_circ_has_streams(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Streams detached event.
 *
 * Called from circuit_detach_stream()
 *
 * Shut down any machines that only applied to machines without
 * streams. Activate any new ones.
 */
void
circpad_machine_event_circ_has_no_streams(origin_circuit_t *circ)
{
  circpad_shutdown_old_machines(circ);
  circpad_add_matching_machines(circ, origin_padding_machines);
}

/**
 * Verify that padding is coming from the expected hop.
 *
 * Returns true if from_hop matches the target hop from
 * one of our padding machines.
 *
 * Returns false if we're not an origin circuit, or if from_hop
 * does not match one of the padding machines.
 */
bool
circpad_padding_is_from_expected_hop(circuit_t *circ,
                                     crypt_path_t *from_hop)
{
  crypt_path_t *target_hop = NULL;
  if (!CIRCUIT_IS_ORIGIN(circ))
    return 0;

  FOR_EACH_CIRCUIT_MACHINE_BEGIN(i) {
    /* We have to check padding_machine and not padding_info/active
     * machines here because padding may arrive after we shut down a
     * machine. The info is gone, but the padding_machine waits
     * for the padding_negotiated response to come back. */
    if (!circ->padding_machine[i])
      continue;

    target_hop = circuit_get_cpath_hop(TO_ORIGIN_CIRCUIT(circ),
                                    circ->padding_machine[i]->target_hopnum);

    if (target_hop == from_hop)
      return 1;
  } FOR_EACH_CIRCUIT_MACHINE_END;

  return 0;
}

/**
 * Deliver circpad events for an "unrecognized cell".
 *
 * Unrecognized cells are sent to relays and are forwarded
 * onto the next hop of their circuits. Unrecognized cells
 * are by definition not padding. We need to tell relay-side
 * state machines that a non-padding cell was sent or received,
 * depending on the direction, so they can update their histograms
 * and decide to pad or not.
 */
void
circpad_deliver_unrecognized_cell_events(circuit_t *circ,
                                         cell_direction_t dir)
{
  // We should never see unrecognized cells at origin.
  // Our caller emits a warn when this happens.
  if (CIRCUIT_IS_ORIGIN(circ)) {
    return;
  }

  if (dir == CELL_DIRECTION_OUT) {
    /* When direction is out (away from origin), then we received non-padding
       cell coming from the origin to us. */
    circpad_cell_event_nonpadding_received(circ);
  } else if (dir == CELL_DIRECTION_IN) {
    /* It's in and not origin, so the cell is going away from us.
     * So we are relaying a non-padding cell towards the origin. */
    circpad_cell_event_nonpadding_sent(circ);
  }
}

/**
 * Deliver circpad events for "recognized" relay cells.
 *
 * Recognized cells are destined for this hop, either client or middle.
 * Check if this is a padding cell or not, and send the appropriate
 * received event.
 */
void
circpad_deliver_recognized_relay_cell_events(circuit_t *circ,
                                             uint8_t relay_command,
                                             crypt_path_t *layer_hint)
{
  if (relay_command == RELAY_COMMAND_DROP) {
    rep_hist_padding_count_read(PADDING_TYPE_DROP);

    if (CIRCUIT_IS_ORIGIN(circ)) {
      if (circpad_padding_is_from_expected_hop(circ, layer_hint)) {
        circuit_read_valid_data(TO_ORIGIN_CIRCUIT(circ), 0);
      } else {
        /* This is unexpected padding. Ignore it for now. */
        return;
      }
    }

    /* The cell should be recognized by now, which means that we are on the
       destination, which means that we received a padding cell. We might be
       the client or the Middle node, still, because leaky-pipe. */
    circpad_cell_event_padding_received(circ);
    log_fn(LOG_INFO, LD_CIRC, "Got padding cell on %s circuit %u.",
           CIRCUIT_IS_ORIGIN(circ) ? "origin" : "non-origin",
           CIRCUIT_IS_ORIGIN(circ) ?
           TO_ORIGIN_CIRCUIT(circ)->global_identifier : 0);
  } else {
    /* We received a non-padding cell on the edge */
    circpad_cell_event_nonpadding_received(circ);
  }
}

/**
 * Deliver circpad events for relay cells sent from us.
 *
 * If this is a padding cell, update our padding stats
 * and deliver the event. Otherwise just deliver the event.
 */
void
circpad_deliver_sent_relay_cell_events(circuit_t *circ,
                                       uint8_t relay_command)
{
  /* RELAY_COMMAND_DROP is the multi-hop (aka circuit-level) padding cell in
   * tor. (CELL_PADDING is a channel-level padding cell, which is not relayed
   * or processed here).
   *
   * We do generate events for PADDING_NEGOTIATE and PADDING_NEGOTIATED cells.
   */
  if (relay_command == RELAY_COMMAND_DROP) {
    /* Optimization: The event for RELAY_COMMAND_DROP is sent directly
     * from circpad_send_padding_cell_for_callback(). This is to avoid
     * putting a cell_t and a relay_header_t on the stack repeatedly
     * if we decide to send a long train of padding cells back-to-back
     * with 0 delay. So we do nothing here. */
    return;
  } else {
    /* This is a non-padding cell sent from the client or from
     * this node. */
    circpad_cell_event_nonpadding_sent(circ);
  }
}

/**
 * Initialize the states array for a circpad machine.
 */
void
circpad_machine_states_init(circpad_machine_spec_t *machine,
                            circpad_statenum_t num_states)
{
  if (BUG(num_states > CIRCPAD_MAX_MACHINE_STATES)) {
    num_states = CIRCPAD_MAX_MACHINE_STATES;
  }

  machine->num_states = num_states;
  machine->states = tor_malloc_zero(sizeof(circpad_state_t)*num_states);

  /* Initialize the default next state for all events to
   * "ignore" -- if events aren't specified, they are ignored. */
  for (circpad_statenum_t s = 0; s < num_states; s++) {
    for (int e = 0; e < CIRCPAD_NUM_EVENTS; e++) {
      machine->states[s].next_state[e] = CIRCPAD_STATE_IGNORE;
    }
  }
}

/** Install <b>machine</b> on <b>on_circ</b>: allocate its runtime
 * machineinfo and record the spec pointer, after sanity-checking that the
 * machine side (origin vs relay) matches the circuit and the slot is
 * free. */
static void
circpad_setup_machine_on_circ(circuit_t *on_circ,
                              const circpad_machine_spec_t *machine)
{
  if (CIRCUIT_IS_ORIGIN(on_circ) && !machine->is_origin_side) {
    log_fn(LOG_WARN, LD_BUG,
           "Can't set up non-origin machine on origin circuit!");
    return;
  }

  if (!CIRCUIT_IS_ORIGIN(on_circ) && machine->is_origin_side) {
    log_fn(LOG_WARN, LD_BUG,
           "Can't set up origin machine on non-origin circuit!");
    return;
  }

  IF_BUG_ONCE(on_circ->padding_machine[machine->machine_index] != NULL) {
    return;
  }
  IF_BUG_ONCE(on_circ->padding_info[machine->machine_index] != NULL) {
    return;
  }

  /* Log message */
  if (CIRCUIT_IS_ORIGIN(on_circ)) {
    log_info(LD_CIRC, "Registering machine %s to origin circ %u (%d)",
             machine->name,
             TO_ORIGIN_CIRCUIT(on_circ)->global_identifier, on_circ->purpose);
  } else {
    log_info(LD_CIRC, "Registering machine %s to non-origin circ (%d)",
             machine->name, on_circ->purpose);
  }

  /* Padding machine ctr starts at 1, so we increment this ctr first.
   * (machine ctr of 0 means "any machine").
   *
   * See https://bugs.torproject.org/30992. */
  on_circ->padding_machine_ctr++;

  /* uint32 wraparound check: 0 is special, just wrap to 1 */
  if (on_circ->padding_machine_ctr == 0) {
    on_circ->padding_machine_ctr = 1;
  }

  on_circ->padding_info[machine->machine_index] =
      circpad_circuit_machineinfo_new(on_circ, machine->machine_index);
  on_circ->padding_machine[machine->machine_index] = machine;
}

/** Validate a single state of a padding machine */
static bool
padding_machine_state_is_valid(const circpad_state_t *state)
{
  int b;
  uint32_t tokens_count = 0;
  circpad_delay_t prev_bin_edge = 0;

  /* We only validate histograms */
  if (!state->histogram_len) {
    return true;
  }

  /* We need at least two bins in a histogram */
  if (state->histogram_len < 2) {
    log_warn(LD_CIRC, "You can't have a histogram with less than 2 bins");
    return false;
  }

  /* For each machine state, if it's a histogram, make sure all the
   * histogram edges are well defined (i.e. are strictly monotonic). */
  for (b = 0 ; b < state->histogram_len ; b++) {
    /* Check that histogram edges are strictly increasing. Ignore the first
     * edge since it can be zero. */
    if (prev_bin_edge >= state->histogram_edges[b] && b > 0) {
      log_warn(LD_CIRC, "Histogram edges are not increasing [%u/%u]",
               prev_bin_edge, state->histogram_edges[b]);
      return false;
    }

    prev_bin_edge = state->histogram_edges[b];

    /* Also count the number of tokens as we go through the histogram states */
    tokens_count += state->histogram[b];
  }
  /* Verify that the total number of tokens is correct */
  if (tokens_count != state->histogram_total_tokens) {
    log_warn(LD_CIRC, "Histogram token count is wrong [%u/%u]",
             tokens_count, state->histogram_total_tokens);
    return false;
  }

  return true;
}

/** Basic validation of padding machine */
static bool
padding_machine_is_valid(const circpad_machine_spec_t *machine)
{
  int i;

  /* Validate the histograms of the padding machine */
  for (i = 0 ; i < machine->num_states ; i++) {
    if (!padding_machine_state_is_valid(&machine->states[i])) {
      return false;
    }
  }

  return true;
}

/* Validate and register <b>machine</b> into <b>machine_list</b>. If
 * <b>machine_list</b> is NULL, then just validate. */
void
circpad_register_padding_machine(circpad_machine_spec_t *machine,
                                 smartlist_t *machine_list)
{
  if (!padding_machine_is_valid(machine)) {
    log_warn(LD_CIRC, "Machine #%u is invalid. Ignoring.",
             machine->machine_num);
    return;
  }

  if (machine_list) {
    smartlist_add(machine_list, machine);
  }
}

#ifdef TOR_UNIT_TESTS
/* These padding machines are only used for tests pending #28634.
 */
static void
circpad_circ_client_machine_init(void)
{
  circpad_machine_spec_t *circ_client_machine
      = tor_malloc_zero(sizeof(circpad_machine_spec_t));

  circ_client_machine->conditions.min_hops = 2;
  circ_client_machine->conditions.apply_state_mask =
      CIRCPAD_CIRC_BUILDING|CIRCPAD_CIRC_OPENED|CIRCPAD_CIRC_HAS_RELAY_EARLY;
  circ_client_machine->conditions.apply_purpose_mask = CIRCPAD_PURPOSE_ALL;
  circ_client_machine->conditions.reduced_padding_ok = 1;

  circ_client_machine->target_hopnum = 2;
  circ_client_machine->is_origin_side = 1;

  /* Start, gap, burst */
  circpad_machine_states_init(circ_client_machine, 3);

  circ_client_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;

  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;
  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;

  /* If we are in burst state, and we send a non-padding cell, then we cancel
     the timer for the next padding cell:
     We dont want to send fake extends when actual extends are going on */
  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_CANCEL;

  circ_client_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;

  circ_client_machine->states[CIRCPAD_STATE_BURST].token_removal =
      CIRCPAD_TOKEN_REMOVAL_CLOSEST;

  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1]= 1000000;

  /* We have 5 tokens in the histogram, which means that all circuits will look
   * like they have 7 hops (since we start this machine after the second hop,
   * and tokens are decremented for any valid hops, and fake extends are
   * used after that -- 2+5==7). */
  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 5;

  circ_client_machine->states[CIRCPAD_STATE_BURST].histogram_total_tokens = 5;

  circ_client_machine->machine_num = smartlist_len(origin_padding_machines);
  circpad_register_padding_machine(circ_client_machine,
                                   origin_padding_machines);
}

/** Test-only relay-side counterpart of the client machine above: responds
 * to padding with delayed padding so circuit setup traffic is imitated. */
static void
circpad_circ_responder_machine_init(void)
{
  circpad_machine_spec_t *circ_responder_machine
      = tor_malloc_zero(sizeof(circpad_machine_spec_t));

  /* Shut down the machine after we've sent enough packets */
  circ_responder_machine->should_negotiate_end = 1;

  /* The relay-side doesn't care what hopnum it is, but for consistency,
   * let's match the client */
  circ_responder_machine->target_hopnum = 2;
  circ_responder_machine->is_origin_side = 0;

  /* Start, gap, burst */
  circpad_machine_states_init(circ_responder_machine, 3);

  /* This is the settings of the state machine. In the future we are gonna
     serialize this into the consensus or the torrc */

  /* We transition to the burst state on padding receive and on non-padding
   * receive */
  circ_responder_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_BURST;
  circ_responder_machine->states[CIRCPAD_STATE_START].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_BURST;

  /* Inside the burst state we _stay_ in the burst state when a non-padding
   * is sent */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_BURST;

  /* Inside the burst state we transition to the gap state when we receive a
   * padding cell */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;

  /* These describe the padding characteristics when in burst state */

  /* use_rtt_estimate tries to estimate how long padding cells take to go from
     C->M, and uses that as what as the base of the histogram */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].use_rtt_estimate = 1;
  /* The histogram is 2 bins: an empty one, and infinity */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_len = 2;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[0]= 500;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram_edges[1] =
      1000000;
  /* During burst state we wait forever for padding to arrive.

     We are waiting for a padding cell from the client to come in, so that we
     respond, and we imitate how extend looks like */
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[0] = 0;
  // Only infinity bin:
  circ_responder_machine->states[CIRCPAD_STATE_BURST].histogram[1] = 1;
  circ_responder_machine->states[CIRCPAD_STATE_BURST].
      histogram_total_tokens = 1;

  /* From the gap state, we _stay_ in the gap state, when we receive padding
   * or non padding */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_PADDING_RECV] = CIRCPAD_STATE_GAP;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_NONPADDING_RECV] = CIRCPAD_STATE_GAP;

  /* And from the gap state, we go to the end, when the bins are empty or a
   * non-padding cell is sent */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_BINS_EMPTY] = CIRCPAD_STATE_END;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].
      next_state[CIRCPAD_EVENT_NONPADDING_SENT] = CIRCPAD_STATE_END;

  // FIXME: Tune this histogram

  /* The gap state is the delay you wait after you receive a padding cell
     before you send a padding response */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].use_rtt_estimate = 1;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_len = 6;
  /* Specify histogram bins */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[0]= 500;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[1]= 1000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[2]= 2000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[3]= 4000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[4]= 8000;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_edges[5]= 16000;
  /* Specify histogram tokens */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[0] = 0;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[1] = 1;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[2] = 2;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[3] = 2;
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram[4] = 1;
  /* Total number of tokens */
  circ_responder_machine->states[CIRCPAD_STATE_GAP].histogram_total_tokens = 6;

  circ_responder_machine->states[CIRCPAD_STATE_GAP].token_removal =
      CIRCPAD_TOKEN_REMOVAL_CLOSEST_USEC;

  circ_responder_machine->machine_num = smartlist_len(relay_padding_machines);
  circpad_register_padding_machine(circ_responder_machine,
                                   relay_padding_machines);
}
#endif /* defined(TOR_UNIT_TESTS) */

/**
 * Initialize all of our padding machines.
 *
 * This is called at startup. It sets up some global machines, and then
 * loads some from torrc, and from the tor consensus.
 */
void
circpad_machines_init(void)
{
  tor_assert_nonfatal(origin_padding_machines == NULL);
  tor_assert_nonfatal(relay_padding_machines == NULL);

  origin_padding_machines = smartlist_new();
  relay_padding_machines = smartlist_new();

  /* Register machines for hiding client-side intro circuits */
  circpad_machine_client_hide_intro_circuits(origin_padding_machines);
  circpad_machine_relay_hide_intro_circuits(relay_padding_machines);

  /* Register machines for hiding client-side rendezvous circuits */
  circpad_machine_client_hide_rend_circuits(origin_padding_machines);
  circpad_machine_relay_hide_rend_circuits(relay_padding_machines);

  // TODO: Parse machines from consensus and torrc
#ifdef TOR_UNIT_TESTS
  circpad_circ_client_machine_init();
  circpad_circ_responder_machine_init();
#endif
}

/**
 * Free our padding machines
 */
void
circpad_machines_free(void)
{
  if (origin_padding_machines) {
    SMARTLIST_FOREACH(origin_padding_machines,
                      circpad_machine_spec_t *,
                      m, tor_free(m->states); tor_free(m));
    smartlist_free(origin_padding_machines);
  }

  if (relay_padding_machines) {
    SMARTLIST_FOREACH(relay_padding_machines,
                      circpad_machine_spec_t *,
                      m, tor_free(m->states); tor_free(m));
    smartlist_free(relay_padding_machines);
  }
}

/**
 * Check the Protover info to see if a node supports padding.
2823 */ 2824 static bool 2825 circpad_node_supports_padding(const node_t *node) 2826 { 2827 if (node->rs) { 2828 log_fn(LOG_INFO, LD_CIRC, "Checking padding: %s", 2829 node->rs->pv.supports_hs_setup_padding ? 2830 "supported" : "unsupported"); 2831 return node->rs->pv.supports_hs_setup_padding; 2832 } 2833 2834 log_fn(LOG_INFO, LD_CIRC, "Empty routerstatus in padding check"); 2835 return 0; 2836 } 2837 2838 /** 2839 * Get a node_t for the nth hop in our circuit, starting from 1. 2840 * 2841 * Returns node_t from the consensus for that hop, if it is opened. 2842 * Otherwise returns NULL. 2843 */ 2844 MOCK_IMPL(STATIC const node_t *, 2845 circuit_get_nth_node,(origin_circuit_t *circ, int hop)) 2846 { 2847 crypt_path_t *iter = circuit_get_cpath_hop(circ, hop); 2848 2849 if (!iter || iter->state != CPATH_STATE_OPEN) 2850 return NULL; 2851 2852 return node_get_by_id(iter->extend_info->identity_digest); 2853 } 2854 2855 /** 2856 * Return true if a particular circuit supports padding 2857 * at the desired hop. 2858 */ 2859 static bool 2860 circpad_circuit_supports_padding(origin_circuit_t *circ, 2861 int target_hopnum) 2862 { 2863 const node_t *hop; 2864 2865 if (!(hop = circuit_get_nth_node(circ, target_hopnum))) { 2866 return 0; 2867 } 2868 2869 return circpad_node_supports_padding(hop); 2870 } 2871 2872 /** 2873 * Try to negotiate padding. 2874 * 2875 * Returns -1 on error, 0 on success. 
2876 */ 2877 signed_error_t 2878 circpad_negotiate_padding(origin_circuit_t *circ, 2879 circpad_machine_num_t machine, 2880 uint8_t target_hopnum, 2881 uint8_t command, 2882 uint32_t machine_ctr) 2883 { 2884 circpad_negotiate_t type; 2885 cell_t cell; 2886 ssize_t len; 2887 2888 /* Check that the target hop lists support for padding in 2889 * its ProtoVer fields */ 2890 if (!circpad_circuit_supports_padding(circ, target_hopnum)) { 2891 return -1; 2892 } 2893 2894 memset(&cell, 0, sizeof(cell_t)); 2895 memset(&type, 0, sizeof(circpad_negotiate_t)); 2896 // This gets reset to RELAY_EARLY appropriately by 2897 // relay_send_command_from_edge_. At least, it looks that way. 2898 // QQQ-MP-AP: Verify that. 2899 cell.command = CELL_RELAY; 2900 2901 circpad_negotiate_set_command(&type, command); 2902 circpad_negotiate_set_version(&type, 0); 2903 circpad_negotiate_set_machine_type(&type, machine); 2904 circpad_negotiate_set_machine_ctr(&type, machine_ctr); 2905 2906 if ((len = circpad_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE, 2907 &type)) < 0) 2908 return -1; 2909 2910 log_fn(LOG_INFO,LD_CIRC, 2911 "Negotiating padding on circuit %u (%d), command %d, for ctr %u", 2912 circ->global_identifier, TO_CIRCUIT(circ)->purpose, command, 2913 machine_ctr); 2914 2915 return circpad_send_command_to_hop(circ, target_hopnum, 2916 RELAY_COMMAND_PADDING_NEGOTIATE, 2917 cell.payload, len); 2918 } 2919 2920 /** 2921 * Try to negotiate padding. 2922 * 2923 * Returns 1 if successful (or already set up), 0 otherwise. 2924 */ 2925 bool 2926 circpad_padding_negotiated(circuit_t *circ, 2927 circpad_machine_num_t machine, 2928 uint8_t command, 2929 uint8_t response, 2930 uint32_t machine_ctr) 2931 { 2932 circpad_negotiated_t type; 2933 cell_t cell; 2934 ssize_t len; 2935 2936 memset(&cell, 0, sizeof(cell_t)); 2937 memset(&type, 0, sizeof(circpad_negotiated_t)); 2938 // This gets reset to RELAY_EARLY appropriately by 2939 // relay_send_command_from_edge_. At least, it looks that way. 
2940 // QQQ-MP-AP: Verify that. 2941 cell.command = CELL_RELAY; 2942 2943 circpad_negotiated_set_command(&type, command); 2944 circpad_negotiated_set_response(&type, response); 2945 circpad_negotiated_set_version(&type, 0); 2946 circpad_negotiated_set_machine_type(&type, machine); 2947 circpad_negotiated_set_machine_ctr(&type, machine_ctr); 2948 2949 if ((len = circpad_negotiated_encode(cell.payload, CELL_PAYLOAD_SIZE, 2950 &type)) < 0) 2951 return 0; 2952 2953 /* Use relay_send because we're from the middle to the origin. We don't 2954 * need to specify a target hop or layer_hint. */ 2955 return relay_send_command_from_edge(0, circ, 2956 RELAY_COMMAND_PADDING_NEGOTIATED, 2957 (void*)cell.payload, 2958 (size_t)len, NULL) == 0; 2959 } 2960 2961 /** 2962 * Parse and react to a padding_negotiate cell. 2963 * 2964 * This is called at the middle node upon receipt of the client's choice of 2965 * state machine, so that it can use the requested state machine index, if 2966 * it is available. 2967 * 2968 * Returns -1 on error, 0 on success. 2969 */ 2970 signed_error_t 2971 circpad_handle_padding_negotiate(circuit_t *circ, const relay_msg_t *msg) 2972 { 2973 int retval = 0; 2974 /* Should we send back a STOP cell? 
*/ 2975 bool respond_with_stop = true; 2976 circpad_negotiate_t *negotiate; 2977 2978 if (CIRCUIT_IS_ORIGIN(circ)) { 2979 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 2980 "Padding negotiate cell unsupported at origin (circuit %u)", 2981 TO_ORIGIN_CIRCUIT(circ)->global_identifier); 2982 return -1; 2983 } 2984 2985 if (circpad_negotiate_parse(&negotiate, msg->body, msg->length) < 0) { 2986 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 2987 "Received malformed PADDING_NEGOTIATE cell; dropping."); 2988 return -1; 2989 } 2990 2991 if (negotiate->command == CIRCPAD_COMMAND_STOP) { 2992 /* Free the machine corresponding to this machine type */ 2993 if (free_circ_machineinfos_with_machine_num(circ, 2994 negotiate->machine_type, 2995 negotiate->machine_ctr)) { 2996 log_info(LD_CIRC, "Received STOP command for machine %u, ctr %u", 2997 negotiate->machine_type, negotiate->machine_ctr); 2998 goto done; 2999 } 3000 3001 /* If we reached this point we received a STOP command from an old or 3002 unknown machine. Don't reply with our own STOP since there is no one to 3003 handle it on the other end */ 3004 respond_with_stop = false; 3005 3006 if (negotiate->machine_ctr <= circ->padding_machine_ctr) { 3007 log_info(LD_CIRC, "Received STOP command for old machine %u, ctr %u", 3008 negotiate->machine_type, negotiate->machine_ctr); 3009 goto done; 3010 3011 } else { 3012 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3013 "Received circuit padding stop command for unknown machine."); 3014 goto err; 3015 } 3016 } else if (negotiate->command == CIRCPAD_COMMAND_START) { 3017 SMARTLIST_FOREACH_BEGIN(relay_padding_machines, 3018 const circpad_machine_spec_t *, m) { 3019 if (m->machine_num == negotiate->machine_type) { 3020 circpad_setup_machine_on_circ(circ, m); 3021 if (negotiate->machine_ctr && 3022 circ->padding_machine_ctr != negotiate->machine_ctr) { 3023 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3024 "Client and relay have different counts for padding machines: " 3025 "%u vs %u", circ->padding_machine_ctr, 
negotiate->machine_ctr); 3026 } 3027 circpad_cell_event_nonpadding_received(circ); 3028 goto done; 3029 } 3030 } SMARTLIST_FOREACH_END(m); 3031 } 3032 3033 err: 3034 retval = -1; 3035 3036 done: 3037 if (respond_with_stop) { 3038 circpad_padding_negotiated(circ, negotiate->machine_type, 3039 negotiate->command, 3040 (retval == 0) ? CIRCPAD_RESPONSE_OK : CIRCPAD_RESPONSE_ERR, 3041 negotiate->machine_ctr); 3042 } 3043 3044 circpad_negotiate_free(negotiate); 3045 3046 return retval; 3047 } 3048 3049 /** 3050 * Parse and react to a padding_negotiated cell. 3051 * 3052 * This is called at the origin upon receipt of the middle's response 3053 * to our choice of state machine. 3054 * 3055 * Returns -1 on error, 0 on success. 3056 */ 3057 signed_error_t 3058 circpad_handle_padding_negotiated(circuit_t *circ, const relay_msg_t *msg, 3059 crypt_path_t *layer_hint) 3060 { 3061 circpad_negotiated_t *negotiated; 3062 3063 if (!CIRCUIT_IS_ORIGIN(circ)) { 3064 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3065 "Padding negotiated cell unsupported at non-origin."); 3066 return -1; 3067 } 3068 3069 /* Verify this came from the expected hop */ 3070 if (!circpad_padding_is_from_expected_hop(circ, layer_hint)) { 3071 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3072 "Padding negotiated cell from wrong hop on circuit %u", 3073 TO_ORIGIN_CIRCUIT(circ)->global_identifier); 3074 return -1; 3075 } 3076 3077 if (circpad_negotiated_parse(&negotiated, msg->body, msg->length) < 0) { 3078 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3079 "Received malformed PADDING_NEGOTIATED cell on circuit %u; " 3080 "dropping.", TO_ORIGIN_CIRCUIT(circ)->global_identifier); 3081 return -1; 3082 } 3083 3084 if (negotiated->command == CIRCPAD_COMMAND_STOP) { 3085 log_info(LD_CIRC, 3086 "Received STOP command on PADDING_NEGOTIATED for circuit %u", 3087 TO_ORIGIN_CIRCUIT(circ)->global_identifier); 3088 /* There may not be a padding_info here if we shut down the 3089 * machine in circpad_shutdown_old_machines(). 
Or, if 3090 * circpad_add_matching_matchines() added a new machine, 3091 * there may be a padding_machine for a different machine num 3092 * than this response. */ 3093 free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type, 3094 negotiated->machine_ctr); 3095 } else if (negotiated->command == CIRCPAD_COMMAND_START && 3096 negotiated->response == CIRCPAD_RESPONSE_ERR) { 3097 // This can still happen due to consensus drift.. free the machines 3098 // and be sad 3099 if (free_circ_machineinfos_with_machine_num(circ, negotiated->machine_type, 3100 negotiated->machine_ctr)) { 3101 // Only fail if a machine was there and matched the error cell 3102 TO_ORIGIN_CIRCUIT(circ)->padding_negotiation_failed = 1; 3103 log_fn(LOG_PROTOCOL_WARN, LD_CIRC, 3104 "Middle node did not accept our padding request on circuit " 3105 "%u (%d)", 3106 TO_ORIGIN_CIRCUIT(circ)->global_identifier, 3107 circ->purpose); 3108 } 3109 } 3110 3111 circpad_negotiated_free(negotiated); 3112 return 0; 3113 } 3114 3115 /** Free memory allocated by this machine spec. */ 3116 STATIC void 3117 machine_spec_free_(circpad_machine_spec_t *m) 3118 { 3119 if (!m) return; 3120 3121 tor_free(m->states); 3122 tor_free(m); 3123 } 3124 3125 /** Free all memory allocated by the circuitpadding subsystem. */ 3126 void 3127 circpad_free_all(void) 3128 { 3129 if (origin_padding_machines) { 3130 SMARTLIST_FOREACH_BEGIN(origin_padding_machines, 3131 circpad_machine_spec_t *, m) { 3132 machine_spec_free(m); 3133 } SMARTLIST_FOREACH_END(m); 3134 smartlist_free(origin_padding_machines); 3135 } 3136 if (relay_padding_machines) { 3137 SMARTLIST_FOREACH_BEGIN(relay_padding_machines, 3138 circpad_machine_spec_t *, m) { 3139 machine_spec_free(m); 3140 } SMARTLIST_FOREACH_END(m); 3141 smartlist_free(relay_padding_machines); 3142 } 3143 } 3144 3145 /* Serialization */ 3146 // TODO: Should we use keyword=value here? Are there helpers for that? 
#if 0
/** Append a serialized representation of <b>state</b> to <b>chunks</b>. */
static void
circpad_state_serialize(const circpad_state_t *state,
                        smartlist_t *chunks)
{
  int i;

  /* Histogram token counts, comma-separated. */
  smartlist_add_asprintf(chunks, " %u", state->histogram[0]);
  for (i = 1; i < state->histogram_len; i++) {
    smartlist_add_asprintf(chunks, ",%u", state->histogram[i]);
  }

  /* Transition event bitmasks. */
  smartlist_add_asprintf(chunks, " 0x%x", state->transition_cancel_events);
  for (i = 0; i < CIRCPAD_NUM_STATES; i++) {
    smartlist_add_asprintf(chunks, ",0x%x", state->transition_events[i]);
  }

  smartlist_add_asprintf(chunks, " %u %u",
                         state->use_rtt_estimate,
                         state->token_removal);
}

/** Serialize <b>machine</b> into a newly allocated string.
 * The caller owns (and must free) the returned string. */
char *
circpad_machine_spec_to_string(const circpad_machine_spec_t *machine)
{
  smartlist_t *chunks = smartlist_new();
  char *out;

  circpad_state_serialize(&machine->start, chunks);
  circpad_state_serialize(&machine->gap, chunks);
  circpad_state_serialize(&machine->burst, chunks);

  out = smartlist_join_strings(chunks, "", 0, NULL);

  SMARTLIST_FOREACH(chunks, char *, cp, tor_free(cp));
  smartlist_free(chunks);
  return out;
}

// XXX: Writeme
const circpad_machine_spec_t *
circpad_string_to_machine(const char *str)
{
  (void)str;
  return NULL;
}

#endif /* 0 */