/* dirserv.c */
1 /* Copyright (c) 2001-2004, Roger Dingledine. 2 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson. 3 * Copyright (c) 2007-2021, The Tor Project, Inc. */ 4 /* See LICENSE for licensing information */ 5 6 #include "core/or/or.h" 7 8 #include "app/config/config.h" 9 #include "core/mainloop/connection.h" 10 #include "feature/dircache/conscache.h" 11 #include "feature/dircache/consdiffmgr.h" 12 #include "feature/dircommon/directory.h" 13 #include "feature/dircache/dirserv.h" 14 #include "feature/nodelist/microdesc.h" 15 #include "feature/nodelist/routerlist.h" 16 #include "feature/relay/router.h" 17 #include "feature/relay/routermode.h" 18 #include "feature/stats/geoip_stats.h" 19 #include "feature/stats/predict_ports.h" 20 21 #include "feature/dircache/cached_dir_st.h" 22 #include "feature/dircommon/dir_connection_st.h" 23 #include "feature/nodelist/extrainfo_st.h" 24 #include "feature/nodelist/microdesc_st.h" 25 #include "feature/nodelist/routerinfo_st.h" 26 #include "feature/nodelist/routerlist_st.h" 27 28 #include "lib/compress/compress.h" 29 30 /** 31 * \file dirserv.c 32 * \brief Directory server core implementation. Manages directory 33 * contents and generates directory documents. 34 * 35 * This module implements most of directory cache functionality, and some of 36 * the directory authority functionality. The directory.c module delegates 37 * here in order to handle incoming requests from clients, via 38 * connection_dirserv_flushed_some() and its kin. In order to save RAM, this 39 * module is responsible for spooling directory objects (in whole or in part) 40 * onto buf_t instances, and then closing the dir_connection_t once the 41 * objects are totally flushed. 42 * 43 * The directory.c module also delegates here for handling descriptor uploads 44 * via dirserv_add_multiple_descriptors(). 
45 * 46 * Additionally, this module handles some aspects of voting, including: 47 * deciding how to vote on individual flags (based on decisions reached in 48 * rephist.c), of formatting routerstatus lines, and deciding what relays to 49 * include in an authority's vote. (TODO: Those functions could profitably be 50 * split off. They only live in this file because historically they were 51 * shared among the v1, v2, and v3 directory code.) 52 */ 53 54 static void clear_cached_dir(cached_dir_t *d); 55 static const signed_descriptor_t *get_signed_descriptor_by_fp( 56 const uint8_t *fp, 57 int extrainfo); 58 59 static int spooled_resource_lookup_body(const spooled_resource_t *spooled, 60 int conn_is_encrypted, 61 const uint8_t **body_out, 62 size_t *size_out, 63 time_t *published_out); 64 static cached_dir_t *spooled_resource_lookup_cached_dir( 65 const spooled_resource_t *spooled, 66 time_t *published_out); 67 static cached_dir_t *lookup_cached_dir_by_fp(const uint8_t *fp); 68 69 /********************************************************************/ 70 71 /* A set of functions to answer questions about how we'd like to behave 72 * as a directory mirror */ 73 74 /** Return true iff we want to serve certificates for authorities 75 * that we don't acknowledge as authorities ourself. 76 * Use we_want_to_fetch_unknown_auth_certs to check if we want to fetch 77 * and keep these certificates. 78 */ 79 int 80 directory_caches_unknown_auth_certs(const or_options_t *options) 81 { 82 return dir_server_mode(options) || options->BridgeRelay; 83 } 84 85 /** Return 1 if we want to fetch and serve descriptors, networkstatuses, etc 86 * Else return 0. 87 * Check options->DirPort_set and directory_permits_begindir_requests() 88 * to see if we are willing to serve these directory documents to others via 89 * the DirPort and begindir-over-ORPort, respectively. 
90 * 91 * To check if we should fetch documents, use we_want_to_fetch_flavor and 92 * we_want_to_fetch_unknown_auth_certs instead of this function. 93 */ 94 int 95 directory_caches_dir_info(const or_options_t *options) 96 { 97 if (options->BridgeRelay || dir_server_mode(options)) 98 return 1; 99 if (!server_mode(options) || !advertised_server_mode()) 100 return 0; 101 /* We need an up-to-date view of network info if we're going to try to 102 * block exit attempts from unknown relays. */ 103 return ! router_my_exit_policy_is_reject_star() && 104 should_refuse_unknown_exits(options); 105 } 106 107 /** Return 1 if we want to allow remote clients to ask us directory 108 * requests via the "begin_dir" interface, which doesn't require 109 * having any separate port open. */ 110 int 111 directory_permits_begindir_requests(const or_options_t *options) 112 { 113 return options->BridgeRelay != 0 || dir_server_mode(options); 114 } 115 116 /********************************************************************/ 117 118 /** Map from flavor name to the cached_dir_t for the v3 consensuses that we're 119 * currently serving. */ 120 static strmap_t *cached_consensuses = NULL; 121 122 /** Decrement the reference count on <b>d</b>, and free it if it no longer has 123 * any references. */ 124 void 125 cached_dir_decref(cached_dir_t *d) 126 { 127 if (!d || --d->refcnt > 0) 128 return; 129 clear_cached_dir(d); 130 tor_free(d); 131 } 132 133 /** Allocate and return a new cached_dir_t containing the string <b>s</b>, 134 * published at <b>published</b>. 
*/ 135 cached_dir_t * 136 new_cached_dir(char *s, time_t published) 137 { 138 cached_dir_t *d = tor_malloc_zero(sizeof(cached_dir_t)); 139 d->refcnt = 1; 140 d->dir = s; 141 d->dir_len = strlen(s); 142 d->published = published; 143 if (tor_compress(&(d->dir_compressed), &(d->dir_compressed_len), 144 d->dir, d->dir_len, ZLIB_METHOD)) { 145 log_warn(LD_BUG, "Error compressing directory"); 146 } 147 return d; 148 } 149 150 /** Remove all storage held in <b>d</b>, but do not free <b>d</b> itself. */ 151 static void 152 clear_cached_dir(cached_dir_t *d) 153 { 154 tor_free(d->dir); 155 tor_free(d->dir_compressed); 156 memset(d, 0, sizeof(cached_dir_t)); 157 } 158 159 /** Free all storage held by the cached_dir_t in <b>d</b>. */ 160 static void 161 free_cached_dir_(void *_d) 162 { 163 cached_dir_t *d; 164 if (!_d) 165 return; 166 167 d = (cached_dir_t *)_d; 168 cached_dir_decref(d); 169 } 170 171 /** Replace the v3 consensus networkstatus of type <b>flavor_name</b> that 172 * we're serving with <b>networkstatus</b>, published at <b>published</b>. No 173 * validation is performed. 
*/ 174 void 175 dirserv_set_cached_consensus_networkstatus(const char *networkstatus, 176 size_t networkstatus_len, 177 const char *flavor_name, 178 const common_digests_t *digests, 179 const uint8_t *sha3_as_signed, 180 time_t published) 181 { 182 cached_dir_t *new_networkstatus; 183 cached_dir_t *old_networkstatus; 184 if (!cached_consensuses) 185 cached_consensuses = strmap_new(); 186 187 new_networkstatus = 188 new_cached_dir(tor_memdup_nulterm(networkstatus, networkstatus_len), 189 published); 190 memcpy(&new_networkstatus->digests, digests, sizeof(common_digests_t)); 191 memcpy(&new_networkstatus->digest_sha3_as_signed, sha3_as_signed, 192 DIGEST256_LEN); 193 old_networkstatus = strmap_set(cached_consensuses, flavor_name, 194 new_networkstatus); 195 if (old_networkstatus) 196 cached_dir_decref(old_networkstatus); 197 } 198 199 /** Return the latest downloaded consensus networkstatus in encoded, signed, 200 * optionally compressed format, suitable for sending to clients. */ 201 MOCK_IMPL(cached_dir_t *, 202 dirserv_get_consensus,(const char *flavor_name)) 203 { 204 if (!cached_consensuses) 205 return NULL; 206 return strmap_get(cached_consensuses, flavor_name); 207 } 208 209 /** As dir_split_resource_into_fingerprints, but instead fills 210 * <b>spool_out</b> with a list of spoolable_resource_t for the resource 211 * identified through <b>source</b>. */ 212 int 213 dir_split_resource_into_spoolable(const char *resource, 214 dir_spool_source_t source, 215 smartlist_t *spool_out, 216 int *compressed_out, 217 int flags) 218 { 219 smartlist_t *fingerprints = smartlist_new(); 220 221 tor_assert(flags & (DSR_HEX|DSR_BASE64)); 222 const size_t digest_len = 223 (flags & DSR_DIGEST256) ? 
DIGEST256_LEN : DIGEST_LEN; 224 225 int r = dir_split_resource_into_fingerprints(resource, fingerprints, 226 compressed_out, flags); 227 /* This is not a very efficient implementation XXXX */ 228 SMARTLIST_FOREACH_BEGIN(fingerprints, uint8_t *, digest) { 229 spooled_resource_t *spooled = 230 spooled_resource_new(source, digest, digest_len); 231 if (spooled) 232 smartlist_add(spool_out, spooled); 233 tor_free(digest); 234 } SMARTLIST_FOREACH_END(digest); 235 236 smartlist_free(fingerprints); 237 return r; 238 } 239 240 /** As dirserv_get_routerdescs(), but instead of getting signed_descriptor_t 241 * pointers, adds copies of digests to fps_out, and doesn't use the 242 * /tor/server/ prefix. For a /d/ request, adds descriptor digests; for other 243 * requests, adds identity digests. 244 */ 245 int 246 dirserv_get_routerdesc_spool(smartlist_t *spool_out, 247 const char *key, 248 dir_spool_source_t source, 249 int conn_is_encrypted, 250 const char **msg_out) 251 { 252 *msg_out = NULL; 253 254 if (!strcmp(key, "all")) { 255 const routerlist_t *rl = router_get_routerlist(); 256 SMARTLIST_FOREACH_BEGIN(rl->routers, const routerinfo_t *, r) { 257 spooled_resource_t *spooled; 258 spooled = spooled_resource_new(source, 259 (const uint8_t *)r->cache_info.identity_digest, 260 DIGEST_LEN); 261 /* Treat "all" requests as if they were unencrypted */ 262 conn_is_encrypted = 0; 263 smartlist_add(spool_out, spooled); 264 } SMARTLIST_FOREACH_END(r); 265 } else if (!strcmp(key, "authority")) { 266 const routerinfo_t *ri = router_get_my_routerinfo(); 267 if (ri) 268 smartlist_add(spool_out, 269 spooled_resource_new(source, 270 (const uint8_t *)ri->cache_info.identity_digest, 271 DIGEST_LEN)); 272 } else if (!strcmpstart(key, "d/")) { 273 key += strlen("d/"); 274 dir_split_resource_into_spoolable(key, source, spool_out, NULL, 275 DSR_HEX|DSR_SORT_UNIQ); 276 } else if (!strcmpstart(key, "fp/")) { 277 key += strlen("fp/"); 278 dir_split_resource_into_spoolable(key, source, spool_out, 
NULL, 279 DSR_HEX|DSR_SORT_UNIQ); 280 } else { 281 *msg_out = "Not found"; 282 return -1; 283 } 284 285 if (! conn_is_encrypted) { 286 /* Remove anything that insists it not be sent unencrypted. */ 287 SMARTLIST_FOREACH_BEGIN(spool_out, spooled_resource_t *, spooled) { 288 const uint8_t *body = NULL; 289 size_t bodylen = 0; 290 int r = spooled_resource_lookup_body(spooled, conn_is_encrypted, 291 &body, &bodylen, NULL); 292 if (r < 0 || body == NULL || bodylen == 0) { 293 SMARTLIST_DEL_CURRENT(spool_out, spooled); 294 spooled_resource_free(spooled); 295 } 296 } SMARTLIST_FOREACH_END(spooled); 297 } 298 299 if (!smartlist_len(spool_out)) { 300 *msg_out = "Servers unavailable"; 301 return -1; 302 } 303 return 0; 304 } 305 306 /* ========== 307 * Spooling code. 308 * ========== */ 309 310 spooled_resource_t * 311 spooled_resource_new(dir_spool_source_t source, 312 const uint8_t *digest, size_t digestlen) 313 { 314 spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t)); 315 spooled->spool_source = source; 316 switch (source) { 317 case DIR_SPOOL_NETWORKSTATUS: 318 spooled->spool_eagerly = 0; 319 break; 320 case DIR_SPOOL_SERVER_BY_DIGEST: 321 case DIR_SPOOL_SERVER_BY_FP: 322 case DIR_SPOOL_EXTRA_BY_DIGEST: 323 case DIR_SPOOL_EXTRA_BY_FP: 324 case DIR_SPOOL_MICRODESC: 325 default: 326 spooled->spool_eagerly = 1; 327 break; 328 case DIR_SPOOL_CONSENSUS_CACHE_ENTRY: 329 tor_assert_unreached(); 330 break; 331 } 332 tor_assert(digestlen <= sizeof(spooled->digest)); 333 if (digest) 334 memcpy(spooled->digest, digest, digestlen); 335 return spooled; 336 } 337 338 /** 339 * Create a new spooled_resource_t to spool the contents of <b>entry</b> to 340 * the user. Return the spooled object on success, or NULL on failure (which 341 * is probably caused by a failure to map the body of the item from disk). 342 * 343 * Adds a reference to entry's reference counter. 
344 */ 345 spooled_resource_t * 346 spooled_resource_new_from_cache_entry(consensus_cache_entry_t *entry) 347 { 348 spooled_resource_t *spooled = tor_malloc_zero(sizeof(spooled_resource_t)); 349 spooled->spool_source = DIR_SPOOL_CONSENSUS_CACHE_ENTRY; 350 spooled->spool_eagerly = 0; 351 consensus_cache_entry_incref(entry); 352 spooled->consensus_cache_entry = entry; 353 354 int r = consensus_cache_entry_get_body(entry, 355 &spooled->cce_body, 356 &spooled->cce_len); 357 if (r == 0) { 358 return spooled; 359 } else { 360 spooled_resource_free(spooled); 361 return NULL; 362 } 363 } 364 365 /** Release all storage held by <b>spooled</b>. */ 366 void 367 spooled_resource_free_(spooled_resource_t *spooled) 368 { 369 if (spooled == NULL) 370 return; 371 372 if (spooled->cached_dir_ref) { 373 cached_dir_decref(spooled->cached_dir_ref); 374 } 375 376 if (spooled->consensus_cache_entry) { 377 consensus_cache_entry_decref(spooled->consensus_cache_entry); 378 } 379 380 tor_free(spooled); 381 } 382 383 /** When spooling data from a cached_dir_t object, we always add 384 * at least this much. */ 385 #define DIRSERV_CACHED_DIR_CHUNK_SIZE 8192 386 387 /** Return an compression ratio for compressing objects from <b>source</b>. 388 */ 389 static double 390 estimate_compression_ratio(dir_spool_source_t source) 391 { 392 /* We should put in better estimates here, depending on the number of 393 objects and their type */ 394 (void) source; 395 return 0.5; 396 } 397 398 /** Return an estimated number of bytes needed for transmitting the 399 * resource in <b>spooled</b> on <b>conn</b> 400 * 401 * As a convenient side-effect, set *<b>published_out</b> to the resource's 402 * publication time. 
403 */ 404 static size_t 405 spooled_resource_estimate_size(const spooled_resource_t *spooled, 406 dir_connection_t *conn, 407 int compressed, 408 time_t *published_out) 409 { 410 if (spooled->spool_eagerly) { 411 const uint8_t *body = NULL; 412 size_t bodylen = 0; 413 int r = spooled_resource_lookup_body(spooled, 414 connection_dir_is_encrypted(conn), 415 &body, &bodylen, 416 published_out); 417 if (r == -1 || body == NULL || bodylen == 0) 418 return 0; 419 if (compressed) { 420 double ratio = estimate_compression_ratio(spooled->spool_source); 421 bodylen = (size_t)(bodylen * ratio); 422 } 423 return bodylen; 424 } else { 425 cached_dir_t *cached; 426 if (spooled->consensus_cache_entry) { 427 if (published_out) { 428 consensus_cache_entry_get_valid_after( 429 spooled->consensus_cache_entry, published_out); 430 } 431 432 return spooled->cce_len; 433 } 434 if (spooled->cached_dir_ref) { 435 cached = spooled->cached_dir_ref; 436 } else { 437 cached = spooled_resource_lookup_cached_dir(spooled, 438 published_out); 439 } 440 if (cached == NULL) { 441 return 0; 442 } 443 size_t result = compressed ? cached->dir_compressed_len : cached->dir_len; 444 return result; 445 } 446 } 447 448 /** Return code for spooled_resource_flush_some */ 449 typedef enum { 450 SRFS_ERR = -1, 451 SRFS_MORE = 0, 452 SRFS_DONE 453 } spooled_resource_flush_status_t; 454 455 /** Flush some or all of the bytes from <b>spooled</b> onto <b>conn</b>. 456 * Return SRFS_ERR on error, SRFS_MORE if there are more bytes to flush from 457 * this spooled resource, or SRFS_DONE if we are done flushing this spooled 458 * resource. 459 */ 460 static spooled_resource_flush_status_t 461 spooled_resource_flush_some(spooled_resource_t *spooled, 462 dir_connection_t *conn) 463 { 464 if (spooled->spool_eagerly) { 465 /* Spool_eagerly resources are sent all-at-once. 
*/ 466 const uint8_t *body = NULL; 467 size_t bodylen = 0; 468 int r = spooled_resource_lookup_body(spooled, 469 connection_dir_is_encrypted(conn), 470 &body, &bodylen, NULL); 471 if (r == -1 || body == NULL || bodylen == 0) { 472 /* Absent objects count as "done". */ 473 return SRFS_DONE; 474 } 475 476 connection_dir_buf_add((const char*)body, bodylen, conn, 0); 477 478 return SRFS_DONE; 479 } else { 480 cached_dir_t *cached = spooled->cached_dir_ref; 481 consensus_cache_entry_t *cce = spooled->consensus_cache_entry; 482 if (cached == NULL && cce == NULL) { 483 /* The cached_dir_t hasn't been materialized yet. So let's look it up. */ 484 cached = spooled->cached_dir_ref = 485 spooled_resource_lookup_cached_dir(spooled, NULL); 486 if (!cached) { 487 /* Absent objects count as done. */ 488 return SRFS_DONE; 489 } 490 ++cached->refcnt; 491 tor_assert_nonfatal(spooled->cached_dir_offset == 0); 492 } 493 494 if (BUG(!cached && !cce)) 495 return SRFS_DONE; 496 497 int64_t total_len; 498 const char *ptr; 499 if (cached) { 500 total_len = cached->dir_compressed_len; 501 ptr = cached->dir_compressed; 502 } else { 503 total_len = spooled->cce_len; 504 ptr = (const char *)spooled->cce_body; 505 } 506 /* How many bytes left to flush? */ 507 int64_t remaining; 508 remaining = total_len - spooled->cached_dir_offset; 509 if (BUG(remaining < 0)) 510 return SRFS_ERR; 511 ssize_t bytes = (ssize_t) MIN(DIRSERV_CACHED_DIR_CHUNK_SIZE, remaining); 512 513 connection_dir_buf_add(ptr + spooled->cached_dir_offset, 514 bytes, conn, 0); 515 516 spooled->cached_dir_offset += bytes; 517 if (spooled->cached_dir_offset >= (off_t)total_len) { 518 return SRFS_DONE; 519 } else { 520 return SRFS_MORE; 521 } 522 } 523 } 524 525 /** Helper: find the cached_dir_t for a spooled_resource_t, for 526 * sending it to <b>conn</b>. Set *<b>published_out</b>, if provided, 527 * to the published time of the cached_dir_t. 528 * 529 * DOES NOT increase the reference count on the result. 
Callers must do that 530 * themselves if they mean to hang on to it. 531 */ 532 static cached_dir_t * 533 spooled_resource_lookup_cached_dir(const spooled_resource_t *spooled, 534 time_t *published_out) 535 { 536 tor_assert(spooled->spool_eagerly == 0); 537 cached_dir_t *d = lookup_cached_dir_by_fp(spooled->digest); 538 if (d != NULL) { 539 if (published_out) 540 *published_out = d->published; 541 } 542 return d; 543 } 544 545 /** Helper: Look up the body for an eagerly-served spooled_resource. If 546 * <b>conn_is_encrypted</b> is false, don't look up any resource that 547 * shouldn't be sent over an unencrypted connection. On success, set 548 * <b>body_out</b>, <b>size_out</b>, and <b>published_out</b> to refer 549 * to the resource's body, size, and publication date, and return 0. 550 * On failure return -1. */ 551 static int 552 spooled_resource_lookup_body(const spooled_resource_t *spooled, 553 int conn_is_encrypted, 554 const uint8_t **body_out, 555 size_t *size_out, 556 time_t *published_out) 557 { 558 tor_assert(spooled->spool_eagerly == 1); 559 560 const signed_descriptor_t *sd = NULL; 561 562 switch (spooled->spool_source) { 563 case DIR_SPOOL_EXTRA_BY_FP: { 564 sd = get_signed_descriptor_by_fp(spooled->digest, 1); 565 break; 566 } 567 case DIR_SPOOL_SERVER_BY_FP: { 568 sd = get_signed_descriptor_by_fp(spooled->digest, 0); 569 break; 570 } 571 case DIR_SPOOL_SERVER_BY_DIGEST: { 572 sd = router_get_by_descriptor_digest((const char *)spooled->digest); 573 break; 574 } 575 case DIR_SPOOL_EXTRA_BY_DIGEST: { 576 sd = extrainfo_get_by_descriptor_digest((const char *)spooled->digest); 577 break; 578 } 579 case DIR_SPOOL_MICRODESC: { 580 microdesc_t *md = microdesc_cache_lookup_by_digest256( 581 get_microdesc_cache(), 582 (const char *)spooled->digest); 583 if (! md || ! 
md->body) { 584 return -1; 585 } 586 *body_out = (const uint8_t *)md->body; 587 *size_out = md->bodylen; 588 if (published_out) 589 *published_out = TIME_MAX; 590 return 0; 591 } 592 case DIR_SPOOL_NETWORKSTATUS: 593 case DIR_SPOOL_CONSENSUS_CACHE_ENTRY: 594 default: 595 /* LCOV_EXCL_START */ 596 tor_assert_nonfatal_unreached(); 597 return -1; 598 /* LCOV_EXCL_STOP */ 599 } 600 601 /* If we get here, then we tried to set "sd" to a signed_descriptor_t. */ 602 603 if (sd == NULL) { 604 return -1; 605 } 606 if (sd->send_unencrypted == 0 && ! conn_is_encrypted) { 607 /* we did this check once before (so we could have an accurate size 608 * estimate and maybe send a 404 if somebody asked for only bridges on 609 * a connection), but we need to do it again in case a previously 610 * unknown bridge descriptor has shown up between then and now. */ 611 return -1; 612 } 613 *body_out = (const uint8_t *) signed_descriptor_get_body(sd); 614 *size_out = sd->signed_descriptor_len; 615 if (published_out) 616 *published_out = sd->published_on; 617 return 0; 618 } 619 620 /** Given a fingerprint <b>fp</b> which is either set if we're looking for a 621 * v2 status, or zeroes if we're looking for a v3 status, or a NUL-padded 622 * flavor name if we want a flavored v3 status, return a pointer to the 623 * appropriate cached dir object, or NULL if there isn't one available. */ 624 static cached_dir_t * 625 lookup_cached_dir_by_fp(const uint8_t *fp) 626 { 627 cached_dir_t *d = NULL; 628 if (tor_digest_is_zero((const char *)fp) && cached_consensuses) { 629 d = strmap_get(cached_consensuses, "ns"); 630 } else if (memchr(fp, '\0', DIGEST_LEN) && cached_consensuses) { 631 /* this here interface is a nasty hack: we're shoving a flavor into 632 * a digest field. */ 633 d = strmap_get(cached_consensuses, (const char *)fp); 634 } 635 return d; 636 } 637 638 /** Try to guess the number of bytes that will be needed to send the 639 * spooled objects for <b>conn</b>'s outgoing spool. 
In the process, 640 * remove every element of the spool that refers to an absent object, or 641 * which was published earlier than <b>cutoff</b>. Set *<b>size_out</b> 642 * to the number of bytes, and *<b>n_expired_out</b> to the number of 643 * objects removed for being too old. */ 644 void 645 dirserv_spool_remove_missing_and_guess_size(dir_connection_t *conn, 646 time_t cutoff, 647 int compression, 648 size_t *size_out, 649 int *n_expired_out) 650 { 651 if (BUG(!conn)) 652 return; 653 654 smartlist_t *spool = conn->spool; 655 if (!spool) { 656 if (size_out) 657 *size_out = 0; 658 if (n_expired_out) 659 *n_expired_out = 0; 660 return; 661 } 662 int n_expired = 0; 663 uint64_t total = 0; 664 SMARTLIST_FOREACH_BEGIN(spool, spooled_resource_t *, spooled) { 665 time_t published = TIME_MAX; 666 size_t sz = spooled_resource_estimate_size(spooled, conn, 667 compression, &published); 668 if (published < cutoff) { 669 ++n_expired; 670 SMARTLIST_DEL_CURRENT(spool, spooled); 671 spooled_resource_free(spooled); 672 } else if (sz == 0) { 673 SMARTLIST_DEL_CURRENT(spool, spooled); 674 spooled_resource_free(spooled); 675 } else { 676 total += sz; 677 } 678 } SMARTLIST_FOREACH_END(spooled); 679 680 if (size_out) { 681 *size_out = (total > SIZE_MAX) ? SIZE_MAX : (size_t)total; 682 } 683 if (n_expired_out) 684 *n_expired_out = n_expired; 685 } 686 687 /** Helper: used to sort a connection's spool. */ 688 static int 689 dirserv_spool_sort_comparison_(const void **a_, const void **b_) 690 { 691 const spooled_resource_t *a = *a_; 692 const spooled_resource_t *b = *b_; 693 return fast_memcmp(a->digest, b->digest, sizeof(a->digest)); 694 } 695 696 /** Sort all the entries in <b>conn</b> by digest. 
*/ 697 void 698 dirserv_spool_sort(dir_connection_t *conn) 699 { 700 if (conn->spool == NULL) 701 return; 702 smartlist_sort(conn->spool, dirserv_spool_sort_comparison_); 703 } 704 705 /** Return the cache-info for identity fingerprint <b>fp</b>, or 706 * its extra-info document if <b>extrainfo</b> is true. Return 707 * NULL if not found or if the descriptor is older than 708 * <b>publish_cutoff</b>. */ 709 static const signed_descriptor_t * 710 get_signed_descriptor_by_fp(const uint8_t *fp, int extrainfo) 711 { 712 if (router_digest_is_me((const char *)fp)) { 713 if (extrainfo) 714 return &(router_get_my_extrainfo()->cache_info); 715 else 716 return &(router_get_my_routerinfo()->cache_info); 717 } else { 718 const routerinfo_t *ri = router_get_by_id_digest((const char *)fp); 719 if (ri) { 720 if (extrainfo) 721 return extrainfo_get_by_descriptor_digest( 722 ri->cache_info.extra_info_digest); 723 else 724 return &ri->cache_info; 725 } 726 } 727 return NULL; 728 } 729 730 /** When we're spooling data onto our outbuf, add more whenever we dip 731 * below this threshold. */ 732 #define DIRSERV_BUFFER_MIN 16384 733 734 /** 735 * Called whenever we have flushed some directory data in state 736 * SERVER_WRITING, or whenever we want to fill the buffer with initial 737 * directory data (so that subsequent writes will occur, and trigger this 738 * function again.) 739 * 740 * Return 0 on success, and -1 on failure. 
741 */ 742 int 743 connection_dirserv_flushed_some(dir_connection_t *conn) 744 { 745 tor_assert(conn->base_.state == DIR_CONN_STATE_SERVER_WRITING); 746 if (conn->spool == NULL) 747 return 0; 748 749 while (connection_get_outbuf_len(TO_CONN(conn)) < DIRSERV_BUFFER_MIN && 750 smartlist_len(conn->spool)) { 751 spooled_resource_t *spooled = 752 smartlist_get(conn->spool, smartlist_len(conn->spool)-1); 753 spooled_resource_flush_status_t status; 754 status = spooled_resource_flush_some(spooled, conn); 755 if (status == SRFS_ERR) { 756 return -1; 757 } else if (status == SRFS_MORE) { 758 return 0; 759 } 760 tor_assert(status == SRFS_DONE); 761 762 /* If we're here, we're done flushing this resource. */ 763 tor_assert(smartlist_pop_last(conn->spool) == spooled); 764 spooled_resource_free(spooled); 765 } 766 767 if (smartlist_len(conn->spool) > 0) { 768 /* We're still spooling something. */ 769 return 0; 770 } 771 772 /* If we get here, we're done. */ 773 smartlist_free(conn->spool); 774 conn->spool = NULL; 775 if (conn->compress_state) { 776 /* Flush the compression state: there could be more bytes pending in there, 777 * and we don't want to omit bytes. */ 778 connection_buf_add_compress("", 0, conn, 1); 779 tor_compress_free(conn->compress_state); 780 conn->compress_state = NULL; 781 } 782 783 /* only count networkstatus serves as successful when the spool runs dry */ 784 if (conn->should_count_geoip_when_finished) { 785 tor_addr_t addr; 786 /* but as a special case, check if conn is on a circuit that used a 787 * version-0 sendme (bugs 41191 and 41192), because we don't want to 788 * count clients that should exit after they receive our consensus. 
*/ 789 if (!connection_dir_used_obsolete_sendme(conn) && 790 tor_addr_parse(&addr, (TO_CONN(conn))->address) >= 0) { 791 geoip_note_client_seen(GEOIP_CLIENT_NETWORKSTATUS, 792 &addr, NULL, 793 time(NULL)); 794 } 795 geoip_note_ns_response(GEOIP_SUCCESS); 796 conn->should_count_geoip_when_finished = 0; 797 } 798 return 0; 799 } 800 801 /** Remove every element from <b>conn</b>'s outgoing spool, and delete 802 * the spool. */ 803 void 804 dir_conn_clear_spool(dir_connection_t *conn) 805 { 806 if (!conn || ! conn->spool) 807 return; 808 SMARTLIST_FOREACH(conn->spool, spooled_resource_t *, s, 809 spooled_resource_free(s)); 810 smartlist_free(conn->spool); 811 conn->spool = NULL; 812 } 813 814 /** Release all storage used by the directory server. */ 815 void 816 dirserv_free_all(void) 817 { 818 strmap_free(cached_consensuses, free_cached_dir_); 819 cached_consensuses = NULL; 820 }