tor

The Tor anonymity network
git clone https://git.dasho.dev/tor.git
Log | Files | Refs | README | LICENSE

test_hs_common.c (73129B)


      1 /* Copyright (c) 2017-2021, The Tor Project, Inc. */
      2 /* See LICENSE for licensing information */
      3 
      4 /**
      5 * \file test_hs_common.c
      6 * \brief Test hidden service common functionalities.
      7 */
      8 
      9 #define CONNECTION_EDGE_PRIVATE
     10 #define HS_COMMON_PRIVATE
     11 #define HS_CLIENT_PRIVATE
     12 #define HS_SERVICE_PRIVATE
     13 #define NODELIST_PRIVATE
     14 
     15 #include "test/test.h"
     16 #include "test/test_helpers.h"
     17 #include "test/log_test_helpers.h"
     18 #include "test/hs_test_helpers.h"
     19 
     20 #include "core/or/connection_edge.h"
     21 #include "lib/crypt_ops/crypto_format.h"
     22 #include "lib/crypt_ops/crypto_rand.h"
     23 #include "feature/hs/hs_common.h"
     24 #include "feature/hs/hs_client.h"
     25 #include "feature/hs/hs_service.h"
     26 #include "app/config/config.h"
     27 #include "feature/nodelist/networkstatus.h"
     28 #include "feature/dirclient/dirclient.h"
     29 #include "feature/dirauth/dirvote.h"
     30 #include "feature/nodelist/nodelist.h"
     31 #include "feature/nodelist/routerlist.h"
     32 #include "app/config/statefile.h"
     33 #include "core/or/circuitlist.h"
     34 #include "feature/dirauth/shared_random.h"
     35 #include "feature/dirauth/voting_schedule.h"
     36 
     37 #include "feature/nodelist/microdesc_st.h"
     38 #include "feature/nodelist/networkstatus_st.h"
     39 #include "feature/nodelist/node_st.h"
     40 #include "app/config/or_state_st.h"
     41 #include "feature/nodelist/routerinfo_st.h"
     42 #include "feature/nodelist/routerstatus_st.h"
     43 
     44 /** Test the validation of HS v3 addresses */
     45 static void
     46 test_validate_address(void *arg)
     47 {
     48  int ret;
     49 
     50  (void) arg;
     51 
     52  /* Address too short and too long. */
     53  setup_full_capture_of_logs(LOG_WARN);
     54  ret = hs_address_is_valid("blah");
     55  tt_int_op(ret, OP_EQ, 0);
     56  expect_log_msg_containing("Invalid length");
     57  teardown_capture_of_logs();
     58 
     59  setup_full_capture_of_logs(LOG_WARN);
     60  ret = hs_address_is_valid(
     61           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
     62  tt_int_op(ret, OP_EQ, 0);
     63  expect_log_msg_containing("Invalid length");
     64  teardown_capture_of_logs();
     65 
     66  /* Invalid checksum (taken from prop224) */
     67  setup_full_capture_of_logs(LOG_WARN);
     68  ret = hs_address_is_valid(
     69           "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
     70  tt_int_op(ret, OP_EQ, 0);
     71  expect_log_msg_containing("invalid checksum");
     72  teardown_capture_of_logs();
     73 
     74  setup_full_capture_of_logs(LOG_WARN);
     75  ret = hs_address_is_valid(
     76           "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
     77  tt_int_op(ret, OP_EQ, 0);
     78  expect_log_msg_containing("invalid checksum");
     79  teardown_capture_of_logs();
     80 
     81  /* Non base32 decodable string. */
     82  setup_full_capture_of_logs(LOG_WARN);
     83  ret = hs_address_is_valid(
     84           "????????????????????????????????????????????????????????");
     85  tt_int_op(ret, OP_EQ, 0);
     86  expect_log_msg_containing("Unable to base32 decode");
     87  teardown_capture_of_logs();
     88 
     89  /* Valid address. */
     90  ret = hs_address_is_valid(
     91           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
     92  tt_int_op(ret, OP_EQ, 1);
     93 
     94 done:
     95  ;
     96 }
     97 
/* Mocked write_str_to_file(): instead of touching the filesystem, verify
 * that write_address_to_file() hands us the expected path (directory plus
 * "squared" filename) and the expected ".onion\n" payload. Always reports
 * success. */
static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void)bin;
  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");

 done:
  return 0;
}
    109 
    110 /** Test building HS v3 onion addresses. Uses test vectors from the
    111 *  ./hs_build_address.py script. */
    112 static void
    113 test_build_address(void *arg)
    114 {
    115  int ret;
    116  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
    117  ed25519_public_key_t pubkey;
    118  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
    119  char pubkey_hex[] =
    120    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
    121  hs_service_t *service = NULL;
    122 
    123  (void) arg;
    124 
    125  MOCK(write_str_to_file, mock_write_str_to_file);
    126 
    127  /* The following has been created with hs_build_address.py script that
    128   * follows proposal 224 specification to build an onion address. */
    129  static const char *test_addr =
    130    "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
    131 
    132  /* Let's try to build the same onion address as the script */
    133  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
    134                pubkey_hex, strlen(pubkey_hex));
    135  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
    136  tt_str_op(test_addr, OP_EQ, onion_addr);
    137  /* Validate that address. */
    138  ret = hs_address_is_valid(onion_addr);
    139  tt_int_op(ret, OP_EQ, 1);
    140 
    141  service = tor_malloc_zero(sizeof(hs_service_t));
    142  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
    143  tor_asprintf(&service->config.directory_path, "/double/five");
    144  ret = write_address_to_file(service, "squared");
    145  tt_int_op(ret, OP_EQ, 0);
    146 
    147 done:
    148  hs_service_free(service);
    149 }
    150 
    151 /** Test that our HS time period calculation functions work properly */
    152 static void
    153 test_time_period(void *arg)
    154 {
    155  (void) arg;
    156  uint64_t tn;
    157  int retval;
    158  time_t fake_time, correct_time, start_time;
    159 
    160  /* Let's do the example in prop224 section [TIME-PERIODS] */
    161  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
    162                              &fake_time);
    163  tt_int_op(retval, OP_EQ, 0);
    164 
    165  /* Check that the time period number is right */
    166  tn = hs_get_time_period_num(fake_time);
    167  tt_u64_op(tn, OP_EQ, 16903);
    168 
    169  /* Increase current time to 11:59:59 UTC and check that the time period
    170     number is still the same */
    171  fake_time += 3599;
    172  tn = hs_get_time_period_num(fake_time);
    173  tt_u64_op(tn, OP_EQ, 16903);
    174 
    175  { /* Check start time of next time period */
    176    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
    177                                &correct_time);
    178    tt_int_op(retval, OP_EQ, 0);
    179 
    180    start_time = hs_get_start_time_of_next_time_period(fake_time);
    181    tt_int_op(start_time, OP_EQ, correct_time);
    182  }
    183 
    184  /* Now take time to 12:00:00 UTC and check that the time period rotated */
    185  fake_time += 1;
    186  tn = hs_get_time_period_num(fake_time);
    187  tt_u64_op(tn, OP_EQ, 16904);
    188 
    189  /* Now also check our hs_get_next_time_period_num() function */
    190  tn = hs_get_next_time_period_num(fake_time);
    191  tt_u64_op(tn, OP_EQ, 16905);
    192 
    193  { /* Check start time of next time period again */
    194    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
    195                                &correct_time);
    196    tt_int_op(retval, OP_EQ, 0);
    197 
    198    start_time = hs_get_start_time_of_next_time_period(fake_time);
    199    tt_int_op(start_time, OP_EQ, correct_time);
    200  }
    201 
    202  /* Now do another sanity check: The time period number at the start of the
    203   * next time period, must be the same time period number as the one returned
    204   * from hs_get_next_time_period_num() */
    205  {
    206    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    207    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
    208              hs_get_next_time_period_num(fake_time));
    209  }
    210 
    211 done:
    212  ;
    213 }
    214 
    215 /** Test that we can correctly find the start time of the next time period */
    216 static void
    217 test_start_time_of_next_time_period(void *arg)
    218 {
    219  (void) arg;
    220  int retval;
    221  time_t fake_time;
    222  char tbuf[ISO_TIME_LEN + 1];
    223  time_t next_tp_start_time;
    224 
    225  /* Do some basic tests */
    226  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
    227                              &fake_time);
    228  tt_int_op(retval, OP_EQ, 0);
    229  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    230  /* Compare it with the correct result */
    231  format_iso_time(tbuf, next_tp_start_time);
    232  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);
    233 
    234  /* Another test with an edge-case time (start of TP) */
    235  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
    236                              &fake_time);
    237  tt_int_op(retval, OP_EQ, 0);
    238  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    239  format_iso_time(tbuf, next_tp_start_time);
    240  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);
    241 
    242  {
    243    /* Now pretend we are on a testing network and alter the voting schedule to
    244       be every 10 seconds. This means that a time period has length 10*24
    245       seconds (4 minutes). It also means that we apply a rotational offset of
    246       120 seconds to the time period, so that it starts at 00:02:00 instead of
    247       00:00:00. */
    248    or_options_t *options = get_options_mutable();
    249    options->TestingTorNetwork = 1;
    250    options->V3AuthVotingInterval = 10;
    251    options->TestingV3AuthInitialVotingInterval = 10;
    252 
    253    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
    254                                &fake_time);
    255    tt_int_op(retval, OP_EQ, 0);
    256    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    257    /* Compare it with the correct result */
    258    format_iso_time(tbuf, next_tp_start_time);
    259    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);
    260 
    261    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
    262                                &fake_time);
    263    tt_int_op(retval, OP_EQ, 0);
    264    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    265    /* Compare it with the correct result */
    266    format_iso_time(tbuf, next_tp_start_time);
    267    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
    268  }
    269 
    270 done:
    271  ;
    272 }
    273 
    274 /* Cleanup the global nodelist. It also frees the "md" in the node_t because
    275 * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
    276 static void
    277 cleanup_nodelist(void)
    278 {
    279  const smartlist_t *nodelist = nodelist_get_list();
    280  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    281    tor_free(node->md);
    282    node->md = NULL;
    283  } SMARTLIST_FOREACH_END(node);
    284  nodelist_free_all();
    285 }
    286 
/* Add a fake relay to both the given consensus <b>ns</b> and the global
 * nodelist. The relay is flagged as an HSDir iff <b>is_hsdir</b> is set.
 * Its RSA identity digest and ed25519 signing key are filled with
 * <b>identity_idx</b>, so each index produces a distinct, deterministic
 * identity. The node's "md" is allocated here; callers must later free it
 * via cleanup_nodelist(). */
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                 int identity_idx,
                                 const char *nickname,
                                 int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  node_t *node = NULL;

  /* Deterministic identity derived from the index. */
  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ri->ipv4_addr, "1.2.3.4");
  tor_addr_parse(&rs->ipv4_addr, "1.2.3.4");
  ri->nickname = tor_strdup(nickname);
  /* Advertise v3 HSDir support in the protocol list. */
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now the nodelist_set_routerinfo() function needs a "rs" to set
   * the indexes which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  /* If the node lookup failed, "rs" was never attached anywhere: free it
   * to avoid a leak. The routerinfo copy is always ours to free. */
  if (node == NULL)
    routerstatus_free(rs);

  routerinfo_free(ri);
}
    333 
    334 static networkstatus_t *mock_ns = NULL;
    335 
    336 static networkstatus_t *
    337 mock_networkstatus_get_latest_consensus(void)
    338 {
    339  time_t now = approx_time();
    340 
    341  /* If initialized, return it */
    342  if (mock_ns) {
    343    return mock_ns;
    344  }
    345 
    346  /* Initialize fake consensus */
    347  mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
    348 
    349  /* This consensus is live */
    350  mock_ns->valid_after = now-1;
    351  mock_ns->fresh_until = now+1;
    352  mock_ns->valid_until = now+2;
    353  /* Create routerstatus list */
    354  mock_ns->routerstatus_list = smartlist_new();
    355  mock_ns->type = NS_TYPE_CONSENSUS;
    356 
    357  return mock_ns;
    358 }
    359 
/* Mocked networkstatus_get_reasonably_live_consensus(): ignore the time
 * and flavor arguments and return the shared fake consensus. The fake
 * consensus must already have been created (via the latest-consensus
 * mock) before this is called. */
static networkstatus_t *
mock_networkstatus_get_reasonably_live_consensus(time_t now, int flavor)
{
  (void) now;
  (void) flavor;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}
    371 
    372 /** Test the responsible HSDirs calculation function */
    373 static void
    374 test_responsible_hsdirs(void *arg)
    375 {
    376  smartlist_t *responsible_dirs = smartlist_new();
    377  networkstatus_t *ns = NULL;
    378  (void) arg;
    379 
    380  hs_init();
    381 
    382  MOCK(networkstatus_get_latest_consensus,
    383       mock_networkstatus_get_latest_consensus);
    384  MOCK(networkstatus_get_reasonably_live_consensus,
    385       mock_networkstatus_get_reasonably_live_consensus);
    386 
    387  ns = networkstatus_get_latest_consensus();
    388 
    389  { /* First router: HSdir */
    390    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
    391  }
    392 
    393  { /* Second HSDir */
    394    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
    395  }
    396 
    397  { /* Third relay but not HSDir */
    398    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
    399  }
    400 
    401  /* Use a fixed time period and pub key so we always take the same path */
    402  ed25519_public_key_t pubkey;
    403  uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
    404  memset(&pubkey, 42, sizeof(pubkey));
    405 
    406  hs_get_responsible_hsdirs(&pubkey, time_period_num,
    407                            0, 0, responsible_dirs);
    408 
    409  /* Make sure that we only found 2 responsible HSDirs.
    410   * The third relay was not an hsdir! */
    411  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
    412 
    413  /** TODO: Build a bigger network and do more tests here */
    414 
    415 done:
    416  SMARTLIST_FOREACH(ns->routerstatus_list,
    417                    routerstatus_t *, rs, routerstatus_free(rs));
    418  smartlist_free(responsible_dirs);
    419  smartlist_clear(ns->routerstatus_list);
    420  networkstatus_vote_free(mock_ns);
    421  cleanup_nodelist();
    422 
    423  UNMOCK(networkstatus_get_reasonably_live_consensus);
    424 }
    425 
    426 static void
    427 mock_directory_initiate_request(directory_request_t *req)
    428 {
    429  (void)req;
    430  return;
    431 }
    432 
    433 static int
    434 mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
    435                               const ed25519_keypair_t *signing_kp,
    436                               const uint8_t *descriptor_cookie,
    437                               char **encoded_out)
    438 {
    439  (void)desc;
    440  (void)signing_kp;
    441  (void)descriptor_cookie;
    442 
    443  tor_asprintf(encoded_out, "lulu");
    444  return 0;
    445 }
    446 
/* Zeroed OR state handed out in place of the real one. */
static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters); returns the
 * static dummy_state above so tests never touch the real state file. */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}
    455 
/* Mocked router_have_minimum_dir_info(): pretend we always have enough
 * directory information, so code paths gated on it are exercised. */
static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
    461 
/** Test that we correctly detect when the HSDir hash ring changes so that we
 *  reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  /* Mock out everything that would touch the network, the disk, or the
   * real directory subsystem. */
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared.
   **/

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  tt_assert(service);
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  /* The list is capped at the ring size: still 6 entries. */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  /* Tear down in dependency order: routerstatuses, the service (which owns
   * "desc"), then the consensus, nodelist and HS subsystem. */
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  if (service) {
    remove_service(get_hs_service_map(), service);
    hs_service_free(service);
  }
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}
    621 
    622 /** Test disaster SRV computation and caching */
    623 static void
    624 test_disaster_srv(void *arg)
    625 {
    626  uint8_t *cached_disaster_srv_one = NULL;
    627  uint8_t *cached_disaster_srv_two = NULL;
    628  uint8_t srv_one[DIGEST256_LEN] = {0};
    629  uint8_t srv_two[DIGEST256_LEN] = {0};
    630  uint8_t srv_three[DIGEST256_LEN] = {0};
    631  uint8_t srv_four[DIGEST256_LEN] = {0};
    632  uint8_t srv_five[DIGEST256_LEN] = {0};
    633 
    634  (void) arg;
    635 
    636  /* Get the cached SRVs: we gonna use them later for verification */
    637  cached_disaster_srv_one = get_first_cached_disaster_srv();
    638  cached_disaster_srv_two = get_second_cached_disaster_srv();
    639 
    640  /* Compute some srvs */
    641  get_disaster_srv(1, srv_one);
    642  get_disaster_srv(2, srv_two);
    643 
    644  /* Check that the cached ones were updated */
    645  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
    646  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
    647 
    648  /* For at least one SRV, check that its result was as expected. */
    649  {
    650    uint8_t srv1_expected[32];
    651    crypto_digest256(
    652        (char*)srv1_expected,
    653        "shared-random-disaster\0\0\0\0\0\0\x05\xA0\0\0\0\0\0\0\0\1",
    654        strlen("shared-random-disaster")+16,
    655        DIGEST_SHA3_256);
    656    tt_mem_op(srv_one, OP_EQ, srv1_expected, DIGEST256_LEN);
    657    tt_str_op(hex_str((char*)srv_one, DIGEST256_LEN), OP_EQ,
    658        "F8A4948707653837FA44ABB5BBC75A12F6F101E7F8FAF699B9715F4965D3507D");
    659  }
    660 
    661  /* Ask for an SRV that has already been computed */
    662  get_disaster_srv(2, srv_two);
    663  /* and check that the cache entries have not changed */
    664  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
    665  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
    666 
    667  /* Ask for a new SRV */
    668  get_disaster_srv(3, srv_three);
    669  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
    670  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
    671 
    672  /* Ask for another SRV: none of the original SRVs should now be cached */
    673  get_disaster_srv(4, srv_four);
    674  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
    675  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
    676 
    677  /* Ask for yet another SRV */
    678  get_disaster_srv(5, srv_five);
    679  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
    680  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
    681 
    682 done:
    683  ;
    684 }
    685 
/** Test our HS descriptor request tracker by making various requests and
 *  checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  /* Two request keys that share a suffix but differ in their prefix, plus
   * one short all-'Z' key. */
  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

  /* Let's register a hid serv request (the "set" flag is 1, so the lookup
   * also records the request at time "now"). */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request: 0 means "never asked". */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Check that we can clean the first request based on time: only entries
   * older than the requery period from the cutoff are purged. */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l",  now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }

  /* Another dummy key! (a key that was looked up but never registered) */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
    795 
    796 static void
    797 test_parse_extended_hostname(void *arg)
    798 {
    799  (void) arg;
    800  hostname_type_t type;
    801 
    802  char address1[] = "fooaddress.onion";
    803  char address3[] = "fooaddress.exit";
    804  char address4[] = "www.torproject.org";
    805  char address5[] = "foo.abcdefghijklmnop.onion";
    806  char address6[] = "foo.bar.abcdefghijklmnop.onion";
    807  char address7[] = ".abcdefghijklmnop.onion";
    808  char address8[] =
    809    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
    810  char address9[] =
    811    "www.15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
    812  char address10[] =
    813    "15njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid7jdl.onion";
    814 
    815  tt_assert(!parse_extended_hostname(address1, &type));
    816  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    817 
    818  tt_assert(parse_extended_hostname(address3, &type));
    819  tt_int_op(type, OP_EQ, EXIT_HOSTNAME);
    820 
    821  tt_assert(parse_extended_hostname(address4, &type));
    822  tt_int_op(type, OP_EQ, NORMAL_HOSTNAME);
    823 
    824  tt_assert(!parse_extended_hostname(address5, &type));
    825  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    826 
    827  tt_assert(!parse_extended_hostname(address6, &type));
    828  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    829 
    830  tt_assert(!parse_extended_hostname(address7, &type));
    831  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    832 
    833  tt_assert(parse_extended_hostname(address8, &type));
    834  tt_int_op(type, OP_EQ, ONION_V3_HOSTNAME);
    835  tt_str_op(address8, OP_EQ,
    836            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
    837 
    838  /* Invalid v3 address. */
    839  tt_assert(!parse_extended_hostname(address9, &type));
    840  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    841 
    842  /* Invalid v3 address: too long */
    843  tt_assert(!parse_extended_hostname(address10, &type));
    844  tt_int_op(type, OP_EQ, BAD_HOSTNAME);
    845 
    846 done: ;
    847 }
    848 
    849 static void
    850 test_time_between_tp_and_srv(void *arg)
    851 {
    852  int ret;
    853  networkstatus_t ns;
    854  (void) arg;
    855 
    856  /* This function should be returning true where "^" are:
    857   *
    858   *    +------------------------------------------------------------------+
    859   *    |                                                                  |
    860   *    | 00:00      12:00       00:00       12:00       00:00       12:00 |
    861   *    | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
    862   *    |                                                                  |
    863   *    |  $==========|-----------$===========|-----------$===========|    |
    864   *    |             ^^^^^^^^^^^^            ^^^^^^^^^^^^                 |
    865   *    |                                                                  |
    866   *    +------------------------------------------------------------------+
    867   */
    868 
    869  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
    870  tt_int_op(ret, OP_EQ, 0);
    871  ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
    872  tt_int_op(ret, OP_EQ, 0);
    873  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
    874  ret = hs_in_period_between_tp_and_srv(&ns, 0);
    875  tt_int_op(ret, OP_EQ, 0);
    876 
    877  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
    878  tt_int_op(ret, OP_EQ, 0);
    879  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
    880  tt_int_op(ret, OP_EQ, 0);
    881  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
    882  ret = hs_in_period_between_tp_and_srv(&ns, 0);
    883  tt_int_op(ret, OP_EQ, 0);
    884 
    885  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
    886  tt_int_op(ret, OP_EQ, 0);
    887  ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
    888  tt_int_op(ret, OP_EQ, 0);
    889  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
    890  ret = hs_in_period_between_tp_and_srv(&ns, 0);
    891  tt_int_op(ret, OP_EQ, 1);
    892 
    893  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
    894  tt_int_op(ret, OP_EQ, 0);
    895  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
    896  tt_int_op(ret, OP_EQ, 0);
    897  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
    898  ret = hs_in_period_between_tp_and_srv(&ns, 0);
    899  tt_int_op(ret, OP_EQ, 1);
    900 
    901  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
    902  tt_int_op(ret, OP_EQ, 0);
    903  ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
    904  tt_int_op(ret, OP_EQ, 0);
    905  dirauth_sched_recalculate_timing(get_options(), ns.valid_after);
    906  ret = hs_in_period_between_tp_and_srv(&ns, 0);
    907  tt_int_op(ret, OP_EQ, 0);
    908 
    909 done:
    910  ;
    911 }
    912 
    913 /************ Reachability Test (it is huge) ****************/
    914 
/* Simulate different consensus for client and service. Used by the
* reachability test. The SRV and responsible HSDir list are used by all
* reachability tests so make them common to simplify setup and teardown. */
/* Lazily allocated by the mock consensus getters below; freed and reset by
 * cleanup_mock_ns(). */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
/* Fixed SRV byte patterns (all-0 and all-1, see setup_reachability_test());
 * each scenario wires pointers to these into the mock consensuses. */
static sr_srv_t current_srv, previous_srv;
/* Responsible-HSDir lists computed by the service and client side of a
 * scenario; allocated/freed in setup/cleanup_reachability_test(). */
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;
    923 
    924 static networkstatus_t *
    925 mock_networkstatus_get_reasonably_live_consensus_service(time_t now,
    926                                                         int flavor)
    927 {
    928  (void) now;
    929  (void) flavor;
    930 
    931  if (mock_service_ns) {
    932    return mock_service_ns;
    933  }
    934 
    935  mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
    936  mock_service_ns->routerstatus_list = smartlist_new();
    937  mock_service_ns->type = NS_TYPE_CONSENSUS;
    938 
    939  return mock_service_ns;
    940 }
    941 
    942 static networkstatus_t *
    943 mock_networkstatus_get_latest_consensus_service(void)
    944 {
    945  return mock_networkstatus_get_reasonably_live_consensus_service(0, 0);
    946 }
    947 
    948 static networkstatus_t *
    949 mock_networkstatus_get_reasonably_live_consensus_client(time_t now, int flavor)
    950 {
    951  (void) now;
    952  (void) flavor;
    953 
    954  if (mock_client_ns) {
    955    return mock_client_ns;
    956  }
    957 
    958  mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
    959  mock_client_ns->routerstatus_list = smartlist_new();
    960  mock_client_ns->type = NS_TYPE_CONSENSUS;
    961 
    962  return mock_client_ns;
    963 }
    964 
    965 static networkstatus_t *
    966 mock_networkstatus_get_latest_consensus_client(void)
    967 {
    968  return mock_networkstatus_get_reasonably_live_consensus_client(0, 0);
    969 }
    970 
    971 /* Mock function because we are not trying to test the close circuit that does
    972 * an awful lot of checks on the circuit object. */
    973 static void
    974 mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
    975                            const char *file)
    976 {
    977  (void) circ;
    978  (void) reason;
    979  (void) line;
    980  (void) file;
    981  return;
    982 }
    983 
    984 /* Initialize a big HSDir V3 hash ring. */
    985 static void
    986 helper_initialize_big_hash_ring(networkstatus_t *ns)
    987 {
    988  int ret;
    989 
    990  /* Generate 250 hsdirs! :) */
    991  for (int counter = 1 ; counter < 251 ; counter++) {
    992    /* Let's generate random nickname for each hsdir... */
    993    char nickname_binary[8];
    994    char nickname_str[13] = {0};
    995    crypto_rand(nickname_binary, sizeof(nickname_binary));
    996    ret = base64_encode(nickname_str, sizeof(nickname_str),
    997                        nickname_binary, sizeof(nickname_binary), 0);
    998    tt_int_op(ret, OP_EQ, 12);
    999    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
   1000  }
   1001 
   1002  /* Make sure we have 200 hsdirs in our list */
   1003  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);
   1004 
   1005 done:
   1006  ;
   1007 }
   1008 
   1009 /** Initialize service and publish its descriptor as needed. Return the newly
   1010 *  allocated service object to the caller. */
   1011 static hs_service_t *
   1012 helper_init_service(time_t now)
   1013 {
   1014  int retval;
   1015  hs_service_t *service = hs_service_new(get_options());
   1016  tt_assert(service);
   1017  service->config.version = HS_VERSION_THREE;
   1018  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
   1019  ed25519_public_key_generate(&service->keys.identity_pk,
   1020                              &service->keys.identity_sk);
   1021  /* Register service to global map. */
   1022  retval = register_service(get_hs_service_map(), service);
   1023  tt_int_op(retval, OP_EQ, 0);
   1024 
   1025  /* Initialize service descriptor */
   1026  build_all_descriptors(now);
   1027  tt_assert(service->desc_current);
   1028  tt_assert(service->desc_next);
   1029 
   1030 done:
   1031  return service;
   1032 }
   1033 
   1034 /* Helper function to set the RFC 1123 time string into t. */
   1035 static void
   1036 set_consensus_times(const char *timestr, time_t *t)
   1037 {
   1038  tt_assert(timestr);
   1039  tt_assert(t);
   1040 
   1041  int ret = parse_rfc1123_time(timestr, t);
   1042  tt_int_op(ret, OP_EQ, 0);
   1043 
   1044 done:
   1045  return;
   1046 }
   1047 
   1048 /* Helper function to cleanup the mock consensus (client and service) */
   1049 static void
   1050 cleanup_mock_ns(void)
   1051 {
   1052  if (mock_service_ns) {
   1053    SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
   1054                      routerstatus_t *, rs, routerstatus_free(rs));
   1055    smartlist_clear(mock_service_ns->routerstatus_list);
   1056    mock_service_ns->sr_info.current_srv = NULL;
   1057    mock_service_ns->sr_info.previous_srv = NULL;
   1058    networkstatus_vote_free(mock_service_ns);
   1059    mock_service_ns = NULL;
   1060  }
   1061 
   1062  if (mock_client_ns) {
   1063    SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
   1064                      routerstatus_t *, rs, routerstatus_free(rs));
   1065    smartlist_clear(mock_client_ns->routerstatus_list);
   1066    mock_client_ns->sr_info.current_srv = NULL;
   1067    mock_client_ns->sr_info.previous_srv = NULL;
   1068    networkstatus_vote_free(mock_client_ns);
   1069    mock_client_ns = NULL;
   1070  }
   1071 }
   1072 
/* Helper function to setup a reachability test. Once called, the
 * cleanup_reachability_test MUST be called at the end. */
static void
setup_reachability_test(void)
{
  /* Neutralize circuit closing: the real function validates circuit state
   * our test objects do not have. */
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with: two distinct SRV byte patterns (all zeros vs
   * all ones) so scenarios can tell them apart. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses (lazily allocates the mock singletons). */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}
   1094 
/* Helper function to cleanup a reachability test initial setup. */
static void
cleanup_reachability_test(void)
{
  /* Only the list containers are freed here; the routerstatus elements they
   * hold are owned elsewhere (presumably the mock consensuses, released via
   * cleanup_mock_ns() below) — confirm before changing ownership. */
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  /* Unmock in reverse order of setup_reachability_test(). */
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
   1108 
   1109 /* A reachability test always check if the resulting service and client
   1110 * responsible HSDir for the given parameters are equal.
   1111 *
   1112 * Return true iff the same exact nodes are in both list. */
   1113 static int
   1114 are_responsible_hsdirs_equal(void)
   1115 {
   1116  int count = 0;
   1117  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
   1118  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
   1119 
   1120  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
   1121                          const routerstatus_t *, c_rs) {
   1122    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
   1123                            const routerstatus_t *, s_rs) {
   1124      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
   1125                    DIGEST_LEN)) {
   1126        count++;
   1127        break;
   1128      }
   1129    } SMARTLIST_FOREACH_END(s_rs);
   1130  } SMARTLIST_FOREACH_END(c_rs);
   1131 
   1132 done:
   1133  return (count == 6);
   1134 }
   1135 
   1136 /* Tor doesn't use such a function to get the previous HSDir, it is only used
   1137 * in node_set_hsdir_index(). We need it here so we can test the reachability
   1138 * scenario 6 that requires the previous time period to compute the list of
   1139 * responsible HSDir because of the client state timing. */
   1140 static uint64_t
   1141 get_previous_time_period(time_t now)
   1142 {
   1143  return hs_get_time_period_num(now) - 1;
   1144 }
   1145 
/* Configuration of a reachability test scenario. A scenario with a NULL
 * service_valid_after acts as the end-of-table marker (see
 * reachability_scenarios below). */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. These point at the static
   * current_srv/previous_srv patterns, or are NULL when the side has no such
   * SRV in its consensus. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always use the current time
   * period thus why no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Is the client and service expected to be in a new time period. After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and test the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenario requires a hint that the client, because of its consensus
   * time, will request the "next" service descriptor so this indicates if it
   * is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;

/* Some defines to help with semantic while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
   1184 
/* The scenario table driven by test_reachability(); field order matches
 * reachability_cfg_t and the table ends at the all-NULL/zero marker. */
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |              ^ ^                                                 |
   *  |              S C                                                 |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 thus have access to SRV#1. Service and client should
   *  be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                       |
   *  |                        S C                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which makes the client after the SRV#2 and the service just before. The
   *  service should only be using TP#1. The client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                            ^ ^                                   |
   *  |                            S C                                   |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    S C                           |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                       |
   *  |                        C S                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    C S                           |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which makes the service outside after TP#2 and the client just before.
   *  The service should be using TP#1 as its current time period and TP#2 as
   *  its next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
   1372 
   1373 /* Run a single reachability scenario. num_scenario is the corresponding
   1374 * scenario number from the documentation. It is used to log it in case of
   1375 * failure so we know which scenario fails. */
   1376 static int
   1377 run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
   1378 {
   1379  int ret = -1;
   1380  hs_service_t *service;
   1381  uint64_t service_tp, client_tp;
   1382  ed25519_public_key_t service_blinded_pk, client_blinded_pk;
   1383 
   1384  setup_reachability_test();
   1385 
   1386  tt_assert(cfg);
   1387 
   1388  /* Set service consensus time. */
   1389  set_consensus_times(cfg->service_valid_after,
   1390                      &mock_service_ns->valid_after);
   1391  set_consensus_times(cfg->service_valid_until,
   1392                      &mock_service_ns->valid_until);
   1393  set_consensus_times(cfg->service_valid_until,
   1394                      &mock_service_ns->fresh_until);
   1395  dirauth_sched_recalculate_timing(get_options(),
   1396                                     mock_service_ns->valid_after);
   1397  /* Check that service is in the right time period point */
   1398  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
   1399            cfg->service_in_new_tp);
   1400 
   1401  /* Set client consensus time. */
   1402  set_consensus_times(cfg->client_valid_after,
   1403                      &mock_client_ns->valid_after);
   1404  set_consensus_times(cfg->client_valid_until,
   1405                      &mock_client_ns->valid_until);
   1406  set_consensus_times(cfg->client_valid_until,
   1407                      &mock_client_ns->fresh_until);
   1408  dirauth_sched_recalculate_timing(get_options(),
   1409                                     mock_client_ns->valid_after);
   1410  /* Check that client is in the right time period point */
   1411  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
   1412            cfg->client_in_new_tp);
   1413 
   1414  /* Set the SRVs for this scenario. */
   1415  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
   1416  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
   1417  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
   1418  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;
   1419 
   1420  /* Initialize a service to get keys. */
   1421  update_approx_time(mock_service_ns->valid_after);
   1422  service = helper_init_service(mock_service_ns->valid_after+1);
   1423 
   1424  /*
   1425   * === Client setup ===
   1426   */
   1427 
   1428  MOCK(networkstatus_get_reasonably_live_consensus,
   1429       mock_networkstatus_get_reasonably_live_consensus_client);
   1430  MOCK(networkstatus_get_latest_consensus,
   1431       mock_networkstatus_get_latest_consensus_client);
   1432 
   1433  /* Make networkstatus_is_live() happy. */
   1434  update_approx_time(mock_client_ns->valid_after);
   1435  /* Initialize a big hashring for this consensus with the hsdir index set. */
   1436  helper_initialize_big_hash_ring(mock_client_ns);
   1437 
   1438  /* Client ONLY use the current time period. This is the whole point of these
   1439   * reachability test that is to make sure the client can always reach the
   1440   * service using only its current time period. */
   1441  client_tp = hs_get_time_period_num(0);
   1442 
   1443  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
   1444                          client_tp, &client_blinded_pk);
   1445  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
   1446                            client_responsible_hsdirs);
   1447  /* Cleanup the nodelist so we can let the service computes its own set of
   1448   * node with its own hashring. */
   1449  cleanup_nodelist();
   1450  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
   1451 
   1452  UNMOCK(networkstatus_get_latest_consensus);
   1453  UNMOCK(networkstatus_get_reasonably_live_consensus);
   1454 
   1455  /*
   1456   * === Service setup ===
   1457   */
   1458 
   1459  MOCK(networkstatus_get_reasonably_live_consensus,
   1460       mock_networkstatus_get_reasonably_live_consensus_service);
   1461  MOCK(networkstatus_get_latest_consensus,
   1462       mock_networkstatus_get_latest_consensus_service);
   1463 
   1464  /* Make networkstatus_is_live() happy. */
   1465  update_approx_time(mock_service_ns->valid_after);
   1466  /* Initialize a big hashring for this consensus with the hsdir index set. */
   1467  helper_initialize_big_hash_ring(mock_service_ns);
   1468 
   1469  service_tp = cfg->service_time_period_fn(0);
   1470 
   1471  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
   1472                          service_tp, &service_blinded_pk);
   1473 
 /* A service builds two lists of responsible HSDirs, for the current and the
  * next descriptor. Depending on the scenario, the client timing indicates
  * whether it is fetching the current or the next descriptor, so we use
  * "client_fetch_next_desc" to know which one the client is trying to get,
  * and confirm that the service computes the same hashring for the same
  * blinded key and service time period function. */
   1480  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
   1481                            cfg->client_fetch_next_desc, 0,
   1482                            service_responsible_hsdirs);
   1483  cleanup_nodelist();
   1484  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
   1485 
   1486  UNMOCK(networkstatus_get_latest_consensus);
   1487  UNMOCK(networkstatus_get_reasonably_live_consensus);
   1488 
   1489  /* Some testing of the values we just got from the client and service. */
   1490  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
   1491            ED25519_PUBKEY_LEN);
   1492  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);
   1493 
   1494  /* Everything went well. */
   1495  ret = 0;
   1496 
   1497 done:
   1498  cleanup_reachability_test();
   1499  if (ret == -1) {
   1500    /* Do this so we can know which scenario failed. */
   1501    char msg[32];
   1502    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
   1503    tt_fail_msg(msg);
   1504  }
   1505  return ret;
   1506 }
   1507 
   1508 static void
   1509 test_reachability(void *arg)
   1510 {
   1511  (void) arg;
   1512 
   1513  /* NOTE: An important axiom to understand here is that SRV#N must only be
   1514   * used with TP#N value. For example, SRV#2 with TP#1 should NEVER be used
   1515   * together. The HSDir index computation is based on this axiom.*/
   1516 
   1517  for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
   1518    int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
   1519    if (ret < 0) {
   1520      return;
   1521    }
   1522  }
   1523 }
   1524 
/** Exercise the v3 key-blinding basics against fixed hex test vectors:
 * time period computation, blinding parameter derivation, blinded public
 * key / keypair derivation, and the subcredential. */
static void
test_blinding_basics(void *arg)
{
  (void)arg;
  /* test_memeq_hex() expands to code that uses this exact variable name for
   * its hex-encoded scratch copy; it is freed once at the end. Do not
   * rename it. */
  char *mem_op_hex_tmp = NULL;
  const uint64_t time_period = 1234;
  ed25519_keypair_t keypair;

  /* 1973-05-20 01:50:33 must fall into time period number 1234 given the
   * default 1440-minute (one day) period length. */
  time_t instant;
  tt_int_op(0, OP_EQ, parse_iso_time("1973-05-20 01:50:33", &instant));
  tt_int_op(1440, OP_EQ, get_time_period_length()); // in minutes, remember.
  tt_int_op(time_period, OP_EQ, hs_get_time_period_num(instant));

  /* Fixed ed25519 keypair loaded from hex test vectors. */
  const char pubkey_hex[] =
    "833990B085C1A688C1D4C8B1F6B56AFAF5A2ECA674449E1D704F83765CCB7BC6";
  const char seckey_hex[] =
    "D8C7FF0E31295B66540D789AF3E3DF992038A9592EEA01D8B7CBA06D6E66D159"
    "4D6167696320576F7264733A20737065697373636F62616C742062697669756D";
  base16_decode((char*)keypair.pubkey.pubkey, sizeof(keypair.pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  base16_decode((char*)keypair.seckey.seckey, sizeof(keypair.seckey.seckey),
                seckey_hex, strlen(seckey_hex));

  uint64_t period_len = get_time_period_length();
  tt_u64_op(period_len, OP_EQ, 1440);
  /* Blinding parameter derived from the public key, the time period and the
   * period length (no client-auth secret: NULL/0). */
  uint8_t params[32];
  build_blinded_key_param(&keypair.pubkey, NULL, 0,
                          time_period, 1440,
                          params);
  test_memeq_hex(params,
                 "379E50DB31FEE6775ABD0AF6FB7C371E"
                 "060308F4F847DB09FE4CFE13AF602287");

  /* Blinded public key for this time period, and the subcredential derived
   * from the (identity pubkey, blinded pubkey) pair. */
  ed25519_public_key_t blinded_public;
  hs_build_blinded_pubkey(&keypair.pubkey, NULL, 0, time_period,
                          &blinded_public);
  hs_subcredential_t subcred;
  hs_get_subcredential(&keypair.pubkey, &blinded_public, &subcred);

  test_memeq_hex(blinded_public.pubkey,
                 "3A50BF210E8F9EE955AE0014F7A6917F"
                 "B65EBF098A86305ABB508D1A7291B6D5");
  test_memeq_hex(subcred.subcred,
                 "635D55907816E8D76398A675A50B1C2F"
                 "3E36B42A5CA77BA3A0441285161AE07D");

  /* Blinding the whole keypair must produce the same blinded public key as
   * blinding just the public key above, plus the expected secret key. */
  ed25519_keypair_t blinded_keypair;
  hs_build_blinded_keypair(&keypair, NULL, 0, time_period,
                           &blinded_keypair);
  tt_mem_op(blinded_public.pubkey, OP_EQ, blinded_keypair.pubkey.pubkey,
            ED25519_PUBKEY_LEN);
  test_memeq_hex(blinded_keypair.seckey.seckey,
                 "A958DC83AC885F6814C67035DE817A2C"
                 "604D5D2F715282079448F789B656350B"
                 "4540FE1F80AA3F7E91306B7BF7A8E367"
                 "293352B14A29FDCC8C19F3558075524B");

done:
  tor_free(mem_op_hex_tmp);
}
   1585 
   1586 /** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
   1587 *  its identity digest in <b>hsdir_digest_out</b>. */
   1588 static void
   1589 helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
   1590                        char *hsdir_digest_out)
   1591 {
   1592  tt_assert(onion_identity_pk);
   1593 
   1594  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
   1595  tt_assert(client_hsdir);
   1596  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);
   1597 
   1598 done:
   1599  ;
   1600 }
   1601 
   1602 static void
   1603 test_hs_indexes(void *arg)
   1604 {
   1605  int ret;
   1606  uint64_t period_num = 42;
   1607  ed25519_public_key_t pubkey;
   1608 
   1609  (void) arg;
   1610 
   1611  /* Build the hs_index */
   1612  {
   1613    uint8_t hs_index[DIGEST256_LEN];
   1614    const char *b32_test_vector =
   1615      "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
   1616    char test_vector[DIGEST256_LEN];
   1617    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
   1618                        strlen(b32_test_vector));
   1619    tt_int_op(ret, OP_EQ, sizeof(test_vector));
   1620    /* Our test vector uses a public key set to 32 bytes of \x42. */
   1621    memset(&pubkey, '\x42', sizeof(pubkey));
   1622    hs_build_hs_index(1, &pubkey, period_num, hs_index);
   1623    tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
   1624  }
   1625 
   1626  /* Build the hsdir_index */
   1627  {
   1628    uint8_t srv[DIGEST256_LEN];
   1629    uint8_t hsdir_index[DIGEST256_LEN];
   1630    const char *b32_test_vector =
   1631      "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
   1632    char test_vector[DIGEST256_LEN];
   1633    ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
   1634                        strlen(b32_test_vector));
   1635    tt_int_op(ret, OP_EQ, sizeof(test_vector));
   1636    /* Our test vector uses a public key set to 32 bytes of \x42. */
   1637    memset(&pubkey, '\x42', sizeof(pubkey));
   1638    memset(srv, '\x43', sizeof(srv));
   1639    hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
   1640    tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
   1641  }
   1642 
   1643 done:
   1644  ;
   1645 }
   1646 
   1647 #define EARLY_IN_SRV_TO_TP 0
   1648 #define LATE_IN_SRV_TO_TP 1
   1649 #define EARLY_IN_TP_TO_SRV 2
   1650 #define LATE_IN_TP_TO_SRV 3
   1651 
   1652 /** Set the consensus and system time based on <b>position</b>. See the
   1653 *    following diagram for details:
   1654 *
   1655 *  +------------------------------------------------------------------+
   1656 *  |                                                                  |
   1657 *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   1658 *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   1659 *  |                                                                  |
   1660 *  |  $==========|-----------$===========|----------$===========|     |
   1661 *  |                                                                  |
   1662 *  |                                                                  |
   1663 *  +------------------------------------------------------------------+
   1664 */
   1665 static time_t
   1666 helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
   1667 {
   1668  time_t real_time = 0;
   1669 
   1670  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   1671   * valid_after is what matters here, the rest is just to specify the voting
   1672   * period correctly. */
   1673  if (position == LATE_IN_SRV_TO_TP) {
   1674    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
   1675    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
   1676    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
   1677  } else if (position == EARLY_IN_TP_TO_SRV) {
   1678    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
   1679    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
   1680    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
   1681  } else if (position == LATE_IN_TP_TO_SRV) {
   1682    parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
   1683    parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
   1684    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
   1685  } else if (position == EARLY_IN_SRV_TO_TP) {
   1686    parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
   1687    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
   1688    parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
   1689  } else {
   1690    tt_assert(0);
   1691  }
   1692  dirauth_sched_recalculate_timing(get_options(), ns->valid_after);
   1693 
   1694  /* Set system time: pretend to be just 2 minutes before consensus expiry */
   1695  real_time = ns->valid_until - 120;
   1696  update_approx_time(real_time);
   1697 
   1698 done:
   1699  return real_time;
   1700 }
   1701 
/** Helper function that carries out the actual test for
 *  test_client_service_hsdir_set_sync(): set up the consensus as the service
 *  sees it (<b>service_position</b>), publish the service's descriptors,
 *  re-time the consensus as the client sees it (<b>client_position</b>),
 *  then check that every HSDir the client picks is one the service uploaded
 *  to. <b>client_fetches_next_desc</b> says whether the client timing
 *  corresponds to the service's next (vs current) descriptor. */
static void
helper_test_hsdir_sync(networkstatus_t *ns,
                      int service_position, int client_position,
                      int client_fetches_next_desc)
{
  hs_service_descriptor_t *desc;
  int retval;

  /** Test logic:
   *   1) Initialize service time: consensus and system time.
   *   1.1) Initialize service hash ring
   *   2) Initialize service and publish descriptors.
   *   3) Initialize client time: consensus and system time.
   *   3.1) Initialize client hash ring
   *   4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
   *      the client was also picked by service.
   */

  /* 1) Initialize service time: consensus and real time */
  time_t now = helper_set_consensus_and_system_time(ns, service_position);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize service */
  hs_service_t *service = helper_init_service(now);
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);

  /* 3) Initialize client time */
  helper_set_consensus_and_system_time(ns, client_position);

  /* Rebuild the hash ring for the client's view of time: drop the nodelist
   * and the consensus routerstatuses before re-populating. Order matters
   * here — the nodelist references the routerstatuses being freed. */
  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);

  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
        service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the hsdirs chosen by the service and make sure that it
     * contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }

  /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
   * exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));

done:
  /* At the end: free all services and initialize the subsystem again, we will
   * need it for next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}
   1774 
   1775 /** This test ensures that client and service will pick the same HSDirs, under
   1776 *  various timing scenarios:
   1777 *  a) Scenario where both client and service are in the time segment between
   1778 *     SRV#N and TP#N:
   1779 *  b) Scenario where both client and service are in the time segment between
   1780 *     TP#N and SRV#N+1.
   1781 *  c) Scenario where service is between SRV#N and TP#N, but client is between
   1782 *     TP#N and SRV#N+1.
   1783 *  d) Scenario where service is between TP#N and SRV#N+1, but client is
   1784 *     between SRV#N and TP#N.
   1785 *
   1786 * This test is important because it tests that upload_descriptor_to_all() is
   1787 * in synch with pick_hsdir_v3(). That's not the case for the
   1788 * test_reachability() test which only compares the responsible hsdir sets.
   1789 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  /* Mock consensus/state access, descriptor encoding and directory request
   * initiation so that no real directory requests are made and no state is
   * written to disk. */
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_reasonably_live_consensus,
       mock_networkstatus_get_reasonably_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various synch scenarios. See the helper function for more
      details: */

  /*  a) Scenario where both client and service are in the time segment between
   *     SRV#N and TP#N. At this time the client fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                                  ^ ^                             |
   *  |                                  S C                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);

  /*  b) Scenario where both client and service are in the time segment between
   *     TP#N and SRV#N+1. At this time the client fetches the second HS
   *     desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                      ^ ^                                         |
   *  |                      S C                                         |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);

  /*  c) Scenario where service is between SRV#N and TP#N, but client is
   *     between TP#N and SRV#N+1. Client is forward in time so it fetches the
   *     second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    S C                           |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);

  /*  d) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N and TP#N. Client is backwards in time so it fetches the
   *     first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                           |
   *  |                                    C S                           |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);

  /*  e) Scenario where service is between SRV#N and TP#N, but client is
   *     between TP#N-1 and SRV#3. Client is backwards in time so it fetches
   *     the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                       |
   *  |                        C S                                       |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);

  /*  f) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
   *     the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                       |
   *  |                        S C                                       |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);

done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
   1918 
   1919 struct testcase_t hs_common_tests[] = {
   1920  { "blinding_basics", test_blinding_basics, TT_FORK, NULL, NULL },
   1921  { "build_address", test_build_address, TT_FORK,
   1922    NULL, NULL },
   1923  { "validate_address", test_validate_address, TT_FORK,
   1924    NULL, NULL },
   1925  { "time_period", test_time_period, TT_FORK,
   1926    NULL, NULL },
   1927  { "start_time_of_next_time_period", test_start_time_of_next_time_period,
   1928    TT_FORK, NULL, NULL },
   1929  { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
   1930    NULL, NULL },
   1931  { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
   1932    NULL, NULL },
   1933  { "disaster_srv", test_disaster_srv, TT_FORK,
   1934    NULL, NULL },
   1935  { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
   1936    NULL, NULL },
   1937  { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
   1938    NULL, NULL },
   1939  { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
   1940    NULL, NULL },
   1941  { "reachability", test_reachability, TT_FORK,
   1942    NULL, NULL },
   1943  { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
   1944    TT_FORK, NULL, NULL },
   1945  { "hs_indexes", test_hs_indexes, TT_FORK,
   1946    NULL, NULL },
   1947 
   1948  END_OF_TESTCASES
   1949 };