sctp_pcb.c (235173B)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/sctp_os.h>
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/proc.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_bsd_addr.h>
#if defined(INET) || defined(INET6)
#if !defined(_WIN32)
#include <netinet/udp.h>
#endif
#endif
#ifdef INET6
#if defined(__Userspace__)
#include "user_ip6_var.h"
#else
#include <netinet6/ip6_var.h>
#endif
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/unistd.h>
#endif
#if defined(__Userspace__)
#include <user_socketvar.h>
#include <user_atomic.h>
#if !defined(_WIN32)
#include <netdb.h>
#endif
#endif

#if !defined(__FreeBSD__) || defined(__Userspace__)
/* Global state container for non-FreeBSD / userspace builds. */
struct sctp_base_info system_base_info;

#endif
/* FIX: we don't handle multiple link local scopes */
/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
#ifdef INET6
/*
 * Scope-aware IPv6 address equality check.
 *
 * When SCTP_EMBEDDED_V6_SCOPE is defined, the scope id of each address
 * is first embedded into a local copy (via the platform's
 * in6_embedscope()/sa6_embedscope() flavor) before comparing, so two
 * link-local addresses compare correctly regardless of whether their
 * scope has already been embedded.  The inputs are never modified.
 *
 * Returns non-zero if the addresses are equal, 0 if they differ or if
 * scope embedding fails for either address.
 */
int
SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
{
#ifdef SCTP_EMBEDDED_V6_SCOPE
#if defined(__APPLE__) && !defined(__Userspace__)
    struct in6_addr tmp_a, tmp_b;

    tmp_a = a->sin6_addr;
    /* Darwin's in6_embedscope() grew extra parameters after Snow Leopard. */
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    if (in6_embedscope(&tmp_a, a, NULL, NULL) != 0) {
#else
    if (in6_embedscope(&tmp_a, a, NULL, NULL, NULL) != 0) {
#endif
        return (0);
    }
    tmp_b = b->sin6_addr;
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    if (in6_embedscope(&tmp_b, b, NULL, NULL) != 0) {
#else
    if (in6_embedscope(&tmp_b, b, NULL, NULL, NULL) != 0) {
#endif
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
#elif defined(SCTP_KAME)
    /* KAME stacks embed the scope through a sockaddr_in6 copy. */
    struct sockaddr_in6 tmp_a, tmp_b;

    memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
    if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
        return (0);
    }
    memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
    if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
#else
    struct in6_addr tmp_a, tmp_b;

    tmp_a = a->sin6_addr;
    if (in6_embedscope(&tmp_a, a) != 0) {
        return (0);
    }
    tmp_b = b->sin6_addr;
    if (in6_embedscope(&tmp_b, b) != 0) {
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
#endif
#else
    /* No embedded scopes on this platform: compare the raw addresses. */
    return (IN6_ARE_ADDR_EQUAL(&(a->sin6_addr), &(b->sin6_addr)));
#endif /* SCTP_EMBEDDED_V6_SCOPE */
}
#endif

/*
 * Copy a snapshot of the global SCTP PCB counters (endpoints,
 * associations, local/remote addresses, chunks, read-queue entries,
 * stream-out-queue entries and free chunks) into *spcb, taken under
 * the INP-INFO read lock.
 */
void
sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
{
    /*
     * We really don't need to lock this, but I will just because it
     * does not hurt.
     */
    SCTP_INP_INFO_RLOCK();
    spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
    spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
    spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
    spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
    spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
    spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
    spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
    spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
    SCTP_INP_INFO_RUNLOCK();
}

/*-
 * Addresses are added to VRF's (Virtual Router's). For BSD we
 * have only the default VRF 0. We maintain a hash list of
 * VRF's. Each VRF has its own list of sctp_ifn's. Each of
 * these has a list of addresses. When we add a new address
 * to a VRF we lookup the ifn/ifn_index, if the ifn does
 * not exist we create it and add it to the list of IFN's
 * within the VRF.
Once we have the sctp_ifn, we add the
 * address to the list. So we look something like:
 *
 * hash-vrf-table
 *   vrf-> ifn-> ifn -> ifn
 *   vrf    |
 *    ...   +--ifa-> ifa -> ifa
 *   vrf
 *
 * We keep these separate lists since the SCTP subsystem will
 * point to these from its source address selection nets structure.
 * When an address is deleted it does not happen right away on
 * the SCTP side, it gets scheduled. What we do when a
 * delete happens is immediately remove the address from
 * the master list and decrement the refcount. As our
 * addip iterator works through and frees the src address
 * selection pointing to the sctp_ifa, eventually the refcount
 * will reach 0 and we will delete it. Note that it is assumed
 * that any locking on system level ifn/ifa is done at the
 * caller of these functions and these routines will only
 * lock the SCTP structures as they add or delete things.
 *
 * Other notes on VRF concepts.
 *  - An endpoint can be in multiple VRF's
 *  - An association lives within a VRF and only one VRF.
 *  - Any incoming packet we can deduce the VRF for by
 *    looking at the mbuf/pak inbound (for BSD its VRF=0 :D)
 *  - Any downward send call or connect call must supply the
 *    VRF via ancillary data or via some sort of set default
 *    VRF socket option call (again for BSD no brainer since
 *    the VRF is always 0).
 *  - An endpoint may add multiple VRF's to it.
 *  - Listening sockets can accept associations in any
 *    of the VRF's they are in but the assoc will end up
 *    in only one VRF (gotten from the packet or connect/send).
 *
 */

/*
 * Return the VRF for vrf_id, creating and hashing a new one if it does
 * not exist yet.  Returns NULL (after panicking under INVARIANTS) when
 * memory for the VRF or its address hash cannot be allocated.
 */
struct sctp_vrf *
sctp_allocate_vrf(int vrf_id)
{
    struct sctp_vrf *vrf = NULL;
    struct sctp_vrflist *bucket;

    /* First allocate the VRF structure */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf) {
        /* Already allocated */
        return (vrf);
    }
    SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
                SCTP_M_VRF);
    if (vrf == NULL) {
        /* No memory */
#ifdef INVARIANTS
        panic("No memory for VRF:%d", vrf_id);
#endif
        return (NULL);
    }
    /* setup the VRF */
    memset(vrf, 0, sizeof(struct sctp_vrf));
    vrf->vrf_id = vrf_id;
    LIST_INIT(&vrf->ifnlist);
    vrf->total_ifa_count = 0;
    vrf->refcount = 0;
    /* now also setup table ids */
    SCTP_INIT_VRF_TABLEID(vrf);
    /* Init the HASH of addresses */
    vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE,
                                        &vrf->vrf_addr_hashmark);
    if (vrf->vrf_addr_hash == NULL) {
        /* No memory */
#ifdef INVARIANTS
        panic("No memory for VRF:%d", vrf_id);
#endif
        SCTP_FREE(vrf, SCTP_M_VRF);
        return (NULL);
    }

    /* Add it to the hash table */
    bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
    LIST_INSERT_HEAD(bucket, vrf, next_vrf);
    atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
    return (vrf);
}

/*
 * Look up an sctp_ifn in the global ifn hash, matching either the
 * interface index or (when both are non-NULL) the opaque ifn pointer.
 * The address lock must be held by the caller (asserted below).
 * Returns NULL when no match is found.
 */
struct sctp_ifn *
sctp_find_ifn(void *ifn, uint32_t ifn_index)
{
    struct sctp_ifn *sctp_ifnp;
    struct sctp_ifnlist *hash_ifn_head;

    /* We assume the lock is held for the addresses
     * if that's wrong problems could occur :-)
     */
    SCTP_IPI_ADDR_LOCK_ASSERT();
    hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
    LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
        if (sctp_ifnp->ifn_index == ifn_index) {
            return (sctp_ifnp);
        }
        if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
            return (sctp_ifnp);
        }
    }
    return (NULL);
}

struct sctp_vrf * 271 sctp_find_vrf(uint32_t vrf_id) 272 { 273 struct sctp_vrflist *bucket; 274 struct sctp_vrf *liste; 275 276 bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))]; 277 LIST_FOREACH(liste, bucket, next_vrf) { 278 if (vrf_id == liste->vrf_id) { 279 return (liste); 280 } 281 } 282 return (NULL); 283 } 284 285 void 286 sctp_free_vrf(struct sctp_vrf *vrf) 287 { 288 if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) { 289 if (vrf->vrf_addr_hash) { 290 SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark); 291 vrf->vrf_addr_hash = NULL; 292 } 293 /* We zero'd the count */ 294 LIST_REMOVE(vrf, next_vrf); 295 SCTP_FREE(vrf, SCTP_M_VRF); 296 atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1); 297 } 298 } 299 300 void 301 sctp_free_ifn(struct sctp_ifn *sctp_ifnp) 302 { 303 if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) { 304 /* We zero'd the count */ 305 if (sctp_ifnp->vrf) { 306 sctp_free_vrf(sctp_ifnp->vrf); 307 } 308 SCTP_FREE(sctp_ifnp, SCTP_M_IFN); 309 atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1); 310 } 311 } 312 313 void 314 sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu) 315 { 316 struct sctp_ifn *sctp_ifnp; 317 318 sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index); 319 if (sctp_ifnp != NULL) { 320 sctp_ifnp->ifn_mtu = mtu; 321 } 322 } 323 324 void 325 sctp_free_ifa(struct sctp_ifa *sctp_ifap) 326 { 327 if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) { 328 /* We zero'd the count */ 329 if (sctp_ifap->ifn_p) { 330 sctp_free_ifn(sctp_ifap->ifn_p); 331 } 332 SCTP_FREE(sctp_ifap, SCTP_M_IFA); 333 atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); 334 } 335 } 336 337 static void 338 sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock) 339 { 340 struct sctp_ifn *found; 341 342 found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index); 343 if (found == NULL) { 344 /* Not in the list.. 
sorry */ 345 return; 346 } 347 if (hold_addr_lock == 0) { 348 SCTP_IPI_ADDR_WLOCK(); 349 } else { 350 SCTP_IPI_ADDR_WLOCK_ASSERT(); 351 } 352 LIST_REMOVE(sctp_ifnp, next_bucket); 353 LIST_REMOVE(sctp_ifnp, next_ifn); 354 if (hold_addr_lock == 0) { 355 SCTP_IPI_ADDR_WUNLOCK(); 356 } 357 /* Take away the reference, and possibly free it */ 358 sctp_free_ifn(sctp_ifnp); 359 } 360 361 void 362 sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, 363 const char *if_name, uint32_t ifn_index) 364 { 365 struct sctp_vrf *vrf; 366 struct sctp_ifa *sctp_ifap; 367 368 SCTP_IPI_ADDR_RLOCK(); 369 vrf = sctp_find_vrf(vrf_id); 370 if (vrf == NULL) { 371 SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); 372 goto out; 373 } 374 sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 375 if (sctp_ifap == NULL) { 376 SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); 377 goto out; 378 } 379 if (sctp_ifap->ifn_p == NULL) { 380 SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n"); 381 goto out; 382 } 383 if (if_name) { 384 if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { 385 SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", 386 sctp_ifap->ifn_p->ifn_name, if_name); 387 goto out; 388 } 389 } else { 390 if (sctp_ifap->ifn_p->ifn_index != ifn_index) { 391 SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", 392 sctp_ifap->ifn_p->ifn_index, ifn_index); 393 goto out; 394 } 395 } 396 397 sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID); 398 sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE; 399 out: 400 SCTP_IPI_ADDR_RUNLOCK(); 401 } 402 403 void 404 sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, 405 const char *if_name, uint32_t ifn_index) 406 { 407 struct sctp_vrf *vrf; 408 struct sctp_ifa *sctp_ifap; 409 410 SCTP_IPI_ADDR_RLOCK(); 411 vrf = sctp_find_vrf(vrf_id); 412 if (vrf == NULL) { 413 SCTPDBG(SCTP_DEBUG_PCB4, "Can't find 
vrf_id 0x%x\n", vrf_id); 414 goto out; 415 } 416 sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 417 if (sctp_ifap == NULL) { 418 SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); 419 goto out; 420 } 421 if (sctp_ifap->ifn_p == NULL) { 422 SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n"); 423 goto out; 424 } 425 if (if_name) { 426 if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { 427 SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", 428 sctp_ifap->ifn_p->ifn_name, if_name); 429 goto out; 430 } 431 } else { 432 if (sctp_ifap->ifn_p->ifn_index != ifn_index) { 433 SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", 434 sctp_ifap->ifn_p->ifn_index, ifn_index); 435 goto out; 436 } 437 } 438 439 sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE); 440 sctp_ifap->localifa_flags |= SCTP_ADDR_VALID; 441 out: 442 SCTP_IPI_ADDR_RUNLOCK(); 443 } 444 445 /*- 446 * Add an ifa to an ifn. 447 * Register the interface as necessary. 448 * NOTE: ADDR write lock MUST be held. 449 */ 450 static void 451 sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap) 452 { 453 int ifa_af; 454 455 LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa); 456 sctp_ifap->ifn_p = sctp_ifnp; 457 atomic_add_int(&sctp_ifap->ifn_p->refcount, 1); 458 /* update address counts */ 459 sctp_ifnp->ifa_count++; 460 ifa_af = sctp_ifap->address.sa.sa_family; 461 switch (ifa_af) { 462 #ifdef INET 463 case AF_INET: 464 sctp_ifnp->num_v4++; 465 break; 466 #endif 467 #ifdef INET6 468 case AF_INET6: 469 sctp_ifnp->num_v6++; 470 break; 471 #endif 472 default: 473 break; 474 } 475 if (sctp_ifnp->ifa_count == 1) { 476 /* register the new interface */ 477 sctp_ifnp->registered_af = ifa_af; 478 } 479 } 480 481 /*- 482 * Remove an ifa from its ifn. 483 * If no more addresses exist, remove the ifn too. 
Otherwise, re-register the
 * interface based on the remaining address families left.
 * NOTE: ADDR write lock MUST be held.
 */
static void
sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap)
{
    LIST_REMOVE(sctp_ifap, next_ifa);
    if (sctp_ifap->ifn_p) {
        /* update address counts */
        sctp_ifap->ifn_p->ifa_count--;
        switch (sctp_ifap->address.sa.sa_family) {
#ifdef INET
        case AF_INET:
            sctp_ifap->ifn_p->num_v4--;
            break;
#endif
#ifdef INET6
        case AF_INET6:
            sctp_ifap->ifn_p->num_v6--;
            break;
#endif
        default:
            break;
        }

        if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) {
            /* remove the ifn, possibly freeing it */
            sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED);
        } else {
            /* re-register address family type, if needed */
            if ((sctp_ifap->ifn_p->num_v6 == 0) &&
                (sctp_ifap->ifn_p->registered_af == AF_INET6)) {
                sctp_ifap->ifn_p->registered_af = AF_INET;
            } else if ((sctp_ifap->ifn_p->num_v4 == 0) &&
                       (sctp_ifap->ifn_p->registered_af == AF_INET)) {
                sctp_ifap->ifn_p->registered_af = AF_INET6;
            }
            /* free the ifn refcount */
            sctp_free_ifn(sctp_ifap->ifn_p);
        }
        sctp_ifap->ifn_p = NULL;
    }
}

/*
 * Add a local address to a VRF, creating the VRF and/or ifn entries on
 * demand.  If the address already exists it is either kept (same ifn),
 * revived (was marked deleted), or moved to the new ifn.  With
 * dynamic_add set, the address is queued on the ADDR work queue with
 * an extra reference and left SCTP_ADDR_DEFER_USE until the worker
 * runs; otherwise it is immediately usable.
 *
 * Returns the (existing or new) sctp_ifa, or NULL on memory failure.
 * Note: both the ifn and ifa are pre-allocated before taking the ADDR
 * write lock, and whichever is unused is freed on the way out.
 */
struct sctp_ifa *
sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
                     uint32_t ifn_type, const char *if_name, void *ifa,
                     struct sockaddr *addr, uint32_t ifa_flags,
                     int dynamic_add)
{
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifnp, *new_sctp_ifnp;
    struct sctp_ifa *sctp_ifap, *new_sctp_ifap;
    struct sctp_ifalist *hash_addr_head;
    struct sctp_ifnlist *hash_ifn_head;
    uint32_t hash_of_addr;
    int new_ifn_af = 0;

#ifdef SCTP_DEBUG
    SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id);
    SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
#endif
    SCTP_MALLOC(new_sctp_ifnp, struct sctp_ifn *,
                sizeof(struct sctp_ifn), SCTP_M_IFN);
    if (new_sctp_ifnp == NULL) {
#ifdef INVARIANTS
        panic("No memory for IFN");
#endif
        return (NULL);
    }
    SCTP_MALLOC(new_sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA);
    if (new_sctp_ifap == NULL) {
#ifdef INVARIANTS
        panic("No memory for IFA");
#endif
        SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
        return (NULL);
    }

    SCTP_IPI_ADDR_WLOCK();
    sctp_ifnp = sctp_find_ifn(ifn, ifn_index);
    if (sctp_ifnp) {
        vrf = sctp_ifnp->vrf;
    } else {
        vrf = sctp_find_vrf(vrf_id);
        if (vrf == NULL) {
            vrf = sctp_allocate_vrf(vrf_id);
            if (vrf == NULL) {
                SCTP_IPI_ADDR_WUNLOCK();
                SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
                SCTP_FREE(new_sctp_ifap, SCTP_M_IFA);
                return (NULL);
            }
        }
    }
    if (sctp_ifnp == NULL) {
        /* build one and add it, can't hold lock
         * until after malloc done though.
         */
        sctp_ifnp = new_sctp_ifnp;
        new_sctp_ifnp = NULL;
        memset(sctp_ifnp, 0, sizeof(struct sctp_ifn));
        sctp_ifnp->ifn_index = ifn_index;
        sctp_ifnp->ifn_p = ifn;
        sctp_ifnp->ifn_type = ifn_type;
        sctp_ifnp->refcount = 0;
        sctp_ifnp->vrf = vrf;
        atomic_add_int(&vrf->refcount, 1);
        sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index);
        if (if_name != NULL) {
            SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name);
        } else {
            SCTP_SNPRINTF(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown");
        }
        hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
        LIST_INIT(&sctp_ifnp->ifalist);
        LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket);
        LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
        atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
        new_ifn_af = 1;
    }
    sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
    if (sctp_ifap) {
        /* Hmm, it already exists? */
        if ((sctp_ifap->ifn_p) &&
            (sctp_ifap->ifn_p->ifn_index == ifn_index)) {
            SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n",
                    sctp_ifap->ifn_p->ifn_name, ifn_index,
                    (void *)sctp_ifap);
            if (new_ifn_af) {
                /* Remove the created one that we don't want */
                sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED);
            }
            if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) {
                /* easy to solve, just switch back to active */
                SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n");
                sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
                sctp_ifap->ifn_p = sctp_ifnp;
                atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
            }
        exit_stage_left:
            /* common exit: release lock, free unused pre-allocations */
            SCTP_IPI_ADDR_WUNLOCK();
            if (new_sctp_ifnp != NULL) {
                SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
            }
            SCTP_FREE(new_sctp_ifap, SCTP_M_IFA);
            return (sctp_ifap);
        } else {
            if (sctp_ifap->ifn_p) {
                /*
                 * The last IFN gets the address, remove the
                 * old one
                 */
                SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n",
                        (void *)sctp_ifap, sctp_ifap->ifn_p->ifn_name,
                        sctp_ifap->ifn_p->ifn_index, if_name,
                        ifn_index);
                /* remove the address from the old ifn */
                sctp_remove_ifa_from_ifn(sctp_ifap);
                /* move the address over to the new ifn */
                sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
                goto exit_stage_left;
            } else {
                /* repair ifnp which was NULL ? */
                sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
                SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n",
                        (void *)sctp_ifnp, (void *)sctp_ifap);
                sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
            }
            goto exit_stage_left;
        }
    }
    /* Brand new address: initialize the pre-allocated ifa. */
    sctp_ifap = new_sctp_ifap;
    memset(sctp_ifap, 0, sizeof(struct sctp_ifa));
    sctp_ifap->ifn_p = sctp_ifnp;
    atomic_add_int(&sctp_ifnp->refcount, 1);
    sctp_ifap->vrf_id = vrf_id;
    sctp_ifap->ifa = ifa;
#ifdef HAVE_SA_LEN
    memcpy(&sctp_ifap->address, addr, addr->sa_len);
#else
    switch (addr->sa_family) {
#ifdef INET
    case AF_INET:
        memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in));
        break;
#endif
#ifdef INET6
    case AF_INET6:
        memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in6));
        break;
#endif
#if defined(__Userspace__)
    case AF_CONN:
        memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_conn));
        break;
#endif
    default:
        /* TSNH */
        break;
    }
#endif
    sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE;
    sctp_ifap->flags = ifa_flags;
    /* Set scope */
    switch (sctp_ifap->address.sa.sa_family) {
#ifdef INET
    case AF_INET:
    {
        struct sockaddr_in *sin;

        sin = &sctp_ifap->address.sin;
        if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
            (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
            sctp_ifap->src_is_loop = 1;
        }
        if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
            sctp_ifap->src_is_priv = 1;
        }
        sctp_ifnp->num_v4++;
        if (new_ifn_af)
            new_ifn_af = AF_INET;
        break;
    }
#endif
#ifdef INET6
    case AF_INET6:
    {
        /* ok to use deprecated addresses? */
        struct sockaddr_in6 *sin6;

        sin6 = &sctp_ifap->address.sin6;
        if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
            (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
            sctp_ifap->src_is_loop = 1;
        }
        if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
            sctp_ifap->src_is_priv = 1;
        }
        sctp_ifnp->num_v6++;
        if (new_ifn_af)
            new_ifn_af = AF_INET6;
        break;
    }
#endif
#if defined(__Userspace__)
    case AF_CONN:
        if (new_ifn_af)
            new_ifn_af = AF_CONN;
        break;
#endif
    default:
        new_ifn_af = 0;
        break;
    }
    hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa);

    if ((sctp_ifap->src_is_priv == 0) &&
        (sctp_ifap->src_is_loop == 0)) {
        sctp_ifap->src_is_glob = 1;
    }
    hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
    LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket);
    sctp_ifap->refcount = 1;
    LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
    sctp_ifnp->ifa_count++;
    vrf->total_ifa_count++;
    atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
    if (new_ifn_af) {
        sctp_ifnp->registered_af = new_ifn_af;
    }
    SCTP_IPI_ADDR_WUNLOCK();
    if (new_sctp_ifnp != NULL) {
        SCTP_FREE(new_sctp_ifnp, SCTP_M_IFN);
    }

    if (dynamic_add) {
        /* Bump up the refcount so that when the timer
         * completes it will drop back down.
         */
        struct sctp_laddr *wi;

        atomic_add_int(&sctp_ifap->refcount, 1);
        wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
        if (wi == NULL) {
            /*
             * Gak, what can we do? We have lost an address
             * change can you say HOSED?
             */
            SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
            /* Opps, must decrement the count */
            sctp_del_addr_from_vrf(vrf_id, addr, ifn_index,
                                   if_name);
            return (NULL);
        }
        SCTP_INCR_LADDR_COUNT();
        memset(wi, 0, sizeof(*wi));
        (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
        wi->ifa = sctp_ifap;
        wi->action = SCTP_ADD_IP_ADDRESS;

        SCTP_WQ_ADDR_LOCK();
        LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
                         (struct sctp_inpcb *)NULL,
                         (struct sctp_tcb *)NULL,
                         (struct sctp_nets *)NULL);
        SCTP_WQ_ADDR_UNLOCK();
    } else {
        /* it's ready for use */
        sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
    }
    return (sctp_ifap);
}

/*
 * Remove a local address from a VRF.  The delete is validated against
 * the interface name (preferred) or ifn_index; on success the ifa is
 * unhashed, detached from its ifn, and a SCTP_DEL_IP_ADDRESS work item
 * is queued on the ADDR work queue (the worker eventually drops the
 * final reference).
 */
void
sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
                       uint32_t ifn_index, const char *if_name)
{
    struct sctp_vrf *vrf;
    struct sctp_ifa *sctp_ifap = NULL;

    SCTP_IPI_ADDR_WLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
        goto out_now;
    }

#ifdef SCTP_DEBUG
    SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id);
    SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
#endif
    sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
    if (sctp_ifap) {
        /* Validate the delete */
        if (sctp_ifap->ifn_p) {
            int valid = 0;
            /*-
             * The name has priority over the ifn_index
             * if its given.
             */
            if (if_name) {
                if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) {
                    /* They match its a correct delete */
                    valid = 1;
                }
            }
            if (!valid) {
                /* last ditch check ifn_index */
                if (ifn_index == sctp_ifap->ifn_p->ifn_index) {
                    valid = 1;
                }
            }
            if (!valid) {
                SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n",
                        ifn_index, ((if_name == NULL) ? "NULL" : if_name));
                SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n",
                        sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name);
                SCTP_IPI_ADDR_WUNLOCK();
                return;
            }
        }
        SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap);
        sctp_ifap->localifa_flags &= SCTP_ADDR_VALID;
        /*
         * We don't set the flag. This means that the structure will
         * hang around in EP's that have bound specific to it until
         * they close. This gives us TCP like behavior if someone
         * removes an address (or for that matter adds it right back).
         */
        /* sctp_ifap->localifa_flags |= SCTP_BEING_DELETED; */
        vrf->total_ifa_count--;
        LIST_REMOVE(sctp_ifap, next_bucket);
        sctp_remove_ifa_from_ifn(sctp_ifap);
    }
#ifdef SCTP_DEBUG
    else {
        SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:",
                ifn_index);
        SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
    }
#endif

out_now:
    SCTP_IPI_ADDR_WUNLOCK();
    if (sctp_ifap) {
        struct sctp_laddr *wi;

        wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
        if (wi == NULL) {
            /*
             * Gak, what can we do? We have lost an address
             * change can you say HOSED?
             */
            SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");

            /* Oops, must decrement the count */
            sctp_free_ifa(sctp_ifap);
            return;
        }
        SCTP_INCR_LADDR_COUNT();
        memset(wi, 0, sizeof(*wi));
        (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
        wi->ifa = sctp_ifap;
        wi->action = SCTP_DEL_IP_ADDRESS;
        SCTP_WQ_ADDR_LOCK();
        /*
         * Should this really be a tailq?
As it is we will process the
         * newest first :-0
         */
        LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
                         (struct sctp_inpcb *)NULL,
                         (struct sctp_tcb *)NULL,
                         (struct sctp_nets *)NULL);
        SCTP_WQ_ADDR_UNLOCK();
    }
    return;
}

/*
 * Check whether the address 'to' is one of the stcb's own usable local
 * addresses, honoring the association's scoping rules (loopback,
 * private-v4, link/site-local v6) and, on FreeBSD kernel builds, jail
 * restrictions.  For bound-all endpoints the whole VRF address list is
 * scanned; otherwise only the endpoint's bound address list.
 * Restricted addresses are skipped unless they are still pending an
 * ASCONF-add.  Returns 1 when owned, 0 otherwise.
 */
static int
sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
{
    int loopback_scope;
#if defined(INET)
    int ipv4_local_scope, ipv4_addr_legal;
#endif
#if defined(INET6)
    int local_scope, site_scope, ipv6_addr_legal;
#endif
#if defined(__Userspace__)
    int conn_addr_legal;
#endif
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa;

    /* Snapshot the association's address scope settings. */
    loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
    ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
    ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
    local_scope = stcb->asoc.scope.local_scope;
    site_scope = stcb->asoc.scope.site_scope;
    ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
#if defined(__Userspace__)
    conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
#endif

    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(stcb->asoc.vrf_id);
    if (vrf == NULL) {
        /* no vrf, no addresses */
        SCTP_IPI_ADDR_RUNLOCK();
        return (0);
    }

    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /* Bound-all: every usable address in the VRF is "ours". */
        LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
            if ((loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                continue;
            }
            LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
                    (!sctp_is_addr_pending(stcb, sctp_ifa))) {
                    /* We allow pending addresses, where we
                     * have sent an asconf-add to be considered
                     * valid.
                     */
                    continue;
                }
                if (sctp_ifa->address.sa.sa_family != to->sa_family) {
                    continue;
                }
                switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
                case AF_INET:
                    if (ipv4_addr_legal) {
                        struct sockaddr_in *sin, *rsin;

                        sin = &sctp_ifa->address.sin;
                        rsin = (struct sockaddr_in *)to;
                        if ((ipv4_local_scope == 0) &&
                            IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
                            continue;
                        }
#if defined(__FreeBSD__) && !defined(__Userspace__)
                        if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
                                             &sin->sin_addr) != 0) {
                            continue;
                        }
#endif
                        if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
                            SCTP_IPI_ADDR_RUNLOCK();
                            return (1);
                        }
                    }
                    break;
#endif
#ifdef INET6
                case AF_INET6:
                    if (ipv6_addr_legal) {
                        struct sockaddr_in6 *sin6, *rsin6;
#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
                        struct sockaddr_in6 lsa6;
#endif
                        sin6 = &sctp_ifa->address.sin6;
                        rsin6 = (struct sockaddr_in6 *)to;
#if defined(__FreeBSD__) && !defined(__Userspace__)
                        if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
                                             &sin6->sin6_addr) != 0) {
                            continue;
                        }
#endif
                        if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
                            if (local_scope == 0)
                                continue;
#if defined(SCTP_EMBEDDED_V6_SCOPE)
                            /* recover the scope id before comparing */
                            if (sin6->sin6_scope_id == 0) {
#ifdef SCTP_KAME
                                if (sa6_recoverscope(sin6) != 0)
                                    continue;
#else
                                lsa6 = *sin6;
                                if (in6_recoverscope(&lsa6,
                                                     &lsa6.sin6_addr,
                                                     NULL))
                                    continue;
                                sin6 = &lsa6;
#endif /* SCTP_KAME */
                            }
#endif /* SCTP_EMBEDDED_V6_SCOPE */
                        }
                        if ((site_scope == 0) &&
                            (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                            continue;
                        }
                        if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
                            SCTP_IPI_ADDR_RUNLOCK();
                            return (1);
                        }
                    }
                    break;
#endif
#if defined(__Userspace__)
                case AF_CONN:
                    if (conn_addr_legal) {
                        struct sockaddr_conn *sconn, *rsconn;

                        sconn = &sctp_ifa->address.sconn;
                        rsconn = (struct sockaddr_conn *)to;
                        if (sconn->sconn_addr == rsconn->sconn_addr) {
                            SCTP_IPI_ADDR_RUNLOCK();
                            return (1);
                        }
                    }
                    break;
#endif
                default:
                    /* TSNH */
                    break;
                }
            }
        }
    } else {
        /* Bound-specific: only the endpoint's own address list counts. */
        struct sctp_laddr *laddr;

        LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
                SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
                continue;
            }
            if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
                (!sctp_is_addr_pending(stcb, laddr->ifa))) {
                /* We allow pending addresses, where we
                 * have sent an asconf-add to be considered
                 * valid.
                 */
                continue;
            }
            if (laddr->ifa->address.sa.sa_family != to->sa_family) {
                continue;
            }
            switch (to->sa_family) {
#ifdef INET
            case AF_INET:
            {
                struct sockaddr_in *sin, *rsin;

                sin = &laddr->ifa->address.sin;
                rsin = (struct sockaddr_in *)to;
                if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
                    SCTP_IPI_ADDR_RUNLOCK();
                    return (1);
                }
                break;
            }
#endif
#ifdef INET6
            case AF_INET6:
            {
                struct sockaddr_in6 *sin6, *rsin6;

                sin6 = &laddr->ifa->address.sin6;
                rsin6 = (struct sockaddr_in6 *)to;
                if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
                    SCTP_IPI_ADDR_RUNLOCK();
                    return (1);
                }
                break;
            }

#endif
#if defined(__Userspace__)
            case AF_CONN:
            {
                struct sockaddr_conn *sconn, *rsconn;

                sconn = &laddr->ifa->address.sconn;
                rsconn = (struct sockaddr_conn *)to;
                if (sconn->sconn_addr == rsconn->sconn_addr) {
                    SCTP_IPI_ADDR_RUNLOCK();
                    return (1);
                }
                break;
            }
#endif
            default:
                /* TSNH */
                break;
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (0);
}

static struct
sctp_tcb * 1126 sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, 1127 struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id) 1128 { 1129 /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */ 1130 /* 1131 * If we support the TCP model, then we must now dig through to see 1132 * if we can find our endpoint in the list of tcp ep's. 1133 */ 1134 uint16_t lport, rport; 1135 struct sctppcbhead *ephead; 1136 struct sctp_inpcb *inp; 1137 struct sctp_laddr *laddr; 1138 struct sctp_tcb *stcb; 1139 struct sctp_nets *net; 1140 #ifdef SCTP_MVRF 1141 int fnd, i; 1142 #endif 1143 1144 if ((to == NULL) || (from == NULL)) { 1145 return (NULL); 1146 } 1147 1148 switch (to->sa_family) { 1149 #ifdef INET 1150 case AF_INET: 1151 if (from->sa_family == AF_INET) { 1152 lport = ((struct sockaddr_in *)to)->sin_port; 1153 rport = ((struct sockaddr_in *)from)->sin_port; 1154 } else { 1155 return (NULL); 1156 } 1157 break; 1158 #endif 1159 #ifdef INET6 1160 case AF_INET6: 1161 if (from->sa_family == AF_INET6) { 1162 lport = ((struct sockaddr_in6 *)to)->sin6_port; 1163 rport = ((struct sockaddr_in6 *)from)->sin6_port; 1164 } else { 1165 return (NULL); 1166 } 1167 break; 1168 #endif 1169 #if defined(__Userspace__) 1170 case AF_CONN: 1171 if (from->sa_family == AF_CONN) { 1172 lport = ((struct sockaddr_conn *)to)->sconn_port; 1173 rport = ((struct sockaddr_conn *)from)->sconn_port; 1174 } else { 1175 return (NULL); 1176 } 1177 break; 1178 #endif 1179 default: 1180 return (NULL); 1181 } 1182 ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; 1183 /* 1184 * Ok now for each of the guys in this bucket we must look and see: 1185 * - Does the remote port match. - Does there single association's 1186 * addresses match this address (to). If so we update p_ep to point 1187 * to this ep and return the tcb from it. 
1188 */ 1189 LIST_FOREACH(inp, ephead, sctp_hash) { 1190 SCTP_INP_RLOCK(inp); 1191 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1192 SCTP_INP_RUNLOCK(inp); 1193 continue; 1194 } 1195 if (lport != inp->sctp_lport) { 1196 SCTP_INP_RUNLOCK(inp); 1197 continue; 1198 } 1199 #if defined(__FreeBSD__) && !defined(__Userspace__) 1200 switch (to->sa_family) { 1201 #ifdef INET 1202 case AF_INET: 1203 { 1204 struct sockaddr_in *sin; 1205 1206 sin = (struct sockaddr_in *)to; 1207 if (prison_check_ip4(inp->ip_inp.inp.inp_cred, 1208 &sin->sin_addr) != 0) { 1209 SCTP_INP_RUNLOCK(inp); 1210 continue; 1211 } 1212 break; 1213 } 1214 #endif 1215 #ifdef INET6 1216 case AF_INET6: 1217 { 1218 struct sockaddr_in6 *sin6; 1219 1220 sin6 = (struct sockaddr_in6 *)to; 1221 if (prison_check_ip6(inp->ip_inp.inp.inp_cred, 1222 &sin6->sin6_addr) != 0) { 1223 SCTP_INP_RUNLOCK(inp); 1224 continue; 1225 } 1226 break; 1227 } 1228 #endif 1229 default: 1230 SCTP_INP_RUNLOCK(inp); 1231 continue; 1232 } 1233 #endif 1234 #ifdef SCTP_MVRF 1235 fnd = 0; 1236 for (i = 0; i < inp->num_vrfs; i++) { 1237 if (inp->m_vrf_ids[i] == vrf_id) { 1238 fnd = 1; 1239 break; 1240 } 1241 } 1242 if (fnd == 0) { 1243 SCTP_INP_RUNLOCK(inp); 1244 continue; 1245 } 1246 #else 1247 if (inp->def_vrf_id != vrf_id) { 1248 SCTP_INP_RUNLOCK(inp); 1249 continue; 1250 } 1251 #endif 1252 /* check to see if the ep has one of the addresses */ 1253 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 1254 /* We are NOT bound all, so look further */ 1255 int match = 0; 1256 1257 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1258 if (laddr->ifa == NULL) { 1259 SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__); 1260 continue; 1261 } 1262 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1263 SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n"); 1264 continue; 1265 } 1266 if (laddr->ifa->address.sa.sa_family == 1267 to->sa_family) { 1268 /* see if it matches */ 1269 #ifdef INET 1270 if (from->sa_family == AF_INET) { 
1271 struct sockaddr_in *intf_addr, *sin; 1272 1273 intf_addr = &laddr->ifa->address.sin; 1274 sin = (struct sockaddr_in *)to; 1275 if (sin->sin_addr.s_addr == 1276 intf_addr->sin_addr.s_addr) { 1277 match = 1; 1278 break; 1279 } 1280 } 1281 #endif 1282 #ifdef INET6 1283 if (from->sa_family == AF_INET6) { 1284 struct sockaddr_in6 *intf_addr6; 1285 struct sockaddr_in6 *sin6; 1286 1287 sin6 = (struct sockaddr_in6 *) 1288 to; 1289 intf_addr6 = &laddr->ifa->address.sin6; 1290 1291 if (SCTP6_ARE_ADDR_EQUAL(sin6, 1292 intf_addr6)) { 1293 match = 1; 1294 break; 1295 } 1296 } 1297 #endif 1298 #if defined(__Userspace__) 1299 if (from->sa_family == AF_CONN) { 1300 struct sockaddr_conn *intf_addr, *sconn; 1301 1302 intf_addr = &laddr->ifa->address.sconn; 1303 sconn = (struct sockaddr_conn *)to; 1304 if (sconn->sconn_addr == 1305 intf_addr->sconn_addr) { 1306 match = 1; 1307 break; 1308 } 1309 } 1310 #endif 1311 } 1312 } 1313 if (match == 0) { 1314 /* This endpoint does not have this address */ 1315 SCTP_INP_RUNLOCK(inp); 1316 continue; 1317 } 1318 } 1319 /* 1320 * Ok if we hit here the ep has the address, does it hold 1321 * the tcb? 1322 */ 1323 /* XXX: Why don't we TAILQ_FOREACH through sctp_asoc_list? */ 1324 stcb = LIST_FIRST(&inp->sctp_asoc_list); 1325 if (stcb == NULL) { 1326 SCTP_INP_RUNLOCK(inp); 1327 continue; 1328 } 1329 SCTP_TCB_LOCK(stcb); 1330 if (!sctp_does_stcb_own_this_addr(stcb, to)) { 1331 SCTP_TCB_UNLOCK(stcb); 1332 SCTP_INP_RUNLOCK(inp); 1333 continue; 1334 } 1335 if (stcb->rport != rport) { 1336 /* remote port does not match. */ 1337 SCTP_TCB_UNLOCK(stcb); 1338 SCTP_INP_RUNLOCK(inp); 1339 continue; 1340 } 1341 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1342 SCTP_TCB_UNLOCK(stcb); 1343 SCTP_INP_RUNLOCK(inp); 1344 continue; 1345 } 1346 if (!sctp_does_stcb_own_this_addr(stcb, to)) { 1347 SCTP_TCB_UNLOCK(stcb); 1348 SCTP_INP_RUNLOCK(inp); 1349 continue; 1350 } 1351 /* Does this TCB have a matching address? 
*/ 1352 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1353 if (net->ro._l_addr.sa.sa_family != from->sa_family) { 1354 /* not the same family, can't be a match */ 1355 continue; 1356 } 1357 switch (from->sa_family) { 1358 #ifdef INET 1359 case AF_INET: 1360 { 1361 struct sockaddr_in *sin, *rsin; 1362 1363 sin = (struct sockaddr_in *)&net->ro._l_addr; 1364 rsin = (struct sockaddr_in *)from; 1365 if (sin->sin_addr.s_addr == 1366 rsin->sin_addr.s_addr) { 1367 /* found it */ 1368 if (netp != NULL) { 1369 *netp = net; 1370 } 1371 /* Update the endpoint pointer */ 1372 *inp_p = inp; 1373 SCTP_INP_RUNLOCK(inp); 1374 return (stcb); 1375 } 1376 break; 1377 } 1378 #endif 1379 #ifdef INET6 1380 case AF_INET6: 1381 { 1382 struct sockaddr_in6 *sin6, *rsin6; 1383 1384 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1385 rsin6 = (struct sockaddr_in6 *)from; 1386 if (SCTP6_ARE_ADDR_EQUAL(sin6, 1387 rsin6)) { 1388 /* found it */ 1389 if (netp != NULL) { 1390 *netp = net; 1391 } 1392 /* Update the endpoint pointer */ 1393 *inp_p = inp; 1394 SCTP_INP_RUNLOCK(inp); 1395 return (stcb); 1396 } 1397 break; 1398 } 1399 #endif 1400 #if defined(__Userspace__) 1401 case AF_CONN: 1402 { 1403 struct sockaddr_conn *sconn, *rsconn; 1404 1405 sconn = (struct sockaddr_conn *)&net->ro._l_addr; 1406 rsconn = (struct sockaddr_conn *)from; 1407 if (sconn->sconn_addr == rsconn->sconn_addr) { 1408 /* found it */ 1409 if (netp != NULL) { 1410 *netp = net; 1411 } 1412 /* Update the endpoint pointer */ 1413 *inp_p = inp; 1414 SCTP_INP_RUNLOCK(inp); 1415 return (stcb); 1416 } 1417 break; 1418 } 1419 #endif 1420 default: 1421 /* TSNH */ 1422 break; 1423 } 1424 } 1425 SCTP_TCB_UNLOCK(stcb); 1426 SCTP_INP_RUNLOCK(inp); 1427 } 1428 return (NULL); 1429 } 1430 1431 /* 1432 * rules for use 1433 * 1434 * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an 1435 * stcb, both will be locked (locked_tcb and stcb) but decrement will be done 1436 * (if locked == NULL). 
3) Decrement happens on return ONLY if locked ==
 * NULL.
 */

/*
 * Find the association (stcb) on endpoint *inp_p whose peer-address list
 * contains 'remote' (and which owns 'local', when local is non-NULL).
 * On success the stcb is returned locked and, when netp is non-NULL,
 * *netp points at the matching remote-address entry.  The locking and
 * refcount contract around 'locked_tcb' is spelled out in the "rules for
 * use" comment above.
 */
struct sctp_tcb *
sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
    struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
{
	struct sctpasochead *head;
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb = NULL;
	struct sctp_nets *net;
	uint16_t rport;

	inp = *inp_p;
	/* Pull the peer port out of whichever sockaddr family we were given. */
	switch (remote->sa_family) {
#ifdef INET
	case AF_INET:
		rport = (((struct sockaddr_in *)remote)->sin_port);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		rport = (((struct sockaddr_in6 *)remote)->sin6_port);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		rport = (((struct sockaddr_conn *)remote)->sconn_port);
		break;
#endif
	default:
		return (NULL);
	}
	if (locked_tcb) {
		/*
		 * UN-lock so we can do proper locking here this occurs when
		 * called from load_addresses_from_init.
		 */
		atomic_add_int(&locked_tcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(locked_tcb);
	}
	SCTP_INP_INFO_RLOCK();
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/*-
		 * Now either this guy is our listener or it's the
		 * connector. If it is the one that issued the connect, then
		 * it's only chance is to be the first TCB in the list. If
		 * it is the acceptor, then do the special_lookup to hash
		 * and find the real inp.
		 */
		if ((inp->sctp_socket) && SCTP_IS_LISTENING(inp)) {
			/* to is peer addr, from is my addr */
#ifndef SCTP_MVRF
			stcb = sctp_tcb_special_locate(inp_p, remote, local,
			    netp, inp->def_vrf_id);
			if ((stcb != NULL) && (locked_tcb == NULL)) {
				/* we have a locked tcb, lower refcount */
				SCTP_INP_DECR_REF(inp);
			}
			if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
				SCTP_INP_RLOCK(locked_tcb->sctp_ep);
				SCTP_TCB_LOCK(locked_tcb);
				atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
				SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
			}
#else
			/*-
			 * MVRF is tricky, we must look in every VRF
			 * the endpoint has.
			 */
			int i;

			for (i = 0; i < inp->num_vrfs; i++) {
				stcb = sctp_tcb_special_locate(inp_p, remote, local,
				    netp, inp->m_vrf_ids[i]);
				if ((stcb != NULL) && (locked_tcb == NULL)) {
					/* we have a locked tcb, lower refcount */
					SCTP_INP_DECR_REF(inp);
					break;
				}
				if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
					SCTP_INP_RLOCK(locked_tcb->sctp_ep);
					SCTP_TCB_LOCK(locked_tcb);
					atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
					SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
					break;
				}
			}
#endif
			SCTP_INP_INFO_RUNLOCK();
			return (stcb);
		} else {
			/*
			 * TCP-model connector: it can have at most one
			 * association and it must be the first TCB in the
			 * endpoint's list.
			 */
			SCTP_INP_WLOCK(inp);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
				goto null_return;
			}
			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			if (stcb == NULL) {
				goto null_return;
			}
			SCTP_TCB_LOCK(stcb);

			if (stcb->rport != rport) {
				/* remote port does not match. */
				SCTP_TCB_UNLOCK(stcb);
				goto null_return;
			}
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				SCTP_TCB_UNLOCK(stcb);
				goto null_return;
			}
			if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
				SCTP_TCB_UNLOCK(stcb);
				goto null_return;
			}
			/* now look at the list of remote addresses */
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
#ifdef INVARIANTS
				if (net == (TAILQ_NEXT(net, sctp_next))) {
					panic("Corrupt net list");
				}
#endif
				if (net->ro._l_addr.sa.sa_family !=
				    remote->sa_family) {
					/* not the same family */
					continue;
				}
				switch (remote->sa_family) {
#ifdef INET
				case AF_INET:
				{
					struct sockaddr_in *sin, *rsin;

					sin = (struct sockaddr_in *)
					    &net->ro._l_addr;
					rsin = (struct sockaddr_in *)remote;
					if (sin->sin_addr.s_addr ==
					    rsin->sin_addr.s_addr) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}

						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
#ifdef INET6
				case AF_INET6:
				{
					struct sockaddr_in6 *sin6, *rsin6;

					sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
					rsin6 = (struct sockaddr_in6 *)remote;
					if (SCTP6_ARE_ADDR_EQUAL(sin6,
					    rsin6)) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}
						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
#if defined(__Userspace__)
				case AF_CONN:
				{
					struct sockaddr_conn *sconn, *rsconn;

					sconn = (struct sockaddr_conn *)&net->ro._l_addr;
					rsconn = (struct sockaddr_conn *)remote;
					if (sconn->sconn_addr == rsconn->sconn_addr) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}
						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
				default:
					/* TSNH */
					break;
				}
			}
			SCTP_TCB_UNLOCK(stcb);
		}
	} else {
		/* UDP model: hash on the remote port and scan the bucket. */
		SCTP_INP_WLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			goto null_return;
		}
		head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
		    inp->sctp_hashmark)];
		LIST_FOREACH(stcb, head, sctp_tcbhash) {
			if (stcb->rport != rport) {
				/* remote port does not match */
				continue;
			}
			SCTP_TCB_LOCK(stcb);
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
				SCTP_TCB_UNLOCK(stcb);
				continue;
			}
			/* now look at the list of remote addresses */
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
#ifdef INVARIANTS
				if (net == (TAILQ_NEXT(net, sctp_next))) {
					panic("Corrupt net list");
				}
#endif
				if (net->ro._l_addr.sa.sa_family !=
				    remote->sa_family) {
					/* not the same family */
					continue;
				}
				switch (remote->sa_family) {
#ifdef INET
				case AF_INET:
				{
					struct sockaddr_in *sin, *rsin;

					sin = (struct sockaddr_in *)
					    &net->ro._l_addr;
					rsin = (struct sockaddr_in *)remote;
					if (sin->sin_addr.s_addr ==
					    rsin->sin_addr.s_addr) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}
						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
#ifdef INET6
				case AF_INET6:
				{
					struct sockaddr_in6 *sin6, *rsin6;

					sin6 = (struct sockaddr_in6 *)
					    &net->ro._l_addr;
					rsin6 = (struct sockaddr_in6 *)remote;
					if (SCTP6_ARE_ADDR_EQUAL(sin6,
					    rsin6)) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}
						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
#if defined(__Userspace__)
				case AF_CONN:
				{
					struct sockaddr_conn *sconn, *rsconn;

					sconn = (struct sockaddr_conn *)&net->ro._l_addr;
					rsconn = (struct sockaddr_conn *)remote;
					if (sconn->sconn_addr == rsconn->sconn_addr) {
						/* found it */
						if (netp != NULL) {
							*netp = net;
						}
						if (locked_tcb == NULL) {
							SCTP_INP_DECR_REF(inp);
						} else if (locked_tcb != stcb) {
							SCTP_TCB_LOCK(locked_tcb);
						}
						if (locked_tcb) {
							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
						}
						SCTP_INP_WUNLOCK(inp);
						SCTP_INP_INFO_RUNLOCK();
						return (stcb);
					}
					break;
				}
#endif
				default:
					/* TSNH */
					break;
				}
			}
			SCTP_TCB_UNLOCK(stcb);
		}
	}
null_return:
	/* clean up for returning null */
	if (locked_tcb) {
		SCTP_TCB_LOCK(locked_tcb);
		atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
	}
	SCTP_INP_WUNLOCK(inp);
	SCTP_INP_INFO_RUNLOCK();
	/* not found */
	return (NULL);
}

/*
 * Find an association for a specific endpoint using the association id given
 * out in the COMM_UP notification
 */
struct sctp_tcb *
sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
{
	/*
	 * Use my the assoc_id to find a endpoint
	 */
	/* NOTE(review): the "_locked" suffix and the wrapper below suggest the
	 * caller must hold the INP lock (sctp_findassociation_ep_asocid takes
	 * SCTP_INP_RLOCK around this call) -- confirm against other callers.
	 */
	struct sctpasochead *head;
	struct sctp_tcb *stcb;
	uint32_t id;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
		SCTP_PRINTF("TSNH ep_associd0\n");
		return (NULL);
	}
	id = (uint32_t)asoc_id;
	head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
	if (head == NULL) {
		/* invalid id TSNH */
		SCTP_PRINTF("TSNH ep_associd1\n");
		return (NULL);
	}
	LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
		if (stcb->asoc.assoc_id == id) {
			if (inp != stcb->sctp_ep) {
				/*
				 * some other guy has the same id active (id
				 * collision ??).
				 */
				SCTP_PRINTF("TSNH ep_associd2\n");
				continue;
			}
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				continue;
			}
			if (want_lock) {
				SCTP_TCB_LOCK(stcb);
			}
			return (stcb);
		}
	}
	return (NULL);
}

/*
 * Locking wrapper around sctp_findasoc_ep_asocid_locked(): takes the INP
 * read lock for the duration of the lookup.
 */
struct sctp_tcb *
sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
{
	struct sctp_tcb *stcb;

	SCTP_INP_RLOCK(inp);
	stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
	SCTP_INP_RUNLOCK(inp);
	return (stcb);
}

/*
 * Endpoint probe expects that the INP_INFO is locked.
 */
/*
 * Scan one endpoint hash bucket for an inp bound to (nam, lport) in vrf_id.
 * Pass one: endpoints bound to all addresses (wildcard bind).  Pass two:
 * endpoints bound to the specific address in 'nam'.  Returns the inp without
 * any lock held (both INP locks taken here are released before return).
 */
static struct sctp_inpcb *
sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
    uint16_t lport, uint32_t vrf_id)
{
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
	struct sockaddr_in6 *intf_addr6;
#endif
#if defined(__Userspace__)
	struct sockaddr_conn *sconn;
#endif
#ifdef SCTP_MVRF
	int i;
#endif
	int fnd;

#ifdef INET
	sin = NULL;
#endif
#ifdef INET6
	sin6 = NULL;
#endif
#if defined(__Userspace__)
	sconn = NULL;
#endif
	switch (nam->sa_family) {
#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)nam;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)nam;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		sconn = (struct sockaddr_conn *)nam;
		break;
#endif
	default:
		/* unsupported family */
		return (NULL);
	}

	if (head == NULL)
		return (NULL);

	/* Pass one: a wildcard-bound endpoint on this port wins outright. */
	LIST_FOREACH(inp, head, sctp_hash) {
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
		    (inp->sctp_lport == lport)) {
			/* got it */
			switch (nam->sa_family) {
#ifdef INET
			case AF_INET:
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
				    SCTP_IPV6_V6ONLY(inp)) {
					/* IPv4 on a IPv6 socket with ONLY IPv6 set */
					SCTP_INP_RUNLOCK(inp);
					continue;
				}
#if defined(__FreeBSD__) && !defined(__Userspace__)
				if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
				    &sin->sin_addr) != 0) {
					SCTP_INP_RUNLOCK(inp);
					continue;
				}
#endif
				break;
#endif
#ifdef INET6
			case AF_INET6:
				/* A V6 address and the endpoint is NOT bound V6 */
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
					SCTP_INP_RUNLOCK(inp);
					continue;
				}
#if defined(__FreeBSD__) && !defined(__Userspace__)
				if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
				    &sin6->sin6_addr) != 0) {
					SCTP_INP_RUNLOCK(inp);
					continue;
				}
#endif
				break;
#endif
			default:
				break;
			}
			/* does a VRF id match? */
			fnd = 0;
#ifdef SCTP_MVRF
			for (i = 0; i < inp->num_vrfs; i++) {
				if (inp->m_vrf_ids[i] == vrf_id) {
					fnd = 1;
					break;
				}
			}
#else
			if (inp->def_vrf_id == vrf_id)
				fnd = 1;
#endif

			SCTP_INP_RUNLOCK(inp);
			if (!fnd)
				continue;
			return (inp);
		}
		SCTP_INP_RUNLOCK(inp);
	}
	/* A wildcard search is pointless if no concrete address was given. */
	switch (nam->sa_family) {
#ifdef INET
	case AF_INET:
		if (sin->sin_addr.s_addr == INADDR_ANY) {
			/* Can't hunt for one that has no address specified */
			return (NULL);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/* Can't hunt for one that has no address specified */
			return (NULL);
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (sconn->sconn_addr == NULL) {
			return (NULL);
		}
		break;
#endif
	default:
		break;
	}
	/*
	 * ok, not bound to all so see if we can find a EP bound to this
	 * address.
	 */
	LIST_FOREACH(inp, head, sctp_hash) {
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		/*
		 * Ok this could be a likely candidate, look at all of its
		 * addresses
		 */
		if (inp->sctp_lport != lport) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		/* does a VRF id match? */
		fnd = 0;
#ifdef SCTP_MVRF
		for (i = 0; i < inp->num_vrfs; i++) {
			if (inp->m_vrf_ids[i] == vrf_id) {
				fnd = 1;
				break;
			}
		}
#else
		if (inp->def_vrf_id == vrf_id)
			fnd = 1;

#endif
		if (!fnd) {
			SCTP_INP_RUNLOCK(inp);
			continue;
		}
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
					__func__);
				continue;
			}
			SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ",
				(void *)laddr->ifa);
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
				SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n");
				continue;
			}
			if (laddr->ifa->address.sa.sa_family == nam->sa_family) {
				/* possible, see if it matches */
				switch (nam->sa_family) {
#ifdef INET
				case AF_INET:
#if defined(__APPLE__) && !defined(__Userspace__)
					if (sin == NULL) {
						/* TSNH */
						break;
					}
#endif
					if (sin->sin_addr.s_addr ==
					    laddr->ifa->address.sin.sin_addr.s_addr) {
						SCTP_INP_RUNLOCK(inp);
						return (inp);
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					intf_addr6 = &laddr->ifa->address.sin6;
					if (SCTP6_ARE_ADDR_EQUAL(sin6,
					    intf_addr6)) {
						SCTP_INP_RUNLOCK(inp);
						return (inp);
					}
					break;
#endif
#if defined(__Userspace__)
				case AF_CONN:
					if (sconn->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
						SCTP_INP_RUNLOCK(inp);
						return (inp);
					}
					break;
#endif
				}
			}
		}
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Check whether lport is already bound in vrf_id in a way that conflicts
 * with 'inp' (taking the v4/v6/V6ONLY binding rules into account).
 * Returns the conflicting endpoint, or NULL if the port is free for inp.
 */
static struct sctp_inpcb *
sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
{
	struct sctppcbhead *head;
	struct sctp_inpcb *t_inp;
#ifdef SCTP_MVRF
	int i;
#endif
	int fnd;

	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
	    SCTP_BASE_INFO(hashmark))];
	LIST_FOREACH(t_inp, head, sctp_hash) {
		if (t_inp->sctp_lport != lport) {
			continue;
		}
		/* is it in the VRF in question */
		fnd = 0;
#ifdef SCTP_MVRF
		/* NOTE(review): loop bound is inp->num_vrfs but the array
		 * indexed is t_inp->m_vrf_ids -- verify these are meant to
		 * differ; t_inp->num_vrfs may not equal inp->num_vrfs.
		 */
		for (i = 0; i < inp->num_vrfs; i++) {
			if (t_inp->m_vrf_ids[i] == vrf_id) {
				fnd = 1;
				break;
			}
		}
#else
		if (t_inp->def_vrf_id == vrf_id)
			fnd = 1;
#endif
		if (!fnd)
			continue;

		/* This one is in use. */
		/* check the v6/v4 binding issue */
		if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(t_inp)) {
			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
				/* collision in V6 space */
				return (t_inp);
			} else {
				/* inp is BOUND_V4 no conflict */
				continue;
			}
		} else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			/* t_inp is bound v4 and v6, conflict always */
			return (t_inp);
		} else {
			/* t_inp is bound only V4 */
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* no conflict */
				continue;
			}
			/* else fall through to conflict */
		}
		return (t_inp);
	}
	return (NULL);
}

/*
 * Move 'inp' from the TCP pool back to the main endpoint hash so it can
 * listen, pushing all non-listening endpoints on the same port into the
 * TCP-pool hash.  Requires port reuse to be enabled; caller must hold the
 * INP_INFO write lock and the INP write lock (asserted below).
 * Returns 0 on success, -1 if port reuse is off.
 */
int
sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp)
{
	/* For 1-2-1 with port reuse */
	struct sctppcbhead *head;
	struct sctp_inpcb *tinp, *ninp;

	SCTP_INP_INFO_WLOCK_ASSERT();
	SCTP_INP_WLOCK_ASSERT(inp);

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
		/* only works with port reuse on */
		return (-1);
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
		return (0);
	}
	/* Drop our own INP lock while we lock the other endpoints. */
	SCTP_INP_WUNLOCK(inp);
	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport,
	    SCTP_BASE_INFO(hashmark))];
	/* Kick out all non-listeners to the TCP hash */
	LIST_FOREACH_SAFE(tinp, head, sctp_hash, ninp) {
		if (tinp->sctp_lport != inp->sctp_lport) {
			continue;
		}
		if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			continue;
		}
		if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			continue;
		}
		if (SCTP_IS_LISTENING(tinp)) {
			continue;
		}
		SCTP_INP_WLOCK(tinp);
		LIST_REMOVE(tinp, sctp_hash);
		head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))];
		tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
		LIST_INSERT_HEAD(head, tinp, sctp_hash);
		SCTP_INP_WUNLOCK(tinp);
	}
	SCTP_INP_WLOCK(inp);
	/* Pull from where he was */
	LIST_REMOVE(inp, sctp_hash);
	inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL;
	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))];
	LIST_INSERT_HEAD(head, inp, sctp_hash);
	return (0);
}

/*
 * Find an endpoint bound to the address/port in 'nam' within vrf_id.
 * Searches the main hash and, when find_tcp_pool is set and nothing was
 * found, every TCP-pool bucket.  On success a reference is taken on the
 * returned inp (caller must drop it).  'have_lock' says whether the caller
 * already holds the INP_INFO read lock.
 */
struct sctp_inpcb *
sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
    uint32_t vrf_id)
{
	/*
	 * First we check the hash table to see if someone has this port
	 * bound with just the port.
	 */
	struct sctp_inpcb *inp;
	struct sctppcbhead *head;
	int lport;
	unsigned int i;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
#if defined(__Userspace__)
	struct sockaddr_conn *sconn;
#endif

	switch (nam->sa_family) {
#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)nam;
		lport = sin->sin_port;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)nam;
		lport = sin6->sin6_port;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		sconn = (struct sockaddr_conn *)nam;
		lport = sconn->sconn_port;
		break;
#endif
	default:
		return (NULL);
	}
	/*
	 * I could cheat here and just cast to one of the types but we will
	 * do it right. It also provides the check against an Unsupported
	 * type too.
	 */
	/* Find the head of the ALLADDR chain */
	if (have_lock == 0) {
		SCTP_INP_INFO_RLOCK();
	}
	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
	    SCTP_BASE_INFO(hashmark))];
	inp = sctp_endpoint_probe(nam, head, lport, vrf_id);

	/*
	 * If the TCP model exists it could be that the main listening
	 * endpoint is gone but there still exists a connected socket for this
	 * guy. If so we can return the first one that we find. This may NOT
	 * be the correct one so the caller should be wary on the returned INP.
	 * Currently the only caller that sets find_tcp_pool is in bindx where
	 * we are verifying that a user CAN bind the address. He either
	 * has bound it already, or someone else has, or its open to bind,
	 * so this is good enough.
	 */
	if (inp == NULL && find_tcp_pool) {
		for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) {
			head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
			inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
			if (inp) {
				break;
			}
		}
	}
	if (inp) {
		SCTP_INP_INCR_REF(inp);
	}
	if (have_lock == 0) {
		SCTP_INP_INFO_RUNLOCK();
	}
	return (inp);
}

/*
 * Find an association for an endpoint with the pointer to whom you want to
 * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may
 * need to change the *to to some other struct like a mbuf...
2301 */ 2302 struct sctp_tcb * 2303 sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to, 2304 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool, 2305 uint32_t vrf_id) 2306 { 2307 struct sctp_inpcb *inp = NULL; 2308 struct sctp_tcb *stcb; 2309 2310 SCTP_INP_INFO_RLOCK(); 2311 if (find_tcp_pool) { 2312 if (inp_p != NULL) { 2313 stcb = sctp_tcb_special_locate(inp_p, from, to, netp, 2314 vrf_id); 2315 } else { 2316 stcb = sctp_tcb_special_locate(&inp, from, to, netp, 2317 vrf_id); 2318 } 2319 if (stcb != NULL) { 2320 SCTP_INP_INFO_RUNLOCK(); 2321 return (stcb); 2322 } 2323 } 2324 inp = sctp_pcb_findep(to, 0, 1, vrf_id); 2325 if (inp_p != NULL) { 2326 *inp_p = inp; 2327 } 2328 SCTP_INP_INFO_RUNLOCK(); 2329 if (inp == NULL) { 2330 return (NULL); 2331 } 2332 /* 2333 * ok, we have an endpoint, now lets find the assoc for it (if any) 2334 * we now place the source address or from in the to of the find 2335 * endpoint call. Since in reality this chain is used from the 2336 * inbound packet side. 2337 */ 2338 if (inp_p != NULL) { 2339 stcb = sctp_findassociation_ep_addr(inp_p, from, netp, to, 2340 NULL); 2341 } else { 2342 stcb = sctp_findassociation_ep_addr(&inp, from, netp, to, 2343 NULL); 2344 } 2345 return (stcb); 2346 } 2347 2348 /* 2349 * This routine will grub through the mbuf that is a INIT or INIT-ACK and 2350 * find all addresses that the sender has specified in any address list. Each 2351 * address will be used to lookup the TCB and see if one exits. 
2352 */ 2353 static struct sctp_tcb * 2354 sctp_findassociation_special_addr(struct mbuf *m, int offset, 2355 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, 2356 struct sockaddr *dst) 2357 { 2358 struct sctp_paramhdr *phdr, param_buf; 2359 #if defined(INET) || defined(INET6) 2360 struct sctp_tcb *stcb; 2361 uint16_t ptype; 2362 #endif 2363 uint16_t plen; 2364 #ifdef INET 2365 struct sockaddr_in sin4; 2366 #endif 2367 #ifdef INET6 2368 struct sockaddr_in6 sin6; 2369 #endif 2370 2371 #ifdef INET 2372 memset(&sin4, 0, sizeof(sin4)); 2373 #ifdef HAVE_SIN_LEN 2374 sin4.sin_len = sizeof(sin4); 2375 #endif 2376 sin4.sin_family = AF_INET; 2377 sin4.sin_port = sh->src_port; 2378 #endif 2379 #ifdef INET6 2380 memset(&sin6, 0, sizeof(sin6)); 2381 #ifdef HAVE_SIN6_LEN 2382 sin6.sin6_len = sizeof(sin6); 2383 #endif 2384 sin6.sin6_family = AF_INET6; 2385 sin6.sin6_port = sh->src_port; 2386 #endif 2387 2388 offset += sizeof(struct sctp_init_chunk); 2389 2390 phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); 2391 while (phdr != NULL) { 2392 /* now we must see if we want the parameter */ 2393 #if defined(INET) || defined(INET6) 2394 ptype = ntohs(phdr->param_type); 2395 #endif 2396 plen = ntohs(phdr->param_length); 2397 if (plen == 0) { 2398 break; 2399 } 2400 #ifdef INET 2401 if (ptype == SCTP_IPV4_ADDRESS && 2402 plen == sizeof(struct sctp_ipv4addr_param)) { 2403 /* Get the rest of the address */ 2404 struct sctp_ipv4addr_param ip4_param, *p4; 2405 2406 phdr = sctp_get_next_param(m, offset, 2407 (struct sctp_paramhdr *)&ip4_param, sizeof(ip4_param)); 2408 if (phdr == NULL) { 2409 return (NULL); 2410 } 2411 p4 = (struct sctp_ipv4addr_param *)phdr; 2412 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr)); 2413 /* look it up */ 2414 stcb = sctp_findassociation_ep_addr(inp_p, 2415 (struct sockaddr *)&sin4, netp, dst, NULL); 2416 if (stcb != NULL) { 2417 return (stcb); 2418 } 2419 } 2420 #endif 2421 #ifdef INET6 2422 if (ptype == 
SCTP_IPV6_ADDRESS && 2423 plen == sizeof(struct sctp_ipv6addr_param)) { 2424 /* Get the rest of the address */ 2425 struct sctp_ipv6addr_param ip6_param, *p6; 2426 2427 phdr = sctp_get_next_param(m, offset, 2428 (struct sctp_paramhdr *)&ip6_param, sizeof(ip6_param)); 2429 if (phdr == NULL) { 2430 return (NULL); 2431 } 2432 p6 = (struct sctp_ipv6addr_param *)phdr; 2433 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr)); 2434 /* look it up */ 2435 stcb = sctp_findassociation_ep_addr(inp_p, 2436 (struct sockaddr *)&sin6, netp, dst, NULL); 2437 if (stcb != NULL) { 2438 return (stcb); 2439 } 2440 } 2441 #endif 2442 offset += SCTP_SIZE32(plen); 2443 phdr = sctp_get_next_param(m, offset, ¶m_buf, 2444 sizeof(param_buf)); 2445 } 2446 return (NULL); 2447 } 2448 2449 static struct sctp_tcb * 2450 sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag, 2451 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport, 2452 uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag) 2453 { 2454 /* 2455 * Use my vtag to hash. If we find it we then verify the source addr 2456 * is in the assoc. If all goes well we save a bit on rec of a 2457 * packet. 
2458 */ 2459 struct sctpasochead *head; 2460 struct sctp_nets *net; 2461 struct sctp_tcb *stcb; 2462 #ifdef SCTP_MVRF 2463 unsigned int i; 2464 #endif 2465 2466 SCTP_INP_INFO_RLOCK(); 2467 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag, 2468 SCTP_BASE_INFO(hashasocmark))]; 2469 LIST_FOREACH(stcb, head, sctp_asocs) { 2470 SCTP_INP_RLOCK(stcb->sctp_ep); 2471 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 2472 SCTP_INP_RUNLOCK(stcb->sctp_ep); 2473 continue; 2474 } 2475 #ifdef SCTP_MVRF 2476 for (i = 0; i < stcb->sctp_ep->num_vrfs; i++) { 2477 if (stcb->sctp_ep->m_vrf_ids[i] == vrf_id) { 2478 break; 2479 } 2480 } 2481 if (i == stcb->sctp_ep->num_vrfs) { 2482 SCTP_INP_RUNLOCK(inp); 2483 continue; 2484 } 2485 #else 2486 if (stcb->sctp_ep->def_vrf_id != vrf_id) { 2487 SCTP_INP_RUNLOCK(stcb->sctp_ep); 2488 continue; 2489 } 2490 #endif 2491 SCTP_TCB_LOCK(stcb); 2492 SCTP_INP_RUNLOCK(stcb->sctp_ep); 2493 if (stcb->asoc.my_vtag == vtag) { 2494 /* candidate */ 2495 if (stcb->rport != rport) { 2496 SCTP_TCB_UNLOCK(stcb); 2497 continue; 2498 } 2499 if (stcb->sctp_ep->sctp_lport != lport) { 2500 SCTP_TCB_UNLOCK(stcb); 2501 continue; 2502 } 2503 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 2504 SCTP_TCB_UNLOCK(stcb); 2505 continue; 2506 } 2507 /* RRS:Need toaddr check here */ 2508 if (sctp_does_stcb_own_this_addr(stcb, to) == 0) { 2509 /* Endpoint does not own this address */ 2510 SCTP_TCB_UNLOCK(stcb); 2511 continue; 2512 } 2513 if (remote_tag) { 2514 /* If we have both vtags that's all we match on */ 2515 if (stcb->asoc.peer_vtag == remote_tag) { 2516 /* If both tags match we consider it conclusive 2517 * and check NO source/destination addresses 2518 */ 2519 goto conclusive; 2520 } 2521 } 2522 if (skip_src_check) { 2523 conclusive: 2524 if (from) { 2525 *netp = sctp_findnet(stcb, from); 2526 } else { 2527 *netp = NULL; /* unknown */ 2528 } 2529 if (inp_p) 2530 *inp_p = stcb->sctp_ep; 2531 SCTP_INP_INFO_RUNLOCK(); 2532 return (stcb); 
2533 } 2534 net = sctp_findnet(stcb, from); 2535 if (net) { 2536 /* yep its him. */ 2537 *netp = net; 2538 SCTP_STAT_INCR(sctps_vtagexpress); 2539 *inp_p = stcb->sctp_ep; 2540 SCTP_INP_INFO_RUNLOCK(); 2541 return (stcb); 2542 } else { 2543 /* 2544 * not him, this should only happen in rare 2545 * cases so I peg it. 2546 */ 2547 SCTP_STAT_INCR(sctps_vtagbogus); 2548 } 2549 } 2550 SCTP_TCB_UNLOCK(stcb); 2551 } 2552 SCTP_INP_INFO_RUNLOCK(); 2553 return (NULL); 2554 } 2555 2556 /* 2557 * Find an association with the pointer to the inbound IP packet. This can be 2558 * a IPv4 or IPv6 packet. 2559 */ 2560 struct sctp_tcb * 2561 sctp_findassociation_addr(struct mbuf *m, int offset, 2562 struct sockaddr *src, struct sockaddr *dst, 2563 struct sctphdr *sh, struct sctp_chunkhdr *ch, 2564 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) 2565 { 2566 struct sctp_tcb *stcb; 2567 struct sctp_inpcb *inp; 2568 2569 if (sh->v_tag) { 2570 /* we only go down this path if vtag is non-zero */ 2571 stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag), 2572 inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0); 2573 if (stcb) { 2574 return (stcb); 2575 } 2576 } 2577 2578 if (inp_p) { 2579 stcb = sctp_findassociation_addr_sa(src, dst, inp_p, netp, 2580 1, vrf_id); 2581 inp = *inp_p; 2582 } else { 2583 stcb = sctp_findassociation_addr_sa(src, dst, &inp, netp, 2584 1, vrf_id); 2585 } 2586 SCTPDBG(SCTP_DEBUG_PCB1, "stcb:%p inp:%p\n", (void *)stcb, (void *)inp); 2587 if (stcb == NULL && inp) { 2588 /* Found a EP but not this address */ 2589 if ((ch->chunk_type == SCTP_INITIATION) || 2590 (ch->chunk_type == SCTP_INITIATION_ACK)) { 2591 /*- 2592 * special hook, we do NOT return linp or an 2593 * association that is linked to an existing 2594 * association that is under the TCP pool (i.e. no 2595 * listener exists). The endpoint finding routine 2596 * will always find a listener before examining the 2597 * TCP pool. 
2598 */ 2599 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 2600 if (inp_p) { 2601 *inp_p = NULL; 2602 } 2603 return (NULL); 2604 } 2605 stcb = sctp_findassociation_special_addr(m, 2606 offset, sh, &inp, netp, dst); 2607 if (inp_p != NULL) { 2608 *inp_p = inp; 2609 } 2610 } 2611 } 2612 SCTPDBG(SCTP_DEBUG_PCB1, "stcb is %p\n", (void *)stcb); 2613 return (stcb); 2614 } 2615 2616 /* 2617 * lookup an association by an ASCONF lookup address. 2618 * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup 2619 */ 2620 struct sctp_tcb * 2621 sctp_findassociation_ep_asconf(struct mbuf *m, int offset, 2622 struct sockaddr *dst, struct sctphdr *sh, 2623 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) 2624 { 2625 struct sctp_tcb *stcb; 2626 union sctp_sockstore remote_store; 2627 struct sctp_paramhdr param_buf, *phdr; 2628 int ptype; 2629 int zero_address = 0; 2630 #ifdef INET 2631 struct sockaddr_in *sin; 2632 #endif 2633 #ifdef INET6 2634 struct sockaddr_in6 *sin6; 2635 #endif 2636 2637 memset(&remote_store, 0, sizeof(remote_store)); 2638 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), 2639 ¶m_buf, sizeof(struct sctp_paramhdr)); 2640 if (phdr == NULL) { 2641 SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n", 2642 __func__); 2643 return NULL; 2644 } 2645 ptype = (int)((uint32_t) ntohs(phdr->param_type)); 2646 /* get the correlation address */ 2647 switch (ptype) { 2648 #ifdef INET6 2649 case SCTP_IPV6_ADDRESS: 2650 { 2651 /* ipv6 address param */ 2652 struct sctp_ipv6addr_param *p6, p6_buf; 2653 2654 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) { 2655 return NULL; 2656 } 2657 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m, 2658 offset + sizeof(struct sctp_asconf_chunk), 2659 &p6_buf.ph, sizeof(p6_buf)); 2660 if (p6 == NULL) { 2661 SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n", 2662 __func__); 2663 return (NULL); 2664 } 2665 sin6 = 
&remote_store.sin6; 2666 sin6->sin6_family = AF_INET6; 2667 #ifdef HAVE_SIN6_LEN 2668 sin6->sin6_len = sizeof(*sin6); 2669 #endif 2670 sin6->sin6_port = sh->src_port; 2671 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr)); 2672 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) 2673 zero_address = 1; 2674 break; 2675 } 2676 #endif 2677 #ifdef INET 2678 case SCTP_IPV4_ADDRESS: 2679 { 2680 /* ipv4 address param */ 2681 struct sctp_ipv4addr_param *p4, p4_buf; 2682 2683 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) { 2684 return NULL; 2685 } 2686 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m, 2687 offset + sizeof(struct sctp_asconf_chunk), 2688 &p4_buf.ph, sizeof(p4_buf)); 2689 if (p4 == NULL) { 2690 SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n", 2691 __func__); 2692 return (NULL); 2693 } 2694 sin = &remote_store.sin; 2695 sin->sin_family = AF_INET; 2696 #ifdef HAVE_SIN_LEN 2697 sin->sin_len = sizeof(*sin); 2698 #endif 2699 sin->sin_port = sh->src_port; 2700 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr)); 2701 if (sin->sin_addr.s_addr == INADDR_ANY) 2702 zero_address = 1; 2703 break; 2704 } 2705 #endif 2706 default: 2707 /* invalid address param type */ 2708 return NULL; 2709 } 2710 2711 if (zero_address) { 2712 stcb = sctp_findassoc_by_vtag(NULL, dst, ntohl(sh->v_tag), inp_p, 2713 netp, sh->src_port, sh->dest_port, 1, vrf_id, 0); 2714 if (stcb != NULL) { 2715 SCTP_INP_DECR_REF(*inp_p); 2716 } 2717 } else { 2718 stcb = sctp_findassociation_ep_addr(inp_p, 2719 &remote_store.sa, netp, 2720 dst, NULL); 2721 } 2722 return (stcb); 2723 } 2724 2725 /* 2726 * allocate a sctp_inpcb and setup a temporary binding to a port/all 2727 * addresses. This way if we don't get a bind we by default pick a ephemeral 2728 * port with all addresses bound. 2729 */ 2730 int 2731 sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) 2732 { 2733 /* 2734 * we get called when a new endpoint starts up. 
We need to allocate 2735 * the sctp_inpcb structure from the zone and init it. Mark it as 2736 * unbound and find a port that we can use as an ephemeral with 2737 * INADDR_ANY. If the user binds later no problem we can then add in 2738 * the specific addresses. And setup the default parameters for the 2739 * EP. 2740 */ 2741 int i, error; 2742 struct sctp_inpcb *inp; 2743 struct sctp_pcb *m; 2744 struct timeval time; 2745 sctp_sharedkey_t *null_key; 2746 2747 error = 0; 2748 2749 SCTP_INP_INFO_WLOCK(); 2750 inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb); 2751 if (inp == NULL) { 2752 SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n"); 2753 SCTP_INP_INFO_WUNLOCK(); 2754 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 2755 return (ENOBUFS); 2756 } 2757 /* zap it */ 2758 memset(inp, 0, sizeof(*inp)); 2759 2760 /* bump generations */ 2761 #if defined(__APPLE__) && !defined(__Userspace__) 2762 inp->ip_inp.inp.inp_state = INPCB_STATE_INUSE; 2763 #endif 2764 /* setup socket pointers */ 2765 inp->sctp_socket = so; 2766 inp->ip_inp.inp.inp_socket = so; 2767 #if defined(__FreeBSD__) && !defined(__Userspace__) 2768 inp->ip_inp.inp.inp_cred = crhold(so->so_cred); 2769 #endif 2770 #ifdef INET6 2771 #if !defined(__Userspace__) && !defined(_WIN32) 2772 if (INP_SOCKAF(so) == AF_INET6) { 2773 if (MODULE_GLOBAL(ip6_auto_flowlabel)) { 2774 inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL; 2775 } 2776 if (MODULE_GLOBAL(ip6_v6only)) { 2777 inp->ip_inp.inp.inp_flags |= IN6P_IPV6_V6ONLY; 2778 } 2779 } 2780 #endif 2781 #endif 2782 inp->sctp_associd_counter = 1; 2783 inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT; 2784 inp->sctp_frag_point = 0; 2785 inp->max_cwnd = 0; 2786 inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off); 2787 inp->ecn_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_ecn_enable); 2788 inp->prsctp_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pr_enable); 2789 inp->auth_supported = 
(uint8_t)SCTP_BASE_SYSCTL(sctp_auth_enable); 2790 inp->asconf_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_asconf_enable); 2791 inp->reconfig_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_reconfig_enable); 2792 inp->nrsack_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_nrsack_enable); 2793 inp->pktdrop_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pktdrop_enable); 2794 inp->idata_supported = 0; 2795 inp->rcv_edmid = SCTP_EDMID_NONE; 2796 2797 #if defined(__FreeBSD__) && !defined(__Userspace__) 2798 inp->fibnum = so->so_fibnum; 2799 #else 2800 inp->fibnum = 0; 2801 #endif 2802 #if defined(__Userspace__) 2803 inp->ulp_info = NULL; 2804 inp->recv_callback = NULL; 2805 inp->send_callback = NULL; 2806 inp->send_sb_threshold = 0; 2807 #endif 2808 /* init the small hash table we use to track asocid <-> tcb */ 2809 inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark); 2810 if (inp->sctp_asocidhash == NULL) { 2811 #if defined(__FreeBSD__) && !defined(__Userspace__) 2812 crfree(inp->ip_inp.inp.inp_cred); 2813 #endif 2814 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 2815 SCTP_INP_INFO_WUNLOCK(); 2816 return (ENOBUFS); 2817 } 2818 SCTP_INCR_EP_COUNT(); 2819 inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl); 2820 SCTP_INP_INFO_WUNLOCK(); 2821 2822 so->so_pcb = (caddr_t)inp; 2823 2824 if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) { 2825 /* UDP style socket */ 2826 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | 2827 SCTP_PCB_FLAGS_UNBOUND); 2828 /* Be sure it is NON-BLOCKING IO for UDP */ 2829 /* SCTP_SET_SO_NBIO(so); */ 2830 } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) { 2831 /* TCP style socket */ 2832 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2833 SCTP_PCB_FLAGS_UNBOUND); 2834 /* Be sure we have blocking IO by default */ 2835 SOCK_LOCK(so); 2836 SCTP_CLEAR_SO_NBIO(so); 2837 SOCK_UNLOCK(so); 2838 } else { 2839 /* 2840 * unsupported socket type (RAW, etc)- in case we missed it 2841 * in protosw 2842 */ 2843 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, 
SCTP_FROM_SCTP_PCB, EOPNOTSUPP); 2844 so->so_pcb = NULL; 2845 #if defined(__FreeBSD__) && !defined(__Userspace__) 2846 crfree(inp->ip_inp.inp.inp_cred); 2847 #endif 2848 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 2849 return (EOPNOTSUPP); 2850 } 2851 if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) { 2852 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2853 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2854 } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) { 2855 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2856 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2857 } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) { 2858 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 2859 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 2860 } 2861 inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize), 2862 &inp->sctp_hashmark); 2863 if (inp->sctp_tcbhash == NULL) { 2864 SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n"); 2865 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 2866 so->so_pcb = NULL; 2867 #if defined(__FreeBSD__) && !defined(__Userspace__) 2868 crfree(inp->ip_inp.inp.inp_cred); 2869 #endif 2870 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 2871 return (ENOBUFS); 2872 } 2873 #ifdef SCTP_MVRF 2874 inp->vrf_size = SCTP_DEFAULT_VRF_SIZE; 2875 SCTP_MALLOC(inp->m_vrf_ids, uint32_t *, 2876 (sizeof(uint32_t) * inp->vrf_size), SCTP_M_MVRF); 2877 if (inp->m_vrf_ids == NULL) { 2878 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 2879 so->so_pcb = NULL; 2880 SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 2881 #if defined(__FreeBSD__) && !defined(__Userspace__) 2882 crfree(inp->ip_inp.inp.inp_cred); 2883 #endif 2884 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 2885 return (ENOBUFS); 2886 } 2887 inp->m_vrf_ids[0] = vrf_id; 2888 inp->num_vrfs = 1; 2889 #endif 2890 
inp->def_vrf_id = vrf_id; 2891 2892 #if defined(__APPLE__) && !defined(__Userspace__) 2893 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 2894 inp->ip_inp.inp.inpcb_mtx = lck_mtx_alloc_init(SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr); 2895 if (inp->ip_inp.inp.inpcb_mtx == NULL) { 2896 SCTP_PRINTF("in_pcballoc: can't alloc mutex! so=%p\n", (void *)so); 2897 #ifdef SCTP_MVRF 2898 SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF); 2899 #endif 2900 SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 2901 so->so_pcb = NULL; 2902 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 2903 SCTP_UNLOCK_EXC(SCTP_BASE_INFO(sctbinfo).ipi_lock); 2904 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); 2905 return (ENOMEM); 2906 } 2907 #elif defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 2908 lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr); 2909 #else 2910 lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).ipi_lock_grp, SCTP_BASE_INFO(sctbinfo).ipi_lock_attr); 2911 #endif 2912 #endif 2913 SCTP_INP_INFO_WLOCK(); 2914 SCTP_INP_LOCK_INIT(inp); 2915 #if defined(__FreeBSD__) && !defined(__Userspace__) 2916 rw_init_flags(&inp->ip_inp.inp.inp_lock, "sctpinp", 2917 RW_RECURSE | RW_DUPOK); 2918 #endif 2919 SCTP_INP_READ_LOCK_INIT(inp); 2920 SCTP_ASOC_CREATE_LOCK_INIT(inp); 2921 /* lock the new ep */ 2922 SCTP_INP_WLOCK(inp); 2923 2924 /* add it to the info area */ 2925 LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list); 2926 #if defined(__APPLE__) && !defined(__Userspace__) 2927 inp->ip_inp.inp.inp_pcbinfo = &SCTP_BASE_INFO(sctbinfo); 2928 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 2929 LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).listhead, &inp->ip_inp.inp, inp_list); 2930 #else 2931 LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).ipi_listhead, &inp->ip_inp.inp, inp_list); 2932 #endif 2933 #endif 2934 
SCTP_INP_INFO_WUNLOCK(); 2935 2936 TAILQ_INIT(&inp->read_queue); 2937 LIST_INIT(&inp->sctp_addr_list); 2938 2939 LIST_INIT(&inp->sctp_asoc_list); 2940 2941 #ifdef SCTP_TRACK_FREED_ASOCS 2942 /* TEMP CODE */ 2943 LIST_INIT(&inp->sctp_asoc_free_list); 2944 #endif 2945 /* Init the timer structure for signature change */ 2946 SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer); 2947 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE; 2948 2949 /* now init the actual endpoint default data */ 2950 m = &inp->sctp_ep; 2951 2952 /* setup the base timeout information */ 2953 m->sctp_timeoutticks[SCTP_TIMER_SEND] = sctp_secs_to_ticks(SCTP_SEND_SEC); /* needed ? */ 2954 m->sctp_timeoutticks[SCTP_TIMER_INIT] = sctp_secs_to_ticks(SCTP_INIT_SEC); /* needed ? */ 2955 m->sctp_timeoutticks[SCTP_TIMER_RECV] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default)); 2956 m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default)); 2957 m->sctp_timeoutticks[SCTP_TIMER_PMTU] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default)); 2958 m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default)); 2959 m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = sctp_secs_to_ticks(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default)); 2960 /* all max/min max are in ms */ 2961 m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default); 2962 m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default); 2963 m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default); 2964 m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default); 2965 m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default); 2966 m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default); 2967 m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default); 2968 m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default); 2969 m->def_net_pf_threshold = 
SCTP_BASE_SYSCTL(sctp_path_pf_threshold); 2970 m->sctp_sws_sender = SCTP_SWS_SENDER_DEF; 2971 m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF; 2972 m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default); 2973 m->fr_max_burst = SCTP_BASE_SYSCTL(sctp_fr_max_burst_default); 2974 2975 m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module); 2976 m->sctp_default_ss_module = SCTP_BASE_SYSCTL(sctp_default_ss_module); 2977 m->max_open_streams_intome = SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default); 2978 /* number of streams to pre-open on a association */ 2979 m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default); 2980 2981 m->default_mtu = 0; 2982 /* Add adaptation cookie */ 2983 m->adaptation_layer_indicator = 0; 2984 m->adaptation_layer_indicator_provided = 0; 2985 2986 /* seed random number generator */ 2987 m->random_counter = 1; 2988 m->store_at = SCTP_SIGNATURE_SIZE; 2989 SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers)); 2990 sctp_fill_random_store(m); 2991 2992 /* Minimum cookie size */ 2993 m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) + 2994 sizeof(struct sctp_state_cookie); 2995 m->size_of_a_cookie += SCTP_SIGNATURE_SIZE; 2996 2997 /* Setup the initial secret */ 2998 (void)SCTP_GETTIME_TIMEVAL(&time); 2999 m->time_of_secret_change = time.tv_sec; 3000 3001 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 3002 m->secret_key[0][i] = sctp_select_initial_TSN(m); 3003 } 3004 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 3005 3006 /* How long is a cookie good for ? 
*/ 3007 m->def_cookie_life = sctp_msecs_to_ticks(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default)); 3008 /* 3009 * Initialize authentication parameters 3010 */ 3011 m->local_hmacs = sctp_default_supported_hmaclist(); 3012 m->local_auth_chunks = sctp_alloc_chunklist(); 3013 if (inp->asconf_supported) { 3014 sctp_auth_add_chunk(SCTP_ASCONF, m->local_auth_chunks); 3015 sctp_auth_add_chunk(SCTP_ASCONF_ACK, m->local_auth_chunks); 3016 } 3017 m->default_dscp = 0; 3018 #ifdef INET6 3019 m->default_flowlabel = 0; 3020 #endif 3021 m->port = 0; /* encapsulation disabled by default */ 3022 LIST_INIT(&m->shared_keys); 3023 /* add default NULL key as key id 0 */ 3024 null_key = sctp_alloc_sharedkey(); 3025 sctp_insert_sharedkey(&m->shared_keys, null_key); 3026 SCTP_INP_WUNLOCK(inp); 3027 #ifdef SCTP_LOG_CLOSING 3028 sctp_log_closing(inp, NULL, 12); 3029 #endif 3030 return (error); 3031 } 3032 3033 void 3034 sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, 3035 struct sctp_tcb *stcb) 3036 { 3037 struct sctp_nets *net; 3038 uint16_t lport, rport; 3039 struct sctppcbhead *head; 3040 struct sctp_laddr *laddr, *oladdr; 3041 3042 atomic_add_int(&stcb->asoc.refcnt, 1); 3043 SCTP_TCB_UNLOCK(stcb); 3044 SCTP_INP_INFO_WLOCK(); 3045 SCTP_INP_WLOCK(old_inp); 3046 SCTP_INP_WLOCK(new_inp); 3047 SCTP_TCB_LOCK(stcb); 3048 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3049 3050 #if defined(__FreeBSD__) && !defined(__Userspace__) 3051 #ifdef INET6 3052 if (old_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 3053 new_inp->ip_inp.inp.inp_flags |= old_inp->ip_inp.inp.inp_flags & INP_CONTROLOPTS; 3054 if (old_inp->ip_inp.inp.in6p_outputopts) { 3055 new_inp->ip_inp.inp.in6p_outputopts = ip6_copypktopts(old_inp->ip_inp.inp.in6p_outputopts, M_NOWAIT); 3056 } 3057 } 3058 #endif 3059 #if defined(INET) && defined(INET6) 3060 else 3061 #endif 3062 #ifdef INET 3063 { 3064 new_inp->ip_inp.inp.inp_ip_tos = old_inp->ip_inp.inp.inp_ip_tos; 3065 new_inp->ip_inp.inp.inp_ip_ttl = 
old_inp->ip_inp.inp.inp_ip_ttl; 3066 } 3067 #endif 3068 #endif 3069 new_inp->sctp_ep.time_of_secret_change = 3070 old_inp->sctp_ep.time_of_secret_change; 3071 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key, 3072 sizeof(old_inp->sctp_ep.secret_key)); 3073 new_inp->sctp_ep.current_secret_number = 3074 old_inp->sctp_ep.current_secret_number; 3075 new_inp->sctp_ep.last_secret_number = 3076 old_inp->sctp_ep.last_secret_number; 3077 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie; 3078 3079 /* make it so new data pours into the new socket */ 3080 stcb->sctp_socket = new_inp->sctp_socket; 3081 stcb->sctp_ep = new_inp; 3082 3083 /* Copy the port across */ 3084 lport = new_inp->sctp_lport = old_inp->sctp_lport; 3085 rport = stcb->rport; 3086 /* Pull the tcb from the old association */ 3087 LIST_REMOVE(stcb, sctp_tcbhash); 3088 LIST_REMOVE(stcb, sctp_tcblist); 3089 if (stcb->asoc.in_asocid_hash) { 3090 LIST_REMOVE(stcb, sctp_tcbasocidhash); 3091 } 3092 /* Now insert the new_inp into the TCP connected hash */ 3093 head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; 3094 3095 LIST_INSERT_HEAD(head, new_inp, sctp_hash); 3096 /* Its safe to access */ 3097 new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; 3098 3099 /* Now move the tcb into the endpoint list */ 3100 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist); 3101 /* 3102 * Question, do we even need to worry about the ep-hash since we 3103 * only have one connection? Probably not :> so lets get rid of it 3104 * and not suck up any kernel memory in that. 3105 */ 3106 if (stcb->asoc.in_asocid_hash) { 3107 struct sctpasochead *lhd; 3108 lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 3109 new_inp->hashasocidmark)]; 3110 LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash); 3111 } 3112 /* Ok. Let's restart timer. 
*/ 3113 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3114 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp, 3115 stcb, net); 3116 } 3117 3118 SCTP_INP_INFO_WUNLOCK(); 3119 if (new_inp->sctp_tcbhash != NULL) { 3120 SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark); 3121 new_inp->sctp_tcbhash = NULL; 3122 } 3123 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 3124 /* Subset bound, so copy in the laddr list from the old_inp */ 3125 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) { 3126 laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 3127 if (laddr == NULL) { 3128 /* 3129 * Gak, what can we do? This assoc is really 3130 * HOSED. We probably should send an abort 3131 * here. 3132 */ 3133 SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n"); 3134 continue; 3135 } 3136 SCTP_INCR_LADDR_COUNT(); 3137 memset(laddr, 0, sizeof(*laddr)); 3138 (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); 3139 laddr->ifa = oladdr->ifa; 3140 atomic_add_int(&laddr->ifa->refcount, 1); 3141 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr, 3142 sctp_nxt_addr); 3143 new_inp->laddr_count++; 3144 if (oladdr == stcb->asoc.last_used_address) { 3145 stcb->asoc.last_used_address = laddr; 3146 } 3147 } 3148 } 3149 /* Now any running timers need to be adjusted. 
*/ 3150 if (stcb->asoc.dack_timer.ep == old_inp) { 3151 SCTP_INP_DECR_REF(old_inp); 3152 stcb->asoc.dack_timer.ep = new_inp; 3153 SCTP_INP_INCR_REF(new_inp); 3154 } 3155 if (stcb->asoc.asconf_timer.ep == old_inp) { 3156 SCTP_INP_DECR_REF(old_inp); 3157 stcb->asoc.asconf_timer.ep = new_inp; 3158 SCTP_INP_INCR_REF(new_inp); 3159 } 3160 if (stcb->asoc.strreset_timer.ep == old_inp) { 3161 SCTP_INP_DECR_REF(old_inp); 3162 stcb->asoc.strreset_timer.ep = new_inp; 3163 SCTP_INP_INCR_REF(new_inp); 3164 } 3165 if (stcb->asoc.shut_guard_timer.ep == old_inp) { 3166 SCTP_INP_DECR_REF(old_inp); 3167 stcb->asoc.shut_guard_timer.ep = new_inp; 3168 SCTP_INP_INCR_REF(new_inp); 3169 } 3170 if (stcb->asoc.autoclose_timer.ep == old_inp) { 3171 SCTP_INP_DECR_REF(old_inp); 3172 stcb->asoc.autoclose_timer.ep = new_inp; 3173 SCTP_INP_INCR_REF(new_inp); 3174 } 3175 if (stcb->asoc.delete_prim_timer.ep == old_inp) { 3176 SCTP_INP_DECR_REF(old_inp); 3177 stcb->asoc.delete_prim_timer.ep = new_inp; 3178 SCTP_INP_INCR_REF(new_inp); 3179 } 3180 /* now what about the nets? */ 3181 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3182 if (net->pmtu_timer.ep == old_inp) { 3183 SCTP_INP_DECR_REF(old_inp); 3184 net->pmtu_timer.ep = new_inp; 3185 SCTP_INP_INCR_REF(new_inp); 3186 } 3187 if (net->hb_timer.ep == old_inp) { 3188 SCTP_INP_DECR_REF(old_inp); 3189 net->hb_timer.ep = new_inp; 3190 SCTP_INP_INCR_REF(new_inp); 3191 } 3192 if (net->rxt_timer.ep == old_inp) { 3193 SCTP_INP_DECR_REF(old_inp); 3194 net->rxt_timer.ep = new_inp; 3195 SCTP_INP_INCR_REF(new_inp); 3196 } 3197 } 3198 SCTP_INP_WUNLOCK(new_inp); 3199 SCTP_INP_WUNLOCK(old_inp); 3200 } 3201 3202 /* 3203 * insert an laddr entry with the given ifa for the desired list 3204 */ 3205 static int 3206 sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act) 3207 { 3208 struct sctp_laddr *laddr; 3209 3210 laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 3211 if (laddr == NULL) { 3212 /* out of memory? 
*/ 3213 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 3214 return (EINVAL); 3215 } 3216 SCTP_INCR_LADDR_COUNT(); 3217 memset(laddr, 0, sizeof(*laddr)); 3218 (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); 3219 laddr->ifa = ifa; 3220 laddr->action = act; 3221 atomic_add_int(&ifa->refcount, 1); 3222 /* insert it */ 3223 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr); 3224 3225 return (0); 3226 } 3227 3228 /* 3229 * Remove an laddr entry from the local address list (on an assoc) 3230 */ 3231 static void 3232 sctp_remove_laddr(struct sctp_laddr *laddr) 3233 { 3234 3235 /* remove from the list */ 3236 LIST_REMOVE(laddr, sctp_nxt_addr); 3237 sctp_free_ifa(laddr->ifa); 3238 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr); 3239 SCTP_DECR_LADDR_COUNT(); 3240 } 3241 #if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)) 3242 3243 /* 3244 * Don't know why, but without this there is an unknown reference when 3245 * compiling NetBSD... hmm 3246 */ 3247 extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6); 3248 #endif 3249 3250 /* 3251 * Bind the socket, with the PCB and global info locks held. Note, if a 3252 * socket address is specified, the PCB lock may be dropped and re-acquired. 3253 * 3254 * sctp_ifap is used to bypass normal local address validation checks. 
3255 */ 3256 int 3257 #if defined(__FreeBSD__) && !defined(__Userspace__) 3258 sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, 3259 struct sctp_ifa *sctp_ifap, struct thread *td) 3260 #elif defined(_WIN32) && !defined(__Userspace__) 3261 sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, 3262 struct sctp_ifa *sctp_ifap, PKTHREAD p) 3263 #else 3264 sctp_inpcb_bind_locked(struct sctp_inpcb *inp, struct sockaddr *addr, 3265 struct sctp_ifa *sctp_ifap, struct proc *p) 3266 #endif 3267 { 3268 /* bind a ep to a socket address */ 3269 struct sctppcbhead *head; 3270 struct sctp_inpcb *inp_tmp; 3271 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) 3272 struct inpcb *ip_inp; 3273 #endif 3274 int port_reuse_active = 0; 3275 int bindall; 3276 #ifdef SCTP_MVRF 3277 int i; 3278 #endif 3279 uint16_t lport; 3280 int error; 3281 uint32_t vrf_id; 3282 3283 #if defined(__FreeBSD__) && !defined(__Userspace__) 3284 KASSERT(td != NULL, ("%s: null thread", __func__)); 3285 3286 #endif 3287 error = 0; 3288 lport = 0; 3289 bindall = 1; 3290 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) 3291 ip_inp = &inp->ip_inp.inp; 3292 #endif 3293 3294 SCTP_INP_INFO_WLOCK_ASSERT(); 3295 SCTP_INP_WLOCK_ASSERT(inp); 3296 3297 #ifdef SCTP_DEBUG 3298 if (addr) { 3299 SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n", 3300 ntohs(((struct sockaddr_in *)addr)->sin_port)); 3301 SCTPDBG(SCTP_DEBUG_PCB1, "Addr: "); 3302 SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr); 3303 } 3304 #endif 3305 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 3306 error = EINVAL; 3307 /* already did a bind, subsequent binds NOT allowed ! */ 3308 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3309 goto out; 3310 } 3311 if (addr != NULL) { 3312 switch (addr->sa_family) { 3313 #ifdef INET 3314 case AF_INET: 3315 { 3316 struct sockaddr_in *sin; 3317 3318 /* IPV6_V6ONLY socket? 
*/ 3319 if (SCTP_IPV6_V6ONLY(inp)) { 3320 error = EINVAL; 3321 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3322 goto out; 3323 } 3324 #ifdef HAVE_SA_LEN 3325 if (addr->sa_len != sizeof(*sin)) { 3326 error = EINVAL; 3327 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3328 goto out; 3329 } 3330 #endif 3331 3332 sin = (struct sockaddr_in *)addr; 3333 lport = sin->sin_port; 3334 #if defined(__FreeBSD__) && !defined(__Userspace__) 3335 /* 3336 * For LOOPBACK the prison_local_ip4() call will transmute the ip address 3337 * to the proper value. 3338 */ 3339 if ((error = prison_local_ip4(td->td_ucred, &sin->sin_addr)) != 0) { 3340 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3341 goto out; 3342 } 3343 #endif 3344 if (sin->sin_addr.s_addr != INADDR_ANY) { 3345 bindall = 0; 3346 } 3347 break; 3348 } 3349 #endif 3350 #ifdef INET6 3351 case AF_INET6: 3352 { 3353 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */ 3354 struct sockaddr_in6 *sin6; 3355 3356 sin6 = (struct sockaddr_in6 *)addr; 3357 #ifdef HAVE_SA_LEN 3358 if (addr->sa_len != sizeof(*sin6)) { 3359 error = EINVAL; 3360 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3361 goto out; 3362 } 3363 #endif 3364 lport = sin6->sin6_port; 3365 #if defined(__FreeBSD__) && !defined(__Userspace__) 3366 /* 3367 * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address 3368 * to the proper value. 
3369 */ 3370 if ((error = prison_local_ip6(td->td_ucred, &sin6->sin6_addr, 3371 (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 3372 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3373 goto out; 3374 } 3375 #endif 3376 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 3377 bindall = 0; 3378 #ifdef SCTP_EMBEDDED_V6_SCOPE 3379 /* KAME hack: embed scopeid */ 3380 #if defined(SCTP_KAME) 3381 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { 3382 error = EINVAL; 3383 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3384 goto out; 3385 } 3386 #elif defined(__APPLE__) && !defined(__Userspace__) 3387 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 3388 if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL) != 0) { 3389 #else 3390 if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL, NULL) != 0) { 3391 #endif 3392 error = EINVAL; 3393 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3394 goto out; 3395 } 3396 #elif defined(__FreeBSD__) && !defined(__Userspace__) 3397 error = scope6_check_id(sin6, MODULE_GLOBAL(ip6_use_defzone)); 3398 if (error != 0) { 3399 error = EINVAL; 3400 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3401 goto out; 3402 } 3403 #else 3404 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) { 3405 error = EINVAL; 3406 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3407 goto out; 3408 } 3409 #endif 3410 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 3411 } 3412 #ifndef SCOPEDROUTING 3413 /* this must be cleared for ifa_ifwithaddr() */ 3414 sin6->sin6_scope_id = 0; 3415 #endif /* SCOPEDROUTING */ 3416 break; 3417 } 3418 #endif 3419 #if defined(__Userspace__) 3420 case AF_CONN: 3421 { 3422 struct sockaddr_conn *sconn; 3423 3424 #ifdef HAVE_SA_LEN 3425 if (addr->sa_len != sizeof(struct sockaddr_conn)) { 3426 error = EINVAL; 3427 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3428 goto out; 3429 } 3430 #endif 3431 sconn = (struct sockaddr_conn 
*)addr; 3432 lport = sconn->sconn_port; 3433 if (sconn->sconn_addr != NULL) { 3434 bindall = 0; 3435 } 3436 break; 3437 } 3438 #endif 3439 default: 3440 error = EAFNOSUPPORT; 3441 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3442 goto out; 3443 } 3444 } 3445 /* Setup a vrf_id to be the default for the non-bind-all case. */ 3446 vrf_id = inp->def_vrf_id; 3447 3448 if (lport) { 3449 /* 3450 * Did the caller specify a port? if so we must see if an ep 3451 * already has this one bound. 3452 */ 3453 /* got to be root to get at low ports */ 3454 #if !(defined(_WIN32) && !defined(__Userspace__)) 3455 if (ntohs(lport) < IPPORT_RESERVED && 3456 #if defined(__FreeBSD__) && !defined(__Userspace__) 3457 (error = priv_check(td, PRIV_NETINET_RESERVEDPORT)) != 0) { 3458 #elif defined(__APPLE__) && !defined(__Userspace__) 3459 (error = suser(p->p_ucred, &p->p_acflag)) != 0) { 3460 #elif defined(__Userspace__) 3461 /* TODO ensure uid is 0, etc... */ 3462 0) { 3463 #else 3464 (error = suser(p, 0)) != 0) { 3465 #endif 3466 goto out; 3467 } 3468 #endif 3469 SCTP_INP_INCR_REF(inp); 3470 SCTP_INP_WUNLOCK(inp); 3471 if (bindall) { 3472 #ifdef SCTP_MVRF 3473 for (i = 0; i < inp->num_vrfs; i++) { 3474 vrf_id = inp->m_vrf_ids[i]; 3475 #else 3476 vrf_id = inp->def_vrf_id; 3477 #endif 3478 inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); 3479 if (inp_tmp != NULL) { 3480 /* 3481 * lock guy returned and lower count 3482 * note that we are not bound so 3483 * inp_tmp should NEVER be inp. And 3484 * it is this inp (inp_tmp) that gets 3485 * the reference bump, so we must 3486 * lower it. 
3487 */ 3488 SCTP_INP_DECR_REF(inp_tmp); 3489 /* unlock info */ 3490 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 3491 (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 3492 /* Ok, must be one-2-one and allowing port re-use */ 3493 port_reuse_active = 1; 3494 goto continue_anyway; 3495 } 3496 SCTP_INP_WLOCK(inp); 3497 SCTP_INP_DECR_REF(inp); 3498 error = EADDRINUSE; 3499 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3500 goto out; 3501 } 3502 #ifdef SCTP_MVRF 3503 } 3504 #endif 3505 } else { 3506 inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); 3507 if (inp_tmp != NULL) { 3508 /* 3509 * lock guy returned and lower count note 3510 * that we are not bound so inp_tmp should 3511 * NEVER be inp. And it is this inp (inp_tmp) 3512 * that gets the reference bump, so we must 3513 * lower it. 3514 */ 3515 SCTP_INP_DECR_REF(inp_tmp); 3516 /* unlock info */ 3517 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 3518 (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 3519 /* Ok, must be one-2-one and allowing port re-use */ 3520 port_reuse_active = 1; 3521 goto continue_anyway; 3522 } 3523 SCTP_INP_WLOCK(inp); 3524 SCTP_INP_DECR_REF(inp); 3525 error = EADDRINUSE; 3526 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3527 goto out; 3528 } 3529 } 3530 continue_anyway: 3531 SCTP_INP_WLOCK(inp); 3532 SCTP_INP_DECR_REF(inp); 3533 if (bindall) { 3534 /* verify that no lport is not used by a singleton */ 3535 if ((port_reuse_active == 0) && 3536 (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))) { 3537 /* Sorry someone already has this one bound */ 3538 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 3539 (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 3540 port_reuse_active = 1; 3541 } else { 3542 error = EADDRINUSE; 3543 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3544 goto out; 3545 } 3546 } 3547 } 3548 } else { 3549 uint16_t first, last, candidate; 3550 uint16_t 
count; 3551 3552 #if defined(__Userspace__) 3553 first = MODULE_GLOBAL(ipport_firstauto); 3554 last = MODULE_GLOBAL(ipport_lastauto); 3555 #elif defined(_WIN32) 3556 first = 1; 3557 last = 0xffff; 3558 #elif defined(__FreeBSD__) || defined(__APPLE__) 3559 if (ip_inp->inp_flags & INP_HIGHPORT) { 3560 first = MODULE_GLOBAL(ipport_hifirstauto); 3561 last = MODULE_GLOBAL(ipport_hilastauto); 3562 } else if (ip_inp->inp_flags & INP_LOWPORT) { 3563 #if defined(__FreeBSD__) 3564 if ((error = priv_check(td, PRIV_NETINET_RESERVEDPORT)) != 0) { 3565 #else 3566 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) { 3567 #endif 3568 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3569 goto out; 3570 } 3571 first = MODULE_GLOBAL(ipport_lowfirstauto); 3572 last = MODULE_GLOBAL(ipport_lowlastauto); 3573 } else { 3574 first = MODULE_GLOBAL(ipport_firstauto); 3575 last = MODULE_GLOBAL(ipport_lastauto); 3576 } 3577 #endif 3578 if (first > last) { 3579 uint16_t temp; 3580 3581 temp = first; 3582 first = last; 3583 last = temp; 3584 } 3585 count = last - first + 1; /* number of candidates */ 3586 candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count); 3587 3588 for (;;) { 3589 #ifdef SCTP_MVRF 3590 for (i = 0; i < inp->num_vrfs; i++) { 3591 if (sctp_isport_inuse(inp, htons(candidate), inp->m_vrf_ids[i]) != NULL) { 3592 break; 3593 } 3594 } 3595 if (i == inp->num_vrfs) { 3596 lport = htons(candidate); 3597 break; 3598 } 3599 #else 3600 if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) { 3601 lport = htons(candidate); 3602 break; 3603 } 3604 #endif 3605 if (--count == 0) { 3606 error = EADDRINUSE; 3607 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3608 goto out; 3609 } 3610 if (candidate == last) 3611 candidate = first; 3612 else 3613 candidate = candidate + 1; 3614 } 3615 } 3616 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | 3617 SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 3618 /* 3619 * this really should not happen. 
The guy did a non-blocking 3620 * bind and then did a close at the same time. 3621 */ 3622 error = EINVAL; 3623 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3624 goto out; 3625 } 3626 /* ok we look clear to give out this port, so lets setup the binding */ 3627 if (bindall) { 3628 /* binding to all addresses, so just set in the proper flags */ 3629 inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL; 3630 /* set the automatic addr changes from kernel flag */ 3631 if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) { 3632 sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF); 3633 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 3634 } else { 3635 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 3636 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 3637 } 3638 if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) { 3639 sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); 3640 } else { 3641 sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); 3642 } 3643 /* set the automatic mobility_base from kernel 3644 flag (by micchie) 3645 */ 3646 if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) { 3647 sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE); 3648 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 3649 } else { 3650 sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE); 3651 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 3652 } 3653 /* set the automatic mobility_fasthandoff from kernel 3654 flag (by micchie) 3655 */ 3656 if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) { 3657 sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF); 3658 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 3659 } else { 3660 sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF); 3661 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 3662 } 3663 } else { 3664 /* 3665 * bind specific, make sure flags is off and add a new 3666 * address structure to the sctp_addr_list inside the ep 3667 * structure. 
3668 * 3669 * We will need to allocate one and insert it at the head. The 3670 * socketopt call can just insert new addresses in there as 3671 * well. It will also have to do the embed scope kame hack 3672 * too (before adding). 3673 */ 3674 struct sctp_ifa *ifa; 3675 union sctp_sockstore store; 3676 3677 memset(&store, 0, sizeof(store)); 3678 switch (addr->sa_family) { 3679 #ifdef INET 3680 case AF_INET: 3681 memcpy(&store.sin, addr, sizeof(struct sockaddr_in)); 3682 store.sin.sin_port = 0; 3683 break; 3684 #endif 3685 #ifdef INET6 3686 case AF_INET6: 3687 memcpy(&store.sin6, addr, sizeof(struct sockaddr_in6)); 3688 store.sin6.sin6_port = 0; 3689 break; 3690 #endif 3691 #if defined(__Userspace__) 3692 case AF_CONN: 3693 memcpy(&store.sconn, addr, sizeof(struct sockaddr_conn)); 3694 store.sconn.sconn_port = 0; 3695 break; 3696 #endif 3697 default: 3698 break; 3699 } 3700 /* 3701 * first find the interface with the bound address need to 3702 * zero out the port to find the address! yuck! can't do 3703 * this earlier since need port for sctp_pcb_findep() 3704 */ 3705 if (sctp_ifap != NULL) { 3706 ifa = sctp_ifap; 3707 } else { 3708 /* Note for BSD we hit here always other 3709 * O/S's will pass things in via the 3710 * sctp_ifap argument. 3711 */ 3712 ifa = sctp_find_ifa_by_addr(&store.sa, 3713 vrf_id, SCTP_ADDR_NOT_LOCKED); 3714 } 3715 if (ifa == NULL) { 3716 error = EADDRNOTAVAIL; 3717 /* Can't find an interface with that address */ 3718 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3719 goto out; 3720 } 3721 #ifdef INET6 3722 if (addr->sa_family == AF_INET6) { 3723 /* GAK, more FIXME IFA lock? */ 3724 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 3725 /* Can't bind a non-existent addr. 
*/ 3726 error = EINVAL; 3727 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 3728 goto out; 3729 } 3730 } 3731 #endif 3732 /* we're not bound all */ 3733 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL; 3734 /* allow bindx() to send ASCONF's for binding changes */ 3735 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 3736 /* clear automatic addr changes from kernel flag */ 3737 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 3738 3739 /* add this address to the endpoint list */ 3740 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0); 3741 if (error != 0) 3742 goto out; 3743 inp->laddr_count++; 3744 } 3745 /* find the bucket */ 3746 if (port_reuse_active) { 3747 /* Put it into tcp 1-2-1 hash */ 3748 head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))]; 3749 inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL; 3750 } else { 3751 head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))]; 3752 } 3753 /* put it in the bucket */ 3754 LIST_INSERT_HEAD(head, inp, sctp_hash); 3755 SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n", 3756 (void *)head, ntohs(lport), port_reuse_active); 3757 /* set in the port */ 3758 inp->sctp_lport = lport; 3759 3760 /* turn off just the unbound flag */ 3761 KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != 0, 3762 ("%s: inp %p is already bound", __func__, inp)); 3763 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; 3764 out: 3765 return (error); 3766 } 3767 3768 int 3769 #if defined(__FreeBSD__) && !defined(__Userspace__) 3770 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 3771 struct sctp_ifa *sctp_ifap, struct thread *td) 3772 #elif defined(_WIN32) && !defined(__Userspace__) 3773 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 3774 struct sctp_ifa *sctp_ifap, PKTHREAD p) 3775 #else 3776 sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 3777 struct sctp_ifa *sctp_ifap, struct proc *p) 
3778 #endif 3779 { 3780 struct sctp_inpcb *inp; 3781 int error; 3782 3783 inp = so->so_pcb; 3784 SCTP_INP_INFO_WLOCK(); 3785 SCTP_INP_WLOCK(inp); 3786 #if defined(__FreeBSD__) && !defined(__Userspace__) 3787 error = sctp_inpcb_bind_locked(inp, addr, sctp_ifap, td); 3788 #else 3789 error = sctp_inpcb_bind_locked(inp, addr, sctp_ifap, p); 3790 #endif 3791 SCTP_INP_WUNLOCK(inp); 3792 SCTP_INP_INFO_WUNLOCK(); 3793 return (error); 3794 } 3795 3796 static void 3797 sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) 3798 { 3799 struct sctp_iterator *it, *nit; 3800 3801 /* 3802 * We enter with the only the ITERATOR_LOCK in place and a write 3803 * lock on the inp_info stuff. 3804 */ 3805 it = sctp_it_ctl.cur_it; 3806 #if defined(__FreeBSD__) && !defined(__Userspace__) 3807 if (it && (it->vn != curvnet)) { 3808 /* Its not looking at our VNET */ 3809 return; 3810 } 3811 #endif 3812 if (it && (it->inp == inp)) { 3813 /* 3814 * This is tricky and we hold the iterator lock, 3815 * but when it returns and gets the lock (when we 3816 * release it) the iterator will try to operate on 3817 * inp. We need to stop that from happening. But 3818 * of course the iterator has a reference on the 3819 * stcb and inp. We can mark it and it will stop. 3820 * 3821 * If its a single iterator situation, we 3822 * set the end iterator flag. Otherwise 3823 * we set the iterator to go to the next inp. 
3824 * 3825 */ 3826 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 3827 sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT; 3828 } else { 3829 sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP; 3830 } 3831 } 3832 /* Now go through and remove any single reference to 3833 * our inp that may be still pending on the list 3834 */ 3835 SCTP_IPI_ITERATOR_WQ_LOCK(); 3836 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 3837 #if defined(__FreeBSD__) && !defined(__Userspace__) 3838 if (it->vn != curvnet) { 3839 continue; 3840 } 3841 #endif 3842 if (it->inp == inp) { 3843 /* This one points to me is it inp specific? */ 3844 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 3845 /* Remove and free this one */ 3846 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, 3847 it, sctp_nxt_itr); 3848 if (it->function_atend != NULL) { 3849 (*it->function_atend) (it->pointer, it->val); 3850 } 3851 SCTP_FREE(it, SCTP_M_ITER); 3852 } else { 3853 it->inp = LIST_NEXT(it->inp, sctp_list); 3854 if (it->inp) { 3855 SCTP_INP_INCR_REF(it->inp); 3856 } 3857 } 3858 /* When its put in the refcnt is incremented so decr it */ 3859 SCTP_INP_DECR_REF(inp); 3860 } 3861 } 3862 SCTP_IPI_ITERATOR_WQ_UNLOCK(); 3863 } 3864 3865 /* release sctp_inpcb unbind the port */ 3866 void 3867 sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) 3868 { 3869 /* 3870 * Here we free a endpoint. We must find it (if it is in the Hash 3871 * table) and remove it from there. Then we must also find it in the 3872 * overall list and remove it from there. After all removals are 3873 * complete then any timer has to be stopped. Then start the actual 3874 * freeing. a) Any local lists. b) Any associations. c) The hash of 3875 * all associations. d) finally the ep itself. 
3876 */ 3877 struct sctp_tcb *stcb, *nstcb; 3878 struct sctp_laddr *laddr, *nladdr; 3879 struct inpcb *ip_pcb; 3880 struct socket *so; 3881 int being_refed = 0; 3882 struct sctp_queued_to_read *sq, *nsq; 3883 #if !defined(__Userspace__) 3884 #if !defined(__FreeBSD__) 3885 sctp_rtentry_t *rt; 3886 #endif 3887 #endif 3888 int cnt; 3889 sctp_sharedkey_t *shared_key, *nshared_key; 3890 3891 #if defined(__APPLE__) && !defined(__Userspace__) 3892 sctp_lock_assert(SCTP_INP_SO(inp)); 3893 #endif 3894 #ifdef SCTP_LOG_CLOSING 3895 sctp_log_closing(inp, NULL, 0); 3896 #endif 3897 SCTP_ITERATOR_LOCK(); 3898 /* mark any iterators on the list or being processed */ 3899 sctp_iterator_inp_being_freed(inp); 3900 SCTP_ITERATOR_UNLOCK(); 3901 3902 SCTP_ASOC_CREATE_LOCK(inp); 3903 SCTP_INP_INFO_WLOCK(); 3904 SCTP_INP_WLOCK(inp); 3905 so = inp->sctp_socket; 3906 KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) != 0, 3907 ("%s: inp %p still has socket", __func__, inp)); 3908 KASSERT((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0, 3909 ("%s: double free of inp %p", __func__, inp)); 3910 if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) { 3911 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; 3912 /* socket is gone, so no more wakeups allowed */ 3913 inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE; 3914 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 3915 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 3916 } 3917 /* First time through we have the socket lock, after that no more. */ 3918 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL, 3919 SCTP_FROM_SCTP_PCB + SCTP_LOC_1); 3920 3921 if (inp->control) { 3922 sctp_m_freem(inp->control); 3923 inp->control = NULL; 3924 } 3925 if (inp->pkt) { 3926 sctp_m_freem(inp->pkt); 3927 inp->pkt = NULL; 3928 } 3929 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer 3930 * here but I will be nice :> (i.e. 
3931 * ip_pcb = ep;) */ 3932 if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) { 3933 int cnt_in_sd; 3934 3935 cnt_in_sd = 0; 3936 LIST_FOREACH_SAFE(stcb, &inp->sctp_asoc_list, sctp_tcblist, nstcb) { 3937 SCTP_TCB_LOCK(stcb); 3938 /* Disconnect the socket please. */ 3939 stcb->sctp_socket = NULL; 3940 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_CLOSED_SOCKET); 3941 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 3942 /* Skip guys being freed */ 3943 cnt_in_sd++; 3944 if (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { 3945 /* 3946 * Special case - we did not start a kill 3947 * timer on the asoc due to it was not 3948 * closed. So go ahead and start it now. 3949 */ 3950 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); 3951 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 3952 } 3953 SCTP_TCB_UNLOCK(stcb); 3954 continue; 3955 } 3956 if (((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 3957 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) && 3958 (stcb->asoc.total_output_queue_size == 0)) { 3959 /* If we have data in queue, we don't want to just 3960 * free since the app may have done, send()/close 3961 * or connect/send/close. And it wants the data 3962 * to get across first. 
3963 */ 3964 /* Just abandon things in the front states */ 3965 if (sctp_free_assoc(inp, stcb, SCTP_PCBFREE_NOFORCE, 3966 SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) { 3967 cnt_in_sd++; 3968 } 3969 continue; 3970 } 3971 if ((stcb->asoc.size_on_reasm_queue > 0) || 3972 (stcb->asoc.size_on_all_streams > 0) || 3973 ((so != NULL) && (SCTP_SBAVAIL(&so->so_rcv) > 0))) { 3974 /* Left with Data unread */ 3975 struct mbuf *op_err; 3976 3977 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 3978 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3; 3979 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 3980 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3981 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 3982 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3983 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3984 } 3985 if (sctp_free_assoc(inp, stcb, 3986 SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4) == 0) { 3987 cnt_in_sd++; 3988 } 3989 continue; 3990 } else if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 3991 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 3992 (stcb->asoc.stream_queue_cnt == 0)) { 3993 if ((*stcb->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, &stcb->asoc)) { 3994 goto abort_anyway; 3995 } 3996 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && 3997 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 3998 struct sctp_nets *netp; 3999 4000 /* 4001 * there is nothing queued to send, 4002 * so I send shutdown 4003 */ 4004 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4005 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4006 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4007 } 4008 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 4009 sctp_stop_timers_for_shutdown(stcb); 4010 if (stcb->asoc.alternate) { 4011 netp = stcb->asoc.alternate; 4012 } else { 4013 netp = stcb->asoc.primary_destination; 4014 } 4015 sctp_send_shutdown(stcb, netp); 4016 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 4017 
netp); 4018 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, NULL); 4019 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED); 4020 } 4021 } else { 4022 /* mark into shutdown pending */ 4023 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); 4024 if ((*stcb->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, &stcb->asoc)) { 4025 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 4026 } 4027 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4028 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4029 (stcb->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4030 struct mbuf *op_err; 4031 abort_anyway: 4032 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4033 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5; 4034 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 4035 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4036 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4037 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4038 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4039 } 4040 if (sctp_free_assoc(inp, stcb, 4041 SCTP_PCBFREE_NOFORCE, 4042 SCTP_FROM_SCTP_PCB + SCTP_LOC_6) == 0) { 4043 cnt_in_sd++; 4044 } 4045 continue; 4046 } else { 4047 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 4048 } 4049 } 4050 cnt_in_sd++; 4051 SCTP_TCB_UNLOCK(stcb); 4052 } 4053 /* now is there some left in our SHUTDOWN state? */ 4054 if (cnt_in_sd) { 4055 #ifdef SCTP_LOG_CLOSING 4056 sctp_log_closing(inp, NULL, 2); 4057 #endif 4058 inp->sctp_socket = NULL; 4059 SCTP_INP_WUNLOCK(inp); 4060 SCTP_ASOC_CREATE_UNLOCK(inp); 4061 SCTP_INP_INFO_WUNLOCK(); 4062 return; 4063 } 4064 } 4065 inp->sctp_socket = NULL; 4066 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 4067 /* 4068 * ok, this guy has been bound. It's port is 4069 * somewhere in the SCTP_BASE_INFO(hash table). Remove 4070 * it! 
4071 */ 4072 LIST_REMOVE(inp, sctp_hash); 4073 inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; 4074 } 4075 4076 /* If there is a timer running to kill us, 4077 * forget it, since it may have a contest 4078 * on the INP lock.. which would cause us 4079 * to die ... 4080 */ 4081 cnt = 0; 4082 LIST_FOREACH_SAFE(stcb, &inp->sctp_asoc_list, sctp_tcblist, nstcb) { 4083 SCTP_TCB_LOCK(stcb); 4084 if (immediate != SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) { 4085 /* Disconnect the socket please */ 4086 stcb->sctp_socket = NULL; 4087 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_CLOSED_SOCKET); 4088 } 4089 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4090 if (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { 4091 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); 4092 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 4093 } 4094 cnt++; 4095 SCTP_TCB_UNLOCK(stcb); 4096 continue; 4097 } 4098 /* Free associations that are NOT killing us */ 4099 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT) && 4100 ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) { 4101 struct mbuf *op_err; 4102 4103 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4104 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7; 4105 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED); 4106 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 4107 } else if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 4108 cnt++; 4109 SCTP_TCB_UNLOCK(stcb); 4110 continue; 4111 } 4112 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 4113 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4114 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4115 } 4116 if (sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, 4117 SCTP_FROM_SCTP_PCB + SCTP_LOC_8) == 0) { 4118 cnt++; 4119 } 4120 } 4121 if (cnt) { 4122 /* Ok we have someone out there that will kill us */ 4123 #ifdef SCTP_LOG_CLOSING 4124 sctp_log_closing(inp, NULL, 3); 4125 #endif 4126 SCTP_INP_WUNLOCK(inp); 4127 SCTP_ASOC_CREATE_UNLOCK(inp); 4128 
SCTP_INP_INFO_WUNLOCK(); 4129 return; 4130 } 4131 if (SCTP_INP_LOCK_CONTENDED(inp)) 4132 being_refed++; 4133 if (SCTP_INP_READ_CONTENDED(inp)) 4134 being_refed++; 4135 if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp)) 4136 being_refed++; 4137 /* NOTE: 0 refcount also means no timers are referencing us. */ 4138 if ((inp->refcount) || 4139 (being_refed) || 4140 (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { 4141 #ifdef SCTP_LOG_CLOSING 4142 sctp_log_closing(inp, NULL, 4); 4143 #endif 4144 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); 4145 SCTP_INP_WUNLOCK(inp); 4146 SCTP_ASOC_CREATE_UNLOCK(inp); 4147 SCTP_INP_INFO_WUNLOCK(); 4148 return; 4149 } 4150 inp->sctp_ep.signature_change.type = 0; 4151 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE; 4152 /* Remove it from the list .. last thing we need a 4153 * lock for. 4154 */ 4155 LIST_REMOVE(inp, sctp_list); 4156 SCTP_INP_WUNLOCK(inp); 4157 SCTP_ASOC_CREATE_UNLOCK(inp); 4158 SCTP_INP_INFO_WUNLOCK(); 4159 4160 #ifdef SCTP_LOG_CLOSING 4161 sctp_log_closing(inp, NULL, 5); 4162 #endif 4163 #if !(defined(_WIN32) || defined(__Userspace__)) 4164 #if !(defined(__FreeBSD__) && !defined(__Userspace__)) 4165 rt = ip_pcb->inp_route.ro_rt; 4166 #endif 4167 #endif 4168 if ((inp->sctp_asocidhash) != NULL) { 4169 SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark); 4170 inp->sctp_asocidhash = NULL; 4171 } 4172 /*sa_ignore FREED_MEMORY*/ 4173 TAILQ_FOREACH_SAFE(sq, &inp->read_queue, next, nsq) { 4174 /* Its only abandoned if it had data left */ 4175 if (sq->length) 4176 SCTP_STAT_INCR(sctps_left_abandon); 4177 4178 TAILQ_REMOVE(&inp->read_queue, sq, next); 4179 sctp_free_remote_addr(sq->whoFrom); 4180 if (so) 4181 SCTP_SB_DECR(&so->so_rcv, sq->length); 4182 if (sq->data) { 4183 sctp_m_freem(sq->data); 4184 sq->data = NULL; 4185 } 4186 /* 4187 * no need to free the net count, since at this point all 4188 * assoc's are gone. 
4189 */ 4190 sctp_free_a_readq(NULL, sq); 4191 } 4192 /* Now the sctp_pcb things */ 4193 /* 4194 * free each asoc if it is not already closed/free. we can't use the 4195 * macro here since le_next will get freed as part of the 4196 * sctp_free_assoc() call. 4197 */ 4198 if (ip_pcb->inp_options) { 4199 (void)sctp_m_free(ip_pcb->inp_options); 4200 ip_pcb->inp_options = 0; 4201 } 4202 #if !(defined(_WIN32) || defined(__Userspace__)) 4203 #if !defined(__FreeBSD__) 4204 if (rt) { 4205 RTFREE(rt); 4206 ip_pcb->inp_route.ro_rt = 0; 4207 } 4208 #endif 4209 #endif 4210 #ifdef INET6 4211 #if !(defined(_WIN32) || defined(__Userspace__)) 4212 #if (defined(__FreeBSD__) || defined(__APPLE__) && !defined(__Userspace__)) 4213 if (ip_pcb->inp_vflag & INP_IPV6) { 4214 #else 4215 if (inp->inp_vflag & INP_IPV6) { 4216 #endif 4217 ip6_freepcbopts(ip_pcb->in6p_outputopts); 4218 } 4219 #endif 4220 #endif /* INET6 */ 4221 ip_pcb->inp_vflag = 0; 4222 /* free up authentication fields */ 4223 if (inp->sctp_ep.local_auth_chunks != NULL) 4224 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 4225 if (inp->sctp_ep.local_hmacs != NULL) 4226 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 4227 4228 LIST_FOREACH_SAFE(shared_key, &inp->sctp_ep.shared_keys, next, nshared_key) { 4229 LIST_REMOVE(shared_key, next); 4230 sctp_free_sharedkey(shared_key); 4231 /*sa_ignore FREED_MEMORY*/ 4232 } 4233 4234 #if defined(__APPLE__) && !defined(__Userspace__) 4235 inp->ip_inp.inp.inp_state = INPCB_STATE_DEAD; 4236 if (in_pcb_checkstate(&inp->ip_inp.inp, WNT_STOPUSING, 1) != WNT_STOPUSING) { 4237 #ifdef INVARIANTS 4238 panic("sctp_inpcb_free inp = %p couldn't set to STOPUSING", (void *)inp); 4239 #else 4240 SCTP_PRINTF("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp); 4241 #endif 4242 } 4243 inp->ip_inp.inp.inp_socket->so_flags |= SOF_PCBCLEARING; 4244 #endif 4245 /* 4246 * if we have an address list the following will free the list of 4247 * ifaddr's that are set into this ep. 
Again macro limitations here, 4248 * since the LIST_FOREACH could be a bad idea. 4249 */ 4250 LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { 4251 sctp_remove_laddr(laddr); 4252 } 4253 4254 #ifdef SCTP_TRACK_FREED_ASOCS 4255 /* TEMP CODE */ 4256 LIST_FOREACH_SAFE(stcb, &inp->sctp_asoc_free_list, sctp_tcblist, nstcb) { 4257 LIST_REMOVE(stcb, sctp_tcblist); 4258 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 4259 SCTP_DECR_ASOC_COUNT(); 4260 } 4261 /* *** END TEMP CODE ****/ 4262 #endif 4263 #ifdef SCTP_MVRF 4264 SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF); 4265 #endif 4266 /* Now lets see about freeing the EP hash table. */ 4267 if (inp->sctp_tcbhash != NULL) { 4268 SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 4269 inp->sctp_tcbhash = NULL; 4270 } 4271 /* Now we must put the ep memory back into the zone pool */ 4272 #if defined(__FreeBSD__) && !defined(__Userspace__) 4273 crfree(inp->ip_inp.inp.inp_cred); 4274 INP_LOCK_DESTROY(&inp->ip_inp.inp); 4275 #endif 4276 SCTP_INP_LOCK_DESTROY(inp); 4277 SCTP_INP_READ_LOCK_DESTROY(inp); 4278 SCTP_ASOC_CREATE_LOCK_DESTROY(inp); 4279 #if !(defined(__APPLE__) && !defined(__Userspace__)) 4280 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 4281 SCTP_DECR_EP_COUNT(); 4282 #else 4283 /* For Tiger, we will do this later... 
	 */
#endif
}

/*
 * Return the sctp_nets entry of this association whose address compares
 * equal (via sctp_cmpaddr()) to addr, or NULL if the peer address is not
 * known to the association.
 */
struct sctp_nets *
sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
{
	struct sctp_nets *net;

	/* locate the address */
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
			return (net);
	}
	return (NULL);
}

/*
 * Returns 1 if addr matches an address owned by a local interface in the
 * given VRF (looked up unlocked), 0 otherwise.
 */
int
sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
{
	struct sctp_ifa *sctp_ifa;

	sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED);
	if (sctp_ifa) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * add's a remote endpoint address, done with the INIT/INIT-ACK as well as
 * when a ASCONF arrives that adds it. It will also initialize all the cwnd
 * stats of stuff.
 *
 * Returns 0 on success (including the "already present" case) and -1 on
 * invalid address / allocation failure.  On success the new sctp_nets is
 * optionally returned via *netp, inserted into stcb->asoc.nets ordered so
 * that routable entries and the primary destination come first.
 */
int
sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
    struct sctp_nets **netp, uint16_t port, int set_scope, int from)
{
	/*
	 * The following is redundant to the same lines in the
	 * sctp_aloc_assoc() but is needed since others call the add
	 * address function
	 */
	struct sctp_nets *net, *netfirst;
	int addr_inscope;

	SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ",
	    from);
	SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr);

	netfirst = sctp_findnet(stcb, newaddr);
	if (netfirst) {
		/*
		 * Lie and return ok, we don't want to make the association
		 * go away for this behavior. It will happen in the TCP
		 * model in a connected socket. It does not reach the hash
		 * table until after the association is built so it can't be
		 * found. Mark as reachable, since the initial creation will
		 * have been cleared and the NOT_IN_ASSOC flag will have
		 * been added... and we don't want to end up removing it
		 * back out.
		 */
		if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
			netfirst->dest_state = (SCTP_ADDR_REACHABLE |
			    SCTP_ADDR_UNCONFIRMED);
		} else {
			netfirst->dest_state = SCTP_ADDR_REACHABLE;
		}

		return (0);
	}
	addr_inscope = 1;
	/* Validate the address family and update the association scopes. */
	switch (newaddr->sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)newaddr;
		if (sin->sin_addr.s_addr == 0) {
			/* Invalid address */
			return (-1);
		}
		/* zero out the zero area */
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));

		/* assure len is set */
#ifdef HAVE_SIN_LEN
		sin->sin_len = sizeof(struct sockaddr_in);
#endif
		if (set_scope) {
			if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
				stcb->asoc.scope.ipv4_local_scope = 1;
			}
		} else {
			/* Validate the address is in scope */
			if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
			    (stcb->asoc.scope.ipv4_local_scope == 0)) {
				addr_inscope = 0;
			}
		}
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)newaddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/* Invalid address */
			return (-1);
		}
		/* assure len is set */
#ifdef HAVE_SIN6_LEN
		sin6->sin6_len = sizeof(struct sockaddr_in6);
#endif
		if (set_scope) {
			if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) {
				stcb->asoc.scope.loopback_scope = 1;
				stcb->asoc.scope.local_scope = 0;
				stcb->asoc.scope.ipv4_local_scope = 1;
				stcb->asoc.scope.site_scope = 1;
			} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
				/*
				 * If the new destination is a LINK_LOCAL we
				 * must have common site scope. Don't set
				 * the local scope since we may not share
				 * all links, only loopback can do this.
				 * Links on the local network would also be
				 * on our private network for v4 too.
				 */
				stcb->asoc.scope.ipv4_local_scope = 1;
				stcb->asoc.scope.site_scope = 1;
			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
				/*
				 * If the new destination is SITE_LOCAL then
				 * we must have site scope in common.
				 */
				stcb->asoc.scope.site_scope = 1;
			}
		} else {
			/* Validate the address is in scope */
			if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
			    (stcb->asoc.scope.loopback_scope == 0)) {
				addr_inscope = 0;
			} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
			    (stcb->asoc.scope.local_scope == 0)) {
				addr_inscope = 0;
			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
			    (stcb->asoc.scope.site_scope == 0)) {
				addr_inscope = 0;
			}
		}
		break;
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)newaddr;
		if (sconn->sconn_addr == NULL) {
			/* Invalid address */
			return (-1);
		}
#ifdef HAVE_SCONN_LEN
		sconn->sconn_len = sizeof(struct sockaddr_conn);
#endif
		break;
	}
#endif
	default:
		/* not supported family type */
		return (-1);
	}
	net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets);
	if (net == NULL) {
		return (-1);
	}
	SCTP_INCR_RADDR_COUNT();
	memset(net, 0, sizeof(struct sctp_nets));
	(void)SCTP_GETTIME_TIMEVAL(&net->start_time);
	/* Copy the peer address, forcing the port to the association's rport. */
#ifdef HAVE_SA_LEN
	memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
#endif
	switch (newaddr->sa_family) {
#ifdef INET
	case AF_INET:
#ifndef HAVE_SA_LEN
		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in));
#endif
		((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
		break;
#endif
#ifdef INET6
	case AF_INET6:
#ifndef HAVE_SA_LEN
		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in6));
#endif
		((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
#ifndef HAVE_SA_LEN
		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_conn));
#endif
		((struct sockaddr_conn *)&net->ro._l_addr)->sconn_port = stcb->rport;
		break;
#endif
	default:
		break;
	}
	net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id);
	if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) {
		stcb->asoc.scope.loopback_scope = 1;
		stcb->asoc.scope.ipv4_local_scope = 1;
		stcb->asoc.scope.local_scope = 0;
		stcb->asoc.scope.site_scope = 1;
		addr_inscope = 1;
	}
	net->failure_threshold = stcb->asoc.def_net_failure;
	net->pf_threshold = stcb->asoc.def_net_pf_threshold;
	if (addr_inscope == 0) {
		net->dest_state = (SCTP_ADDR_REACHABLE |
		    SCTP_ADDR_OUT_OF_SCOPE);
	} else {
		if (from == SCTP_ADDR_IS_CONFIRMED)
			/* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
			net->dest_state = SCTP_ADDR_REACHABLE;
		else
			net->dest_state = SCTP_ADDR_REACHABLE |
			    SCTP_ADDR_UNCONFIRMED;
	}
	/* We set this to 0, the timer code knows that
	 * this means its an initial value
	 */
	net->rto_needed = 1;
	net->RTO = 0;
	net->RTO_measured = 0;
	stcb->asoc.numnets++;
	net->ref_count = 1;
	net->cwr_window_tsn = net->last_cwr_tsn = stcb->asoc.sending_seq - 1;
	net->port = port;
	net->dscp = stcb->asoc.default_dscp;
#ifdef INET6
	net->flowlabel = stcb->asoc.default_flowlabel;
#endif
	/* Inherit per-endpoint HB / PMTUD policy into the per-path state. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
		net->dest_state |= SCTP_ADDR_NOHB;
	} else {
		net->dest_state &= ~SCTP_ADDR_NOHB;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb,
	    SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
		net->dest_state |= SCTP_ADDR_NO_PMTUD;
	} else {
		net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
	}
	net->heart_beat_delay = stcb->asoc.heart_beat_delay;
	/* Init the timer structure */
	SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
	SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
	SCTP_OS_TIMER_INIT(&net->hb_timer.timer);

	/* Now generate a route for this guy */
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
	/* KAME hack: embed scopeid */
	if (newaddr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
		(void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL);
#else
		(void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL, NULL);
#endif
#elif defined(SCTP_KAME)
		(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
#else
		(void)in6_embedscope(&sin6->sin6_addr, sin6);
#endif
#ifndef SCOPEDROUTING
		sin6->sin6_scope_id = 0;
#endif
	}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif
	SCTP_RTALLOC((sctp_route_t *)&net->ro,
	    stcb->asoc.vrf_id,
	    stcb->sctp_ep->fibnum);

	net->src_addr_selected = 0;
#if !defined(__Userspace__)
	if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) {
		/* Get source address */
		net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep,
		    stcb,
		    (sctp_route_t *)&net->ro,
		    net,
		    0,
		    stcb->asoc.vrf_id);
		if (stcb->asoc.default_mtu > 0) {
			/* A configured default MTU overrides interface/route MTUs. */
			net->mtu = stcb->asoc.default_mtu;
			switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
			case AF_INET:
				net->mtu += SCTP_MIN_V4_OVERHEAD;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				net->mtu += SCTP_MIN_OVERHEAD;
				break;
#endif
#if defined(__Userspace__)
			case AF_CONN:
				net->mtu += sizeof(struct sctphdr);
				break;
#endif
			default:
				break;
			}
#if defined(INET) || defined(INET6)
			if (net->port) {
				net->mtu += (uint32_t)sizeof(struct udphdr);
			}
#endif
		} else if (net->ro._s_addr != NULL) {
			uint32_t imtu, rmtu, hcmtu;

			net->src_addr_selected = 1;
			/* Now get the interface MTU */
			if (net->ro._s_addr->ifn_p != NULL) {
				/*
				 * XXX: Should we here just use
				 * net->ro._s_addr->ifn_p->ifn_mtu
				 */
				imtu = SCTP_GATHER_MTU_FROM_IFN_INFO(net->ro._s_addr->ifn_p->ifn_p,
				    net->ro._s_addr->ifn_p->ifn_index);
			} else {
				imtu = 0;
			}
#if defined(__FreeBSD__) && !defined(__Userspace__)
			rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_nh);
			hcmtu = sctp_hc_get_mtu(&net->ro._l_addr, stcb->sctp_ep->fibnum);
#else
			rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
			hcmtu = 0;
#endif
			/* Smallest non-zero of host-cache, route and interface MTU. */
			net->mtu = sctp_min_mtu(hcmtu, rmtu, imtu);
#if defined(__FreeBSD__) && !defined(__Userspace__)
#else
			if (rmtu == 0) {
				/* Start things off to match mtu of interface please. */
				SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa,
				    net->ro.ro_rt, net->mtu);
			}
#endif
		}
	}
#endif
	if (net->mtu == 0) {
		/* No route/interface information: fall back to defaults. */
		if (stcb->asoc.default_mtu > 0) {
			net->mtu = stcb->asoc.default_mtu;
			switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
			case AF_INET:
				net->mtu += SCTP_MIN_V4_OVERHEAD;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				net->mtu += SCTP_MIN_OVERHEAD;
				break;
#endif
#if defined(__Userspace__)
			case AF_CONN:
				net->mtu += sizeof(struct sctphdr);
				break;
#endif
			default:
				break;
			}
#if defined(INET) || defined(INET6)
			if (net->port) {
				net->mtu += (uint32_t)sizeof(struct udphdr);
			}
#endif
		} else {
			switch (newaddr->sa_family) {
#ifdef INET
			case AF_INET:
				net->mtu = SCTP_DEFAULT_MTU;
				break;
#endif
#ifdef INET6
			case AF_INET6:
				net->mtu = 1280;
				break;
#endif
#if defined(__Userspace__)
			case AF_CONN:
				net->mtu = 1280;
				break;
#endif
			default:
				break;
			}
		}
	}
#if defined(INET) || defined(INET6)
	if (net->port) {
		/*
		 * UDP encapsulation is in use for this path: reserve room
		 * for the UDP header in the usable MTU.
		 */
		net->mtu -= (uint32_t)sizeof(struct udphdr);
	}
#endif
	if (from == SCTP_ALLOC_ASOC) {
		stcb->asoc.smallest_mtu = net->mtu;
	}
	if (stcb->asoc.smallest_mtu > net->mtu) {
		sctp_pathmtu_adjustment(stcb, net->mtu, true);
	}
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
	if (newaddr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
#ifdef SCTP_KAME
		(void)sa6_recoverscope(sin6);
#else
		(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
#endif /* SCTP_KAME */
	}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif

	/* JRS - Use the congestion control given in the CC module */
	if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL)
		(*stcb->asoc.cc_functions.sctp_set_initial_cc_param)(stcb, net);

	/*
	 * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
	 * of assoc (2005/06/27, iyengar@cis.udel.edu)
	 */
	net->find_pseudo_cumack = 1;
	net->find_rtx_pseudo_cumack = 1;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Choose an initial flowid. */
	net->flowid = stcb->asoc.my_vtag ^
	    ntohs(stcb->rport) ^
	    ntohs(stcb->sctp_ep->sctp_lport);
	net->flowtype = M_HASHTYPE_OPAQUE_HASH;
#endif
	if (netp) {
		*netp = net;
	}
	/*
	 * Insert into the nets list keeping entries with a valid route (and
	 * same-interface groups) toward the front.
	 */
	netfirst = TAILQ_FIRST(&stcb->asoc.nets);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (net->ro.ro_nh == NULL) {
#else
	if (net->ro.ro_rt == NULL) {
#endif
		/* Since we have no route put it at the back */
		TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
	} else if (netfirst == NULL) {
		/* We are the first one in the pool. */
		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	} else if (netfirst->ro.ro_nh == NULL) {
#else
	} else if (netfirst->ro.ro_rt == NULL) {
#endif
		/*
		 * First one has NO route. Place this one ahead of the first
		 * one.
		 */
		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	} else if (net->ro.ro_nh->nh_ifp != netfirst->ro.ro_nh->nh_ifp) {
#else
	} else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
#endif
		/*
		 * This one has a different interface than the one at the
		 * top of the list. Place it ahead.
		 */
		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
	} else {
		/*
		 * Ok we have the same interface as the first one. Move
		 * forward until we find either a) one with a NULL route...
		 * insert ahead of that b) one with a different ifp.. insert
		 * after that. c) end of the list.. insert at the tail.
		 */
		struct sctp_nets *netlook;

		do {
			netlook = TAILQ_NEXT(netfirst, sctp_next);
			if (netlook == NULL) {
				/* End of the list */
				TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
				break;
#if defined(__FreeBSD__) && !defined(__Userspace__)
			} else if (netlook->ro.ro_nh == NULL) {
#else
			} else if (netlook->ro.ro_rt == NULL) {
#endif
				/* next one has NO route */
				TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
				break;
#if defined(__FreeBSD__) && !defined(__Userspace__)
			} else if (netlook->ro.ro_nh->nh_ifp != net->ro.ro_nh->nh_ifp) {
#else
			} else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) {
#endif
				TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
				    net, sctp_next);
				break;
			}
			/* Shift forward */
			netfirst = netlook;
		} while (netlook != NULL);
	}

	/* got to have a primary set */
	if (stcb->asoc.primary_destination == 0) {
		stcb->asoc.primary_destination = net;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	} else if ((stcb->asoc.primary_destination->ro.ro_nh == NULL) &&
	    (net->ro.ro_nh) &&
#else
	} else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
	    (net->ro.ro_rt) &&
#endif
	    ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
		/* No route to current primary adopt new primary */
		stcb->asoc.primary_destination = net;
	}
	/* Validate primary is first */
	net = TAILQ_FIRST(&stcb->asoc.nets);
	if ((net != stcb->asoc.primary_destination) &&
	    (stcb->asoc.primary_destination)) {
		/* first one on the list is NOT the primary
		 * sctp_cmpaddr() is much more efficient if
		 * the primary is the first on the list, make it
		 * so.
		 */
		TAILQ_REMOVE(&stcb->asoc.nets,
		    stcb->asoc.primary_destination, sctp_next);
		TAILQ_INSERT_HEAD(&stcb->asoc.nets,
		    stcb->asoc.primary_destination, sctp_next);
	}
	return (0);
}

/*
 * Pick an unused association id for stcb on inp, insert stcb into the
 * endpoint's asocid hash under that id and return it.  Returns 0 (without
 * hashing) if the socket is already all-gone.  Values up to SCTP_ALL_ASSOC
 * are reserved and skipped.
 */
static uint32_t
sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	uint32_t id;
	struct sctpasochead *head;
	struct sctp_tcb *lstcb;

try_again:
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
		/* TSNH */
		return (0);
	}
	/*
	 * We don't allow assoc id to be one of SCTP_FUTURE_ASSOC,
	 * SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC.
	 */
	if (inp->sctp_associd_counter <= SCTP_ALL_ASSOC) {
		inp->sctp_associd_counter = SCTP_ALL_ASSOC + 1;
	}
	id = inp->sctp_associd_counter;
	inp->sctp_associd_counter++;
	/* Retry on collision with an id still in use on this endpoint. */
	lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0);
	if (lstcb) {
		goto try_again;
	}
	head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
	LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
	stcb->asoc.in_asocid_hash = 1;
	return (id);
}

/*
 * allocate an association and add it to the endpoint. The caller must be
 * careful to add all additional addresses once they are know right away or
 * else the assoc will be may experience a blackout scenario.
 */
static struct sctp_tcb *
sctp_aloc_assoc_locked(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
    int *error, uint32_t override_tag, uint32_t initial_tsn,
    uint32_t vrf_id, uint16_t o_streams, uint16_t port,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    struct thread *p,
#elif defined(_WIN32) && !defined(__Userspace__)
    PKTHREAD p,
#else
#if defined(__Userspace__)
	/* __Userspace__ NULL proc is going to be passed here.
See sctp_lower_sosend */ 4889 #endif 4890 struct proc *p, 4891 #endif 4892 int initialize_auth_params) 4893 { 4894 /* note the p argument is only valid in unbound sockets */ 4895 4896 struct sctp_tcb *stcb; 4897 struct sctp_association *asoc; 4898 struct sctpasochead *head; 4899 uint16_t rport; 4900 int err; 4901 4902 SCTP_INP_INFO_WLOCK_ASSERT(); 4903 SCTP_INP_WLOCK_ASSERT(inp); 4904 4905 /* 4906 * Assumption made here: Caller has done a 4907 * sctp_findassociation_ep_addr(ep, addr's); to make sure the 4908 * address does not exist already. 4909 */ 4910 if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) { 4911 /* Hit max assoc, sorry no more */ 4912 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 4913 *error = ENOBUFS; 4914 return (NULL); 4915 } 4916 if (firstaddr == NULL) { 4917 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 4918 *error = EINVAL; 4919 return (NULL); 4920 } 4921 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 4922 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 4923 *error = EINVAL; 4924 return (NULL); 4925 } 4926 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 4927 ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) || 4928 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 4929 /* 4930 * If its in the TCP pool, its NOT allowed to create an 4931 * association. The parent listener needs to call 4932 * sctp_aloc_assoc.. or the one-2-many socket. If a peeled 4933 * off, or connected one does this.. its an error. 
4934 */ 4935 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 4936 *error = EINVAL; 4937 return (NULL); 4938 } 4939 if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 4940 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) { 4941 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) || 4942 (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) { 4943 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 4944 *error = EINVAL; 4945 return (NULL); 4946 } 4947 } 4948 SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:"); 4949 #ifdef SCTP_DEBUG 4950 if (firstaddr) { 4951 SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr); 4952 switch (firstaddr->sa_family) { 4953 #ifdef INET 4954 case AF_INET: 4955 SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 4956 ntohs(((struct sockaddr_in *)firstaddr)->sin_port)); 4957 break; 4958 #endif 4959 #ifdef INET6 4960 case AF_INET6: 4961 SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 4962 ntohs(((struct sockaddr_in6 *)firstaddr)->sin6_port)); 4963 break; 4964 #endif 4965 #if defined(__Userspace__) 4966 case AF_CONN: 4967 SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 4968 ntohs(((struct sockaddr_conn *)firstaddr)->sconn_port)); 4969 break; 4970 #endif 4971 default: 4972 break; 4973 } 4974 } else { 4975 SCTPDBG(SCTP_DEBUG_PCB3,"None\n"); 4976 } 4977 #endif /* SCTP_DEBUG */ 4978 switch (firstaddr->sa_family) { 4979 #ifdef INET 4980 case AF_INET: 4981 { 4982 struct sockaddr_in *sin; 4983 4984 sin = (struct sockaddr_in *)firstaddr; 4985 if ((ntohs(sin->sin_port) == 0) || 4986 (sin->sin_addr.s_addr == INADDR_ANY) || 4987 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 4988 IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) || 4989 #if defined(__Userspace__) 4990 ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) || 4991 ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 4992 (SCTP_IPV6_V6ONLY(inp) != 0)))) { 4993 #else 4994 ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 4995 (SCTP_IPV6_V6ONLY(inp) != 0))) { 4996 #endif 4997 /* Invalid address */ 4998 
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 4999 *error = EINVAL; 5000 return (NULL); 5001 } 5002 rport = sin->sin_port; 5003 break; 5004 } 5005 #endif 5006 #ifdef INET6 5007 case AF_INET6: 5008 { 5009 struct sockaddr_in6 *sin6; 5010 5011 sin6 = (struct sockaddr_in6 *)firstaddr; 5012 if ((ntohs(sin6->sin6_port) == 0) || 5013 IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 5014 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr) || 5015 ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)) { 5016 /* Invalid address */ 5017 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 5018 *error = EINVAL; 5019 return (NULL); 5020 } 5021 rport = sin6->sin6_port; 5022 break; 5023 } 5024 #endif 5025 #if defined(__Userspace__) 5026 case AF_CONN: 5027 { 5028 struct sockaddr_conn *sconn; 5029 5030 sconn = (struct sockaddr_conn *)firstaddr; 5031 if ((ntohs(sconn->sconn_port) == 0) || 5032 (sconn->sconn_addr == NULL) || 5033 ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) == 0)) { 5034 /* Invalid address */ 5035 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 5036 *error = EINVAL; 5037 return (NULL); 5038 } 5039 rport = sconn->sconn_port; 5040 break; 5041 } 5042 #endif 5043 default: 5044 /* not supported family type */ 5045 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 5046 *error = EINVAL; 5047 return (NULL); 5048 } 5049 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 5050 /* 5051 * If you have not performed a bind, then we need to do the 5052 * ephemeral bind for you. 5053 */ 5054 if ((err = sctp_inpcb_bind_locked(inp, NULL, NULL, p))) { 5055 /* bind error, probably perm */ 5056 *error = err; 5057 return (NULL); 5058 } 5059 } 5060 stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb); 5061 if (stcb == NULL) { 5062 /* out of memory? 
*/ 5063 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); 5064 *error = ENOMEM; 5065 return (NULL); 5066 } 5067 SCTP_INCR_ASOC_COUNT(); 5068 5069 memset(stcb, 0, sizeof(*stcb)); 5070 asoc = &stcb->asoc; 5071 5072 SCTP_TCB_LOCK_INIT(stcb); 5073 stcb->rport = rport; 5074 /* setup back pointer's */ 5075 stcb->sctp_ep = inp; 5076 stcb->sctp_socket = inp->sctp_socket; 5077 if ((err = sctp_init_asoc(inp, stcb, override_tag, initial_tsn, vrf_id, o_streams))) { 5078 /* failed */ 5079 SCTP_TCB_LOCK_DESTROY(stcb); 5080 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 5081 SCTP_DECR_ASOC_COUNT(); 5082 *error = err; 5083 return (NULL); 5084 } 5085 SCTP_TCB_LOCK(stcb); 5086 5087 asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb); 5088 /* now that my_vtag is set, add it to the hash */ 5089 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 5090 /* put it in the bucket in the vtag hash of assoc's for the system */ 5091 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 5092 5093 if (sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC)) { 5094 /* failure.. memory error? 
*/ 5095 if (asoc->strmout) { 5096 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 5097 asoc->strmout = NULL; 5098 } 5099 if (asoc->mapping_array) { 5100 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 5101 asoc->mapping_array = NULL; 5102 } 5103 if (asoc->nr_mapping_array) { 5104 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 5105 asoc->nr_mapping_array = NULL; 5106 } 5107 SCTP_DECR_ASOC_COUNT(); 5108 SCTP_TCB_UNLOCK(stcb); 5109 SCTP_TCB_LOCK_DESTROY(stcb); 5110 LIST_REMOVE(stcb, sctp_asocs); 5111 LIST_REMOVE(stcb, sctp_tcbasocidhash); 5112 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 5113 SCTP_INP_WUNLOCK(inp); 5114 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 5115 *error = ENOBUFS; 5116 return (NULL); 5117 } 5118 /* Init all the timers */ 5119 SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer); 5120 SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer); 5121 SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer); 5122 SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer); 5123 SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer); 5124 SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer); 5125 5126 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist); 5127 /* now file the port under the hash as well */ 5128 if (inp->sctp_tcbhash != NULL) { 5129 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport, 5130 inp->sctp_hashmark)]; 5131 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash); 5132 } 5133 if (initialize_auth_params == SCTP_INITIALIZE_AUTH_PARAMS) { 5134 sctp_initialize_auth_params(inp, stcb); 5135 } 5136 SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb); 5137 return (stcb); 5138 } 5139 5140 struct sctp_tcb * 5141 sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, 5142 int *error, uint32_t override_tag, uint32_t initial_tsn, 5143 uint32_t vrf_id, uint16_t o_streams, uint16_t port, 5144 #if defined(__FreeBSD__) && !defined(__Userspace__) 5145 struct thread *p, 5146 #elif defined(_WIN32) && !defined(__Userspace__) 5147 PKTHREAD p, 
#else
    struct proc *p,
#endif
    int initialize_auth_params)
{
	struct sctp_tcb *stcb;

	/* Take both locks, allocate, then release; see sctp_aloc_assoc_locked(). */
	SCTP_INP_INFO_WLOCK();
	SCTP_INP_WLOCK(inp);
	stcb = sctp_aloc_assoc_locked(inp, firstaddr, error, override_tag,
	    initial_tsn, vrf_id, o_streams, port, p, initialize_auth_params);
	SCTP_INP_INFO_WUNLOCK();
	SCTP_INP_WUNLOCK(inp);
	return (stcb);
}

/*
 * As sctp_aloc_assoc(), but for the connect() path on one-to-one style
 * sockets: fails with EINVAL on a listening TCP-type endpoint and, on
 * success for a TCP-type endpoint, marks it connected and moves the
 * socket into the connecting state.
 */
struct sctp_tcb *
sctp_aloc_assoc_connected(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
    int *error, uint32_t override_tag, uint32_t initial_tsn,
    uint32_t vrf_id, uint16_t o_streams, uint16_t port,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    struct thread *p,
#elif defined(_WIN32) && !defined(__Userspace__)
    PKTHREAD p,
#else
    struct proc *p,
#endif
    int initialize_auth_params)
{
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_WLOCK();
	SCTP_INP_WLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    SCTP_IS_LISTENING(inp)) {
		/* A listening 1-to-1 socket may not initiate an association. */
		SCTP_INP_INFO_WUNLOCK();
		SCTP_INP_WUNLOCK(inp);
		*error = EINVAL;
		return (NULL);
	}
	stcb = sctp_aloc_assoc_locked(inp, firstaddr, error, override_tag,
	    initial_tsn, vrf_id, o_streams, port, p, initialize_auth_params);
	SCTP_INP_INFO_WUNLOCK();
	if (stcb != NULL && (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
		inp->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
		soisconnecting(inp->sctp_socket);
	}
	SCTP_INP_WUNLOCK(inp);
	return (stcb);
}

/*
 * Unlink net from the association and drop the list's reference to it.
 * Repairs primary/last-used pointers that referenced net, stops its
 * per-path timers, and (for the mobility feature) may park the deleted
 * primary in asoc->deleted_primary with an extra reference.
 */
void
sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_inpcb *inp;
	struct sctp_association *asoc;

	inp = stcb->sctp_ep;
	asoc = &stcb->asoc;
	asoc->numnets--;
	TAILQ_REMOVE(&asoc->nets, net, sctp_next);
	if (net == asoc->primary_destination) {
		/* Reset primary */
		struct sctp_nets *lnet;

		lnet = TAILQ_FIRST(&asoc->nets);
		/* Mobility adaptation
		   Ideally, if deleted destination is the primary, it becomes
		   a fast retransmission trigger by the subsequent SET PRIMARY.
		   (by micchie)
		 */
		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) {
			SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n");
			if (asoc->deleted_primary != NULL) {
				SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n");
				goto out;
			}
			/* Keep the deleted primary alive until the timer fires. */
			asoc->deleted_primary = net;
			atomic_add_int(&net->ref_count, 1);
			memset(&net->lastsa, 0, sizeof(net->lastsa));
			memset(&net->lastsv, 0, sizeof(net->lastsv));
			sctp_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_PRIM_DELETED);
			sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED,
			    stcb->sctp_ep, stcb, NULL);
		}
out:
		/* Try to find a confirmed primary */
		asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
	}
	if (net == asoc->last_data_chunk_from) {
		/* Reset primary */
		asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
	}
	if (net == asoc->last_control_chunk_from) {
		/* Clear net */
		asoc->last_control_chunk_from = NULL;
	}
	if (net == asoc->last_net_cmt_send_started) {
		/* Clear net */
		asoc->last_net_cmt_send_started = NULL;
	}
	if (net == stcb->asoc.alternate) {
		sctp_free_remote_addr(stcb->asoc.alternate);
		stcb->asoc.alternate = NULL;
	}
	sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
	    SCTP_FROM_SCTP_PCB + SCTP_LOC_9);
	sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
	    SCTP_FROM_SCTP_PCB + SCTP_LOC_10);
	net->dest_state |= SCTP_ADDR_BEING_DELETED;
	sctp_free_remote_addr(net);
}

/*
 * remove a remote endpoint address from an association, it will fail if the
 * address does not exist.
 *
 * Returns 0 on success, -1 if it is the last remaining address, and -2 if
 * the address is not part of the association.
 */
int
sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
{
	/*
	 * Here we need to remove a remote address. This is quite simple, we
	 * first find it in the list of address for the association
	 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE
	 * on that item. Note we do not allow it to be removed if there are
	 * no other addresses.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net, *nnet;

	asoc = &stcb->asoc;

	/* locate the address */
	TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) {
		if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
			continue;
		}
		if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
		    remaddr)) {
			/* we found the guy */
			if (asoc->numnets < 2) {
				/* Must have at LEAST two remote addresses */
				return (-1);
			} else {
				sctp_remove_net(stcb, net);
				return (0);
			}
		}
	}
	/* not found. */
	return (-2);
}

/*
 * Return true iff the (tag, lport, rport) tuple occupies a not-yet-expired
 * slot in the vtag time-wait table ('now' is compared against the slot's
 * expiry time).  Caller must hold the INP-INFO lock (asserted).
 */
static bool
sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport, time_t now)
{
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block;
	int i;

	SCTP_INP_INFO_LOCK_ASSERT();
	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
			if ((twait_block->vtag_block[i].tv_sec_at_expire >= now) &&
			    (twait_block->vtag_block[i].v_tag == tag) &&
			    (twait_block->vtag_block[i].lport == lport) &&
			    (twait_block->vtag_block[i].rport == rport)) {
				return (true);
			}
		}
	}
	return (false);
}

/* Fill one time-wait slot; a v_tag of 0 marks the slot free. */
static void
sctp_set_vtag_block(struct sctp_timewait *vtag_block, time_t time,
    uint32_t tag, uint16_t lport, uint16_t rport)
{
	vtag_block->tv_sec_at_expire = time;
	vtag_block->v_tag = tag;
	vtag_block->lport = lport;
	vtag_block->rport = rport;
}

/*
 * Record (tag, lport, rport) in the vtag time-wait table for
 * sctp_vtag_time_wait seconds, reusing free or expired slots and expiring
 * stale entries encountered on the way.  Allocates a new tag block if no
 * slot is available; on allocation failure the tag is silently dropped.
 * Caller must hold the INP-INFO write lock (asserted).
 */
static void
sctp_add_vtag_to_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
{
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block;
	struct timeval now;
	time_t time;
	int i;
	bool set;

	SCTP_INP_INFO_WLOCK_ASSERT();
	(void)SCTP_GETTIME_TIMEVAL(&now);
	time = now.tv_sec + SCTP_BASE_SYSCTL(sctp_vtag_time_wait);
	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
	set = false;
	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
		/* Block(s) present, lets find space, and expire on the fly */
		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
			if ((twait_block->vtag_block[i].v_tag == 0) && !set) {
				sctp_set_vtag_block(twait_block->vtag_block + i, time, tag, lport, rport);
				set = true;
				continue;
			}
			if ((twait_block->vtag_block[i].v_tag != 0) &&
			    (twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
				if (set) {
					/* Audit expires this guy */
					sctp_set_vtag_block(twait_block->vtag_block + i, 0, 0, 0, 0);
				} else {
					/* Reuse it for the new tag */
					sctp_set_vtag_block(twait_block->vtag_block + i, time, tag, lport, rport);
					set = true;
				}
			}
		}
		if (set) {
			/*
			 * We only do up to the block where we can
			 * place our tag for audits
			 */
			break;
		}
	}
	/* Need to add a new block to chain */
	if (!set) {
		SCTP_MALLOC(twait_block, struct sctp_tagblock *,
		    sizeof(struct sctp_tagblock), SCTP_M_TIMW);
		if (twait_block == NULL) {
			return;
		}
		memset(twait_block, 0, sizeof(struct sctp_tagblock));
		LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
		sctp_set_vtag_block(twait_block->vtag_block, time, tag, lport, rport);
	}
}

/*
 * Drain one in-stream read queue: detach every queued_to_read entry, free
 * its data and reassembly chunks, and release the entry itself unless it
 * is still sitting on the socket's read queue.
 */
void
sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh)
{
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_queued_to_read *control, *ncontrol;

	TAILQ_FOREACH_SAFE(control, rh, next_instrm, ncontrol) {
		TAILQ_REMOVE(rh, control, next_instrm);
		control->on_strm_q = 0;
		if (control->on_read_q == 0) {
			sctp_free_remote_addr(control->whoFrom);
			if (control->data) {
				sctp_m_freem(control->data);
				control->data = NULL;
			}
		}
		/* Reassembly free? */
		TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			if (chk->holds_key_ref)
				sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
			SCTP_DECR_CHK_COUNT();
			/*sa_ignore FREED_MEMORY*/
		}
		/*
		 * We don't free the address here
		 * since all the net's were freed
		 * above.
		 */
		if (control->on_read_q == 0) {
			sctp_free_a_readq(stcb, control);
		}
	}
}

/*-
 * Free the association after un-hashing the remote port. This
 * function ALWAYS returns holding NO LOCK on the stcb. It DOES
 * expect that the input to this function IS a locked TCB.
 * It will return 0, if it did NOT destroy the association (instead
 * it unlocks it. It will return NON-zero if it either destroyed the
 * association OR the association is already destroyed.
 */
int
sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
{
	int i;
	struct sctp_association *asoc;
	struct sctp_nets *net, *nnet;
	struct sctp_laddr *laddr, *naddr;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_asconf_addr *aparam, *naparam;
	struct sctp_asconf_ack *aack, *naack;
	struct sctp_stream_reset_list *strrst, *nstrrst;
	struct sctp_queued_to_read *sq, *nsq;
	struct sctp_stream_queue_pending *sp, *nsp;
	sctp_sharedkey_t *shared_key, *nshared_key;
	struct socket *so;

	/* first, lets purge the entry from the hash table.
*/ 5459 #if defined(__APPLE__) && !defined(__Userspace__) 5460 sctp_lock_assert(SCTP_INP_SO(inp)); 5461 #endif 5462 SCTP_TCB_LOCK_ASSERT(stcb); 5463 5464 #ifdef SCTP_LOG_CLOSING 5465 sctp_log_closing(inp, stcb, 6); 5466 #endif 5467 if (stcb->asoc.state == 0) { 5468 #ifdef SCTP_LOG_CLOSING 5469 sctp_log_closing(inp, NULL, 7); 5470 #endif 5471 /* there is no asoc, really TSNH :-0 */ 5472 return (1); 5473 } 5474 if (stcb->asoc.alternate) { 5475 sctp_free_remote_addr(stcb->asoc.alternate); 5476 stcb->asoc.alternate = NULL; 5477 } 5478 #if !(defined(__APPLE__) && !defined(__Userspace__)) 5479 /* TEMP CODE */ 5480 if (stcb->freed_from_where == 0) { 5481 /* Only record the first place free happened from */ 5482 stcb->freed_from_where = from_location; 5483 } 5484 /* TEMP CODE */ 5485 #endif 5486 5487 asoc = &stcb->asoc; 5488 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 5489 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 5490 /* nothing around */ 5491 so = NULL; 5492 else 5493 so = inp->sctp_socket; 5494 5495 /* 5496 * We used timer based freeing if a reader or writer is in the way. 5497 * So we first check if we are actually being called from a timer, 5498 * if so we abort early if a reader or writer is still in the way. 5499 */ 5500 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 5501 (from_inpcbfree == SCTP_NORMAL_PROC)) { 5502 /* 5503 * is it the timer driving us? if so are the reader/writers 5504 * gone? 
5505 */ 5506 if (stcb->asoc.refcnt) { 5507 /* nope, reader or writer in the way */ 5508 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 5509 /* no asoc destroyed */ 5510 SCTP_TCB_UNLOCK(stcb); 5511 #ifdef SCTP_LOG_CLOSING 5512 sctp_log_closing(inp, stcb, 8); 5513 #endif 5514 return (0); 5515 } 5516 } 5517 /* Now clean up any other timers */ 5518 sctp_stop_association_timers(stcb, false); 5519 /* Now the read queue needs to be cleaned up (only once) */ 5520 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { 5521 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_ABOUT_TO_BE_FREED); 5522 SCTP_INP_READ_LOCK(inp); 5523 TAILQ_FOREACH(sq, &inp->read_queue, next) { 5524 if (sq->stcb == stcb) { 5525 sq->do_not_ref_stcb = 1; 5526 sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5527 /* If there is no end, there never 5528 * will be now. 5529 */ 5530 if (sq->end_added == 0) { 5531 /* Held for PD-API, clear that. */ 5532 sq->pdapi_aborted = 1; 5533 sq->held_length = 0; 5534 if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) { 5535 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 5536 stcb, 5537 SCTP_PARTIAL_DELIVERY_ABORTED, 5538 (void *)sq, 5539 SCTP_SO_LOCKED); 5540 } 5541 /* Add an end to wake them */ 5542 sq->end_added = 1; 5543 } 5544 } 5545 } 5546 SCTP_INP_READ_UNLOCK(inp); 5547 if (stcb->block_entry) { 5548 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET); 5549 stcb->block_entry->error = ECONNRESET; 5550 stcb->block_entry = NULL; 5551 } 5552 } 5553 if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) { 5554 /* Someone holds a reference OR the socket is unaccepted yet. 
5555 */ 5556 if ((stcb->asoc.refcnt) || 5557 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 5558 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 5559 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); 5560 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 5561 } 5562 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 5563 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 5564 /* nothing around */ 5565 so = NULL; 5566 if (so) { 5567 /* Wake any reader/writers */ 5568 sctp_sorwakeup(inp, so); 5569 sctp_sowwakeup(inp, so); 5570 } 5571 SCTP_TCB_UNLOCK(stcb); 5572 5573 #ifdef SCTP_LOG_CLOSING 5574 sctp_log_closing(inp, stcb, 9); 5575 #endif 5576 /* no asoc destroyed */ 5577 return (0); 5578 } 5579 #ifdef SCTP_LOG_CLOSING 5580 sctp_log_closing(inp, stcb, 10); 5581 #endif 5582 /* When I reach here, no others want 5583 * to kill the assoc yet.. and I own 5584 * the lock. Now its possible an abort 5585 * comes in when I do the lock exchange 5586 * below to grab all the locks to do 5587 * the final take out. to prevent this 5588 * we increment the count, which will 5589 * start a timer and blow out above thus 5590 * assuring us that we hold exclusive 5591 * killing of the asoc. 
Note that 5592 * after getting back the TCB lock 5593 * we will go ahead and increment the 5594 * counter back up and stop any timer 5595 * a passing stranger may have started :-S 5596 */ 5597 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5598 atomic_add_int(&stcb->asoc.refcnt, 1); 5599 5600 SCTP_TCB_UNLOCK(stcb); 5601 SCTP_INP_INFO_WLOCK(); 5602 SCTP_INP_WLOCK(inp); 5603 SCTP_TCB_LOCK(stcb); 5604 } 5605 /* Double check the GONE flag */ 5606 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 5607 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 5608 /* nothing around */ 5609 so = NULL; 5610 5611 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5612 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5613 /* 5614 * For TCP type we need special handling when we are 5615 * connected. We also include the peel'ed off ones to. 5616 */ 5617 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 5618 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED; 5619 inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED; 5620 if (so) { 5621 SOCKBUF_LOCK(&so->so_rcv); 5622 so->so_state &= ~(SS_ISCONNECTING | 5623 SS_ISDISCONNECTING | 5624 #if !(defined(__FreeBSD__) && !defined(__Userspace__)) 5625 SS_ISCONFIRMING | 5626 #endif 5627 SS_ISCONNECTED); 5628 so->so_state |= SS_ISDISCONNECTED; 5629 #if defined(__APPLE__) && !defined(__Userspace__) 5630 socantrcvmore(so); 5631 #else 5632 socantrcvmore_locked(so); 5633 #endif 5634 socantsendmore(so); 5635 sctp_sowwakeup(inp, so); 5636 sctp_sorwakeup(inp, so); 5637 SCTP_SOWAKEUP(so); 5638 } 5639 } 5640 } 5641 5642 /* Make it invalid too, that way if its 5643 * about to run it will abort and return. 
5644 */ 5645 /* re-increment the lock */ 5646 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5647 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5648 } 5649 if (stcb->asoc.refcnt) { 5650 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_IN_ACCEPT_QUEUE); 5651 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 5652 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5653 SCTP_INP_INFO_WUNLOCK(); 5654 SCTP_INP_WUNLOCK(inp); 5655 } 5656 SCTP_TCB_UNLOCK(stcb); 5657 return (0); 5658 } 5659 asoc->state = 0; 5660 if (inp->sctp_tcbhash) { 5661 LIST_REMOVE(stcb, sctp_tcbhash); 5662 } 5663 if (stcb->asoc.in_asocid_hash) { 5664 LIST_REMOVE(stcb, sctp_tcbasocidhash); 5665 } 5666 if (inp->sctp_socket == NULL) { 5667 stcb->sctp_socket = NULL; 5668 } 5669 /* Now lets remove it from the list of ALL associations in the EP */ 5670 LIST_REMOVE(stcb, sctp_tcblist); 5671 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5672 SCTP_INP_INCR_REF(inp); 5673 SCTP_INP_WUNLOCK(inp); 5674 } 5675 /* pull from vtag hash */ 5676 LIST_REMOVE(stcb, sctp_asocs); 5677 sctp_add_vtag_to_timewait(asoc->my_vtag, inp->sctp_lport, stcb->rport); 5678 5679 /* Now restop the timers to be sure 5680 * this is paranoia at is finest! 5681 */ 5682 sctp_stop_association_timers(stcb, true); 5683 5684 /* 5685 * The chunk lists and such SHOULD be empty but we check them just 5686 * in case. 
5687 */ 5688 /* anything on the wheel needs to be removed */ 5689 for (i = 0; i < asoc->streamoutcnt; i++) { 5690 struct sctp_stream_out *outs; 5691 5692 outs = &asoc->strmout[i]; 5693 /* now clean up any chunks here */ 5694 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 5695 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 5696 TAILQ_REMOVE(&outs->outqueue, sp, next); 5697 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp); 5698 sctp_free_spbufspace(stcb, asoc, sp); 5699 if (sp->data) { 5700 if (so) { 5701 /* Still an open socket - report */ 5702 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 5703 0, (void *)sp, SCTP_SO_LOCKED); 5704 } 5705 if (sp->data) { 5706 sctp_m_freem(sp->data); 5707 sp->data = NULL; 5708 sp->tail_mbuf = NULL; 5709 sp->length = 0; 5710 } 5711 } 5712 if (sp->net) { 5713 sctp_free_remote_addr(sp->net); 5714 sp->net = NULL; 5715 } 5716 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); 5717 } 5718 } 5719 /*sa_ignore FREED_MEMORY*/ 5720 TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { 5721 TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); 5722 SCTP_FREE(strrst, SCTP_M_STRESET); 5723 } 5724 TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) { 5725 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); 5726 if (sq->data) { 5727 sctp_m_freem(sq->data); 5728 sq->data = NULL; 5729 } 5730 sctp_free_remote_addr(sq->whoFrom); 5731 sq->whoFrom = NULL; 5732 sq->stcb = NULL; 5733 /* Free the ctl entry */ 5734 sctp_free_a_readq(stcb, sq); 5735 /*sa_ignore FREED_MEMORY*/ 5736 } 5737 TAILQ_FOREACH_SAFE(chk, &asoc->free_chunks, sctp_next, nchk) { 5738 TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next); 5739 if (chk->data) { 5740 sctp_m_freem(chk->data); 5741 chk->data = NULL; 5742 } 5743 if (chk->holds_key_ref) 5744 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 5745 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 5746 SCTP_DECR_CHK_COUNT(); 5747 
atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); 5748 asoc->free_chunk_cnt--; 5749 /*sa_ignore FREED_MEMORY*/ 5750 } 5751 /* pending send queue SHOULD be empty */ 5752 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 5753 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 5754 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 5755 #ifdef INVARIANTS 5756 } else { 5757 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 5758 #endif 5759 } 5760 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 5761 if (chk->data) { 5762 if (so) { 5763 /* Still a socket? */ 5764 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 5765 0, chk, SCTP_SO_LOCKED); 5766 } 5767 if (chk->data) { 5768 sctp_m_freem(chk->data); 5769 chk->data = NULL; 5770 } 5771 } 5772 if (chk->holds_key_ref) 5773 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 5774 if (chk->whoTo) { 5775 sctp_free_remote_addr(chk->whoTo); 5776 chk->whoTo = NULL; 5777 } 5778 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 5779 SCTP_DECR_CHK_COUNT(); 5780 /*sa_ignore FREED_MEMORY*/ 5781 } 5782 /* sent queue SHOULD be empty */ 5783 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 5784 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 5785 if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) { 5786 asoc->strmout[chk->rec.data.sid].chunks_on_queues--; 5787 #ifdef INVARIANTS 5788 } else { 5789 panic("No chunks on the queues for sid %u.", chk->rec.data.sid); 5790 #endif 5791 } 5792 } 5793 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 5794 if (chk->data) { 5795 if (so) { 5796 /* Still a socket? 
*/ 5797 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 5798 0, chk, SCTP_SO_LOCKED); 5799 } 5800 if (chk->data) { 5801 sctp_m_freem(chk->data); 5802 chk->data = NULL; 5803 } 5804 } 5805 if (chk->holds_key_ref) 5806 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 5807 sctp_free_remote_addr(chk->whoTo); 5808 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 5809 SCTP_DECR_CHK_COUNT(); 5810 /*sa_ignore FREED_MEMORY*/ 5811 } 5812 #ifdef INVARIANTS 5813 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 5814 if (stcb->asoc.strmout[i].chunks_on_queues > 0) { 5815 panic("%u chunks left for stream %u.", stcb->asoc.strmout[i].chunks_on_queues, i); 5816 } 5817 } 5818 #endif 5819 /* control queue MAY not be empty */ 5820 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 5821 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 5822 if (chk->data) { 5823 sctp_m_freem(chk->data); 5824 chk->data = NULL; 5825 } 5826 if (chk->holds_key_ref) 5827 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 5828 sctp_free_remote_addr(chk->whoTo); 5829 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 5830 SCTP_DECR_CHK_COUNT(); 5831 /*sa_ignore FREED_MEMORY*/ 5832 } 5833 /* ASCONF queue MAY not be empty */ 5834 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 5835 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); 5836 if (chk->data) { 5837 sctp_m_freem(chk->data); 5838 chk->data = NULL; 5839 } 5840 if (chk->holds_key_ref) 5841 sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 5842 sctp_free_remote_addr(chk->whoTo); 5843 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 5844 SCTP_DECR_CHK_COUNT(); 5845 /*sa_ignore FREED_MEMORY*/ 5846 } 5847 if (asoc->mapping_array) { 5848 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 5849 asoc->mapping_array = NULL; 5850 } 5851 if (asoc->nr_mapping_array) { 5852 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 5853 asoc->nr_mapping_array = NULL; 5854 } 5855 /* the stream 
outs */ 5856 if (asoc->strmout) { 5857 SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 5858 asoc->strmout = NULL; 5859 } 5860 asoc->strm_realoutsize = asoc->streamoutcnt = 0; 5861 if (asoc->strmin) { 5862 for (i = 0; i < asoc->streamincnt; i++) { 5863 sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue); 5864 sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue); 5865 } 5866 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 5867 asoc->strmin = NULL; 5868 } 5869 asoc->streamincnt = 0; 5870 TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) { 5871 #ifdef INVARIANTS 5872 if (SCTP_BASE_INFO(ipi_count_raddr) == 0) { 5873 panic("no net's left alloc'ed, or list points to itself"); 5874 } 5875 #endif 5876 TAILQ_REMOVE(&asoc->nets, net, sctp_next); 5877 sctp_free_remote_addr(net); 5878 } 5879 LIST_FOREACH_SAFE(laddr, &asoc->sctp_restricted_addrs, sctp_nxt_addr, naddr) { 5880 /*sa_ignore FREED_MEMORY*/ 5881 sctp_remove_laddr(laddr); 5882 } 5883 5884 /* pending asconf (address) parameters */ 5885 TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) { 5886 /*sa_ignore FREED_MEMORY*/ 5887 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 5888 SCTP_FREE(aparam,SCTP_M_ASC_ADDR); 5889 } 5890 TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) { 5891 /*sa_ignore FREED_MEMORY*/ 5892 TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next); 5893 if (aack->data != NULL) { 5894 sctp_m_freem(aack->data); 5895 } 5896 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack); 5897 } 5898 /* clean up auth stuff */ 5899 if (asoc->local_hmacs) 5900 sctp_free_hmaclist(asoc->local_hmacs); 5901 if (asoc->peer_hmacs) 5902 sctp_free_hmaclist(asoc->peer_hmacs); 5903 5904 if (asoc->local_auth_chunks) 5905 sctp_free_chunklist(asoc->local_auth_chunks); 5906 if (asoc->peer_auth_chunks) 5907 sctp_free_chunklist(asoc->peer_auth_chunks); 5908 5909 sctp_free_authinfo(&asoc->authinfo); 5910 5911 LIST_FOREACH_SAFE(shared_key, &asoc->shared_keys, next, nshared_key) { 5912 LIST_REMOVE(shared_key, next); 5913 
sctp_free_sharedkey(shared_key); 5914 /*sa_ignore FREED_MEMORY*/ 5915 } 5916 5917 /* Insert new items here :> */ 5918 5919 /* Get rid of LOCK */ 5920 SCTP_TCB_UNLOCK(stcb); 5921 SCTP_TCB_LOCK_DESTROY(stcb); 5922 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5923 SCTP_INP_INFO_WUNLOCK(); 5924 SCTP_INP_RLOCK(inp); 5925 } 5926 #if defined(__APPLE__) && !defined(__Userspace__) 5927 /* TEMP CODE */ 5928 stcb->freed_from_where = from_location; 5929 #endif 5930 #ifdef SCTP_TRACK_FREED_ASOCS 5931 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5932 /* now clean up the tasoc itself */ 5933 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 5934 SCTP_DECR_ASOC_COUNT(); 5935 } else { 5936 LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist); 5937 } 5938 #else 5939 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 5940 SCTP_DECR_ASOC_COUNT(); 5941 #endif 5942 if (from_inpcbfree == SCTP_NORMAL_PROC) { 5943 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5944 /* If its NOT the inp_free calling us AND 5945 * sctp_close as been called, we 5946 * call back... 5947 */ 5948 SCTP_INP_RUNLOCK(inp); 5949 /* This will start the kill timer (if we are 5950 * the last one) since we hold an increment yet. But 5951 * this is the only safe way to do this 5952 * since otherwise if the socket closes 5953 * at the same time we are here we might 5954 * collide in the cleanup. 5955 */ 5956 sctp_inpcb_free(inp, 5957 SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 5958 SCTP_CALLED_DIRECTLY_NOCMPSET); 5959 SCTP_INP_DECR_REF(inp); 5960 } else { 5961 /* The socket is still open. */ 5962 SCTP_INP_DECR_REF(inp); 5963 SCTP_INP_RUNLOCK(inp); 5964 } 5965 } 5966 /* destroyed the asoc */ 5967 #ifdef SCTP_LOG_CLOSING 5968 sctp_log_closing(inp, NULL, 11); 5969 #endif 5970 return (1); 5971 } 5972 5973 /* 5974 * determine if a destination is "reachable" based upon the addresses bound 5975 * to the current endpoint (e.g. 
 * only v4 or v6 currently bound)
 */
/*
 * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
 * assoc level v4/v6 flags, as the assoc *may* not have the same address
 * types bound as its endpoint
 */
int
sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
{
	struct sctp_inpcb *inp;
	int answer;

	/*
	 * No locks here, the TCB, in all cases is already locked and an
	 * assoc is up. There is either a INP lock by the caller applied (in
	 * asconf case when deleting an address) or NOT in the HB case,
	 * however if HB then the INP increment is up and the INP will not
	 * be removed (on top of the fact that we have a TCB lock). So we
	 * only want to read the sctp_flags, which is either bound-all or
	 * not.. no protection needed since once an assoc is up you can't be
	 * changing your binding.
	 */
	inp = stcb->sctp_ep;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* if bound all, destination is not restricted */
		/*
		 * RRS: Question during lock work: Is this correct? If you
		 * are bound-all you still might need to obey the V4--V6
		 * flags??? IMO this bound-all stuff needs to be removed!
		 */
		return (1);
	}
	/* NOTE: all "scope" checks are done when local addresses are added */
	switch (destaddr->sa_family) {
#ifdef INET6
	case AF_INET6:
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
		break;
#endif
#ifdef INET
	case AF_INET:
		answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		answer = inp->ip_inp.inp.inp_vflag & INP_CONN;
		break;
#endif
	default:
		/* invalid family, so it's unreachable */
		answer = 0;
		break;
	}
	return (answer);
}

/*
 * update the inp_vflags on an endpoint: recompute the v4/v6/conn bits
 * from the endpoint's current local address list.
 */
static void
sctp_update_ep_vflag(struct sctp_inpcb *inp)
{
	struct sctp_laddr *laddr;

	/* first clear the flag */
	inp->ip_inp.inp.inp_vflag = 0;
	/* set the flag based on addresses on the ep list */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
			    __func__);
			continue;
		}

		if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
			/* addresses being torn down do not count */
			continue;
		}
		switch (laddr->ifa->address.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
			break;
#endif
#ifdef INET
		case AF_INET:
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
			break;
#endif
#if defined(__Userspace__)
		case AF_CONN:
			inp->ip_inp.inp.inp_vflag |= INP_CONN;
			break;
#endif
		default:
			break;
		}
	}
}

/*
 * Add the address to the endpoint local address list There is nothing to be
 * done if we are bound to all addresses
 */
void
sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action)
{
	struct sctp_laddr *laddr;
	struct sctp_tcb *stcb;
	int fnd, error = 0;

	fnd = 0;

	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return;
	}
#ifdef INET6
	if (ifa->address.sa.sa_family == AF_INET6) {
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			/* Can't bind a non-useable addr. */
			return;
		}
	}
#endif
	/* first, is it already present? */
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}

	if (fnd == 0) {
		/* Not in the ep list */
		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action);
		if (error != 0)
			return;
		inp->laddr_count++;
		/* update inp_vflag flags */
		switch (ifa->address.sa.sa_family) {
#ifdef INET6
		case AF_INET6:
			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
			break;
#endif
#ifdef INET
		case AF_INET:
			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
			break;
#endif
#if defined(__Userspace__)
		case AF_CONN:
			inp->ip_inp.inp.inp_vflag |= INP_CONN;
			break;
#endif
		default:
			break;
		}
		/*
		 * Make the new address "restricted" (pending ASCONF) on
		 * every existing association of this endpoint.
		 */
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			sctp_add_local_addr_restricted(stcb, ifa);
		}
	}
	return;
}

/*
 * select a new (hopefully reachable) destination net (should only be used
 * when we deleted an ep addr that is the only usable source address to reach
 * the destination net)
 */
static void
sctp_select_primary_destination(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/* for now, we'll just pick the first reachable one we find */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
			continue;
		if (sctp_destination_is_reachable(stcb,
		    (struct sockaddr *)&net->ro._l_addr)) {
			/* found a reachable destination */
			stcb->asoc.primary_destination = net;
		}
	}
	/* I can't get there from here! ...we're gonna die shortly... */
}

/*
 * Delete the address from the endpoint local address list. There is nothing
 * to be done if we are bound to all addresses
 */
void
sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;
	int fnd;

	fnd = 0;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/* You are already bound to all. You have it already */
		return;
	}
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			fnd = 1;
			break;
		}
	}
	if (fnd && (inp->laddr_count < 2)) {
		/* can't delete unless there are at LEAST 2 addresses */
		return;
	}
	if (fnd) {
		/*
		 * clean up any use of this address go through our
		 * associations and clear any last_used_address that match
		 * this one for each assoc, see if a new primary_destination
		 * is needed
		 */
		struct sctp_tcb *stcb;

		/* clean up "next_addr_touse" */
		if (inp->next_addr_touse == laddr)
			/* delete this address */
			inp->next_addr_touse = NULL;

		/* clean up "last_used_address" */
		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
			struct sctp_nets *net;

			SCTP_TCB_LOCK(stcb);
			if (stcb->asoc.last_used_address == laddr)
				/* delete this address */
				stcb->asoc.last_used_address = NULL;
			/* Now spin through all the nets and purge any ref to laddr */
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				if (net->ro._s_addr == laddr->ifa) {
					/* Yep, purge src address selected */
#if defined(__FreeBSD__) && !defined(__Userspace__)
					RO_NHFREE(&net->ro);
#else
					sctp_rtentry_t *rt;

					/* delete this address if cached */
					rt = net->ro.ro_rt;
					if (rt != NULL) {
						RTFREE(rt);
						net->ro.ro_rt = NULL;
					}
#endif
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
					/* force a fresh source-address selection */
					net->src_addr_selected = 0;
				}
			}
			SCTP_TCB_UNLOCK(stcb);
		} /* for each tcb */
		/* remove it from the ep list */
		sctp_remove_laddr(laddr);
		inp->laddr_count--;
		/* update inp_vflag flags */
		sctp_update_ep_vflag(inp);
	}
	return;
}

/*
 * Add the address to the TCB local address restricted list.
 * This is a "pending" address list (eg. addresses waiting for an
 * ASCONF-ACK response) and cannot be used as a valid source address.
 */
void
sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;
	struct sctpladdr *list;

	/*
	 * Assumes TCB is locked.. and possibly the INP. May need to
	 * confirm/fix that if we need it and is not the case.
	 */
	list = &stcb->asoc.sctp_restricted_addrs;

#ifdef INET6
	if (ifa->address.sa.sa_family == AF_INET6) {
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			/* Can't bind a non-existent addr. */
			return;
		}
	}
#endif
	/* does the address already exist? */
	LIST_FOREACH(laddr, list, sctp_nxt_addr) {
		if (laddr->ifa == ifa) {
			return;
		}
	}

	/* add to the list */
	(void)sctp_insert_laddr(list, ifa, 0);
	return;
}

/*
 * Remove a local address from the TCB local address restricted list
 */
void
sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_inpcb *inp;
	struct sctp_laddr *laddr;

	/*
	 * This is called by asconf work. It is assumed that a) The TCB is
	 * locked and b) The INP is locked. This is true in as much as I can
	 * trace through the entry asconf code where I did these locks.
	 * Again, the ASCONF code is a bit different in that it does lock
	 * the INP during its work often times. This must be since we don't
	 * want other proc's looking up things while what they are looking
	 * up is changing :-D
	 */

	inp = stcb->sctp_ep;
	/* if subset bound and don't allow ASCONF's, can't delete last */
	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
		if (stcb->sctp_ep->laddr_count < 2) {
			/* can't delete last address */
			return;
		}
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		/* remove the address if it exists */
		if (laddr->ifa == NULL)
			continue;
		if (laddr->ifa == ifa) {
			sctp_remove_laddr(laddr);
			return;
		}
	}

	/* address not found! */
	return;
}

#if defined(__FreeBSD__) && !defined(__Userspace__)
/* sysctl */
static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_MCORE_INPUT) && defined(SMP)
struct sctp_mcore_ctrl *sctp_mcore_workers = NULL;
int *sctp_cpuarry = NULL;

/*
 * Hand an inbound packet to the per-CPU worker thread for 'cpu_to_use'.
 * Falls back to direct input processing if the worker infrastructure is
 * not set up or the queue entry cannot be allocated.
 */
void
sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use)
{
	/* Queue a packet to a processor for the specified core */
	struct sctp_mcore_queue *qent;
	struct sctp_mcore_ctrl *wkq;
	int need_wake = 0;

	if (sctp_mcore_workers == NULL) {
		/* Something went way bad during setup */
		sctp_input_with_port(m, off, 0);
		return;
	}
	SCTP_MALLOC(qent, struct sctp_mcore_queue *,
	    (sizeof(struct sctp_mcore_queue)),
	    SCTP_M_MCORE);
	if (qent == NULL) {
		/* This is trouble */
		sctp_input_with_port(m, off, 0);
		return;
	}
	qent->vn = curvnet;
	qent->m = m;
	qent->off = off;
	qent->v6 = 0;
	wkq = &sctp_mcore_workers[cpu_to_use];
	SCTP_MCORE_QLOCK(wkq);

	TAILQ_INSERT_TAIL(&wkq->que, qent, next);
	if (wkq->running == 0) {
		need_wake = 1;
	}
	SCTP_MCORE_QUNLOCK(wkq);
	if (need_wake) {
		wakeup(&wkq->running);
	}
}

/*
 * Per-CPU worker thread body: binds itself to its CPU, then loops
 * draining the work queue and sleeping when it is empty.
 */
static void
sctp_mcore_thread(void *arg)
{

	struct sctp_mcore_ctrl *wkq;
	struct sctp_mcore_queue *qent;

	wkq = (struct sctp_mcore_ctrl *)arg;
	struct mbuf *m;
	int off, v6;

	/* Wait for first tickle */
	SCTP_MCORE_LOCK(wkq);
	wkq->running = 0;
	msleep(&wkq->running,
	    &wkq->core_mtx,
	    0, "wait for pkt", 0);
	SCTP_MCORE_UNLOCK(wkq);

	/* Bind to our cpu */
	thread_lock(curthread);
	sched_bind(curthread, wkq->cpuid);
	thread_unlock(curthread);

	/* Now lets start working */
	SCTP_MCORE_LOCK(wkq);
	/* Now grab lock and go */
	for (;;) {
		SCTP_MCORE_QLOCK(wkq);
	skip_sleep:
		wkq->running = 1;
		qent = TAILQ_FIRST(&wkq->que);
		if (qent) {
			TAILQ_REMOVE(&wkq->que, qent, next);
			SCTP_MCORE_QUNLOCK(wkq);
			CURVNET_SET(qent->vn);
			m = qent->m;
			off = qent->off;
			v6 = qent->v6;
			SCTP_FREE(qent, SCTP_M_MCORE);
			if (v6 == 0) {
				sctp_input_with_port(m, off, 0);
			} else {
				SCTP_PRINTF("V6 not yet supported\n");
				sctp_m_freem(m);
			}
			CURVNET_RESTORE();
			SCTP_MCORE_QLOCK(wkq);
		}
		wkq->running = 0;
		if (!TAILQ_EMPTY(&wkq->que)) {
			/* more work arrived while we were busy */
			goto skip_sleep;
		}
		SCTP_MCORE_QUNLOCK(wkq);
		msleep(&wkq->running,
		    &wkq->core_mtx,
		    0, "wait for pkt", 0);
	}
}

/*
 * Allocate and launch one worker thread per CPU (no-op on UP systems
 * or if a previous vnet already initialized the workers).
 */
static void
sctp_startup_mcore_threads(void)
{
	int i, cpu;

	if (mp_ncpus == 1)
		return;

	if (sctp_mcore_workers != NULL) {
		/* Already been here in some previous
		 * vnet?
		 */
		return;
	}
	SCTP_MALLOC(sctp_mcore_workers, struct sctp_mcore_ctrl *,
	    ((mp_maxid+1) * sizeof(struct sctp_mcore_ctrl)),
	    SCTP_M_MCORE);
	if (sctp_mcore_workers == NULL) {
		/* TSNH I hope */
		return;
	}
	memset(sctp_mcore_workers, 0 , ((mp_maxid+1) *
	    sizeof(struct sctp_mcore_ctrl)));
	/* Init the structures */
	for (i = 0; i<=mp_maxid; i++) {
		TAILQ_INIT(&sctp_mcore_workers[i].que);
		SCTP_MCORE_LOCK_INIT(&sctp_mcore_workers[i]);
		SCTP_MCORE_QLOCK_INIT(&sctp_mcore_workers[i]);
		sctp_mcore_workers[i].cpuid = i;
	}
	if (sctp_cpuarry == NULL) {
		SCTP_MALLOC(sctp_cpuarry, int *,
		    (mp_ncpus * sizeof(int)),
		    SCTP_M_MCORE);
		i = 0;
		CPU_FOREACH(cpu) {
			sctp_cpuarry[i] = cpu;
			i++;
		}
	}
	/* Now start them all */
	CPU_FOREACH(cpu) {
		(void)kproc_create(sctp_mcore_thread,
		    (void *)&sctp_mcore_workers[cpu],
		    &sctp_mcore_workers[cpu].thread_proc,
		    0,
		    SCTP_KTHREAD_PAGES,
		    SCTP_MCORE_NAME);
	}
}
#endif
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_NOT_YET)
static struct mbuf *
sctp_netisr_hdlr(struct mbuf *m, uintptr_t source)
{
	struct ip *ip;
	struct sctphdr *sh;
	int offset;
	uint32_t flowid, tag;

	/*
	 * No flow id built by lower layers fix it so we
	 * create one.
	 */
	ip = mtod(m, struct ip *);
	offset = (ip->ip_hl << 2) + sizeof(struct sctphdr);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == NULL) {
			SCTP_STAT_INCR(sctps_hdrops);
			return (NULL);
		}
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
	tag = htonl(sh->v_tag);
	flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
	m->m_pkthdr.flowid = flowid;
	/* FIX ME */
	m->m_flags |= M_FLOWID;
	return (m);
}

#endif
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#define VALIDATE_LOADER_TUNABLE(var_name, prefix) \
	if (SCTP_BASE_SYSCTL(var_name) < prefix##_MIN || \
	    SCTP_BASE_SYSCTL(var_name) > prefix##_MAX) \
		SCTP_BASE_SYSCTL(var_name) = prefix##_DEFAULT

#endif
void
#if defined(__Userspace__)
sctp_pcb_init(int start_threads)
#else
sctp_pcb_init(void)
#endif
{
	/*
	 * SCTP initialization for the PCB structures should be called by
	 * the sctp_init() function.
void
#if defined(__Userspace__)
sctp_pcb_init(int start_threads)
#else
sctp_pcb_init(void)
#endif
{
	/*
	 * SCTP initialization for the PCB structures should be called by
	 * the sctp_init() function.
	 *
	 * Sets up all global SCTP state: statistics, the endpoint /
	 * association / VRF hash tables, the allocation zones, the global
	 * locks, the vtag timewait lists, the iterator thread and (on
	 * FreeBSD SMP builds) the mcore input threads.  Guarded so a
	 * second call is a no-op.
	 */
	int i;
	struct timeval tv;

	if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) {
		/* error I was called twice */
		return;
	}
	SCTP_BASE_VAR(sctp_pcb_initialized) = 1;

#if defined(SCTP_PROCESS_LEVEL_LOCKS)
#if !defined(_WIN32)
	/* Userland locking: shared pthread mutex/rwlock attributes. */
	pthread_mutexattr_init(&SCTP_BASE_VAR(mtx_attr));
	pthread_rwlockattr_init(&SCTP_BASE_VAR(rwlock_attr));
#ifdef INVARIANTS
	/* Error-checking mutexes catch recursive/unowned unlock bugs. */
	pthread_mutexattr_settype(&SCTP_BASE_VAR(mtx_attr), PTHREAD_MUTEX_ERRORCHECK);
#endif
#endif
#endif
#if defined(SCTP_LOCAL_TRACE_BUF)
	/* Clear the local trace ring buffer. */
#if defined(_WIN32) && !defined(__Userspace__)
	if (SCTP_BASE_SYSCTL(sctp_log) != NULL) {
		memset(SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
	}
#else
	memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
#endif
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
	/* One sctpstat per CPU; NULL return is not checked (see FIX ME below). */
	SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *,
		    ((mp_maxid+1) * sizeof(struct sctpstat)),
		    SCTP_M_MCORE);
#endif
#endif
	(void)SCTP_GETTIME_TIMEVAL(&tv);
	/* Zero the stats and record the discontinuity time (RFC 3873 MIB). */
#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
	memset(SCTP_BASE_STATS, 0, sizeof(struct sctpstat) * (mp_maxid+1));
	SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec;
	SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec;
#else
	memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat));
	SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec;
	SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec;
#endif
#else
	memset(&SCTP_BASE_STATS, 0, sizeof(struct sctpstat));
	SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec;
	SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec;
#endif
	/* init the empty list of (All) Endpoints */
	LIST_INIT(&SCTP_BASE_INFO(listhead));
#if defined(__APPLE__) && !defined(__Userspace__)
	/* Darwin: wire up the inpcb info block and its lock group. */
	LIST_INIT(&SCTP_BASE_INFO(inplisthead));
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
	SCTP_BASE_INFO(sctbinfo).listhead = &SCTP_BASE_INFO(inplisthead);
	SCTP_BASE_INFO(sctbinfo).mtx_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
	SCTP_BASE_INFO(sctbinfo).mtx_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
	SCTP_BASE_INFO(sctbinfo).mtx_attr = lck_attr_alloc_init();
	lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_attr);
#else
	SCTP_BASE_INFO(sctbinfo).ipi_listhead = &SCTP_BASE_INFO(inplisthead);
	SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
	SCTP_BASE_INFO(sctbinfo).ipi_lock_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
	SCTP_BASE_INFO(sctbinfo).ipi_lock_attr = lck_attr_alloc_init();
	lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
#endif
#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
	SCTP_BASE_INFO(sctbinfo).ipi_gc = sctp_gc;
	in_pcbinfo_attach(&SCTP_BASE_INFO(sctbinfo));
#endif
#endif

	/* init the hash table of endpoints */
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/* Loader tunables override the defaults; clamp them to sane ranges. */
	TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize));
	TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize));
	TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale));
	VALIDATE_LOADER_TUNABLE(sctp_hashtblsize, SCTPCTL_TCBHASHSIZE);
	VALIDATE_LOADER_TUNABLE(sctp_pcbtblsize, SCTPCTL_PCBHASHSIZE);
	VALIDATE_LOADER_TUNABLE(sctp_chunkscale, SCTPCTL_CHUNKSCALE);
#endif
	/* Association hash is scaled 31x relative to the endpoint hash. */
	SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31),
						       &SCTP_BASE_INFO(hashasocmark));
	SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
						     &SCTP_BASE_INFO(hashmark));
	SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
							&SCTP_BASE_INFO(hashtcpmark));
	SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
	SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
						      &SCTP_BASE_INFO(hashvrfmark));

	SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
						      &SCTP_BASE_INFO(vrf_ifn_hashmark));
	/* init the zones */
	/*
	 * FIX ME: Should check for NULL returns, but if it does fail we are
	 * doomed to panic anyways... add later maybe.
	 */
	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep",
		       sizeof(struct sctp_inpcb), maxsockets);

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc",
		       sizeof(struct sctp_tcb), sctp_max_number_of_assoc);

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr",
		       sizeof(struct sctp_laddr),
		       (sctp_max_number_of_assoc * sctp_scale_up_for_address));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr",
		       sizeof(struct sctp_nets),
		       (sctp_max_number_of_assoc * sctp_scale_up_for_address));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk",
		       sizeof(struct sctp_tmit_chunk),
		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq",
		       sizeof(struct sctp_queued_to_read),
		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out",
		       sizeof(struct sctp_stream_queue_pending),
		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf",
		       sizeof(struct sctp_asconf),
		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));

	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack",
		       sizeof(struct sctp_asconf_ack),
		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));

	/* Master Lock INIT for info structure */
	SCTP_INP_INFO_LOCK_INIT();
	SCTP_STATLOG_INIT_LOCK();

	SCTP_IPI_COUNT_INIT();
	SCTP_IPI_ADDR_INIT();
#ifdef SCTP_PACKET_LOGGING
	SCTP_IP_PKTLOG_INIT();
#endif
	/* Address work queue, drained by the addr_wq timer. */
	LIST_INIT(&SCTP_BASE_INFO(addr_wq));

	SCTP_WQ_ADDR_INIT();
	/* not sure if we need all the counts */
	SCTP_BASE_INFO(ipi_count_ep) = 0;
	/* assoc/tcb zone info */
	SCTP_BASE_INFO(ipi_count_asoc) = 0;
	/* local addrlist zone info */
	SCTP_BASE_INFO(ipi_count_laddr) = 0;
	/* remote addrlist zone info */
	SCTP_BASE_INFO(ipi_count_raddr) = 0;
	/* chunk info */
	SCTP_BASE_INFO(ipi_count_chunk) = 0;

	/* socket queue zone info */
	SCTP_BASE_INFO(ipi_count_readq) = 0;

	/* stream out queue cont */
	SCTP_BASE_INFO(ipi_count_strmoq) = 0;

	SCTP_BASE_INFO(ipi_free_strmoq) = 0;
	SCTP_BASE_INFO(ipi_free_chunks) = 0;

	SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer));

	/* Init the TIMEWAIT list */
	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
		LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]);
	}
#if defined(SCTP_PROCESS_LEVEL_LOCKS)
#if defined(_WIN32)
	InitializeConditionVariable(&sctp_it_ctl.iterator_wakeup);
#else
	(void)pthread_cond_init(&sctp_it_ctl.iterator_wakeup, NULL);
#endif
#endif
	sctp_startup_iterator();

#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SCTP_MCORE_INPUT) && defined(SMP)
	sctp_startup_mcore_threads();
#endif
#endif

	/*
	 * INIT the default VRF which for BSD is the only one, other O/S's
	 * may have more. But initially they must start with one and then
	 * add the VRF's as addresses are added.
	 */
	sctp_init_vrf_list(SCTP_DEFAULT_VRF);
#if defined(__FreeBSD__) && !defined(__Userspace__) && defined(SCTP_NOT_YET)
	if (ip_register_flow_handler(sctp_netisr_hdlr, IPPROTO_SCTP)) {
		SCTP_PRINTF("***SCTP- Error can't register netisr handler***\n");
	}
#endif
#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_)
	/* allocate the lock for the callout/timer queue */
	SCTP_TIMERQ_LOCK_INIT();
	TAILQ_INIT(&SCTP_BASE_INFO(callqueue));
#endif
#if defined(__Userspace__)
	/* Userspace stack: bring up the mbuf zones and receive threads. */
	mbuf_initialize(NULL);
	atomic_init();
#if defined(INET) || defined(INET6)
	if (start_threads)
		recv_thread_init();
#endif
#endif
}
/*
 * Assumes that the SCTP_BASE_INFO() lock is NOT held.
 *
 * Tear down everything sctp_pcb_init() created: stop/join the iterator
 * thread (platform dependent), drain the address work queue, free the
 * VRF/ifn/ifa structures, the vtag timewait blocks, the hash tables,
 * the zones, the locks and (on FreeBSD percpu-stat builds) the stats
 * array.  A no-op (with a diagnostic) if init never ran or teardown
 * already happened.
 */
void
sctp_pcb_finish(void)
{
	struct sctp_vrflist *vrf_bucket;
	struct sctp_vrf *vrf, *nvrf;
	struct sctp_ifn *ifn, *nifn;
	struct sctp_ifa *ifa, *nifa;
	struct sctpvtaghead *chain;
	struct sctp_tagblock *twait_block, *prev_twait_block;
	struct sctp_laddr *wi, *nwi;
	int i;
	struct sctp_iterator *it, *nit;

	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		SCTP_PRINTF("%s: race condition on teardown.\n", __func__);
		return;
	}
	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
	/* Notify the iterator to exit. */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_MUST_EXIT;
	sctp_wakeup_iterator();
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
	in_pcbinfo_detach(&SCTP_BASE_INFO(sctbinfo));
#endif
	/* Darwin: wait for the iterator thread to acknowledge the exit. */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	do {
		msleep(&sctp_it_ctl.iterator_flags,
		       sctp_it_ctl.ipi_iterator_wq_mtx,
		       0, "waiting_for_work", 0);
	} while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_EXITED) == 0);
	thread_deallocate(sctp_it_ctl.thread_proc);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
#endif
#if defined(_WIN32) && !defined(__Userspace__)
	/* Windows kernel: signal the iterator's wakeup event and join it. */
	if (sctp_it_ctl.iterator_thread_obj != NULL) {
		NTSTATUS status = STATUS_SUCCESS;

		KeSetEvent(&sctp_it_ctl.iterator_wakeup[1], IO_NO_INCREMENT, FALSE);
		status = KeWaitForSingleObject(sctp_it_ctl.iterator_thread_obj,
					       Executive,
					       KernelMode,
					       FALSE,
					       NULL);
		ObDereferenceObject(sctp_it_ctl.iterator_thread_obj);
	}
#endif
#if defined(__Userspace__)
	/* Userspace: join the iterator thread if it was ever started. */
	if (SCTP_BASE_VAR(iterator_thread_started)) {
#if defined(_WIN32)
		WaitForSingleObject(sctp_it_ctl.thread_proc, INFINITE);
		CloseHandle(sctp_it_ctl.thread_proc);
		sctp_it_ctl.thread_proc = NULL;
#else
		pthread_join(sctp_it_ctl.thread_proc, NULL);
		sctp_it_ctl.thread_proc = 0;
#endif
	}
#endif
#if defined(SCTP_PROCESS_LEVEL_LOCKS)
#if defined(_WIN32)
	DeleteConditionVariable(&sctp_it_ctl.iterator_wakeup);
#else
	pthread_cond_destroy(&sctp_it_ctl.iterator_wakeup);
	pthread_mutexattr_destroy(&SCTP_BASE_VAR(mtx_attr));
	pthread_rwlockattr_destroy(&SCTP_BASE_VAR(rwlock_attr));
#endif
#endif
	/* In FreeBSD the iterator thread never exits
	 * but we do clean up.
	 * The only way FreeBSD reaches here is if we have VRF's
	 * but we still add the ifdef to make it compile on old versions.
	 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
retry:
#endif
	SCTP_IPI_ITERATOR_WQ_LOCK();
#if defined(__FreeBSD__) && !defined(__Userspace__)
	/*
	 * sctp_iterator_worker() might be working on an it entry without
	 * holding the lock. We won't find it on the list either and
	 * continue and free/destroy it. While holding the lock, spin, to
	 * avoid the race condition as sctp_iterator_worker() will have to
	 * wait to re-acquire the lock.
	 */
	if (sctp_it_ctl.iterator_running != 0 || sctp_it_ctl.cur_it != NULL) {
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		SCTP_PRINTF("%s: Iterator running while we held the lock. Retry. "
		            "cur_it=%p\n", __func__, sctp_it_ctl.cur_it);
		DELAY(10);
		goto retry;
	}
#endif
	/* Drain pending iterator requests, running their completions. */
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
		/* Only tear down entries belonging to this vnet. */
		if (it->vn != curvnet) {
			continue;
		}
#endif
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it,SCTP_M_ITER);
	}
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
#if defined(__FreeBSD__) && !defined(__Userspace__)
	SCTP_ITERATOR_LOCK();
	if ((sctp_it_ctl.cur_it) &&
	    (sctp_it_ctl.cur_it->vn == curvnet)) {
		/* Ask the worker to stop the in-flight iteration. */
		sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
	}
	SCTP_ITERATOR_UNLOCK();
#endif
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
	SCTP_IPI_ITERATOR_WQ_DESTROY();
	SCTP_ITERATOR_LOCK_DESTROY();
#endif
	/* Stop the address-workqueue timer before draining its list. */
	SCTP_OS_TIMER_STOP_DRAIN(&SCTP_BASE_INFO(addr_wq_timer.timer));
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		SCTP_DECR_LADDR_COUNT();
		if (wi->action == SCTP_DEL_IP_ADDRESS) {
			/* A pending delete owns its ifa; free it here. */
			SCTP_FREE(wi->ifa, SCTP_M_IFA);
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi);
	}
	SCTP_WQ_ADDR_UNLOCK();

	/*
	 * free the vrf/ifn/ifa lists and hashes (be sure address monitor
	 * is destroyed first).
	 */
	SCTP_IPI_ADDR_WLOCK();
	vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))];
	LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) {
		LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) {
			LIST_FOREACH_SAFE(ifa, &ifn->ifalist, next_ifa, nifa) {
				/* free the ifa */
				LIST_REMOVE(ifa, next_bucket);
				LIST_REMOVE(ifa, next_ifa);
				SCTP_FREE(ifa, SCTP_M_IFA);
			}
			/* free the ifn */
			LIST_REMOVE(ifn, next_bucket);
			LIST_REMOVE(ifn, next_ifn);
			SCTP_FREE(ifn, SCTP_M_IFN);
		}
		SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
		/* free the vrf */
		LIST_REMOVE(vrf, next_vrf);
		SCTP_FREE(vrf, SCTP_M_VRF);
	}
	SCTP_IPI_ADDR_WUNLOCK();
	/* free the vrf hashes */
	SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark));
	SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark));

	/* free the TIMEWAIT list elements malloc'd in the function
	 * sctp_add_vtag_to_timewait()...
	 */
	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
		chain = &SCTP_BASE_INFO(vtag_timewait)[i];
		if (!LIST_EMPTY(chain)) {
			/*
			 * Free each block one step behind the cursor so the
			 * list links are never read after being freed.
			 */
			prev_twait_block = NULL;
			LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
				if (prev_twait_block) {
					SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
				}
				prev_twait_block = twait_block;
			}
			SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
		}
	}

	/* free the locks and mutexes */
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_TIMERQ_LOCK_DESTROY();
#endif
#ifdef SCTP_PACKET_LOGGING
	SCTP_IP_PKTLOG_DESTROY();
#endif
	SCTP_IPI_ADDR_DESTROY();
#if defined(__APPLE__) && !defined(__Userspace__)
	SCTP_IPI_COUNT_DESTROY();
#endif
	SCTP_STATLOG_DESTROY();
	SCTP_INP_INFO_LOCK_DESTROY();

	SCTP_WQ_ADDR_DESTROY();

#if defined(__APPLE__) && !defined(__Userspace__)
	/* Darwin: release the lock group allocated in sctp_pcb_init(). */
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
	lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
	lck_grp_free(SCTP_BASE_INFO(sctbinfo).mtx_grp);
	lck_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_attr);
#else
	lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
	lck_grp_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp);
	lck_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
#endif
#endif
#if defined(__Userspace__)
	SCTP_TIMERQ_LOCK_DESTROY();
	SCTP_ZONE_DESTROY(zone_mbuf);
	SCTP_ZONE_DESTROY(zone_clust);
	SCTP_ZONE_DESTROY(zone_ext_refcnt);
#endif
	/* Get rid of other stuff too. */
	if (SCTP_BASE_INFO(sctp_asochash) != NULL)
		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
	if (SCTP_BASE_INFO(sctp_ephash) != NULL)
		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
	if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));

#if defined(_WIN32) || defined(__FreeBSD__) || defined(__Userspace__)
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf));
	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack));
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
	SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE);
#endif
#endif
}
*/ 6968 if (SCTP_BASE_INFO(sctp_asochash) != NULL) 6969 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark)); 6970 if (SCTP_BASE_INFO(sctp_ephash) != NULL) 6971 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark)); 6972 if (SCTP_BASE_INFO(sctp_tcpephash) != NULL) 6973 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark)); 6974 6975 #if defined(_WIN32) || defined(__FreeBSD__) || defined(__Userspace__) 6976 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep)); 6977 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc)); 6978 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr)); 6979 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net)); 6980 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk)); 6981 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq)); 6982 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq)); 6983 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf)); 6984 SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack)); 6985 #endif 6986 #if defined(__FreeBSD__) && !defined(__Userspace__) 6987 #if defined(SMP) && defined(SCTP_USE_PERCPU_STAT) 6988 SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE); 6989 #endif 6990 #endif 6991 } 6992 6993 int 6994 sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, 6995 int offset, int limit, 6996 struct sockaddr *src, struct sockaddr *dst, 6997 struct sockaddr *altsa, uint16_t port) 6998 { 6999 /* 7000 * grub through the INIT pulling addresses and loading them to the 7001 * nets structure in the asoc. The from address in the mbuf should 7002 * also be loaded (if it is not already). This routine can be called 7003 * with either INIT or INIT-ACK's as long as the m points to the IP 7004 * packet and the offset points to the beginning of the parameters. 
7005 */ 7006 struct sctp_inpcb *inp; 7007 struct sctp_nets *net, *nnet, *net_tmp; 7008 struct sctp_paramhdr *phdr, param_buf; 7009 struct sctp_tcb *stcb_tmp; 7010 uint16_t ptype, plen; 7011 struct sockaddr *sa; 7012 uint8_t random_store[SCTP_PARAM_BUFFER_SIZE]; 7013 struct sctp_auth_random *p_random = NULL; 7014 uint16_t random_len = 0; 7015 uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE]; 7016 struct sctp_auth_hmac_algo *hmacs = NULL; 7017 uint16_t hmacs_len = 0; 7018 uint8_t saw_asconf = 0; 7019 uint8_t saw_asconf_ack = 0; 7020 uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE]; 7021 struct sctp_auth_chunk_list *chunks = NULL; 7022 uint16_t num_chunks = 0; 7023 sctp_key_t *new_key; 7024 uint32_t keylen; 7025 int got_random = 0, got_hmacs = 0, got_chklist = 0; 7026 uint8_t peer_supports_ecn; 7027 uint8_t peer_supports_prsctp; 7028 uint8_t peer_supports_auth; 7029 uint8_t peer_supports_asconf; 7030 uint8_t peer_supports_asconf_ack; 7031 uint8_t peer_supports_reconfig; 7032 uint8_t peer_supports_nrsack; 7033 uint8_t peer_supports_pktdrop; 7034 uint8_t peer_supports_idata; 7035 #ifdef INET 7036 struct sockaddr_in sin; 7037 #endif 7038 #ifdef INET6 7039 struct sockaddr_in6 sin6; 7040 #endif 7041 7042 /* First get the destination address setup too. 
*/ 7043 #ifdef INET 7044 memset(&sin, 0, sizeof(sin)); 7045 sin.sin_family = AF_INET; 7046 #ifdef HAVE_SIN_LEN 7047 sin.sin_len = sizeof(sin); 7048 #endif 7049 sin.sin_port = stcb->rport; 7050 #endif 7051 #ifdef INET6 7052 memset(&sin6, 0, sizeof(sin6)); 7053 sin6.sin6_family = AF_INET6; 7054 #ifdef HAVE_SIN6_LEN 7055 sin6.sin6_len = sizeof(struct sockaddr_in6); 7056 #endif 7057 sin6.sin6_port = stcb->rport; 7058 #endif 7059 if (altsa) { 7060 sa = altsa; 7061 } else { 7062 sa = src; 7063 } 7064 peer_supports_idata = 0; 7065 peer_supports_ecn = 0; 7066 peer_supports_prsctp = 0; 7067 peer_supports_auth = 0; 7068 peer_supports_asconf = 0; 7069 peer_supports_asconf_ack = 0; 7070 peer_supports_reconfig = 0; 7071 peer_supports_nrsack = 0; 7072 peer_supports_pktdrop = 0; 7073 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 7074 /* mark all addresses that we have currently on the list */ 7075 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC; 7076 } 7077 /* does the source address already exist? if so skip it */ 7078 inp = stcb->sctp_ep; 7079 atomic_add_int(&stcb->asoc.refcnt, 1); 7080 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, dst, stcb); 7081 atomic_subtract_int(&stcb->asoc.refcnt, 1); 7082 7083 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) { 7084 /* we must add the source address */ 7085 /* no scope set here since we have a tcb already. 
*/ 7086 switch (sa->sa_family) { 7087 #ifdef INET 7088 case AF_INET: 7089 if (stcb->asoc.scope.ipv4_addr_legal) { 7090 if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) { 7091 return (-1); 7092 } 7093 } 7094 break; 7095 #endif 7096 #ifdef INET6 7097 case AF_INET6: 7098 if (stcb->asoc.scope.ipv6_addr_legal) { 7099 if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { 7100 return (-2); 7101 } 7102 } 7103 break; 7104 #endif 7105 #if defined(__Userspace__) 7106 case AF_CONN: 7107 if (stcb->asoc.scope.conn_addr_legal) { 7108 if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { 7109 return (-2); 7110 } 7111 } 7112 break; 7113 #endif 7114 default: 7115 break; 7116 } 7117 } else { 7118 if (net_tmp != NULL && stcb_tmp == stcb) { 7119 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; 7120 } else if (stcb_tmp != stcb) { 7121 /* It belongs to another association? */ 7122 if (stcb_tmp) 7123 SCTP_TCB_UNLOCK(stcb_tmp); 7124 return (-3); 7125 } 7126 } 7127 if (stcb->asoc.state == 0) { 7128 /* the assoc was freed? */ 7129 return (-4); 7130 } 7131 /* now we must go through each of the params. 
*/ 7132 phdr = sctp_get_next_param(m, offset, ¶m_buf, sizeof(param_buf)); 7133 while (phdr) { 7134 ptype = ntohs(phdr->param_type); 7135 plen = ntohs(phdr->param_length); 7136 /* 7137 * SCTP_PRINTF("ptype => %0x, plen => %d\n", (uint32_t)ptype, 7138 * (int)plen); 7139 */ 7140 if (offset + plen > limit) { 7141 break; 7142 } 7143 if (plen < sizeof(struct sctp_paramhdr)) { 7144 break; 7145 } 7146 #ifdef INET 7147 if (ptype == SCTP_IPV4_ADDRESS) { 7148 if (stcb->asoc.scope.ipv4_addr_legal) { 7149 struct sctp_ipv4addr_param *p4, p4_buf; 7150 7151 /* ok get the v4 address and check/add */ 7152 phdr = sctp_get_next_param(m, offset, 7153 (struct sctp_paramhdr *)&p4_buf, 7154 sizeof(p4_buf)); 7155 if (plen != sizeof(struct sctp_ipv4addr_param) || 7156 phdr == NULL) { 7157 return (-5); 7158 } 7159 p4 = (struct sctp_ipv4addr_param *)phdr; 7160 sin.sin_addr.s_addr = p4->addr; 7161 if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 7162 /* Skip multi-cast addresses */ 7163 goto next_param; 7164 } 7165 if ((sin.sin_addr.s_addr == INADDR_BROADCAST) || 7166 (sin.sin_addr.s_addr == INADDR_ANY)) { 7167 goto next_param; 7168 } 7169 sa = (struct sockaddr *)&sin; 7170 inp = stcb->sctp_ep; 7171 atomic_add_int(&stcb->asoc.refcnt, 1); 7172 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 7173 dst, stcb); 7174 atomic_subtract_int(&stcb->asoc.refcnt, 1); 7175 7176 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || 7177 inp == NULL) { 7178 /* we must add the source address */ 7179 /* 7180 * no scope set since we have a tcb 7181 * already 7182 */ 7183 7184 /* 7185 * we must validate the state again 7186 * here 7187 */ 7188 add_it_now: 7189 if (stcb->asoc.state == 0) { 7190 /* the assoc was freed? */ 7191 return (-7); 7192 } 7193 if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) { 7194 return (-8); 7195 } 7196 } else if (stcb_tmp == stcb) { 7197 if (stcb->asoc.state == 0) { 7198 /* the assoc was freed? 
*/ 7199 return (-10); 7200 } 7201 if (net != NULL) { 7202 /* clear flag */ 7203 net->dest_state &= 7204 ~SCTP_ADDR_NOT_IN_ASSOC; 7205 } 7206 } else { 7207 /* 7208 * strange, address is in another 7209 * assoc? straighten out locks. 7210 */ 7211 if (stcb_tmp) { 7212 if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) { 7213 struct mbuf *op_err; 7214 char msg[SCTP_DIAG_INFO_LEN]; 7215 7216 /* in setup state we abort this guy */ 7217 SCTP_SNPRINTF(msg, sizeof(msg), 7218 "%s:%d at %s", __FILE__, __LINE__, __func__); 7219 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 7220 msg); 7221 sctp_abort_an_association(stcb_tmp->sctp_ep, 7222 stcb_tmp, op_err, false, 7223 SCTP_SO_NOT_LOCKED); 7224 goto add_it_now; 7225 } 7226 SCTP_TCB_UNLOCK(stcb_tmp); 7227 } 7228 7229 if (stcb->asoc.state == 0) { 7230 /* the assoc was freed? */ 7231 return (-12); 7232 } 7233 return (-13); 7234 } 7235 } 7236 } else 7237 #endif 7238 #ifdef INET6 7239 if (ptype == SCTP_IPV6_ADDRESS) { 7240 if (stcb->asoc.scope.ipv6_addr_legal) { 7241 /* ok get the v6 address and check/add */ 7242 struct sctp_ipv6addr_param *p6, p6_buf; 7243 7244 phdr = sctp_get_next_param(m, offset, 7245 (struct sctp_paramhdr *)&p6_buf, 7246 sizeof(p6_buf)); 7247 if (plen != sizeof(struct sctp_ipv6addr_param) || 7248 phdr == NULL) { 7249 return (-14); 7250 } 7251 p6 = (struct sctp_ipv6addr_param *)phdr; 7252 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 7253 sizeof(p6->addr)); 7254 if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { 7255 /* Skip multi-cast addresses */ 7256 goto next_param; 7257 } 7258 if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { 7259 /* Link local make no sense without scope */ 7260 goto next_param; 7261 } 7262 sa = (struct sockaddr *)&sin6; 7263 inp = stcb->sctp_ep; 7264 atomic_add_int(&stcb->asoc.refcnt, 1); 7265 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 7266 dst, stcb); 7267 atomic_subtract_int(&stcb->asoc.refcnt, 1); 7268 if (stcb_tmp == NULL && 7269 (inp == stcb->sctp_ep || inp 
== NULL)) { 7270 /* 7271 * we must validate the state again 7272 * here 7273 */ 7274 add_it_now6: 7275 if (stcb->asoc.state == 0) { 7276 /* the assoc was freed? */ 7277 return (-16); 7278 } 7279 /* 7280 * we must add the address, no scope 7281 * set 7282 */ 7283 if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) { 7284 return (-17); 7285 } 7286 } else if (stcb_tmp == stcb) { 7287 /* 7288 * we must validate the state again 7289 * here 7290 */ 7291 if (stcb->asoc.state == 0) { 7292 /* the assoc was freed? */ 7293 return (-19); 7294 } 7295 if (net != NULL) { 7296 /* clear flag */ 7297 net->dest_state &= 7298 ~SCTP_ADDR_NOT_IN_ASSOC; 7299 } 7300 } else { 7301 /* 7302 * strange, address is in another 7303 * assoc? straighten out locks. 7304 */ 7305 if (stcb_tmp) { 7306 if (SCTP_GET_STATE(stcb_tmp) == SCTP_STATE_COOKIE_WAIT) { 7307 struct mbuf *op_err; 7308 char msg[SCTP_DIAG_INFO_LEN]; 7309 7310 /* in setup state we abort this guy */ 7311 SCTP_SNPRINTF(msg, sizeof(msg), 7312 "%s:%d at %s", __FILE__, __LINE__, __func__); 7313 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 7314 msg); 7315 sctp_abort_an_association(stcb_tmp->sctp_ep, 7316 stcb_tmp, op_err, false, 7317 SCTP_SO_NOT_LOCKED); 7318 goto add_it_now6; 7319 } 7320 SCTP_TCB_UNLOCK(stcb_tmp); 7321 } 7322 if (stcb->asoc.state == 0) { 7323 /* the assoc was freed? 
*/ 7324 return (-21); 7325 } 7326 return (-22); 7327 } 7328 } 7329 } else 7330 #endif 7331 if (ptype == SCTP_ECN_CAPABLE) { 7332 peer_supports_ecn = 1; 7333 } else if (ptype == SCTP_ULP_ADAPTATION) { 7334 if (stcb->asoc.state != SCTP_STATE_OPEN) { 7335 struct sctp_adaptation_layer_indication ai, *aip; 7336 7337 phdr = sctp_get_next_param(m, offset, 7338 (struct sctp_paramhdr *)&ai, sizeof(ai)); 7339 aip = (struct sctp_adaptation_layer_indication *)phdr; 7340 if (aip) { 7341 stcb->asoc.peers_adaptation = ntohl(aip->indication); 7342 stcb->asoc.adaptation_needed = 1; 7343 } 7344 } 7345 } else if (ptype == SCTP_SET_PRIM_ADDR) { 7346 struct sctp_asconf_addr_param lstore, *fee; 7347 int lptype; 7348 struct sockaddr *lsa = NULL; 7349 #ifdef INET 7350 struct sctp_asconf_addrv4_param *fii; 7351 #endif 7352 7353 if (stcb->asoc.asconf_supported == 0) { 7354 return (-100); 7355 } 7356 if (plen > sizeof(lstore)) { 7357 return (-23); 7358 } 7359 if (plen < sizeof(struct sctp_asconf_addrv4_param)) { 7360 return (-101); 7361 } 7362 phdr = sctp_get_next_param(m, offset, 7363 (struct sctp_paramhdr *)&lstore, 7364 plen); 7365 if (phdr == NULL) { 7366 return (-24); 7367 } 7368 fee = (struct sctp_asconf_addr_param *)phdr; 7369 lptype = ntohs(fee->addrp.ph.param_type); 7370 switch (lptype) { 7371 #ifdef INET 7372 case SCTP_IPV4_ADDRESS: 7373 if (plen != 7374 sizeof(struct sctp_asconf_addrv4_param)) { 7375 SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n", 7376 (int)sizeof(struct sctp_asconf_addrv4_param), 7377 plen); 7378 } else { 7379 fii = (struct sctp_asconf_addrv4_param *)fee; 7380 sin.sin_addr.s_addr = fii->addrp.addr; 7381 lsa = (struct sockaddr *)&sin; 7382 } 7383 break; 7384 #endif 7385 #ifdef INET6 7386 case SCTP_IPV6_ADDRESS: 7387 if (plen != 7388 sizeof(struct sctp_asconf_addr_param)) { 7389 SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n", 7390 (int)sizeof(struct sctp_asconf_addr_param), 7391 plen); 7392 } else { 7393 
memcpy(sin6.sin6_addr.s6_addr, 7394 fee->addrp.addr, 7395 sizeof(fee->addrp.addr)); 7396 lsa = (struct sockaddr *)&sin6; 7397 } 7398 break; 7399 #endif 7400 default: 7401 break; 7402 } 7403 if (lsa) { 7404 (void)sctp_set_primary_addr(stcb, sa, NULL); 7405 } 7406 } else if (ptype == SCTP_HAS_NAT_SUPPORT) { 7407 stcb->asoc.peer_supports_nat = 1; 7408 } else if (ptype == SCTP_PRSCTP_SUPPORTED) { 7409 /* Peer supports pr-sctp */ 7410 peer_supports_prsctp = 1; 7411 } else if (ptype == SCTP_ZERO_CHECKSUM_ACCEPTABLE) { 7412 struct sctp_zero_checksum_acceptable zero_chksum, *zero_chksum_p; 7413 7414 phdr = sctp_get_next_param(m, offset, 7415 (struct sctp_paramhdr *)&zero_chksum, 7416 sizeof(struct sctp_zero_checksum_acceptable)); 7417 if (phdr != NULL) { 7418 /* 7419 * Only send zero checksums if the upper layer 7420 * has enabled the support for the same method 7421 * as allowed by the peer. 7422 */ 7423 zero_chksum_p = (struct sctp_zero_checksum_acceptable *)phdr; 7424 if ((ntohl(zero_chksum_p->edmid) != SCTP_EDMID_NONE) && 7425 (ntohl(zero_chksum_p->edmid) == stcb->asoc.rcv_edmid)) { 7426 stcb->asoc.snd_edmid = stcb->asoc.rcv_edmid; 7427 } 7428 } 7429 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) { 7430 /* A supported extension chunk */ 7431 struct sctp_supported_chunk_types_param *pr_supported; 7432 uint8_t local_store[SCTP_PARAM_BUFFER_SIZE]; 7433 int num_ent, i; 7434 7435 if (plen > sizeof(local_store)) { 7436 return (-35); 7437 } 7438 phdr = sctp_get_next_param(m, offset, 7439 (struct sctp_paramhdr *)&local_store, plen); 7440 if (phdr == NULL) { 7441 return (-25); 7442 } 7443 pr_supported = (struct sctp_supported_chunk_types_param *)phdr; 7444 num_ent = plen - sizeof(struct sctp_paramhdr); 7445 for (i = 0; i < num_ent; i++) { 7446 switch (pr_supported->chunk_types[i]) { 7447 case SCTP_ASCONF: 7448 peer_supports_asconf = 1; 7449 break; 7450 case SCTP_ASCONF_ACK: 7451 peer_supports_asconf_ack = 1; 7452 break; 7453 case SCTP_FORWARD_CUM_TSN: 7454 peer_supports_prsctp = 
1; 7455 break; 7456 case SCTP_PACKET_DROPPED: 7457 peer_supports_pktdrop = 1; 7458 break; 7459 case SCTP_NR_SELECTIVE_ACK: 7460 peer_supports_nrsack = 1; 7461 break; 7462 case SCTP_STREAM_RESET: 7463 peer_supports_reconfig = 1; 7464 break; 7465 case SCTP_AUTHENTICATION: 7466 peer_supports_auth = 1; 7467 break; 7468 case SCTP_IDATA: 7469 peer_supports_idata = 1; 7470 break; 7471 default: 7472 /* one I have not learned yet */ 7473 break; 7474 } 7475 } 7476 } else if (ptype == SCTP_RANDOM) { 7477 if (plen > sizeof(random_store)) 7478 break; 7479 if (got_random) { 7480 /* already processed a RANDOM */ 7481 goto next_param; 7482 } 7483 phdr = sctp_get_next_param(m, offset, 7484 (struct sctp_paramhdr *)random_store, 7485 plen); 7486 if (phdr == NULL) 7487 return (-26); 7488 p_random = (struct sctp_auth_random *)phdr; 7489 random_len = plen - sizeof(*p_random); 7490 /* enforce the random length */ 7491 if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) { 7492 SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n"); 7493 return (-27); 7494 } 7495 got_random = 1; 7496 } else if (ptype == SCTP_HMAC_LIST) { 7497 uint16_t num_hmacs; 7498 uint16_t i; 7499 7500 if (plen > sizeof(hmacs_store)) 7501 break; 7502 if (got_hmacs) { 7503 /* already processed a HMAC list */ 7504 goto next_param; 7505 } 7506 phdr = sctp_get_next_param(m, offset, 7507 (struct sctp_paramhdr *)hmacs_store, 7508 plen); 7509 if (phdr == NULL) 7510 return (-28); 7511 hmacs = (struct sctp_auth_hmac_algo *)phdr; 7512 hmacs_len = plen - sizeof(*hmacs); 7513 num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]); 7514 /* validate the hmac list */ 7515 if (sctp_verify_hmac_param(hmacs, num_hmacs)) { 7516 return (-29); 7517 } 7518 if (stcb->asoc.peer_hmacs != NULL) 7519 sctp_free_hmaclist(stcb->asoc.peer_hmacs); 7520 stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs); 7521 if (stcb->asoc.peer_hmacs != NULL) { 7522 for (i = 0; i < num_hmacs; i++) { 7523 (void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs, 7524 
ntohs(hmacs->hmac_ids[i])); 7525 } 7526 } 7527 got_hmacs = 1; 7528 } else if (ptype == SCTP_CHUNK_LIST) { 7529 int i; 7530 7531 if (plen > sizeof(chunks_store)) 7532 break; 7533 if (got_chklist) { 7534 /* already processed a Chunks list */ 7535 goto next_param; 7536 } 7537 phdr = sctp_get_next_param(m, offset, 7538 (struct sctp_paramhdr *)chunks_store, 7539 plen); 7540 if (phdr == NULL) 7541 return (-30); 7542 chunks = (struct sctp_auth_chunk_list *)phdr; 7543 num_chunks = plen - sizeof(*chunks); 7544 if (stcb->asoc.peer_auth_chunks != NULL) 7545 sctp_clear_chunklist(stcb->asoc.peer_auth_chunks); 7546 else 7547 stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist(); 7548 for (i = 0; i < num_chunks; i++) { 7549 (void)sctp_auth_add_chunk(chunks->chunk_types[i], 7550 stcb->asoc.peer_auth_chunks); 7551 /* record asconf/asconf-ack if listed */ 7552 if (chunks->chunk_types[i] == SCTP_ASCONF) 7553 saw_asconf = 1; 7554 if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) 7555 saw_asconf_ack = 1; 7556 } 7557 got_chklist = 1; 7558 } else if ((ptype == SCTP_HEARTBEAT_INFO) || 7559 (ptype == SCTP_STATE_COOKIE) || 7560 (ptype == SCTP_UNRECOG_PARAM) || 7561 (ptype == SCTP_COOKIE_PRESERVE) || 7562 (ptype == SCTP_SUPPORTED_ADDRTYPE) || 7563 (ptype == SCTP_ADD_IP_ADDRESS) || 7564 (ptype == SCTP_DEL_IP_ADDRESS) || 7565 (ptype == SCTP_ERROR_CAUSE_IND) || 7566 (ptype == SCTP_SUCCESS_REPORT)) { 7567 /* don't care */ 7568 } else { 7569 if ((ptype & 0x8000) == 0x0000) { 7570 /* 7571 * must stop processing the rest of the 7572 * param's. Any report bits were handled 7573 * with the call to 7574 * sctp_arethere_unrecognized_parameters() 7575 * when the INIT or INIT-ACK was first seen. 
7576 */ 7577 break; 7578 } 7579 } 7580 7581 next_param: 7582 offset += SCTP_SIZE32(plen); 7583 if (offset >= limit) { 7584 break; 7585 } 7586 phdr = sctp_get_next_param(m, offset, ¶m_buf, 7587 sizeof(param_buf)); 7588 } 7589 /* Now check to see if we need to purge any addresses */ 7590 TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) { 7591 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) == 7592 SCTP_ADDR_NOT_IN_ASSOC) { 7593 /* This address has been removed from the asoc */ 7594 /* remove and free it */ 7595 stcb->asoc.numnets--; 7596 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next); 7597 if (net == stcb->asoc.alternate) { 7598 sctp_free_remote_addr(stcb->asoc.alternate); 7599 stcb->asoc.alternate = NULL; 7600 } 7601 if (net == stcb->asoc.primary_destination) { 7602 stcb->asoc.primary_destination = NULL; 7603 sctp_select_primary_destination(stcb); 7604 } 7605 sctp_free_remote_addr(net); 7606 } 7607 } 7608 if ((stcb->asoc.ecn_supported == 1) && 7609 (peer_supports_ecn == 0)) { 7610 stcb->asoc.ecn_supported = 0; 7611 } 7612 if ((stcb->asoc.prsctp_supported == 1) && 7613 (peer_supports_prsctp == 0)) { 7614 stcb->asoc.prsctp_supported = 0; 7615 } 7616 if ((stcb->asoc.auth_supported == 1) && 7617 ((peer_supports_auth == 0) || 7618 (got_random == 0) || (got_hmacs == 0))) { 7619 stcb->asoc.auth_supported = 0; 7620 } 7621 if ((stcb->asoc.asconf_supported == 1) && 7622 ((peer_supports_asconf == 0) || (peer_supports_asconf_ack == 0) || 7623 (stcb->asoc.auth_supported == 0) || 7624 (saw_asconf == 0) || (saw_asconf_ack == 0))) { 7625 stcb->asoc.asconf_supported = 0; 7626 } 7627 if ((stcb->asoc.reconfig_supported == 1) && 7628 (peer_supports_reconfig == 0)) { 7629 stcb->asoc.reconfig_supported = 0; 7630 } 7631 if ((stcb->asoc.idata_supported == 1) && 7632 (peer_supports_idata == 0)) { 7633 stcb->asoc.idata_supported = 0; 7634 } 7635 if ((stcb->asoc.nrsack_supported == 1) && 7636 (peer_supports_nrsack == 0)) { 7637 stcb->asoc.nrsack_supported = 0; 7638 } 7639 if 
	    ((stcb->asoc.pktdrop_supported == 1) &&
	    (peer_supports_pktdrop == 0)) {
		stcb->asoc.pktdrop_supported = 0;
	}
	/* validate authentication required parameters */
	if ((peer_supports_auth == 0) && (got_chklist == 1)) {
		/* peer does not support auth but sent a chunks list? */
		return (-31);
	}
	if ((peer_supports_asconf == 1) && (peer_supports_auth == 0)) {
		/* peer supports asconf but not auth? */
		return (-32);
	} else if ((peer_supports_asconf == 1) &&
	    (peer_supports_auth == 1) &&
	    ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
		return (-33);
	}
	/* concatenate the full random key */
	/*
	 * keylen is first sized for the worst case (RANDOM + HMAC list +
	 * optional chunk list) and then reused below as a running copy
	 * offset into new_key->key.
	 */
	keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
	if (chunks != NULL) {
		keylen += sizeof(*chunks) + num_chunks;
	}
	new_key = sctp_alloc_key(keylen);
	if (new_key != NULL) {
		/* copy in the RANDOM */
		if (p_random != NULL) {
			keylen = sizeof(*p_random) + random_len;
			memcpy(new_key->key, p_random, keylen);
		} else {
			keylen = 0;
		}
		/* append in the AUTH chunks */
		if (chunks != NULL) {
			memcpy(new_key->key + keylen, chunks,
			    sizeof(*chunks) + num_chunks);
			keylen += sizeof(*chunks) + num_chunks;
		}
		/* append in the HMACs */
		if (hmacs != NULL) {
			memcpy(new_key->key + keylen, hmacs,
			    sizeof(*hmacs) + hmacs_len);
		}
	} else {
		/* failed to get memory for the key */
		return (-34);
	}
	/* replace any previously stored peer RANDOM and flush cached keys */
	if (stcb->asoc.authinfo.peer_random != NULL)
		sctp_free_key(stcb->asoc.authinfo.peer_random);
	stcb->asoc.authinfo.peer_random = new_key;
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);

	return (0);
}

/*
 * Set the primary destination address of an association.
 * If "net" is NULL, "sa" is looked up in the association's net list.
 * Returns 0 on success and -1 if the requested address does not belong
 * to the association.  An unconfirmed address is not made primary
 * immediately; it is only marked SCTP_ADDR_REQ_PRIMARY so it can become
 * primary later (once confirmed — see the flag handling below).
 */
int
sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
    struct sctp_nets *net)
{
	/* make sure the requested primary address exists in the assoc */
	if (net == NULL && sa)
		net = sctp_findnet(stcb, sa);

	if (net == NULL) {
		/* didn't find the requested primary address! */
		return (-1);
	} else {
		/* set the primary address */
		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
			/* Must be confirmed, so queue to set */
			net->dest_state |= SCTP_ADDR_REQ_PRIMARY;
			return (0);
		}
		stcb->asoc.primary_destination = net;
		/*
		 * If the new primary is not in the PF state, release any
		 * alternate destination that was being used.
		 */
		if (((net->dest_state & SCTP_ADDR_PF) == 0) &&
		    (stcb->asoc.alternate != NULL)) {
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net != stcb->asoc.primary_destination) {
			/* first one on the list is NOT the primary
			 * sctp_cmpaddr() is much more efficient if
			 * the primary is the first on the list, make it
			 * so.
			 */
			TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
		}
		return (0);
	}
}

/*
 * Check whether verification tag "tag" may be used for a new association
 * on the port pair lport/rport.  Returns false if the tag is in use by a
 * live association with matching ports, or is still in time-wait.
 * The caller must hold the INP-info lock (asserted below).
 */
bool
sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now)
{
	struct sctpasochead *head;
	struct sctp_tcb *stcb;

	SCTP_INP_INFO_LOCK_ASSERT();

	/* hash the tag into the association hash table and scan the chain */
	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, SCTP_BASE_INFO(hashasocmark))];
	LIST_FOREACH(stcb, head, sctp_asocs) {
		/* We choose not to lock anything here. TCB's can't be
		 * removed since we have the read lock, so they can't
		 * be freed on us, same thing for the INP.
		 * I may be wrong with this assumption, but we will go
		 * with it for now :-)
		 */
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
			continue;
		}
		if (stcb->asoc.my_vtag == tag) {
			/* candidate */
			if (stcb->rport != rport) {
				continue;
			}
			if (stcb->sctp_ep->sctp_lport != lport) {
				continue;
			}
			/* The tag is currently used, so don't use it. */
			return (false);
		}
	}
	/* no live user of the tag; it is good unless still in time-wait */
	return (!sctp_is_in_timewait(tag, lport, rport, now->tv_sec));
}

/*
 * Renege on out-of-order data held by one association to give mbufs back
 * to a starved system.  Walks every inbound stream (ordered and unordered
 * queues), frees everything above cumulative-TSN + 1, clears the
 * corresponding bits in the mapping array, then recomputes
 * highest_tsn_inside_map and forces a SACK so the peer retransmits.
 * Called with the TCB locked (see sctp_drain()).
 */
static void
sctp_drain_mbufs(struct sctp_tcb *stcb)
{
	/*
	 * We must hunt this association for MBUF's past the cumack (i.e.
	 * out of order data that we can renege on).
	 */
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	uint32_t cumulative_tsn_p1;
	struct sctp_queued_to_read *control, *ncontrol;
	int cnt, strmat;
	uint32_t gap, i;
	int fnd = 0;

	/* We look for anything larger than the cum-ack + 1 */

	asoc = &stcb->asoc;
	if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
		/* none we can reneg on. */
		return;
	}
	SCTP_STAT_INCR(sctps_protocol_drains_done);
	cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
	cnt = 0;
	/* Ok that was fun, now we will drain all the inbound streams? */
	for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
		/* First: the ordered queue of this stream. */
		TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].inqueue, next_instrm, ncontrol) {
#ifdef INVARIANTS
			if (control->on_strm_q != SCTP_ON_ORDERED) {
				panic("Huh control: %p on_q: %d -- not ordered?",
				    control, control->on_strm_q);
			}
#endif
			if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) {
				/* Yep it is above cum-ack */
				cnt++;
				SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn);
				KASSERT(control->length > 0, ("control has zero length"));
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* mark the TSN as no longer received */
				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
				if (control->on_read_q) {
					TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
					control->on_read_q = 0;
				}
				TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, control, next_instrm);
				control->on_strm_q = 0;
				if (control->data) {
					sctp_m_freem(control->data);
					control->data = NULL;
				}
				sctp_free_remote_addr(control->whoFrom);
				/* Now its reasm? Toss any fragments attached to this control. */
				TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
					cnt++;
					SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
					KASSERT(chk->send_size > 0, ("chunk has zero length"));
					if (asoc->size_on_reasm_queue >= chk->send_size) {
						asoc->size_on_reasm_queue -= chk->send_size;
					} else {
#ifdef INVARIANTS
						panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
						asoc->size_on_reasm_queue = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_reasm_queue);
					SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
					TAILQ_REMOVE(&control->reasm, chk, sctp_next);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
					sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				}
				sctp_free_a_readq(stcb, control);
			}
		}
		/* Second: the unordered queue of this stream (same procedure). */
		TAILQ_FOREACH_SAFE(control, &asoc->strmin[strmat].uno_inqueue, next_instrm, ncontrol) {
#ifdef INVARIANTS
			if (control->on_strm_q != SCTP_ON_UNORDERED) {
				panic("Huh control: %p on_q: %d -- not unordered?",
				    control, control->on_strm_q);
			}
#endif
			if (SCTP_TSN_GT(control->sinfo_tsn, cumulative_tsn_p1)) {
				/* Yep it is above cum-ack */
				cnt++;
				SCTP_CALC_TSN_TO_GAP(gap, control->sinfo_tsn, asoc->mapping_array_base_tsn);
				KASSERT(control->length > 0, ("control has zero length"));
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
				if (control->on_read_q) {
					TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
					control->on_read_q = 0;
				}
				TAILQ_REMOVE(&asoc->strmin[strmat].uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
				if (control->data) {
					sctp_m_freem(control->data);
					control->data = NULL;
				}
				sctp_free_remote_addr(control->whoFrom);
				/* Now its reasm? Toss any fragments attached to this control. */
				TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
					cnt++;
					SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
					KASSERT(chk->send_size > 0, ("chunk has zero length"));
					if (asoc->size_on_reasm_queue >= chk->send_size) {
						asoc->size_on_reasm_queue -= chk->send_size;
					} else {
#ifdef INVARIANTS
						panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
						asoc->size_on_reasm_queue = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_reasm_queue);
					SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
					TAILQ_REMOVE(&control->reasm, chk, sctp_next);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
					sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				}
				sctp_free_a_readq(stcb, control);
			}
		}
	}
	if (cnt) {
		/* We must back down to see what the new highest is */
		for (i = asoc->highest_tsn_inside_map; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			/* nothing left above the base; map is effectively empty */
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}

		/*
		 * Question, should we go through the delivery queue? The only
		 * reason things are on here is the app not reading OR a p-d-api up.
		 * An attacker COULD send enough in to initiate the PD-API and then
		 * send a bunch of stuff to other streams... these would wind up on
		 * the delivery queue.. and then we would not get to them. But in
		 * order to do this I then have to back-track and un-deliver
		 * sequence numbers in streams.. el-yucko. I think for now we will
		 * NOT look at the delivery queue and leave it to be something to
		 * consider later. An alternative would be to abort the P-D-API with
		 * a notification and then deliver the data.... Or another method
		 * might be to keep track of how many times the situation occurs and
		 * if we see a possible attack underway just abort the association.
		 */
#ifdef SCTP_DEBUG
		SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt);
#endif
		/*
		 * Now do we need to find a new
		 * asoc->highest_tsn_inside_map?
		 */
		asoc->last_revoke_count = cnt;
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTP_PCB + SCTP_LOC_11);
		/*sa_ignore NO_NULL_CHK*/
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED);
	}
	/*
	 * Another issue, in un-setting the TSN's in the mapping array we
	 * DID NOT adjust the highest_tsn marker. This will cause one of two
	 * things to occur. It may cause us to do extra work in checking for
	 * our mapping array movement. More importantly it may cause us to
	 * SACK every datagram. This may not be a bad thing though since we
	 * will recover once we get our cum-ack above and all this stuff we
	 * dumped recovered.
	 */
}

/*
 * System low-memory hook: walk every endpoint and every association and
 * let sctp_drain_mbufs() renege on out-of-order data.  On FreeBSD kernel
 * builds this iterates all vnets inside a network epoch and is registered
 * below as a vm_lowmem/mbuf_lowmem eventhandler; other builds export it
 * and call it directly.
 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
static void
#else
void
#endif
sctp_drain(void)
{
#if defined(__FreeBSD__) && !defined(__Userspace__)
	struct epoch_tracker et;
	VNET_ITERATOR_DECL(vnet_iter);

	NET_EPOCH_ENTER(et);
#else
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;

	SCTP_STAT_INCR(sctps_protocol_drain_calls);
	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
#endif
	/*
	 * We must walk the PCB lists for ALL associations here. The system
	 * is LOW on MBUF's and needs help. This is where reneging will
	 * occur. We really hope this does NOT happen!
	 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		struct sctp_inpcb *inp;
		struct sctp_tcb *stcb;
#endif

#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_STAT_INCR(sctps_protocol_drain_calls);
		if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
#ifdef VIMAGE
			/* draining disabled; try the next vnet */
			continue;
#else
			NET_EPOCH_EXIT(et);
			return;
#endif
		}
#endif
		SCTP_INP_INFO_RLOCK();
		LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
			/* For each endpoint */
			SCTP_INP_RLOCK(inp);
			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
				/* For each association */
				SCTP_TCB_LOCK(stcb);
				sctp_drain_mbufs(stcb);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_INP_RUNLOCK(inp);
		}
		SCTP_INP_INFO_RUNLOCK();
#if defined(__FreeBSD__) && !defined(__Userspace__)
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	NET_EPOCH_EXIT(et);
#endif
}
#if defined(__FreeBSD__) && !defined(__Userspace__)
EVENTHANDLER_DEFINE(vm_lowmem, sctp_drain, NULL, LOWMEM_PRI_DEFAULT);
EVENTHANDLER_DEFINE(mbuf_lowmem, sctp_drain, NULL, LOWMEM_PRI_DEFAULT);
#endif

/*
 * start a new iterator
 * iterates through all endpoints and associations based on the pcb_state
 * flags and asoc_state. "af" (mandatory) is executed for all matching
 * assocs and "ef" (optional) is executed when the iterator completes.
 * "inpf" (optional) is executed for each new endpoint as it is being
 * iterated through. inpe (optional) is called when the inp completes
 * its way through all the stcbs.
 *
 * Returns 0 when the iterator was queued, -1 on any failure (no assoc
 * function, stack not initialized, or allocation failure).  The iterator
 * itself runs asynchronously off the iterator work queue.
 */
int
sctp_initiate_iterator(inp_func inpf,
    asoc_func af,
    inp_func inpe,
    uint32_t pcb_state,
    uint32_t pcb_features,
    uint32_t asoc_state,
    void *argp,
    uint32_t argi,
    end_func ef,
    struct sctp_inpcb *s_inp,
    uint8_t chunk_output_off)
{
	struct sctp_iterator *it = NULL;

	if (af == NULL) {
		/* an assoc function is mandatory */
		return (-1);
	}
	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		SCTP_PRINTF("%s: abort on initialize being %d\n", __func__,
		    SCTP_BASE_VAR(sctp_pcb_initialized));
		return (-1);
	}
	SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
	    SCTP_M_ITER);
	if (it == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
		return (-1);
	}
	memset(it, 0, sizeof(*it));
	it->function_assoc = af;
	it->function_inp = inpf;
	/* with no per-endpoint function the current ep is trivially "done" */
	if (inpf)
		it->done_current_ep = 0;
	else
		it->done_current_ep = 1;
	it->function_atend = ef;
	it->pointer = argp;
	it->val = argi;
	it->pcb_flags = pcb_state;
	it->pcb_features = pcb_features;
	it->asoc_state = asoc_state;
	it->function_inp_end = inpe;
	it->no_chunk_output = chunk_output_off;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	it->vn = curvnet;
#endif
	if (s_inp) {
		/* Assume lock is held here */
		it->inp = s_inp;
		SCTP_INP_INCR_REF(it->inp);
		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
	} else {
		/* start with the first endpoint on the global list */
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead));
		if (it->inp) {
			SCTP_INP_INCR_REF(it->inp);
		}
		SCTP_INP_INFO_RUNLOCK();
		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
	}
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Re-check initialization under the work-queue lock: the stack may
	 * have been torn down while we were setting the iterator up.
	 * NOTE(review): on this rollback path the reference taken on
	 * it->inp above is not released before freeing "it" — verify
	 * whether that can leak an inp refcount during shutdown.
	 */
	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		SCTP_PRINTF("%s: rollback on initialize being %d it=%p\n", __func__,
		    SCTP_BASE_VAR(sctp_pcb_initialized), it);
		SCTP_FREE(it, SCTP_M_ITER);
		return (-1);
	}
	TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
	if (sctp_it_ctl.iterator_running == 0) {
		sctp_wakeup_iterator();
	}
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	/* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */
	return (0);
}

/*
 * Atomically add flags to the sctp_flags of an inp.
 * To be used when the write lock of the inp is not held.
 */
void
sctp_pcb_add_flags(struct sctp_inpcb *inp, uint32_t flags)
{
	uint32_t old_flags, new_flags;

	/*
	 * Lock-free read-modify-write: retry the compare-and-set until no
	 * concurrent writer changed sctp_flags between our read and the CAS.
	 */
	do {
		old_flags = inp->sctp_flags;
		new_flags = old_flags | flags;
	} while (atomic_cmpset_int(&inp->sctp_flags, old_flags, new_flags) == 0);
}