sctp_output.c (415699B)
1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * a) Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 14 * b) Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the distribution. 17 * 18 * c) Neither the name of Cisco Systems, Inc. nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35 #include <netinet/sctp_os.h> 36 #if defined(__FreeBSD__) && !defined(__Userspace__) 37 #include <sys/proc.h> 38 #endif 39 #include <netinet/sctp_var.h> 40 #include <netinet/sctp_sysctl.h> 41 #include <netinet/sctp_header.h> 42 #include <netinet/sctp_pcb.h> 43 #include <netinet/sctputil.h> 44 #include <netinet/sctp_output.h> 45 #include <netinet/sctp_uio.h> 46 #include <netinet/sctputil.h> 47 #include <netinet/sctp_auth.h> 48 #include <netinet/sctp_timer.h> 49 #include <netinet/sctp_asconf.h> 50 #include <netinet/sctp_indata.h> 51 #include <netinet/sctp_bsd_addr.h> 52 #include <netinet/sctp_input.h> 53 #include <netinet/sctp_crc32.h> 54 #if defined(__FreeBSD__) && !defined(__Userspace__) 55 #include <netinet/sctp_kdtrace.h> 56 #endif 57 #if defined(__linux__) 58 #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ 59 #endif 60 #if defined(INET) || defined(INET6) 61 #if !defined(_WIN32) 62 #include <netinet/udp.h> 63 #endif 64 #endif 65 #if !defined(__Userspace__) 66 #if defined(__APPLE__) 67 #include <netinet/in.h> 68 #endif 69 #if defined(__FreeBSD__) && !defined(__Userspace__) 70 #include <netinet/udp_var.h> 71 #include <machine/in_cksum.h> 72 #endif 73 #endif 74 #if defined(__Userspace__) && defined(INET6) 75 #include <netinet6/sctp6_var.h> 76 #endif 77 78 #if defined(_WIN32) && !defined(_MSC_VER) 79 #include <minmax.h> 80 #endif 81 82 #if defined(__APPLE__) && !defined(__Userspace__) 83 #if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)) 84 #define SCTP_MAX_LINKHDR 16 85 #endif 86 #endif 87 88 #define SCTP_MAX_GAPS_INARRAY 4 89 struct sack_track { 90 uint8_t right_edge; /* mergable on the right edge */ 91 uint8_t left_edge; /* mergable on the left edge */ 92 uint8_t num_entries; 93 uint8_t spare; 94 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 95 }; 96 97 const struct sack_track sack_array[256] = { 98 {0, 0, 0, 0, /* 0x00 */ 99 {{0, 0}, 100 {0, 0}, 101 {0, 0}, 102 {0, 0} 103 } 104 }, 105 
{1, 0, 1, 0, /* 0x01 */ 106 {{0, 0}, 107 {0, 0}, 108 {0, 0}, 109 {0, 0} 110 } 111 }, 112 {0, 0, 1, 0, /* 0x02 */ 113 {{1, 1}, 114 {0, 0}, 115 {0, 0}, 116 {0, 0} 117 } 118 }, 119 {1, 0, 1, 0, /* 0x03 */ 120 {{0, 1}, 121 {0, 0}, 122 {0, 0}, 123 {0, 0} 124 } 125 }, 126 {0, 0, 1, 0, /* 0x04 */ 127 {{2, 2}, 128 {0, 0}, 129 {0, 0}, 130 {0, 0} 131 } 132 }, 133 {1, 0, 2, 0, /* 0x05 */ 134 {{0, 0}, 135 {2, 2}, 136 {0, 0}, 137 {0, 0} 138 } 139 }, 140 {0, 0, 1, 0, /* 0x06 */ 141 {{1, 2}, 142 {0, 0}, 143 {0, 0}, 144 {0, 0} 145 } 146 }, 147 {1, 0, 1, 0, /* 0x07 */ 148 {{0, 2}, 149 {0, 0}, 150 {0, 0}, 151 {0, 0} 152 } 153 }, 154 {0, 0, 1, 0, /* 0x08 */ 155 {{3, 3}, 156 {0, 0}, 157 {0, 0}, 158 {0, 0} 159 } 160 }, 161 {1, 0, 2, 0, /* 0x09 */ 162 {{0, 0}, 163 {3, 3}, 164 {0, 0}, 165 {0, 0} 166 } 167 }, 168 {0, 0, 2, 0, /* 0x0a */ 169 {{1, 1}, 170 {3, 3}, 171 {0, 0}, 172 {0, 0} 173 } 174 }, 175 {1, 0, 2, 0, /* 0x0b */ 176 {{0, 1}, 177 {3, 3}, 178 {0, 0}, 179 {0, 0} 180 } 181 }, 182 {0, 0, 1, 0, /* 0x0c */ 183 {{2, 3}, 184 {0, 0}, 185 {0, 0}, 186 {0, 0} 187 } 188 }, 189 {1, 0, 2, 0, /* 0x0d */ 190 {{0, 0}, 191 {2, 3}, 192 {0, 0}, 193 {0, 0} 194 } 195 }, 196 {0, 0, 1, 0, /* 0x0e */ 197 {{1, 3}, 198 {0, 0}, 199 {0, 0}, 200 {0, 0} 201 } 202 }, 203 {1, 0, 1, 0, /* 0x0f */ 204 {{0, 3}, 205 {0, 0}, 206 {0, 0}, 207 {0, 0} 208 } 209 }, 210 {0, 0, 1, 0, /* 0x10 */ 211 {{4, 4}, 212 {0, 0}, 213 {0, 0}, 214 {0, 0} 215 } 216 }, 217 {1, 0, 2, 0, /* 0x11 */ 218 {{0, 0}, 219 {4, 4}, 220 {0, 0}, 221 {0, 0} 222 } 223 }, 224 {0, 0, 2, 0, /* 0x12 */ 225 {{1, 1}, 226 {4, 4}, 227 {0, 0}, 228 {0, 0} 229 } 230 }, 231 {1, 0, 2, 0, /* 0x13 */ 232 {{0, 1}, 233 {4, 4}, 234 {0, 0}, 235 {0, 0} 236 } 237 }, 238 {0, 0, 2, 0, /* 0x14 */ 239 {{2, 2}, 240 {4, 4}, 241 {0, 0}, 242 {0, 0} 243 } 244 }, 245 {1, 0, 3, 0, /* 0x15 */ 246 {{0, 0}, 247 {2, 2}, 248 {4, 4}, 249 {0, 0} 250 } 251 }, 252 {0, 0, 2, 0, /* 0x16 */ 253 {{1, 2}, 254 {4, 4}, 255 {0, 0}, 256 {0, 0} 257 } 258 }, 259 {1, 0, 2, 0, /* 0x17 */ 260 {{0, 2}, 261 
{4, 4}, 262 {0, 0}, 263 {0, 0} 264 } 265 }, 266 {0, 0, 1, 0, /* 0x18 */ 267 {{3, 4}, 268 {0, 0}, 269 {0, 0}, 270 {0, 0} 271 } 272 }, 273 {1, 0, 2, 0, /* 0x19 */ 274 {{0, 0}, 275 {3, 4}, 276 {0, 0}, 277 {0, 0} 278 } 279 }, 280 {0, 0, 2, 0, /* 0x1a */ 281 {{1, 1}, 282 {3, 4}, 283 {0, 0}, 284 {0, 0} 285 } 286 }, 287 {1, 0, 2, 0, /* 0x1b */ 288 {{0, 1}, 289 {3, 4}, 290 {0, 0}, 291 {0, 0} 292 } 293 }, 294 {0, 0, 1, 0, /* 0x1c */ 295 {{2, 4}, 296 {0, 0}, 297 {0, 0}, 298 {0, 0} 299 } 300 }, 301 {1, 0, 2, 0, /* 0x1d */ 302 {{0, 0}, 303 {2, 4}, 304 {0, 0}, 305 {0, 0} 306 } 307 }, 308 {0, 0, 1, 0, /* 0x1e */ 309 {{1, 4}, 310 {0, 0}, 311 {0, 0}, 312 {0, 0} 313 } 314 }, 315 {1, 0, 1, 0, /* 0x1f */ 316 {{0, 4}, 317 {0, 0}, 318 {0, 0}, 319 {0, 0} 320 } 321 }, 322 {0, 0, 1, 0, /* 0x20 */ 323 {{5, 5}, 324 {0, 0}, 325 {0, 0}, 326 {0, 0} 327 } 328 }, 329 {1, 0, 2, 0, /* 0x21 */ 330 {{0, 0}, 331 {5, 5}, 332 {0, 0}, 333 {0, 0} 334 } 335 }, 336 {0, 0, 2, 0, /* 0x22 */ 337 {{1, 1}, 338 {5, 5}, 339 {0, 0}, 340 {0, 0} 341 } 342 }, 343 {1, 0, 2, 0, /* 0x23 */ 344 {{0, 1}, 345 {5, 5}, 346 {0, 0}, 347 {0, 0} 348 } 349 }, 350 {0, 0, 2, 0, /* 0x24 */ 351 {{2, 2}, 352 {5, 5}, 353 {0, 0}, 354 {0, 0} 355 } 356 }, 357 {1, 0, 3, 0, /* 0x25 */ 358 {{0, 0}, 359 {2, 2}, 360 {5, 5}, 361 {0, 0} 362 } 363 }, 364 {0, 0, 2, 0, /* 0x26 */ 365 {{1, 2}, 366 {5, 5}, 367 {0, 0}, 368 {0, 0} 369 } 370 }, 371 {1, 0, 2, 0, /* 0x27 */ 372 {{0, 2}, 373 {5, 5}, 374 {0, 0}, 375 {0, 0} 376 } 377 }, 378 {0, 0, 2, 0, /* 0x28 */ 379 {{3, 3}, 380 {5, 5}, 381 {0, 0}, 382 {0, 0} 383 } 384 }, 385 {1, 0, 3, 0, /* 0x29 */ 386 {{0, 0}, 387 {3, 3}, 388 {5, 5}, 389 {0, 0} 390 } 391 }, 392 {0, 0, 3, 0, /* 0x2a */ 393 {{1, 1}, 394 {3, 3}, 395 {5, 5}, 396 {0, 0} 397 } 398 }, 399 {1, 0, 3, 0, /* 0x2b */ 400 {{0, 1}, 401 {3, 3}, 402 {5, 5}, 403 {0, 0} 404 } 405 }, 406 {0, 0, 2, 0, /* 0x2c */ 407 {{2, 3}, 408 {5, 5}, 409 {0, 0}, 410 {0, 0} 411 } 412 }, 413 {1, 0, 3, 0, /* 0x2d */ 414 {{0, 0}, 415 {2, 3}, 416 {5, 5}, 417 {0, 0} 418 } 419 
}, 420 {0, 0, 2, 0, /* 0x2e */ 421 {{1, 3}, 422 {5, 5}, 423 {0, 0}, 424 {0, 0} 425 } 426 }, 427 {1, 0, 2, 0, /* 0x2f */ 428 {{0, 3}, 429 {5, 5}, 430 {0, 0}, 431 {0, 0} 432 } 433 }, 434 {0, 0, 1, 0, /* 0x30 */ 435 {{4, 5}, 436 {0, 0}, 437 {0, 0}, 438 {0, 0} 439 } 440 }, 441 {1, 0, 2, 0, /* 0x31 */ 442 {{0, 0}, 443 {4, 5}, 444 {0, 0}, 445 {0, 0} 446 } 447 }, 448 {0, 0, 2, 0, /* 0x32 */ 449 {{1, 1}, 450 {4, 5}, 451 {0, 0}, 452 {0, 0} 453 } 454 }, 455 {1, 0, 2, 0, /* 0x33 */ 456 {{0, 1}, 457 {4, 5}, 458 {0, 0}, 459 {0, 0} 460 } 461 }, 462 {0, 0, 2, 0, /* 0x34 */ 463 {{2, 2}, 464 {4, 5}, 465 {0, 0}, 466 {0, 0} 467 } 468 }, 469 {1, 0, 3, 0, /* 0x35 */ 470 {{0, 0}, 471 {2, 2}, 472 {4, 5}, 473 {0, 0} 474 } 475 }, 476 {0, 0, 2, 0, /* 0x36 */ 477 {{1, 2}, 478 {4, 5}, 479 {0, 0}, 480 {0, 0} 481 } 482 }, 483 {1, 0, 2, 0, /* 0x37 */ 484 {{0, 2}, 485 {4, 5}, 486 {0, 0}, 487 {0, 0} 488 } 489 }, 490 {0, 0, 1, 0, /* 0x38 */ 491 {{3, 5}, 492 {0, 0}, 493 {0, 0}, 494 {0, 0} 495 } 496 }, 497 {1, 0, 2, 0, /* 0x39 */ 498 {{0, 0}, 499 {3, 5}, 500 {0, 0}, 501 {0, 0} 502 } 503 }, 504 {0, 0, 2, 0, /* 0x3a */ 505 {{1, 1}, 506 {3, 5}, 507 {0, 0}, 508 {0, 0} 509 } 510 }, 511 {1, 0, 2, 0, /* 0x3b */ 512 {{0, 1}, 513 {3, 5}, 514 {0, 0}, 515 {0, 0} 516 } 517 }, 518 {0, 0, 1, 0, /* 0x3c */ 519 {{2, 5}, 520 {0, 0}, 521 {0, 0}, 522 {0, 0} 523 } 524 }, 525 {1, 0, 2, 0, /* 0x3d */ 526 {{0, 0}, 527 {2, 5}, 528 {0, 0}, 529 {0, 0} 530 } 531 }, 532 {0, 0, 1, 0, /* 0x3e */ 533 {{1, 5}, 534 {0, 0}, 535 {0, 0}, 536 {0, 0} 537 } 538 }, 539 {1, 0, 1, 0, /* 0x3f */ 540 {{0, 5}, 541 {0, 0}, 542 {0, 0}, 543 {0, 0} 544 } 545 }, 546 {0, 0, 1, 0, /* 0x40 */ 547 {{6, 6}, 548 {0, 0}, 549 {0, 0}, 550 {0, 0} 551 } 552 }, 553 {1, 0, 2, 0, /* 0x41 */ 554 {{0, 0}, 555 {6, 6}, 556 {0, 0}, 557 {0, 0} 558 } 559 }, 560 {0, 0, 2, 0, /* 0x42 */ 561 {{1, 1}, 562 {6, 6}, 563 {0, 0}, 564 {0, 0} 565 } 566 }, 567 {1, 0, 2, 0, /* 0x43 */ 568 {{0, 1}, 569 {6, 6}, 570 {0, 0}, 571 {0, 0} 572 } 573 }, 574 {0, 0, 2, 0, /* 0x44 */ 575 {{2, 
2}, 576 {6, 6}, 577 {0, 0}, 578 {0, 0} 579 } 580 }, 581 {1, 0, 3, 0, /* 0x45 */ 582 {{0, 0}, 583 {2, 2}, 584 {6, 6}, 585 {0, 0} 586 } 587 }, 588 {0, 0, 2, 0, /* 0x46 */ 589 {{1, 2}, 590 {6, 6}, 591 {0, 0}, 592 {0, 0} 593 } 594 }, 595 {1, 0, 2, 0, /* 0x47 */ 596 {{0, 2}, 597 {6, 6}, 598 {0, 0}, 599 {0, 0} 600 } 601 }, 602 {0, 0, 2, 0, /* 0x48 */ 603 {{3, 3}, 604 {6, 6}, 605 {0, 0}, 606 {0, 0} 607 } 608 }, 609 {1, 0, 3, 0, /* 0x49 */ 610 {{0, 0}, 611 {3, 3}, 612 {6, 6}, 613 {0, 0} 614 } 615 }, 616 {0, 0, 3, 0, /* 0x4a */ 617 {{1, 1}, 618 {3, 3}, 619 {6, 6}, 620 {0, 0} 621 } 622 }, 623 {1, 0, 3, 0, /* 0x4b */ 624 {{0, 1}, 625 {3, 3}, 626 {6, 6}, 627 {0, 0} 628 } 629 }, 630 {0, 0, 2, 0, /* 0x4c */ 631 {{2, 3}, 632 {6, 6}, 633 {0, 0}, 634 {0, 0} 635 } 636 }, 637 {1, 0, 3, 0, /* 0x4d */ 638 {{0, 0}, 639 {2, 3}, 640 {6, 6}, 641 {0, 0} 642 } 643 }, 644 {0, 0, 2, 0, /* 0x4e */ 645 {{1, 3}, 646 {6, 6}, 647 {0, 0}, 648 {0, 0} 649 } 650 }, 651 {1, 0, 2, 0, /* 0x4f */ 652 {{0, 3}, 653 {6, 6}, 654 {0, 0}, 655 {0, 0} 656 } 657 }, 658 {0, 0, 2, 0, /* 0x50 */ 659 {{4, 4}, 660 {6, 6}, 661 {0, 0}, 662 {0, 0} 663 } 664 }, 665 {1, 0, 3, 0, /* 0x51 */ 666 {{0, 0}, 667 {4, 4}, 668 {6, 6}, 669 {0, 0} 670 } 671 }, 672 {0, 0, 3, 0, /* 0x52 */ 673 {{1, 1}, 674 {4, 4}, 675 {6, 6}, 676 {0, 0} 677 } 678 }, 679 {1, 0, 3, 0, /* 0x53 */ 680 {{0, 1}, 681 {4, 4}, 682 {6, 6}, 683 {0, 0} 684 } 685 }, 686 {0, 0, 3, 0, /* 0x54 */ 687 {{2, 2}, 688 {4, 4}, 689 {6, 6}, 690 {0, 0} 691 } 692 }, 693 {1, 0, 4, 0, /* 0x55 */ 694 {{0, 0}, 695 {2, 2}, 696 {4, 4}, 697 {6, 6} 698 } 699 }, 700 {0, 0, 3, 0, /* 0x56 */ 701 {{1, 2}, 702 {4, 4}, 703 {6, 6}, 704 {0, 0} 705 } 706 }, 707 {1, 0, 3, 0, /* 0x57 */ 708 {{0, 2}, 709 {4, 4}, 710 {6, 6}, 711 {0, 0} 712 } 713 }, 714 {0, 0, 2, 0, /* 0x58 */ 715 {{3, 4}, 716 {6, 6}, 717 {0, 0}, 718 {0, 0} 719 } 720 }, 721 {1, 0, 3, 0, /* 0x59 */ 722 {{0, 0}, 723 {3, 4}, 724 {6, 6}, 725 {0, 0} 726 } 727 }, 728 {0, 0, 3, 0, /* 0x5a */ 729 {{1, 1}, 730 {3, 4}, 731 {6, 6}, 732 {0, 0} 
733 } 734 }, 735 {1, 0, 3, 0, /* 0x5b */ 736 {{0, 1}, 737 {3, 4}, 738 {6, 6}, 739 {0, 0} 740 } 741 }, 742 {0, 0, 2, 0, /* 0x5c */ 743 {{2, 4}, 744 {6, 6}, 745 {0, 0}, 746 {0, 0} 747 } 748 }, 749 {1, 0, 3, 0, /* 0x5d */ 750 {{0, 0}, 751 {2, 4}, 752 {6, 6}, 753 {0, 0} 754 } 755 }, 756 {0, 0, 2, 0, /* 0x5e */ 757 {{1, 4}, 758 {6, 6}, 759 {0, 0}, 760 {0, 0} 761 } 762 }, 763 {1, 0, 2, 0, /* 0x5f */ 764 {{0, 4}, 765 {6, 6}, 766 {0, 0}, 767 {0, 0} 768 } 769 }, 770 {0, 0, 1, 0, /* 0x60 */ 771 {{5, 6}, 772 {0, 0}, 773 {0, 0}, 774 {0, 0} 775 } 776 }, 777 {1, 0, 2, 0, /* 0x61 */ 778 {{0, 0}, 779 {5, 6}, 780 {0, 0}, 781 {0, 0} 782 } 783 }, 784 {0, 0, 2, 0, /* 0x62 */ 785 {{1, 1}, 786 {5, 6}, 787 {0, 0}, 788 {0, 0} 789 } 790 }, 791 {1, 0, 2, 0, /* 0x63 */ 792 {{0, 1}, 793 {5, 6}, 794 {0, 0}, 795 {0, 0} 796 } 797 }, 798 {0, 0, 2, 0, /* 0x64 */ 799 {{2, 2}, 800 {5, 6}, 801 {0, 0}, 802 {0, 0} 803 } 804 }, 805 {1, 0, 3, 0, /* 0x65 */ 806 {{0, 0}, 807 {2, 2}, 808 {5, 6}, 809 {0, 0} 810 } 811 }, 812 {0, 0, 2, 0, /* 0x66 */ 813 {{1, 2}, 814 {5, 6}, 815 {0, 0}, 816 {0, 0} 817 } 818 }, 819 {1, 0, 2, 0, /* 0x67 */ 820 {{0, 2}, 821 {5, 6}, 822 {0, 0}, 823 {0, 0} 824 } 825 }, 826 {0, 0, 2, 0, /* 0x68 */ 827 {{3, 3}, 828 {5, 6}, 829 {0, 0}, 830 {0, 0} 831 } 832 }, 833 {1, 0, 3, 0, /* 0x69 */ 834 {{0, 0}, 835 {3, 3}, 836 {5, 6}, 837 {0, 0} 838 } 839 }, 840 {0, 0, 3, 0, /* 0x6a */ 841 {{1, 1}, 842 {3, 3}, 843 {5, 6}, 844 {0, 0} 845 } 846 }, 847 {1, 0, 3, 0, /* 0x6b */ 848 {{0, 1}, 849 {3, 3}, 850 {5, 6}, 851 {0, 0} 852 } 853 }, 854 {0, 0, 2, 0, /* 0x6c */ 855 {{2, 3}, 856 {5, 6}, 857 {0, 0}, 858 {0, 0} 859 } 860 }, 861 {1, 0, 3, 0, /* 0x6d */ 862 {{0, 0}, 863 {2, 3}, 864 {5, 6}, 865 {0, 0} 866 } 867 }, 868 {0, 0, 2, 0, /* 0x6e */ 869 {{1, 3}, 870 {5, 6}, 871 {0, 0}, 872 {0, 0} 873 } 874 }, 875 {1, 0, 2, 0, /* 0x6f */ 876 {{0, 3}, 877 {5, 6}, 878 {0, 0}, 879 {0, 0} 880 } 881 }, 882 {0, 0, 1, 0, /* 0x70 */ 883 {{4, 6}, 884 {0, 0}, 885 {0, 0}, 886 {0, 0} 887 } 888 }, 889 {1, 0, 2, 0, /* 0x71 */ 
890 {{0, 0}, 891 {4, 6}, 892 {0, 0}, 893 {0, 0} 894 } 895 }, 896 {0, 0, 2, 0, /* 0x72 */ 897 {{1, 1}, 898 {4, 6}, 899 {0, 0}, 900 {0, 0} 901 } 902 }, 903 {1, 0, 2, 0, /* 0x73 */ 904 {{0, 1}, 905 {4, 6}, 906 {0, 0}, 907 {0, 0} 908 } 909 }, 910 {0, 0, 2, 0, /* 0x74 */ 911 {{2, 2}, 912 {4, 6}, 913 {0, 0}, 914 {0, 0} 915 } 916 }, 917 {1, 0, 3, 0, /* 0x75 */ 918 {{0, 0}, 919 {2, 2}, 920 {4, 6}, 921 {0, 0} 922 } 923 }, 924 {0, 0, 2, 0, /* 0x76 */ 925 {{1, 2}, 926 {4, 6}, 927 {0, 0}, 928 {0, 0} 929 } 930 }, 931 {1, 0, 2, 0, /* 0x77 */ 932 {{0, 2}, 933 {4, 6}, 934 {0, 0}, 935 {0, 0} 936 } 937 }, 938 {0, 0, 1, 0, /* 0x78 */ 939 {{3, 6}, 940 {0, 0}, 941 {0, 0}, 942 {0, 0} 943 } 944 }, 945 {1, 0, 2, 0, /* 0x79 */ 946 {{0, 0}, 947 {3, 6}, 948 {0, 0}, 949 {0, 0} 950 } 951 }, 952 {0, 0, 2, 0, /* 0x7a */ 953 {{1, 1}, 954 {3, 6}, 955 {0, 0}, 956 {0, 0} 957 } 958 }, 959 {1, 0, 2, 0, /* 0x7b */ 960 {{0, 1}, 961 {3, 6}, 962 {0, 0}, 963 {0, 0} 964 } 965 }, 966 {0, 0, 1, 0, /* 0x7c */ 967 {{2, 6}, 968 {0, 0}, 969 {0, 0}, 970 {0, 0} 971 } 972 }, 973 {1, 0, 2, 0, /* 0x7d */ 974 {{0, 0}, 975 {2, 6}, 976 {0, 0}, 977 {0, 0} 978 } 979 }, 980 {0, 0, 1, 0, /* 0x7e */ 981 {{1, 6}, 982 {0, 0}, 983 {0, 0}, 984 {0, 0} 985 } 986 }, 987 {1, 0, 1, 0, /* 0x7f */ 988 {{0, 6}, 989 {0, 0}, 990 {0, 0}, 991 {0, 0} 992 } 993 }, 994 {0, 1, 1, 0, /* 0x80 */ 995 {{7, 7}, 996 {0, 0}, 997 {0, 0}, 998 {0, 0} 999 } 1000 }, 1001 {1, 1, 2, 0, /* 0x81 */ 1002 {{0, 0}, 1003 {7, 7}, 1004 {0, 0}, 1005 {0, 0} 1006 } 1007 }, 1008 {0, 1, 2, 0, /* 0x82 */ 1009 {{1, 1}, 1010 {7, 7}, 1011 {0, 0}, 1012 {0, 0} 1013 } 1014 }, 1015 {1, 1, 2, 0, /* 0x83 */ 1016 {{0, 1}, 1017 {7, 7}, 1018 {0, 0}, 1019 {0, 0} 1020 } 1021 }, 1022 {0, 1, 2, 0, /* 0x84 */ 1023 {{2, 2}, 1024 {7, 7}, 1025 {0, 0}, 1026 {0, 0} 1027 } 1028 }, 1029 {1, 1, 3, 0, /* 0x85 */ 1030 {{0, 0}, 1031 {2, 2}, 1032 {7, 7}, 1033 {0, 0} 1034 } 1035 }, 1036 {0, 1, 2, 0, /* 0x86 */ 1037 {{1, 2}, 1038 {7, 7}, 1039 {0, 0}, 1040 {0, 0} 1041 } 1042 }, 1043 {1, 1, 2, 0, /* 0x87 
*/ 1044 {{0, 2}, 1045 {7, 7}, 1046 {0, 0}, 1047 {0, 0} 1048 } 1049 }, 1050 {0, 1, 2, 0, /* 0x88 */ 1051 {{3, 3}, 1052 {7, 7}, 1053 {0, 0}, 1054 {0, 0} 1055 } 1056 }, 1057 {1, 1, 3, 0, /* 0x89 */ 1058 {{0, 0}, 1059 {3, 3}, 1060 {7, 7}, 1061 {0, 0} 1062 } 1063 }, 1064 {0, 1, 3, 0, /* 0x8a */ 1065 {{1, 1}, 1066 {3, 3}, 1067 {7, 7}, 1068 {0, 0} 1069 } 1070 }, 1071 {1, 1, 3, 0, /* 0x8b */ 1072 {{0, 1}, 1073 {3, 3}, 1074 {7, 7}, 1075 {0, 0} 1076 } 1077 }, 1078 {0, 1, 2, 0, /* 0x8c */ 1079 {{2, 3}, 1080 {7, 7}, 1081 {0, 0}, 1082 {0, 0} 1083 } 1084 }, 1085 {1, 1, 3, 0, /* 0x8d */ 1086 {{0, 0}, 1087 {2, 3}, 1088 {7, 7}, 1089 {0, 0} 1090 } 1091 }, 1092 {0, 1, 2, 0, /* 0x8e */ 1093 {{1, 3}, 1094 {7, 7}, 1095 {0, 0}, 1096 {0, 0} 1097 } 1098 }, 1099 {1, 1, 2, 0, /* 0x8f */ 1100 {{0, 3}, 1101 {7, 7}, 1102 {0, 0}, 1103 {0, 0} 1104 } 1105 }, 1106 {0, 1, 2, 0, /* 0x90 */ 1107 {{4, 4}, 1108 {7, 7}, 1109 {0, 0}, 1110 {0, 0} 1111 } 1112 }, 1113 {1, 1, 3, 0, /* 0x91 */ 1114 {{0, 0}, 1115 {4, 4}, 1116 {7, 7}, 1117 {0, 0} 1118 } 1119 }, 1120 {0, 1, 3, 0, /* 0x92 */ 1121 {{1, 1}, 1122 {4, 4}, 1123 {7, 7}, 1124 {0, 0} 1125 } 1126 }, 1127 {1, 1, 3, 0, /* 0x93 */ 1128 {{0, 1}, 1129 {4, 4}, 1130 {7, 7}, 1131 {0, 0} 1132 } 1133 }, 1134 {0, 1, 3, 0, /* 0x94 */ 1135 {{2, 2}, 1136 {4, 4}, 1137 {7, 7}, 1138 {0, 0} 1139 } 1140 }, 1141 {1, 1, 4, 0, /* 0x95 */ 1142 {{0, 0}, 1143 {2, 2}, 1144 {4, 4}, 1145 {7, 7} 1146 } 1147 }, 1148 {0, 1, 3, 0, /* 0x96 */ 1149 {{1, 2}, 1150 {4, 4}, 1151 {7, 7}, 1152 {0, 0} 1153 } 1154 }, 1155 {1, 1, 3, 0, /* 0x97 */ 1156 {{0, 2}, 1157 {4, 4}, 1158 {7, 7}, 1159 {0, 0} 1160 } 1161 }, 1162 {0, 1, 2, 0, /* 0x98 */ 1163 {{3, 4}, 1164 {7, 7}, 1165 {0, 0}, 1166 {0, 0} 1167 } 1168 }, 1169 {1, 1, 3, 0, /* 0x99 */ 1170 {{0, 0}, 1171 {3, 4}, 1172 {7, 7}, 1173 {0, 0} 1174 } 1175 }, 1176 {0, 1, 3, 0, /* 0x9a */ 1177 {{1, 1}, 1178 {3, 4}, 1179 {7, 7}, 1180 {0, 0} 1181 } 1182 }, 1183 {1, 1, 3, 0, /* 0x9b */ 1184 {{0, 1}, 1185 {3, 4}, 1186 {7, 7}, 1187 {0, 0} 1188 } 1189 }, 1190 {0, 
1, 2, 0, /* 0x9c */ 1191 {{2, 4}, 1192 {7, 7}, 1193 {0, 0}, 1194 {0, 0} 1195 } 1196 }, 1197 {1, 1, 3, 0, /* 0x9d */ 1198 {{0, 0}, 1199 {2, 4}, 1200 {7, 7}, 1201 {0, 0} 1202 } 1203 }, 1204 {0, 1, 2, 0, /* 0x9e */ 1205 {{1, 4}, 1206 {7, 7}, 1207 {0, 0}, 1208 {0, 0} 1209 } 1210 }, 1211 {1, 1, 2, 0, /* 0x9f */ 1212 {{0, 4}, 1213 {7, 7}, 1214 {0, 0}, 1215 {0, 0} 1216 } 1217 }, 1218 {0, 1, 2, 0, /* 0xa0 */ 1219 {{5, 5}, 1220 {7, 7}, 1221 {0, 0}, 1222 {0, 0} 1223 } 1224 }, 1225 {1, 1, 3, 0, /* 0xa1 */ 1226 {{0, 0}, 1227 {5, 5}, 1228 {7, 7}, 1229 {0, 0} 1230 } 1231 }, 1232 {0, 1, 3, 0, /* 0xa2 */ 1233 {{1, 1}, 1234 {5, 5}, 1235 {7, 7}, 1236 {0, 0} 1237 } 1238 }, 1239 {1, 1, 3, 0, /* 0xa3 */ 1240 {{0, 1}, 1241 {5, 5}, 1242 {7, 7}, 1243 {0, 0} 1244 } 1245 }, 1246 {0, 1, 3, 0, /* 0xa4 */ 1247 {{2, 2}, 1248 {5, 5}, 1249 {7, 7}, 1250 {0, 0} 1251 } 1252 }, 1253 {1, 1, 4, 0, /* 0xa5 */ 1254 {{0, 0}, 1255 {2, 2}, 1256 {5, 5}, 1257 {7, 7} 1258 } 1259 }, 1260 {0, 1, 3, 0, /* 0xa6 */ 1261 {{1, 2}, 1262 {5, 5}, 1263 {7, 7}, 1264 {0, 0} 1265 } 1266 }, 1267 {1, 1, 3, 0, /* 0xa7 */ 1268 {{0, 2}, 1269 {5, 5}, 1270 {7, 7}, 1271 {0, 0} 1272 } 1273 }, 1274 {0, 1, 3, 0, /* 0xa8 */ 1275 {{3, 3}, 1276 {5, 5}, 1277 {7, 7}, 1278 {0, 0} 1279 } 1280 }, 1281 {1, 1, 4, 0, /* 0xa9 */ 1282 {{0, 0}, 1283 {3, 3}, 1284 {5, 5}, 1285 {7, 7} 1286 } 1287 }, 1288 {0, 1, 4, 0, /* 0xaa */ 1289 {{1, 1}, 1290 {3, 3}, 1291 {5, 5}, 1292 {7, 7} 1293 } 1294 }, 1295 {1, 1, 4, 0, /* 0xab */ 1296 {{0, 1}, 1297 {3, 3}, 1298 {5, 5}, 1299 {7, 7} 1300 } 1301 }, 1302 {0, 1, 3, 0, /* 0xac */ 1303 {{2, 3}, 1304 {5, 5}, 1305 {7, 7}, 1306 {0, 0} 1307 } 1308 }, 1309 {1, 1, 4, 0, /* 0xad */ 1310 {{0, 0}, 1311 {2, 3}, 1312 {5, 5}, 1313 {7, 7} 1314 } 1315 }, 1316 {0, 1, 3, 0, /* 0xae */ 1317 {{1, 3}, 1318 {5, 5}, 1319 {7, 7}, 1320 {0, 0} 1321 } 1322 }, 1323 {1, 1, 3, 0, /* 0xaf */ 1324 {{0, 3}, 1325 {5, 5}, 1326 {7, 7}, 1327 {0, 0} 1328 } 1329 }, 1330 {0, 1, 2, 0, /* 0xb0 */ 1331 {{4, 5}, 1332 {7, 7}, 1333 {0, 0}, 1334 {0, 0} 1335 } 
1336 }, 1337 {1, 1, 3, 0, /* 0xb1 */ 1338 {{0, 0}, 1339 {4, 5}, 1340 {7, 7}, 1341 {0, 0} 1342 } 1343 }, 1344 {0, 1, 3, 0, /* 0xb2 */ 1345 {{1, 1}, 1346 {4, 5}, 1347 {7, 7}, 1348 {0, 0} 1349 } 1350 }, 1351 {1, 1, 3, 0, /* 0xb3 */ 1352 {{0, 1}, 1353 {4, 5}, 1354 {7, 7}, 1355 {0, 0} 1356 } 1357 }, 1358 {0, 1, 3, 0, /* 0xb4 */ 1359 {{2, 2}, 1360 {4, 5}, 1361 {7, 7}, 1362 {0, 0} 1363 } 1364 }, 1365 {1, 1, 4, 0, /* 0xb5 */ 1366 {{0, 0}, 1367 {2, 2}, 1368 {4, 5}, 1369 {7, 7} 1370 } 1371 }, 1372 {0, 1, 3, 0, /* 0xb6 */ 1373 {{1, 2}, 1374 {4, 5}, 1375 {7, 7}, 1376 {0, 0} 1377 } 1378 }, 1379 {1, 1, 3, 0, /* 0xb7 */ 1380 {{0, 2}, 1381 {4, 5}, 1382 {7, 7}, 1383 {0, 0} 1384 } 1385 }, 1386 {0, 1, 2, 0, /* 0xb8 */ 1387 {{3, 5}, 1388 {7, 7}, 1389 {0, 0}, 1390 {0, 0} 1391 } 1392 }, 1393 {1, 1, 3, 0, /* 0xb9 */ 1394 {{0, 0}, 1395 {3, 5}, 1396 {7, 7}, 1397 {0, 0} 1398 } 1399 }, 1400 {0, 1, 3, 0, /* 0xba */ 1401 {{1, 1}, 1402 {3, 5}, 1403 {7, 7}, 1404 {0, 0} 1405 } 1406 }, 1407 {1, 1, 3, 0, /* 0xbb */ 1408 {{0, 1}, 1409 {3, 5}, 1410 {7, 7}, 1411 {0, 0} 1412 } 1413 }, 1414 {0, 1, 2, 0, /* 0xbc */ 1415 {{2, 5}, 1416 {7, 7}, 1417 {0, 0}, 1418 {0, 0} 1419 } 1420 }, 1421 {1, 1, 3, 0, /* 0xbd */ 1422 {{0, 0}, 1423 {2, 5}, 1424 {7, 7}, 1425 {0, 0} 1426 } 1427 }, 1428 {0, 1, 2, 0, /* 0xbe */ 1429 {{1, 5}, 1430 {7, 7}, 1431 {0, 0}, 1432 {0, 0} 1433 } 1434 }, 1435 {1, 1, 2, 0, /* 0xbf */ 1436 {{0, 5}, 1437 {7, 7}, 1438 {0, 0}, 1439 {0, 0} 1440 } 1441 }, 1442 {0, 1, 1, 0, /* 0xc0 */ 1443 {{6, 7}, 1444 {0, 0}, 1445 {0, 0}, 1446 {0, 0} 1447 } 1448 }, 1449 {1, 1, 2, 0, /* 0xc1 */ 1450 {{0, 0}, 1451 {6, 7}, 1452 {0, 0}, 1453 {0, 0} 1454 } 1455 }, 1456 {0, 1, 2, 0, /* 0xc2 */ 1457 {{1, 1}, 1458 {6, 7}, 1459 {0, 0}, 1460 {0, 0} 1461 } 1462 }, 1463 {1, 1, 2, 0, /* 0xc3 */ 1464 {{0, 1}, 1465 {6, 7}, 1466 {0, 0}, 1467 {0, 0} 1468 } 1469 }, 1470 {0, 1, 2, 0, /* 0xc4 */ 1471 {{2, 2}, 1472 {6, 7}, 1473 {0, 0}, 1474 {0, 0} 1475 } 1476 }, 1477 {1, 1, 3, 0, /* 0xc5 */ 1478 {{0, 0}, 1479 {2, 2}, 1480 {6, 7}, 
1481 {0, 0} 1482 } 1483 }, 1484 {0, 1, 2, 0, /* 0xc6 */ 1485 {{1, 2}, 1486 {6, 7}, 1487 {0, 0}, 1488 {0, 0} 1489 } 1490 }, 1491 {1, 1, 2, 0, /* 0xc7 */ 1492 {{0, 2}, 1493 {6, 7}, 1494 {0, 0}, 1495 {0, 0} 1496 } 1497 }, 1498 {0, 1, 2, 0, /* 0xc8 */ 1499 {{3, 3}, 1500 {6, 7}, 1501 {0, 0}, 1502 {0, 0} 1503 } 1504 }, 1505 {1, 1, 3, 0, /* 0xc9 */ 1506 {{0, 0}, 1507 {3, 3}, 1508 {6, 7}, 1509 {0, 0} 1510 } 1511 }, 1512 {0, 1, 3, 0, /* 0xca */ 1513 {{1, 1}, 1514 {3, 3}, 1515 {6, 7}, 1516 {0, 0} 1517 } 1518 }, 1519 {1, 1, 3, 0, /* 0xcb */ 1520 {{0, 1}, 1521 {3, 3}, 1522 {6, 7}, 1523 {0, 0} 1524 } 1525 }, 1526 {0, 1, 2, 0, /* 0xcc */ 1527 {{2, 3}, 1528 {6, 7}, 1529 {0, 0}, 1530 {0, 0} 1531 } 1532 }, 1533 {1, 1, 3, 0, /* 0xcd */ 1534 {{0, 0}, 1535 {2, 3}, 1536 {6, 7}, 1537 {0, 0} 1538 } 1539 }, 1540 {0, 1, 2, 0, /* 0xce */ 1541 {{1, 3}, 1542 {6, 7}, 1543 {0, 0}, 1544 {0, 0} 1545 } 1546 }, 1547 {1, 1, 2, 0, /* 0xcf */ 1548 {{0, 3}, 1549 {6, 7}, 1550 {0, 0}, 1551 {0, 0} 1552 } 1553 }, 1554 {0, 1, 2, 0, /* 0xd0 */ 1555 {{4, 4}, 1556 {6, 7}, 1557 {0, 0}, 1558 {0, 0} 1559 } 1560 }, 1561 {1, 1, 3, 0, /* 0xd1 */ 1562 {{0, 0}, 1563 {4, 4}, 1564 {6, 7}, 1565 {0, 0} 1566 } 1567 }, 1568 {0, 1, 3, 0, /* 0xd2 */ 1569 {{1, 1}, 1570 {4, 4}, 1571 {6, 7}, 1572 {0, 0} 1573 } 1574 }, 1575 {1, 1, 3, 0, /* 0xd3 */ 1576 {{0, 1}, 1577 {4, 4}, 1578 {6, 7}, 1579 {0, 0} 1580 } 1581 }, 1582 {0, 1, 3, 0, /* 0xd4 */ 1583 {{2, 2}, 1584 {4, 4}, 1585 {6, 7}, 1586 {0, 0} 1587 } 1588 }, 1589 {1, 1, 4, 0, /* 0xd5 */ 1590 {{0, 0}, 1591 {2, 2}, 1592 {4, 4}, 1593 {6, 7} 1594 } 1595 }, 1596 {0, 1, 3, 0, /* 0xd6 */ 1597 {{1, 2}, 1598 {4, 4}, 1599 {6, 7}, 1600 {0, 0} 1601 } 1602 }, 1603 {1, 1, 3, 0, /* 0xd7 */ 1604 {{0, 2}, 1605 {4, 4}, 1606 {6, 7}, 1607 {0, 0} 1608 } 1609 }, 1610 {0, 1, 2, 0, /* 0xd8 */ 1611 {{3, 4}, 1612 {6, 7}, 1613 {0, 0}, 1614 {0, 0} 1615 } 1616 }, 1617 {1, 1, 3, 0, /* 0xd9 */ 1618 {{0, 0}, 1619 {3, 4}, 1620 {6, 7}, 1621 {0, 0} 1622 } 1623 }, 1624 {0, 1, 3, 0, /* 0xda */ 1625 {{1, 1}, 1626 {3, 
4}, 1627 {6, 7}, 1628 {0, 0} 1629 } 1630 }, 1631 {1, 1, 3, 0, /* 0xdb */ 1632 {{0, 1}, 1633 {3, 4}, 1634 {6, 7}, 1635 {0, 0} 1636 } 1637 }, 1638 {0, 1, 2, 0, /* 0xdc */ 1639 {{2, 4}, 1640 {6, 7}, 1641 {0, 0}, 1642 {0, 0} 1643 } 1644 }, 1645 {1, 1, 3, 0, /* 0xdd */ 1646 {{0, 0}, 1647 {2, 4}, 1648 {6, 7}, 1649 {0, 0} 1650 } 1651 }, 1652 {0, 1, 2, 0, /* 0xde */ 1653 {{1, 4}, 1654 {6, 7}, 1655 {0, 0}, 1656 {0, 0} 1657 } 1658 }, 1659 {1, 1, 2, 0, /* 0xdf */ 1660 {{0, 4}, 1661 {6, 7}, 1662 {0, 0}, 1663 {0, 0} 1664 } 1665 }, 1666 {0, 1, 1, 0, /* 0xe0 */ 1667 {{5, 7}, 1668 {0, 0}, 1669 {0, 0}, 1670 {0, 0} 1671 } 1672 }, 1673 {1, 1, 2, 0, /* 0xe1 */ 1674 {{0, 0}, 1675 {5, 7}, 1676 {0, 0}, 1677 {0, 0} 1678 } 1679 }, 1680 {0, 1, 2, 0, /* 0xe2 */ 1681 {{1, 1}, 1682 {5, 7}, 1683 {0, 0}, 1684 {0, 0} 1685 } 1686 }, 1687 {1, 1, 2, 0, /* 0xe3 */ 1688 {{0, 1}, 1689 {5, 7}, 1690 {0, 0}, 1691 {0, 0} 1692 } 1693 }, 1694 {0, 1, 2, 0, /* 0xe4 */ 1695 {{2, 2}, 1696 {5, 7}, 1697 {0, 0}, 1698 {0, 0} 1699 } 1700 }, 1701 {1, 1, 3, 0, /* 0xe5 */ 1702 {{0, 0}, 1703 {2, 2}, 1704 {5, 7}, 1705 {0, 0} 1706 } 1707 }, 1708 {0, 1, 2, 0, /* 0xe6 */ 1709 {{1, 2}, 1710 {5, 7}, 1711 {0, 0}, 1712 {0, 0} 1713 } 1714 }, 1715 {1, 1, 2, 0, /* 0xe7 */ 1716 {{0, 2}, 1717 {5, 7}, 1718 {0, 0}, 1719 {0, 0} 1720 } 1721 }, 1722 {0, 1, 2, 0, /* 0xe8 */ 1723 {{3, 3}, 1724 {5, 7}, 1725 {0, 0}, 1726 {0, 0} 1727 } 1728 }, 1729 {1, 1, 3, 0, /* 0xe9 */ 1730 {{0, 0}, 1731 {3, 3}, 1732 {5, 7}, 1733 {0, 0} 1734 } 1735 }, 1736 {0, 1, 3, 0, /* 0xea */ 1737 {{1, 1}, 1738 {3, 3}, 1739 {5, 7}, 1740 {0, 0} 1741 } 1742 }, 1743 {1, 1, 3, 0, /* 0xeb */ 1744 {{0, 1}, 1745 {3, 3}, 1746 {5, 7}, 1747 {0, 0} 1748 } 1749 }, 1750 {0, 1, 2, 0, /* 0xec */ 1751 {{2, 3}, 1752 {5, 7}, 1753 {0, 0}, 1754 {0, 0} 1755 } 1756 }, 1757 {1, 1, 3, 0, /* 0xed */ 1758 {{0, 0}, 1759 {2, 3}, 1760 {5, 7}, 1761 {0, 0} 1762 } 1763 }, 1764 {0, 1, 2, 0, /* 0xee */ 1765 {{1, 3}, 1766 {5, 7}, 1767 {0, 0}, 1768 {0, 0} 1769 } 1770 }, 1771 {1, 1, 2, 0, /* 0xef */ 1772 
{{0, 3}, 1773 {5, 7}, 1774 {0, 0}, 1775 {0, 0} 1776 } 1777 }, 1778 {0, 1, 1, 0, /* 0xf0 */ 1779 {{4, 7}, 1780 {0, 0}, 1781 {0, 0}, 1782 {0, 0} 1783 } 1784 }, 1785 {1, 1, 2, 0, /* 0xf1 */ 1786 {{0, 0}, 1787 {4, 7}, 1788 {0, 0}, 1789 {0, 0} 1790 } 1791 }, 1792 {0, 1, 2, 0, /* 0xf2 */ 1793 {{1, 1}, 1794 {4, 7}, 1795 {0, 0}, 1796 {0, 0} 1797 } 1798 }, 1799 {1, 1, 2, 0, /* 0xf3 */ 1800 {{0, 1}, 1801 {4, 7}, 1802 {0, 0}, 1803 {0, 0} 1804 } 1805 }, 1806 {0, 1, 2, 0, /* 0xf4 */ 1807 {{2, 2}, 1808 {4, 7}, 1809 {0, 0}, 1810 {0, 0} 1811 } 1812 }, 1813 {1, 1, 3, 0, /* 0xf5 */ 1814 {{0, 0}, 1815 {2, 2}, 1816 {4, 7}, 1817 {0, 0} 1818 } 1819 }, 1820 {0, 1, 2, 0, /* 0xf6 */ 1821 {{1, 2}, 1822 {4, 7}, 1823 {0, 0}, 1824 {0, 0} 1825 } 1826 }, 1827 {1, 1, 2, 0, /* 0xf7 */ 1828 {{0, 2}, 1829 {4, 7}, 1830 {0, 0}, 1831 {0, 0} 1832 } 1833 }, 1834 {0, 1, 1, 0, /* 0xf8 */ 1835 {{3, 7}, 1836 {0, 0}, 1837 {0, 0}, 1838 {0, 0} 1839 } 1840 }, 1841 {1, 1, 2, 0, /* 0xf9 */ 1842 {{0, 0}, 1843 {3, 7}, 1844 {0, 0}, 1845 {0, 0} 1846 } 1847 }, 1848 {0, 1, 2, 0, /* 0xfa */ 1849 {{1, 1}, 1850 {3, 7}, 1851 {0, 0}, 1852 {0, 0} 1853 } 1854 }, 1855 {1, 1, 2, 0, /* 0xfb */ 1856 {{0, 1}, 1857 {3, 7}, 1858 {0, 0}, 1859 {0, 0} 1860 } 1861 }, 1862 {0, 1, 1, 0, /* 0xfc */ 1863 {{2, 7}, 1864 {0, 0}, 1865 {0, 0}, 1866 {0, 0} 1867 } 1868 }, 1869 {1, 1, 2, 0, /* 0xfd */ 1870 {{0, 0}, 1871 {2, 7}, 1872 {0, 0}, 1873 {0, 0} 1874 } 1875 }, 1876 {0, 1, 1, 0, /* 0xfe */ 1877 {{1, 7}, 1878 {0, 0}, 1879 {0, 0}, 1880 {0, 0} 1881 } 1882 }, 1883 {1, 1, 1, 0, /* 0xff */ 1884 {{0, 7}, 1885 {0, 0}, 1886 {0, 0}, 1887 {0, 0} 1888 } 1889 } 1890 }; 1891 1892 int 1893 sctp_is_address_in_scope(struct sctp_ifa *ifa, 1894 struct sctp_scoping *scope, 1895 int do_update) 1896 { 1897 if ((scope->loopback_scope == 0) && 1898 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) { 1899 /* 1900 * skip loopback if not in scope * 1901 */ 1902 return (0); 1903 } 1904 switch (ifa->address.sa.sa_family) { 1905 #ifdef INET 1906 case AF_INET: 1907 if 
(scope->ipv4_addr_legal) { 1908 struct sockaddr_in *sin; 1909 1910 sin = &ifa->address.sin; 1911 if (sin->sin_addr.s_addr == 0) { 1912 /* not in scope , unspecified */ 1913 return (0); 1914 } 1915 if ((scope->ipv4_local_scope == 0) && 1916 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1917 /* private address not in scope */ 1918 return (0); 1919 } 1920 } else { 1921 return (0); 1922 } 1923 break; 1924 #endif 1925 #ifdef INET6 1926 case AF_INET6: 1927 if (scope->ipv6_addr_legal) { 1928 struct sockaddr_in6 *sin6; 1929 1930 /* Must update the flags, bummer, which 1931 * means any IFA locks must now be applied HERE <-> 1932 */ 1933 if (do_update) { 1934 sctp_gather_internal_ifa_flags(ifa); 1935 } 1936 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1937 return (0); 1938 } 1939 /* ok to use deprecated addresses? */ 1940 sin6 = &ifa->address.sin6; 1941 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1942 /* skip unspecified addresses */ 1943 return (0); 1944 } 1945 if ( /* (local_scope == 0) && */ 1946 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) { 1947 return (0); 1948 } 1949 if ((scope->site_scope == 0) && 1950 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1951 return (0); 1952 } 1953 } else { 1954 return (0); 1955 } 1956 break; 1957 #endif 1958 #if defined(__Userspace__) 1959 case AF_CONN: 1960 if (!scope->conn_addr_legal) { 1961 return (0); 1962 } 1963 break; 1964 #endif 1965 default: 1966 return (0); 1967 } 1968 return (1); 1969 } 1970 1971 static struct mbuf * 1972 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len) 1973 { 1974 #if defined(INET) || defined(INET6) 1975 struct sctp_paramhdr *paramh; 1976 struct mbuf *mret; 1977 uint16_t plen; 1978 #endif 1979 1980 switch (ifa->address.sa.sa_family) { 1981 #ifdef INET 1982 case AF_INET: 1983 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param); 1984 break; 1985 #endif 1986 #ifdef INET6 1987 case AF_INET6: 1988 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param); 1989 break; 1990 #endif 1991 
default: 1992 return (m); 1993 } 1994 #if defined(INET) || defined(INET6) 1995 if (M_TRAILINGSPACE(m) >= plen) { 1996 /* easy side we just drop it on the end */ 1997 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m))); 1998 mret = m; 1999 } else { 2000 /* Need more space */ 2001 mret = m; 2002 while (SCTP_BUF_NEXT(mret) != NULL) { 2003 mret = SCTP_BUF_NEXT(mret); 2004 } 2005 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA); 2006 if (SCTP_BUF_NEXT(mret) == NULL) { 2007 /* We are hosed, can't add more addresses */ 2008 return (m); 2009 } 2010 mret = SCTP_BUF_NEXT(mret); 2011 paramh = mtod(mret, struct sctp_paramhdr *); 2012 } 2013 /* now add the parameter */ 2014 switch (ifa->address.sa.sa_family) { 2015 #ifdef INET 2016 case AF_INET: 2017 { 2018 struct sctp_ipv4addr_param *ipv4p; 2019 struct sockaddr_in *sin; 2020 2021 sin = &ifa->address.sin; 2022 ipv4p = (struct sctp_ipv4addr_param *)paramh; 2023 paramh->param_type = htons(SCTP_IPV4_ADDRESS); 2024 paramh->param_length = htons(plen); 2025 ipv4p->addr = sin->sin_addr.s_addr; 2026 SCTP_BUF_LEN(mret) += plen; 2027 break; 2028 } 2029 #endif 2030 #ifdef INET6 2031 case AF_INET6: 2032 { 2033 struct sctp_ipv6addr_param *ipv6p; 2034 struct sockaddr_in6 *sin6; 2035 2036 sin6 = &ifa->address.sin6; 2037 ipv6p = (struct sctp_ipv6addr_param *)paramh; 2038 paramh->param_type = htons(SCTP_IPV6_ADDRESS); 2039 paramh->param_length = htons(plen); 2040 memcpy(ipv6p->addr, &sin6->sin6_addr, 2041 sizeof(ipv6p->addr)); 2042 #if defined(SCTP_EMBEDDED_V6_SCOPE) 2043 /* clear embedded scope in the address */ 2044 in6_clearscope((struct in6_addr *)ipv6p->addr); 2045 #endif 2046 SCTP_BUF_LEN(mret) += plen; 2047 break; 2048 } 2049 #endif 2050 default: 2051 return (m); 2052 } 2053 if (len != NULL) { 2054 *len += plen; 2055 } 2056 return (mret); 2057 #endif 2058 } 2059 2060 struct mbuf * 2061 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2062 struct sctp_scoping *scope, 2063 
    struct mbuf *m_at, int cnt_inits_to,
    uint16_t *padding_len, uint16_t *chunk_len)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	/* All address lists are walked under the global address read
	 * lock, which is held until just before return.
	 */
	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		/* First pass: count the usable addresses so we know
		 * whether to list them at all and whether to limit.
		 */
		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
				/* skip addresses not visible to this jail */
				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
				    &sctp_ifap->address.sin.sin_addr) != 0)) {
					continue;
				}
#endif
#ifdef INET6
				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
				    &sctp_ifap->address.sin6.sin6_addr) != 0)) {
					continue;
				}
#endif
#endif
				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
					continue;
				}
#if defined(__Userspace__)
				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
	skip_count:
		/* Only list addresses when we have more than one (see the
		 * NAT note below for the bound-specific case).
		 */
		if (cnt > 1) {
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
					    &sctp_ifap->address.sin.sin_addr) != 0)) {
						continue;
					}
#endif
#ifdef INET6
					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
					    &sctp_ifap->address.sin6.sin6_addr) != 0)) {
						continue;
					}
#endif
#endif
					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
						continue;
					}
#if defined(__Userspace__)
					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
						continue;
					}
#endif
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope, 0) == 0) {
						continue;
					}
					/* flush any pending padding before
					 * appending another parameter
					 */
					if ((chunk_len != NULL) &&
					    (padding_len != NULL) &&
					    (*padding_len > 0)) {
						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
						SCTP_BUF_LEN(m_at) += *padding_len;
						*chunk_len += *padding_len;
						*padding_len = 0;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/* two from each address */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/* Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/* Address being deleted on this ep
				 * don't list.
				 */
				continue;
			}
#if defined(__Userspace__)
			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
				continue;
			}
#endif
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			cnt = cnt_inits_to;
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
					continue;
				}
#if defined(__Userspace__)
				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				if (sctp_is_address_in_scope(laddr->ifa,
				    scope, 0) == 0) {
					continue;
				}
				if ((chunk_len != NULL) &&
				    (padding_len != NULL) &&
				    (*padding_len > 0)) {
					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
					SCTP_BUF_LEN(m_at) += *padding_len;
					*chunk_len += *padding_len;
					*padding_len = 0;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (m_at);
}

/*
 * Decide whether 'ifa' is a *preferred* source for a destination with the
 * given loop/private properties; returns 'ifa' when preferred, NULL
 * otherwise.  See the scope table in the comment below.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;
	/* dest_is_priv is true if destination is a private address */
	/* dest_is_loop is true if destination is a loopback addresses */

	/**
	 * Here we determine if its a preferred address.
A preferred address
	 * means it is the same scope or higher scope then the destination.
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *    src    |  dest | result
	 * ----------------------------------------
	 *     L     |   L   |   yes
	 * -----------------------------------------
	 *     P     |   L   |   yes-v4 no-v6
	 * -----------------------------------------
	 *     G     |   L   |   yes-v4 no-v6
	 * -----------------------------------------
	 *     L     |   P   |   no
	 * -----------------------------------------
	 *     P     |   P   |   yes
	 * -----------------------------------------
	 *     G     |   P   |   no
	 * -----------------------------------------
	 *     L     |   G   |   no
	 * -----------------------------------------
	 *     P     |   G   |   no
	 * -----------------------------------------
	 *     G     |   G   |   yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no lets not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		/* v6 private (link-local) never pairs with a loop dest */
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
#endif
	/* Now that we know what is what, implement or table
	 * this could in theory be done slicker (it used to be), but this
	 * is straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* its a preferred address */
	return (ifa);
}

/*
 * Decide whether 'ifa' is an *acceptable* source for the destination.
 * Weaker than "preferred": NAT traversal is allowed for, so a private
 * source with a global destination (and vice versa) passes.  Returns
 * 'ifa' when acceptable, NULL otherwise.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/**
	 * Here we determine if its a acceptable address. A acceptable
	 * address means it is the same scope or higher scope but we can
	 * allow for NAT which means its ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src    |  dest  |  result
	 * -----------------------------------------
	 *   L     |   L    |    yes
	 * -----------------------------------------
	 *   P     |   L    |    yes-v4 no-v6
	 * -----------------------------------------
	 *   G     |   L    |    yes
	 * -----------------------------------------
	 *   L     |   P    |    no
	 * -----------------------------------------
	 *   P     |   P    |    yes
	 * -----------------------------------------
	 *   G     |   P    |    yes - May not work
	 * -----------------------------------------
	 *   L     |   G    |    no
	 * -----------------------------------------
	 *   P     |   G    |    yes - May not work
	 * -----------------------------------------
	 *   G     |   G    |    yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget non matching family */
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
		    ifa->address.sa.sa_family, fam);
		return (NULL);
	}
	/* Ok the address may be ok */
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
	    dest_is_loop, dest_is_priv);
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		dest_is_global = 1;
	}
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop)
				return (NULL);
		}
	}
#endif
	/*
	 * Now that we know what is what, implement our table.
	 * This could in theory be done slicker (it used to be), but this
	 * is straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
	    ifa->src_is_loop,
	    dest_is_priv);
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
	    ifa->src_is_loop,
	    dest_is_global);
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
	/* its an acceptable address */
	return (ifa);
}

/*
 * Return 1 iff 'ifa' is on the association's restricted-address list.
 * With no TCB there are no restrictions and 0 is returned.
 */
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __func__);
			continue;
		}
		if (laddr->ifa == ifa) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}

/*
 * Return 1 iff 'ifa' is bound to the endpoint 'inp' with no pending
 * action (laddr->action == 0); comparison is by pointer identity.
 */
int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (ifa == NULL)
		return (0);
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __func__);
			continue;
		}
		if ((laddr->ifa == ifa) && laddr->action == 0)
			/* same pointer */
			return (1);
	}
	return (0);
}

/*
 * Source-address selection for a bound-specific endpoint with no
 * association: first try a preferred address on the emitting interface
 * that is bound to the endpoint, then rotate through the endpoint's
 * bound addresses (via inp->next_addr_touse) looking for a preferred,
 * then an acceptable, address.  A returned ifa has its refcount bumped.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int
 resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a
	 * preferred address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
			/* skip addresses not visible to this jail */
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses.
	 * We can't get one on the emitting interface so let's find first
	 * a preferred one. If not that an acceptable one otherwise...
	 * we return NULL.
	 */
	/* Rotate through the bound addresses starting at the rover
	 * (inp->next_addr_touse); wrap once if we started mid-list.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again;
	}

	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}

	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}

	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}

/*
 * Source-address selection for a bound-specific endpoint WITH an
 * association: like the inp variant above, but restricted/pending
 * addresses of the association are skipped and the rover is kept in
 * stcb->asoc.last_used_address.  A returned ifa has its refcount bumped.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t
 dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list?  If so,
	 * we want that one. First we look for a preferred. Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
			/* skip addresses not visible to this jail */
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin.sin_addr) != 0)) {
				continue;
			}
#endif
#ifdef INET6
			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
			    &sctp_ifa->address.sin6.sin6_addr) != 0)) {
				continue;
			}
#endif
#endif
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * if we can't find one like that then we must look at all
	 * addresses bound to pick one at first preferable then
	 * secondly acceptable.
	 */
	/* Rotate through the bound list from the association's rover
	 * (asoc.last_used_address), wrapping once if needed.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}

/*
 * For a bound-all endpoint: return the 'addr_wanted'-th (0-based)
 * preferred, eligible address on interface 'ifn' for the destination
 * described by dest_is_loop/dest_is_priv/fam, or NULL when fewer than
 * addr_wanted + 1 addresses qualify.  NOTE: unlike most selectors here,
 * the caller bumps the refcount of the returned ifa.
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    struct sctp_inpcb *inp,
#else
    struct sctp_inpcb *inp SCTP_UNUSED,
#endif
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t *ro)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
		(void)sa6_recoverscope(&sin6);
#else
		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
#endif /* SCTP_KAME */
	}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* INET6 */
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
#ifdef INET
		/* skip addresses not visible to this jail */
		if ((ifa->address.sa.sa_family == AF_INET) &&
		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
		    &ifa->address.sin.sin_addr) != 0)) {
			continue;
		}
#endif
#ifdef INET6
		if ((ifa->address.sa.sa_family == AF_INET6) &&
		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
		    &ifa->address.sin6.sin6_addr) != 0)) {
			continue;
		}
#endif
#endif
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
(non_asoc_addr_ok == 0)) 2876 continue; 2877 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 2878 dest_is_priv, fam); 2879 if (sifa == NULL) 2880 continue; 2881 #ifdef INET6 2882 if (fam == AF_INET6 && 2883 dest_is_loop && 2884 sifa->src_is_loop && sifa->src_is_priv) { 2885 /* don't allow fe80::1 to be a src on loop ::1, we don't list it 2886 * to the peer so we will get an abort. 2887 */ 2888 continue; 2889 } 2890 #ifdef SCTP_EMBEDDED_V6_SCOPE 2891 if (fam == AF_INET6 && 2892 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) && 2893 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { 2894 /* link-local <-> link-local must belong to the same scope. */ 2895 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6)); 2896 #ifdef SCTP_KAME 2897 (void)sa6_recoverscope(&lsa6); 2898 #else 2899 (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL); 2900 #endif /* SCTP_KAME */ 2901 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) { 2902 continue; 2903 } 2904 } 2905 #endif /* SCTP_EMBEDDED_V6_SCOPE */ 2906 #endif /* INET6 */ 2907 2908 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__) 2909 /* Check if the IPv6 address matches to next-hop. 2910 In the mobile case, old IPv6 address may be not deleted 2911 from the interface. Then, the interface has previous and 2912 new addresses. We should use one corresponding to the 2913 next-hop. 
(by micchie) 2914 */ 2915 #ifdef INET6 2916 if (stcb && fam == AF_INET6 && 2917 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2918 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro) == 0) { 2919 continue; 2920 } 2921 } 2922 #endif 2923 #ifdef INET 2924 /* Avoid topologically incorrect IPv4 address */ 2925 if (stcb && fam == AF_INET && 2926 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 2927 if (sctp_v4src_match_nexthop(sifa, ro) == 0) { 2928 continue; 2929 } 2930 } 2931 #endif 2932 #endif 2933 if (stcb) { 2934 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 2935 continue; 2936 } 2937 if (((non_asoc_addr_ok == 0) && 2938 (sctp_is_addr_restricted(stcb, sifa))) || 2939 (non_asoc_addr_ok && 2940 (sctp_is_addr_restricted(stcb, sifa)) && 2941 (!sctp_is_addr_pending(stcb, sifa)))) { 2942 /* 2943 * It is restricted for some reason.. 2944 * probably not yet added. 2945 */ 2946 continue; 2947 } 2948 } 2949 if (num_eligible_addr >= addr_wanted) { 2950 return (sifa); 2951 } 2952 num_eligible_addr++; 2953 } 2954 return (NULL); 2955 } 2956 2957 static int 2958 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, 2959 #if defined(__FreeBSD__) && !defined(__Userspace__) 2960 struct sctp_inpcb *inp, 2961 #else 2962 struct sctp_inpcb *inp SCTP_UNUSED, 2963 #endif 2964 struct sctp_tcb *stcb, 2965 int non_asoc_addr_ok, 2966 uint8_t dest_is_loop, 2967 uint8_t dest_is_priv, 2968 sa_family_t fam) 2969 { 2970 struct sctp_ifa *ifa, *sifa; 2971 int num_eligible_addr = 0; 2972 2973 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 2974 #if defined(__FreeBSD__) && !defined(__Userspace__) 2975 #ifdef INET 2976 if ((ifa->address.sa.sa_family == AF_INET) && 2977 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 2978 &ifa->address.sin.sin_addr) != 0)) { 2979 continue; 2980 } 2981 #endif 2982 #ifdef INET6 2983 if ((ifa->address.sa.sa_family == AF_INET6) && 2984 (stcb != NULL) && 2985 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 2986 
&ifa->address.sin6.sin6_addr) != 0)) { 2987 continue; 2988 } 2989 #endif 2990 #endif 2991 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 2992 (non_asoc_addr_ok == 0)) { 2993 continue; 2994 } 2995 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 2996 dest_is_priv, fam); 2997 if (sifa == NULL) { 2998 continue; 2999 } 3000 if (stcb) { 3001 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 3002 continue; 3003 } 3004 if (((non_asoc_addr_ok == 0) && 3005 (sctp_is_addr_restricted(stcb, sifa))) || 3006 (non_asoc_addr_ok && 3007 (sctp_is_addr_restricted(stcb, sifa)) && 3008 (!sctp_is_addr_pending(stcb, sifa)))) { 3009 /* 3010 * It is restricted for some reason.. 3011 * probably not yet added. 3012 */ 3013 continue; 3014 } 3015 } 3016 num_eligible_addr++; 3017 } 3018 return (num_eligible_addr); 3019 } 3020 3021 static struct sctp_ifa * 3022 sctp_choose_boundall(struct sctp_inpcb *inp, 3023 struct sctp_tcb *stcb, 3024 struct sctp_nets *net, 3025 sctp_route_t *ro, 3026 uint32_t vrf_id, 3027 uint8_t dest_is_priv, 3028 uint8_t dest_is_loop, 3029 int non_asoc_addr_ok, 3030 sa_family_t fam) 3031 { 3032 int cur_addr_num = 0, num_preferred = 0; 3033 void *ifn; 3034 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; 3035 struct sctp_ifa *sctp_ifa, *sifa; 3036 uint32_t ifn_index; 3037 struct sctp_vrf *vrf; 3038 #ifdef INET 3039 int retried = 0; 3040 #endif 3041 3042 /*- 3043 * For boundall we can use any address in the association. 3044 * If non_asoc_addr_ok is set we can use any address (at least in 3045 * theory). So we look for preferred addresses first. If we find one, 3046 * we use it. Otherwise we next try to get an address on the 3047 * interface, which we should be able to do (unless non_asoc_addr_ok 3048 * is false and we are routed out that way). In these cases where we 3049 * can't use the address of the interface we go through all the 3050 * ifn's looking for an address we can use and fill that in. 
Punting 3051 * means we send back address 0, which will probably cause problems 3052 * actually since then IP will fill in the address of the route ifn, 3053 * which means we probably already rejected it.. i.e. here comes an 3054 * abort :-<. 3055 */ 3056 vrf = sctp_find_vrf(vrf_id); 3057 if (vrf == NULL) 3058 return (NULL); 3059 3060 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 3061 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 3062 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index); 3063 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); 3064 if (sctp_ifn == NULL) { 3065 /* ?? We don't have this guy ?? */ 3066 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n"); 3067 goto bound_all_plan_b; 3068 } 3069 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n", 3070 ifn_index, sctp_ifn->ifn_name); 3071 3072 if (net) { 3073 cur_addr_num = net->indx_of_eligible_next_to_use; 3074 } 3075 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, 3076 inp, stcb, 3077 non_asoc_addr_ok, 3078 dest_is_loop, 3079 dest_is_priv, fam); 3080 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", 3081 num_preferred, sctp_ifn->ifn_name); 3082 if (num_preferred == 0) { 3083 /* 3084 * no eligible addresses, we must use some other interface 3085 * address if we can find one. 3086 */ 3087 goto bound_all_plan_b; 3088 } 3089 /* 3090 * Ok we have num_eligible_addr set with how many we can use, this 3091 * may vary from call to call due to addresses being deprecated 3092 * etc.. 3093 */ 3094 if (cur_addr_num >= num_preferred) { 3095 cur_addr_num = 0; 3096 } 3097 /* 3098 * select the nth address from the list (where cur_addr_num is the 3099 * nth) and 0 is the first one, 1 is the second one etc... 
3100 */ 3101 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); 3102 3103 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, 3104 dest_is_priv, cur_addr_num, fam, ro); 3105 3106 /* if sctp_ifa is NULL something changed??, fall to plan b. */ 3107 if (sctp_ifa) { 3108 atomic_add_int(&sctp_ifa->refcount, 1); 3109 if (net) { 3110 /* save off where the next one we will want */ 3111 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 3112 } 3113 return (sctp_ifa); 3114 } 3115 /* 3116 * plan_b: Look at all interfaces and find a preferred address. If 3117 * no preferred fall through to plan_c. 3118 */ 3119 bound_all_plan_b: 3120 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); 3121 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3122 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", 3123 sctp_ifn->ifn_name); 3124 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3125 /* wrong base scope */ 3126 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); 3127 continue; 3128 } 3129 if ((sctp_ifn == looked_at) && looked_at) { 3130 /* already looked at this guy */ 3131 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); 3132 continue; 3133 } 3134 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, 3135 dest_is_loop, dest_is_priv, fam); 3136 SCTPDBG(SCTP_DEBUG_OUTPUT2, 3137 "Found ifn:%p %d preferred source addresses\n", 3138 ifn, num_preferred); 3139 if (num_preferred == 0) { 3140 /* None on this interface. */ 3141 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n"); 3142 continue; 3143 } 3144 SCTPDBG(SCTP_DEBUG_OUTPUT2, 3145 "num preferred:%d on interface:%p cur_addr_num:%d\n", 3146 num_preferred, (void *)sctp_ifn, cur_addr_num); 3147 3148 /* 3149 * Ok we have num_eligible_addr set with how many we can 3150 * use, this may vary from call to call due to addresses 3151 * being deprecated etc.. 
3152 */ 3153 if (cur_addr_num >= num_preferred) { 3154 cur_addr_num = 0; 3155 } 3156 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop, 3157 dest_is_priv, cur_addr_num, fam, ro); 3158 if (sifa == NULL) 3159 continue; 3160 if (net) { 3161 net->indx_of_eligible_next_to_use = cur_addr_num + 1; 3162 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", 3163 cur_addr_num); 3164 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); 3165 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 3166 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); 3167 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); 3168 } 3169 atomic_add_int(&sifa->refcount, 1); 3170 return (sifa); 3171 } 3172 #ifdef INET 3173 again_with_private_addresses_allowed: 3174 #endif 3175 /* plan_c: do we have an acceptable address on the emit interface */ 3176 sifa = NULL; 3177 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n"); 3178 if (emit_ifn == NULL) { 3179 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n"); 3180 goto plan_d; 3181 } 3182 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { 3183 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); 3184 #if defined(__FreeBSD__) && !defined(__Userspace__) 3185 #ifdef INET 3186 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3187 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3188 &sctp_ifa->address.sin.sin_addr) != 0)) { 3189 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n"); 3190 continue; 3191 } 3192 #endif 3193 #ifdef INET6 3194 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3195 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3196 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3197 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n"); 3198 continue; 3199 } 3200 #endif 3201 #endif 3202 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3203 (non_asoc_addr_ok == 0)) { 3204 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n"); 3205 continue; 3206 } 3207 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, 3208 
dest_is_priv, fam); 3209 if (sifa == NULL) { 3210 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n"); 3211 continue; 3212 } 3213 if (stcb) { 3214 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 3215 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n"); 3216 sifa = NULL; 3217 continue; 3218 } 3219 if (((non_asoc_addr_ok == 0) && 3220 (sctp_is_addr_restricted(stcb, sifa))) || 3221 (non_asoc_addr_ok && 3222 (sctp_is_addr_restricted(stcb, sifa)) && 3223 (!sctp_is_addr_pending(stcb, sifa)))) { 3224 /* 3225 * It is restricted for some 3226 * reason.. probably not yet added. 3227 */ 3228 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n"); 3229 sifa = NULL; 3230 continue; 3231 } 3232 } 3233 atomic_add_int(&sifa->refcount, 1); 3234 goto out; 3235 } 3236 plan_d: 3237 /* 3238 * plan_d: We are in trouble. No preferred address on the emit 3239 * interface. And not even a preferred address on all interfaces. 3240 * Go out and see if we can find an acceptable address somewhere 3241 * amongst all interfaces. 
3242 */ 3243 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at); 3244 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3245 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3246 /* wrong base scope */ 3247 continue; 3248 } 3249 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 3250 #if defined(__FreeBSD__) && !defined(__Userspace__) 3251 #ifdef INET 3252 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3253 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3254 &sctp_ifa->address.sin.sin_addr) != 0)) { 3255 continue; 3256 } 3257 #endif 3258 #ifdef INET6 3259 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3260 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3261 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3262 continue; 3263 } 3264 #endif 3265 #endif 3266 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3267 (non_asoc_addr_ok == 0)) 3268 continue; 3269 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 3270 dest_is_loop, 3271 dest_is_priv, fam); 3272 if (sifa == NULL) 3273 continue; 3274 if (stcb) { 3275 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 3276 sifa = NULL; 3277 continue; 3278 } 3279 if (((non_asoc_addr_ok == 0) && 3280 (sctp_is_addr_restricted(stcb, sifa))) || 3281 (non_asoc_addr_ok && 3282 (sctp_is_addr_restricted(stcb, sifa)) && 3283 (!sctp_is_addr_pending(stcb, sifa)))) { 3284 /* 3285 * It is restricted for some 3286 * reason.. probably not yet added. 
3287 */ 3288 sifa = NULL; 3289 continue; 3290 } 3291 } 3292 goto out; 3293 } 3294 } 3295 #ifdef INET 3296 if (stcb) { 3297 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { 3298 stcb->asoc.scope.ipv4_local_scope = 1; 3299 retried = 1; 3300 goto again_with_private_addresses_allowed; 3301 } else if (retried == 1) { 3302 stcb->asoc.scope.ipv4_local_scope = 0; 3303 } 3304 } 3305 #endif 3306 out: 3307 #ifdef INET 3308 if (sifa) { 3309 if (retried == 1) { 3310 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 3311 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 3312 /* wrong base scope */ 3313 continue; 3314 } 3315 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 3316 struct sctp_ifa *tmp_sifa; 3317 3318 #if defined(__FreeBSD__) && !defined(__Userspace__) 3319 #ifdef INET 3320 if ((sctp_ifa->address.sa.sa_family == AF_INET) && 3321 (prison_check_ip4(inp->ip_inp.inp.inp_cred, 3322 &sctp_ifa->address.sin.sin_addr) != 0)) { 3323 continue; 3324 } 3325 #endif 3326 #ifdef INET6 3327 if ((sctp_ifa->address.sa.sa_family == AF_INET6) && 3328 (prison_check_ip6(inp->ip_inp.inp.inp_cred, 3329 &sctp_ifa->address.sin6.sin6_addr) != 0)) { 3330 continue; 3331 } 3332 #endif 3333 #endif 3334 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 3335 (non_asoc_addr_ok == 0)) 3336 continue; 3337 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 3338 dest_is_loop, 3339 dest_is_priv, fam); 3340 if (tmp_sifa == NULL) { 3341 continue; 3342 } 3343 if (tmp_sifa == sifa) { 3344 continue; 3345 } 3346 if (stcb) { 3347 if (sctp_is_address_in_scope(tmp_sifa, 3348 &stcb->asoc.scope, 0) == 0) { 3349 continue; 3350 } 3351 if (((non_asoc_addr_ok == 0) && 3352 (sctp_is_addr_restricted(stcb, tmp_sifa))) || 3353 (non_asoc_addr_ok && 3354 (sctp_is_addr_restricted(stcb, tmp_sifa)) && 3355 (!sctp_is_addr_pending(stcb, tmp_sifa)))) { 3356 /* 3357 * It is restricted for some 3358 * reason.. probably not yet added. 
3359 */ 3360 continue; 3361 } 3362 } 3363 if ((tmp_sifa->address.sin.sin_family == AF_INET) && 3364 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) { 3365 sctp_add_local_addr_restricted(stcb, tmp_sifa); 3366 } 3367 } 3368 } 3369 } 3370 atomic_add_int(&sifa->refcount, 1); 3371 } 3372 #endif 3373 return (sifa); 3374 } 3375 3376 /* tcb may be NULL */ 3377 struct sctp_ifa * 3378 sctp_source_address_selection(struct sctp_inpcb *inp, 3379 struct sctp_tcb *stcb, 3380 sctp_route_t *ro, 3381 struct sctp_nets *net, 3382 int non_asoc_addr_ok, uint32_t vrf_id) 3383 { 3384 struct sctp_ifa *answer; 3385 uint8_t dest_is_priv, dest_is_loop; 3386 sa_family_t fam; 3387 #ifdef INET 3388 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; 3389 #endif 3390 #ifdef INET6 3391 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; 3392 #endif 3393 3394 /** 3395 * Rules: 3396 * - Find the route if needed, cache if I can. 3397 * - Look at interface address in route, Is it in the bound list. If so we 3398 * have the best source. 3399 * - If not we must rotate amongst the addresses. 3400 * 3401 * Caveats and issues 3402 * 3403 * Do we need to pay attention to scope. We can have a private address 3404 * or a global address we are sourcing or sending to. So if we draw 3405 * it out 3406 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3407 * For V4 3408 * ------------------------------------------ 3409 * source * dest * result 3410 * ----------------------------------------- 3411 * <a> Private * Global * NAT 3412 * ----------------------------------------- 3413 * <b> Private * Private * No problem 3414 * ----------------------------------------- 3415 * <c> Global * Private * Huh, How will this work? 
3416 * ----------------------------------------- 3417 * <d> Global * Global * No Problem 3418 *------------------------------------------ 3419 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3420 * For V6 3421 *------------------------------------------ 3422 * source * dest * result 3423 * ----------------------------------------- 3424 * <a> Linklocal * Global * 3425 * ----------------------------------------- 3426 * <b> Linklocal * Linklocal * No problem 3427 * ----------------------------------------- 3428 * <c> Global * Linklocal * Huh, How will this work? 3429 * ----------------------------------------- 3430 * <d> Global * Global * No Problem 3431 *------------------------------------------ 3432 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 3433 * 3434 * And then we add to that what happens if there are multiple addresses 3435 * assigned to an interface. Remember the ifa on a ifn is a linked 3436 * list of addresses. So one interface can have more than one IP 3437 * address. What happens if we have both a private and a global 3438 * address? Do we then use context of destination to sort out which 3439 * one is best? And what about NAT's sending P->G may get you a NAT 3440 * translation, or should you select the G thats on the interface in 3441 * preference. 3442 * 3443 * Decisions: 3444 * 3445 * - count the number of addresses on the interface. 3446 * - if it is one, no problem except case <c>. 3447 * For <a> we will assume a NAT out there. 3448 * - if there are more than one, then we need to worry about scope P 3449 * or G. We should prefer G -> G and P -> P if possible. 3450 * Then as a secondary fall back to mixed types G->P being a last 3451 * ditch one. 3452 * - The above all works for bound all, but bound specific we need to 3453 * use the same concept but instead only consider the bound 3454 * addresses. If the bound set is NOT assigned to the interface then 3455 * we must use rotation amongst the bound addresses.. 
3456 */ 3457 #if defined(__FreeBSD__) && !defined(__Userspace__) 3458 if (ro->ro_nh == NULL) { 3459 #else 3460 if (ro->ro_rt == NULL) { 3461 #endif 3462 /* 3463 * Need a route to cache. 3464 */ 3465 SCTP_RTALLOC(ro, vrf_id, inp->fibnum); 3466 } 3467 #if defined(__FreeBSD__) && !defined(__Userspace__) 3468 if (ro->ro_nh == NULL) { 3469 #else 3470 if (ro->ro_rt == NULL) { 3471 #endif 3472 return (NULL); 3473 } 3474 #if defined(_WIN32) 3475 /* On Windows the sa_family is U_SHORT or ADDRESS_FAMILY */ 3476 fam = (sa_family_t)ro->ro_dst.sa_family; 3477 #else 3478 fam = ro->ro_dst.sa_family; 3479 #endif 3480 dest_is_priv = dest_is_loop = 0; 3481 /* Setup our scopes for the destination */ 3482 switch (fam) { 3483 #ifdef INET 3484 case AF_INET: 3485 /* Scope based on outbound address */ 3486 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 3487 dest_is_loop = 1; 3488 if (net != NULL) { 3489 /* mark it as local */ 3490 net->addr_is_local = 1; 3491 } 3492 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 3493 dest_is_priv = 1; 3494 } 3495 break; 3496 #endif 3497 #ifdef INET6 3498 case AF_INET6: 3499 /* Scope based on outbound address */ 3500 #if defined(_WIN32) 3501 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { 3502 #else 3503 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || 3504 SCTP_ROUTE_IS_REAL_LOOP(ro)) { 3505 #endif 3506 /* 3507 * If the address is a loopback address, which 3508 * consists of "::1" OR "fe80::1%lo0", we are loopback 3509 * scope. But we don't use dest_is_priv (link local 3510 * addresses). 
3511 */ 3512 dest_is_loop = 1; 3513 if (net != NULL) { 3514 /* mark it as local */ 3515 net->addr_is_local = 1; 3516 } 3517 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 3518 dest_is_priv = 1; 3519 } 3520 break; 3521 #endif 3522 } 3523 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:"); 3524 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst); 3525 SCTP_IPI_ADDR_RLOCK(); 3526 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 3527 /* 3528 * Bound all case 3529 */ 3530 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id, 3531 dest_is_priv, dest_is_loop, 3532 non_asoc_addr_ok, fam); 3533 SCTP_IPI_ADDR_RUNLOCK(); 3534 return (answer); 3535 } 3536 /* 3537 * Subset bound case 3538 */ 3539 if (stcb) { 3540 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro, 3541 vrf_id, dest_is_priv, 3542 dest_is_loop, 3543 non_asoc_addr_ok, fam); 3544 } else { 3545 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, 3546 non_asoc_addr_ok, 3547 dest_is_priv, 3548 dest_is_loop, fam); 3549 } 3550 SCTP_IPI_ADDR_RUNLOCK(); 3551 return (answer); 3552 } 3553 3554 static bool 3555 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize) 3556 { 3557 #if defined(_WIN32) 3558 WSACMSGHDR cmh; 3559 #else 3560 struct cmsghdr cmh; 3561 #endif 3562 struct sctp_sndinfo sndinfo; 3563 struct sctp_prinfo prinfo; 3564 struct sctp_authinfo authinfo; 3565 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off; 3566 bool found; 3567 3568 /* 3569 * Independent of how many mbufs, find the c_type inside the control 3570 * structure and copy out the data. 3571 */ 3572 found = false; 3573 tot_len = SCTP_BUF_LEN(control); 3574 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) { 3575 rem_len = tot_len - off; 3576 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) { 3577 /* There is not enough room for one more. 
*/ 3578 return (found); 3579 } 3580 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh); 3581 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 3582 /* We dont't have a complete CMSG header. */ 3583 return (found); 3584 } 3585 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) { 3586 /* We don't have the complete CMSG. */ 3587 return (found); 3588 } 3589 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh)); 3590 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh)); 3591 if ((cmh.cmsg_level == IPPROTO_SCTP) && 3592 ((c_type == cmh.cmsg_type) || 3593 ((c_type == SCTP_SNDRCV) && 3594 ((cmh.cmsg_type == SCTP_SNDINFO) || 3595 (cmh.cmsg_type == SCTP_PRINFO) || 3596 (cmh.cmsg_type == SCTP_AUTHINFO))))) { 3597 if (c_type == cmh.cmsg_type) { 3598 if (cpsize > INT_MAX) { 3599 return (found); 3600 } 3601 if (cmsg_data_len < (int)cpsize) { 3602 return (found); 3603 } 3604 /* It is exactly what we want. Copy it out. */ 3605 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data); 3606 return (1); 3607 } else { 3608 struct sctp_sndrcvinfo *sndrcvinfo; 3609 3610 sndrcvinfo = (struct sctp_sndrcvinfo *)data; 3611 if (!found) { 3612 if (cpsize < sizeof(struct sctp_sndrcvinfo)) { 3613 return (found); 3614 } 3615 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo)); 3616 } 3617 switch (cmh.cmsg_type) { 3618 case SCTP_SNDINFO: 3619 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) { 3620 return (found); 3621 } 3622 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo); 3623 sndrcvinfo->sinfo_stream = sndinfo.snd_sid; 3624 sndrcvinfo->sinfo_flags = sndinfo.snd_flags; 3625 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid; 3626 sndrcvinfo->sinfo_context = sndinfo.snd_context; 3627 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id; 3628 break; 3629 case SCTP_PRINFO: 3630 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) { 3631 return (found); 3632 } 3633 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo); 3634 if 
(prinfo.pr_policy != SCTP_PR_SCTP_NONE) { 3635 sndrcvinfo->sinfo_timetolive = prinfo.pr_value; 3636 } else { 3637 sndrcvinfo->sinfo_timetolive = 0; 3638 } 3639 sndrcvinfo->sinfo_flags |= prinfo.pr_policy; 3640 break; 3641 case SCTP_AUTHINFO: 3642 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) { 3643 return (found); 3644 } 3645 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo); 3646 sndrcvinfo->sinfo_keynumber_valid = 1; 3647 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber; 3648 break; 3649 default: 3650 return (found); 3651 } 3652 found = true; 3653 } 3654 } 3655 } 3656 return (found); 3657 } 3658 3659 static int 3660 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error) 3661 { 3662 #if defined(_WIN32) 3663 WSACMSGHDR cmh; 3664 #else 3665 struct cmsghdr cmh; 3666 #endif 3667 struct sctp_initmsg initmsg; 3668 #ifdef INET 3669 struct sockaddr_in sin; 3670 #endif 3671 #ifdef INET6 3672 struct sockaddr_in6 sin6; 3673 #endif 3674 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off; 3675 3676 tot_len = SCTP_BUF_LEN(control); 3677 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) { 3678 rem_len = tot_len - off; 3679 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) { 3680 /* There is not enough room for one more. */ 3681 *error = EINVAL; 3682 return (1); 3683 } 3684 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh); 3685 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 3686 /* We dont't have a complete CMSG header. */ 3687 *error = EINVAL; 3688 return (1); 3689 } 3690 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) { 3691 /* We don't have the complete CMSG. 
			 */
			*error = EINVAL;
			return (1);
		}
		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
			case SCTP_INIT:
				/* Apply user-supplied sctp_initmsg limits. */
				if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
					*error = EINVAL;
					return (1);
				}
				m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
				/* Zero fields mean "leave the current default". */
				if (initmsg.sinit_max_attempts)
					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
				if (initmsg.sinit_num_ostreams)
					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
				if (initmsg.sinit_max_instreams)
					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
				if (initmsg.sinit_max_init_timeo)
					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
					struct sctp_stream_out *tmp_str;
					unsigned int i;
#if defined(SCTP_DETAILED_STR_STATS)
					int j;
#endif

					/* Default is NOT correct */
					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
					        stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
					/*
					 * Drop the TCB lock across the
					 * allocation, then retake it.
					 */
					SCTP_TCB_UNLOCK(stcb);
					SCTP_MALLOC(tmp_str,
					            struct sctp_stream_out *,
					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
					            SCTP_M_STRMO);
					SCTP_TCB_LOCK(stcb);
					if (tmp_str != NULL) {
						/* Grown: replace the old stream-out array. */
						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
						stcb->asoc.strmout = tmp_str;
						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
					} else {
						/* Allocation failed: keep the current count. */
						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
					}
					/* (Re)initialize every outbound stream. */
					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
						stcb->asoc.strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
						}
#else
						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
						stcb->asoc.strmout[i].next_mid_ordered = 0;
						stcb->asoc.strmout[i].next_mid_unordered = 0;
						stcb->asoc.strmout[i].sid = i;
						stcb->asoc.strmout[i].last_msg_incomplete = 0;
						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
					}
				}
				break;
#ifdef INET
			case SCTP_DSTADDRV4:
				/* Add an extra IPv4 destination address. */
				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = stcb->rport;
				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				/* Reject wildcard, broadcast and multicast. */
				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
					*error = EINVAL;
					return (1);
				}
				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
				    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
					*error = ENOBUFS;
					return (1);
				}
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				/* Add an extra IPv6 destination address. */
				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = stcb->rport;
				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
					*error = EINVAL;
					return (1);
				}
#ifdef INET
				/* V4-mapped addresses are added as plain IPv4. */
				if
				(IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					in6_sin6_2_sin(&sin, &sin6);
					/* Same sanity checks as the native IPv4 case. */
					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
						*error = EINVAL;
						return (1);
					}
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
					    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				} else
#endif
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
					    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				break;
#endif
			default:
				/* Unknown cmsg types are ignored. */
				break;
			}
		}
	}
	return (0);
}

#if defined(INET) || defined(INET6)
/*
 * Scan 'control' for SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs and look up an
 * existing association for each carried destination address (port is the
 * peer port, network byte order).  Returns the first matching stcb (with
 * *inp_p/*net_p filled in by the lookup) or NULL; malformed cmsg chains
 * set *error to EINVAL.
 */
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
#if defined(_WIN32)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	struct sctp_tcb *stcb;
	struct sockaddr *addr;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif
	int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;

	tot_len = SCTP_BUF_LEN(control);
	for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
		rem_len = tot_len - off;
		if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			*error = EINVAL;
			return (NULL);
		}
		m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			*error = EINVAL;
			return (NULL);
		}
		if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
			/* We don't have the complete CMSG. */
			*error = EINVAL;
			return (NULL);
		}
		cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
		cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
#ifdef INET
			case SCTP_DSTADDRV4:
				if (cmsg_data_len < (int)sizeof(struct in_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = port;
				m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				addr = (struct sockaddr *)&sin;
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = port;
				m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
#ifdef INET
				/* Look up V4-mapped addresses as plain IPv4. */
				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					in6_sin6_2_sin(&sin, &sin6);
					addr = (struct sockaddr *)&sin;
				} else
#endif
					addr = (struct sockaddr *)&sin6;
				break;
#endif
			default:
				addr = NULL;
				break;
			}
			if (addr) {
				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
				if (stcb != NULL) {
					return (stcb);
				}
			}
		}
	}
	return (NULL);
}
#endif

/*
 * Build a STATE-COOKIE parameter: a copy of stc_in, followed by copies of
 * the received INIT and the INIT-ACK being sent, followed by a zeroed
 * signature field.  *signature is pointed at the signature bytes so the
 * caller can fill in the HMAC.  Returns the mbuf chain or NULL on
 * allocation failure.
 */
static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint16_t
cookie_sz;

	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_NOWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
	}
#endif
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_NOWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/*
	 * tack the INIT and then the INIT-ACK onto the chain, summing the
	 * total cookie length as we walk each chain.
	 */
	cookie_sz = 0;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			/* m_at is left pointing at the tail mbuf. */
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SIGNATURE_SIZE, 0, M_NOWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	/* Append the (for now zeroed) signature and finalize the length. */
	SCTP_BUF_NEXT(m_at) = sig;
	SCTP_BUF_LEN(sig) = SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	*signature = (uint8_t *)mtod(sig, caddr_t);
	memset(*signature, 0, SCTP_SIGNATURE_SIZE);
	return (mret);
}

/*
 * Return the ECT bits to place in the IP TOS/traffic-class field:
 * SCTP_ECT0_BIT when the association negotiated ECN, 0 otherwise
 * (or when no stcb is available).
 */
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb)
{
	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
		return (SCTP_ECT0_BIT);
	} else {
		return (0);
	}
}

#if defined(INET) || defined(INET6)
/*
 * Handle the "no route / no valid source address" case for an outgoing
 * packet: log it, mark a confirmed-reachable net unreachable (notifying
 * the ULP), and if the dead net was the primary destination, pick and
 * reference an alternate and drop the cached source address.
 */
static void
sctp_handle_no_route(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int so_locked)
{
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");

	if (net) {
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb, 0,
				    (void *)net,
				    so_locked);
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_PF;
			}
		}
		if (stcb) {
			if (net == stcb->asoc.primary_destination) {
				/* need a new primary */
				struct sctp_nets *alt;

				alt = sctp_find_alternate_net(stcb, net, 0);
				if (alt != net) {
					if (stcb->asoc.alternate) {
						sctp_free_remote_addr(stcb->asoc.alternate);
					}
					stcb->asoc.alternate = alt;
					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
					/* Drop the cached source; it must be re-selected. */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr =
NULL;
					}
					net->src_addr_selected = 0;
				}
			}
		}
	}
}
#endif

/*
 * Low-level packet emitter: takes a fully built SCTP packet (common header +
 * chunks, no IP header), fills in AUTH/checksum, prepends the network header
 * for the destination's address family (INET, INET6, or userspace AF_CONN),
 * and hands it to the IP/conn output path.  Returns 0 or an errno
 * (EFAULT, ENOMEM, EINVAL, EHOSTUNREACH, or whatever IP output returns).
 * stcb and net may be NULL (e.g. OOTB responses); 'port' != 0 selects
 * UDP encapsulation (RFC 6951); 'use_zero_crc' suppresses CRC32c computation
 * when the peer accepts zero-checksum packets.
 */
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
                           struct sctp_tcb *stcb,	/* may be NULL */
                           struct sctp_nets *net,
                           struct sockaddr *to,
                           struct mbuf *m,
                           uint32_t auth_offset,
                           struct sctp_auth_chunk *auth,
                           uint16_t auth_keyid,
                           int nofragment_flag,
                           int ecn_ok,
                           int out_of_asoc_ok,
                           uint16_t src_port,
                           uint16_t dest_port,
                           uint32_t v_tag,
                           uint16_t port,
                           union sctp_sockstore *over_addr,
#if defined(__FreeBSD__) && !defined(__Userspace__)
                           uint8_t mflowtype, uint32_t mflowid,
#endif
                           bool use_zero_crc,
                           int so_locked)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/**
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
	 * - fill in the HMAC digest of any AUTH chunk in the packet.
	 * - calculate and fill in the SCTP checksum.
	 * - prepend an IP address header.
	 * - if boundall use INADDR_ANY.
	 * - if boundspecific do source address selection.
	 * - set fragmentation option for ipV4.
	 * - On return from IP output, check/adjust mtu size of output
	 *   interface and smallest_mtu size as well.
	 */
	/* Will need ifdefs around this */
	struct mbuf *newm;
	struct sctphdr *sctphdr;
	int packet_length;
	int ret;
#if defined(INET) || defined(INET6)
	uint32_t vrf_id;
#endif
#if defined(INET) || defined(INET6)
	struct mbuf *o_pak;
	sctp_route_t *ro = NULL;
	struct udphdr *udp = NULL;
#endif
	uint8_t tos_value;
#if defined(__APPLE__) && !defined(__Userspace__)
	struct socket *so = NULL;
#endif

	/* Lock-state sanity checks (macOS kernel builds only). */
#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
		SCTP_TCB_LOCK_ASSERT(stcb);
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	/* Never transmit to a destination marked out of scope. */
	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		sctp_m_freem(m);
		return (EFAULT);
	}
#if defined(INET) || defined(INET6)
	if (stcb) {
		vrf_id = stcb->asoc.vrf_id;
	} else {
		vrf_id = inp->def_vrf_id;
	}
#endif
	/* fill in the HMAC digest for any AUTH chunk in the packet */
	if ((auth != NULL) && (stcb != NULL)) {
		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
	}

	/* DSCP precedence: per-path, then per-association, then endpoint. */
	if (net) {
		tos_value = net->dscp;
	} else if (stcb) {
		tos_value = stcb->asoc.default_dscp;
	} else {
		tos_value = inp->sctp_ep.default_dscp;
	}

	switch (to->sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct ip *ip = NULL;
		sctp_route_t iproute;
		int len;

		/* Prepend room for the IPv4 header (+ UDP when encapsulating). */
		len = SCTP_MIN_V4_OVERHEAD;
		if (port) {
			len += sizeof(struct udphdr);
		}
		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (net != NULL) {
			m->m_pkthdr.flowid = net->flowid;
			M_HASHTYPE_SET(m, net->flowtype);
		} else {
			m->m_pkthdr.flowid = mflowid;
			M_HASHTYPE_SET(m, mflowtype);
		}
#endif
		packet_length = sctp_calculate_len(m);
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		if (tos_value == 0) {
			/*
			 * This means especially, that it is not set at the
			 * SCTP layer. So use the value from the IP layer.
			 */
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		/* Keep the 6 DSCP bits, then OR in the ECT codepoint if wanted. */
		tos_value &= 0xfc;
		if (ecn_ok) {
			tos_value |= sctp_get_ect(stcb);
		}
		/* DF only makes sense without UDP encapsulation. */
		if ((nofragment_flag) && (port == 0)) {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			ip->ip_off = htons(IP_DF);
#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
			ip->ip_off = IP_DF;
#else
			ip->ip_off = htons(IP_DF);
#endif
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			ip->ip_off = htons(0);
#else
			ip->ip_off = 0;
#endif
		}
#if defined(__Userspace__)
		ip->ip_id = htons(SCTP_IP_ID(inp)++);
#elif defined(__FreeBSD__)
		/* FreeBSD has a function for ip_id's */
		ip_fillid(ip);
#elif defined(__APPLE__)
#if RANDOM_IP_ID
		ip->ip_id = ip_randomid();
#else
		ip->ip_id = htons(ip_id++);
#endif
#else
		ip->ip_id = SCTP_IP_ID(inp)++;
#endif

		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
#if defined(__FreeBSD__) && !defined(__Userspace__)
		ip->ip_len = htons(packet_length);
#else
		ip->ip_len = packet_length;
#endif
		ip->ip_tos = tos_value;
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		ip->ip_sum = 0;
		/* Without a cached path (net == NULL) use a temporary route on the stack. */
		if (net == NULL) {
			ro = &iproute;
			memset(&iproute, 0, sizeof(iproute));
#ifdef HAVE_SA_LEN
			memcpy(&ro->ro_dst, to, to->sa_len);
#else
			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
#endif
		} else {
			ro = (sctp_route_t *)&net->ro;
		}
		/* Now the address selection part */
		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

		/* call the routine to select the src address */
		if (net && out_of_asoc_ok == 0) {
			/* Drop a cached source address that has become unusable. */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
#if defined(__FreeBSD__) && !defined(__Userspace__)
				RO_NHFREE(ro);
#else
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
#endif
			}
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
										ro, net, 0,
										vrf_id);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				/* No route to host */
				net->src_addr_selected = 0;
				sctp_handle_no_route(stcb, net, so_locked);
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
				sctp_m_freem(m);
				return (EHOSTUNREACH);
			}
			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
		} else {
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
								      net,
								      out_of_asoc_ok,
								      vrf_id);
				if (_lsrc == NULL) {
					sctp_handle_no_route(stcb, net, so_locked);
					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
					sctp_m_freem(m);
					return (EHOSTUNREACH);
				}
				ip->ip_src = _lsrc->address.sin.sin_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* Caller pinned the source address; just allocate the route. */
				ip->ip_src = over_addr->sin.sin_addr;
				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
			}
		}
		if (port) {
			/* UDP encapsulation requested but tunneling port unset: refuse. */
			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
				sctp_handle_no_route(stcb, net, so_locked);
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
				sctp_m_freem(m);
				return (EHOSTUNREACH);
			}
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
			udp->uh_dport = port;
			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
#if !defined(__Userspace__)
#if defined(__FreeBSD__)
			if (V_udp_cksum) {
				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			} else {
				udp->uh_sum = 0;
			}
#else
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
			udp->uh_sum = 0;
#endif
			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
		}

		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;

		/*
		 * If source address selection fails and we find no route
		 * then the ip_output should fail as well with a
		 * NO_ROUTE_TO_HOST type error. We probably should catch
		 * that somewhere and abort the association right away
		 * (assuming this is an INIT being sent).
		 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (ro->ro_nh == NULL) {
#else
		if (ro->ro_rt == NULL) {
#endif
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here (yet)!
			 */
			sctp_handle_no_route(stcb, net, so_locked);
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
			sctp_m_freem(m);
			return (EHOSTUNREACH);
		}
		if (ro != &iproute) {
			memcpy(&iproute, ro, sizeof(*ro));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
			(uint32_t) (ntohl(ip->ip_src.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
			(void *)ro->ro_nh);
#else
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
			(void *)ro->ro_rt);
#endif

		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			sctp_m_freem(m);
			return (ENOMEM);
		}
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		/*
		 * CRC32c strategy: zero-checksum if negotiated, software CRC for
		 * UDP-encapsulated packets, otherwise hardware offload where the
		 * platform supports it (FreeBSD kernel), software elsewhere.
		 */
		if (port) {
			if (use_zero_crc) {
				SCTP_STAT_INCR(sctps_sendzerocrc);
			} else {
				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
				SCTP_STAT_INCR(sctps_sendswcrc);
			}
#if !defined(__Userspace__)
#if defined(__FreeBSD__)
			if (V_udp_cksum) {
				SCTP_ENABLE_UDP_CSUM(o_pak);
			}
#else
			SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
#endif
		} else {
			if (use_zero_crc) {
				SCTP_STAT_INCR(sctps_sendzerocrc);
			} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
				m->m_pkthdr.csum_flags = CSUM_SCTP;
				m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
				SCTP_STAT_INCR(sctps_sendhwcrc);
#else
				if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
				      (stcb) && (stcb->asoc.scope.loopback_scope))) {
					sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
					SCTP_STAT_INCR(sctps_sendswcrc);
				} else {
					SCTP_STAT_INCR(sctps_sendhwcrc);
				}
#endif
			}
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(o_pak);
#endif
		/* send it out. table id is taken from stcb */
#if defined(__APPLE__) && !defined(__Userspace__)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			so = SCTP_INP_SO(inp);
			SCTP_SOCKET_UNLOCK(so, 0);
		}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
#endif
		SCTP_IP_OUTPUT(ret, o_pak, ro, inp, vrf_id);
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Re-acquire the socket lock; the refcnt bump keeps stcb alive meanwhile. */
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 0);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (port) {
			UDPSTAT_INC(udps_opackets);
		}
#endif
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret)
			SCTP_STAT_INCR(sctps_senderrors);

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
		if (net == NULL) {
			/* free tempy routes */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			RO_NHFREE(ro);
#else
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
#endif
		} else {
			/* Post-send PMTU learning from the (possibly updated) route. */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
#else
			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
#endif
			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
				uint32_t mtu;

#if defined(__FreeBSD__) && !defined(__Userspace__)
				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
#else
				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
#endif
				if (mtu > 0) {
					if (net->port) {
						/* Leave room for the encapsulating UDP header. */
						mtu -= sizeof(struct udphdr);
					}
					if (mtu < net->mtu) {
						net->mtu = mtu;
						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
							sctp_pathmtu_adjustment(stcb, mtu, true);
						}
					}
				}
#if defined(__FreeBSD__) && !defined(__Userspace__)
			} else if (ro->ro_nh == NULL) {
#else
			} else if (ro->ro_rt == NULL) {
#endif
				/* route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
		}
		return (ret);
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		uint32_t flowlabel, flowinfo;
		struct ip6_hdr *ip6h;
		struct route_in6 ip6route;
#if !defined(__Userspace__)
		struct ifnet *ifp;
#endif
		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
		int prev_scope = 0;
#ifdef SCTP_EMBEDDED_V6_SCOPE
		struct sockaddr_in6 lsa6_storage;
		int error;
#endif
		u_short prev_port = 0;
		int len;

		/* Flow label precedence mirrors the DSCP logic above. */
		if (net) {
			flowlabel = net->flowlabel;
		} else if (stcb) {
			flowlabel = stcb->asoc.default_flowlabel;
		} else {
			flowlabel = inp->sctp_ep.default_flowlabel;
		}
		if (flowlabel == 0) {
			/*
			 * This means especially, that it is not set at the
			 * SCTP layer. So use the value from the IP layer.
			 */
#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
#else
			flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
#endif
		}
		flowlabel &= 0x000fffff;
		len = SCTP_MIN_OVERHEAD;
		if (port) {
			len += sizeof(struct udphdr);
		}
		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (net != NULL) {
			m->m_pkthdr.flowid = net->flowid;
			M_HASHTYPE_SET(m, net->flowtype);
		} else {
			m->m_pkthdr.flowid = mflowid;
			M_HASHTYPE_SET(m, mflowtype);
		}
#endif
		packet_length = sctp_calculate_len(m);

		ip6h = mtod(m, struct ip6_hdr *);
		/* protect *sin6 from overwrite */
		sin6 = (struct sockaddr_in6 *)to;
		tmp = *sin6;
		sin6 = &tmp;

#ifdef SCTP_EMBEDDED_V6_SCOPE
		/* KAME hack: embed scopeid */
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
#else
		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
#endif
#elif defined(SCTP_KAME)
		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
#else
		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
#endif
		{
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
			sctp_m_freem(m);
			return (EINVAL);
		}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
		if (net == NULL) {
			memset(&ip6route, 0, sizeof(ip6route));
			ro = (sctp_route_t *)&ip6route;
#ifdef HAVE_SIN6_LEN
			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
#else
			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
#endif
		} else {
			ro = (sctp_route_t *)&net->ro;
		}
		/*
		 * We assume here that inp_flow is in host byte order within
		 * the TCB!
		 */
		if (tos_value == 0) {
			/*
			 * This means especially, that it is not set at the
			 * SCTP layer. So use the value from the IP layer.
			 */
#if defined(__APPLE__) && !defined(__Userspace__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
#else
			tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
#endif
		}
		tos_value &= 0xfc;
		if (ecn_ok) {
			tos_value |= sctp_get_ect(stcb);
		}
		/* Assemble version(6) | traffic class | flow label. */
		flowinfo = 0x06;
		flowinfo <<= 8;
		flowinfo |= tos_value;
		flowinfo <<= 20;
		flowinfo |= flowlabel;
		ip6h->ip6_flow = htonl(flowinfo);
		if (port) {
			ip6h->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6h->ip6_nxt = IPPROTO_SCTP;
		}
		ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
		ip6h->ip6_dst = sin6->sin6_addr;

		/*
		 * Add SRC address selection here: we can only reuse to a
		 * limited degree the kame src-addr-sel, since we can try
		 * their selection but it may not be bound.
		 */
		memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
		lsa6_tmp.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
#endif
		lsa6 = &lsa6_tmp;
		if (net && out_of_asoc_ok == 0) {
			/* Drop a cached source address that has become unusable. */
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
#if defined(__FreeBSD__) && !defined(__Userspace__)
				RO_NHFREE(ro);
#else
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
#endif
			}
			if (net->src_addr_selected == 0) {
#ifdef SCTP_EMBEDDED_V6_SCOPE
				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
				/* KAME hack: embed scopeid */
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
#else
				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
#endif
#elif defined(SCTP_KAME)
				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
#else
				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
#endif
				{
					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
					sctp_m_freem(m);
					return (EINVAL);
				}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp,
										stcb,
										ro,
										net,
										0,
										vrf_id);
#ifdef SCTP_EMBEDDED_V6_SCOPE
#ifdef SCTP_KAME
				(void)sa6_recoverscope(sin6);
#else
				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
#endif	/* SCTP_KAME */
#endif	/* SCTP_EMBEDDED_V6_SCOPE */
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
				net->src_addr_selected = 0;
				sctp_handle_no_route(stcb, net, so_locked);
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
				sctp_m_freem(m);
				return (EHOSTUNREACH);
			}
			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
		} else {
#ifdef SCTP_EMBEDDED_V6_SCOPE
			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
			/* KAME hack: embed scopeid */
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
#else
			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
#endif
#elif defined(SCTP_KAME)
			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
#else
			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
#endif
			{
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
				sctp_m_freem(m);
				return (EINVAL);
			}
#endif /* SCTP_EMBEDDED_V6_SCOPE */
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
								      net,
								      out_of_asoc_ok,
								      vrf_id);
				if (_lsrc == NULL) {
					sctp_handle_no_route(stcb, net, so_locked);
					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
					sctp_m_freem(m);
					return (EHOSTUNREACH);
				}
				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
				sctp_free_ifa(_lsrc);
			} else {
				/* Caller pinned the source address; just allocate the route. */
				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
			}
#ifdef SCTP_EMBEDDED_V6_SCOPE
#ifdef SCTP_KAME
			(void)sa6_recoverscope(sin6);
#else
			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
#endif	/* SCTP_KAME */
#endif	/* SCTP_EMBEDDED_V6_SCOPE */
		}
		lsa6->sin6_port = inp->sctp_lport;

#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (ro->ro_nh == NULL) {
#else
		if (ro->ro_rt == NULL) {
#endif
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here!
			 */
			sctp_handle_no_route(stcb, net, so_locked);
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
			sctp_m_freem(m);
			return (EHOSTUNREACH);
		}
#ifndef SCOPEDROUTING
#ifdef SCTP_EMBEDDED_V6_SCOPE
		/*
		 * XXX: sa6 may not have a valid sin6_scope_id in the
		 * non-SCOPEDROUTING case.
		 */
		memset(&lsa6_storage, 0, sizeof(lsa6_storage));
		lsa6_storage.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		lsa6_storage.sin6_len = sizeof(lsa6_storage);
#endif
#ifdef SCTP_KAME
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
#else
		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
					      NULL)) != 0) {
#endif /* SCTP_KAME */
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
			sctp_m_freem(m);
			return (error);
		}
		/* XXX */
		lsa6_storage.sin6_addr = lsa6->sin6_addr;
		lsa6_storage.sin6_port = inp->sctp_lport;
		lsa6 = &lsa6_storage;
#endif /* SCTP_EMBEDDED_V6_SCOPE */
#endif /* SCOPEDROUTING */
		ip6h->ip6_src = lsa6->sin6_addr;

		if (port) {
			/* UDP encapsulation requested but tunneling port unset: refuse. */
			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
				sctp_handle_no_route(stcb, net, so_locked);
				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
				sctp_m_freem(m);
				return (EHOSTUNREACH);
			}
			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
			udp->uh_dport = port;
			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
			udp->uh_sum = 0;
			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
		}

		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;

		/*
		 * We set the hop limit now since there is a good chance
		 * that our ro pointer is now filled
		 */
		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
#if !defined(__Userspace__)
		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
#endif

#ifdef SCTP_DEBUG
		/* Copy to be sure something bad is not happening */
		sin6->sin6_addr = ip6h->ip6_dst;
		lsa6->sin6_addr = ip6h->ip6_src;
#endif

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
		if (net) {
			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
			/* preserve the port and scope for link local send */
			prev_scope = sin6->sin6_scope_id;
			prev_port = sin6->sin6_port;
		}

		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		if (port) {
			/* Encapsulated: always software CRC32c, then UDP checksum over v6. */
			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#if !defined(__Userspace__)
#if defined(_WIN32)
			udp->uh_sum = 0;
#else
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
#endif
#endif
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
				SCTP_STAT_INCR(sctps_sendswcrc);
			} else {
				SCTP_STAT_INCR(sctps_sendhwcrc);
			}
#endif
		}
		/* send it out. table id is taken from stcb */
#if defined(__APPLE__) && !defined(__Userspace__)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			so = SCTP_INP_SO(inp);
			SCTP_SOCKET_UNLOCK(so, 0);
		}
#endif
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(o_pak);
#endif
#if !defined(__Userspace__)
#if defined(__FreeBSD__)
		SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, inp, vrf_id);
#else
		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, inp, vrf_id);
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
		/* Re-acquire the socket lock; the refcnt bump keeps stcb alive meanwhile. */
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 0);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
#endif
		if (net) {
			/* for link local this must be done */
			sin6->sin6_scope_id = prev_scope;
			sin6->sin6_port = prev_port;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (port) {
			UDPSTAT_INC(udps_opackets);
		}
#endif
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret) {
			SCTP_STAT_INCR(sctps_senderrors);
		}
		if (net == NULL) {
			/* Now if we had a temp route free it */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			RO_NHFREE(ro);
#else
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
#endif
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
#if defined(__FreeBSD__) && !defined(__Userspace__)
			if (ro->ro_nh == NULL) {
#else
			if (ro->ro_rt == NULL) {
#endif
				/* Route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
#if defined(__FreeBSD__) && !defined(__Userspace__)
			if ((ro->ro_nh != NULL) && (net->ro._s_addr) &&
#else
			if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
#endif
			    ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
				uint32_t mtu;

#if defined(__FreeBSD__) && !defined(__Userspace__)
				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_nh);
#else
				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
#endif
				if (mtu > 0) {
					if (net->port) {
						/* Leave room for the encapsulating UDP header. */
						mtu -= sizeof(struct udphdr);
					}
					if (mtu < net->mtu) {
						net->mtu = mtu;
						if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
							sctp_pathmtu_adjustment(stcb, mtu, false);
						}
					}
				}
			}
#if !defined(__Userspace__)
			else if (ifp != NULL) {
#if defined(_WIN32)
#define ND_IFINFO(ifp)	(ifp)
#define linkmtu		if_mtu
#endif
				/*
				 * NOTE(review): this branch dereferences stcb without a
				 * NULL check, unlike the branch above — presumably it is
				 * unreachable with stcb == NULL; confirm against callers.
				 */
				if ((ND_IFINFO(ifp)->linkmtu > 0) &&
				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
					sctp_pathmtu_adjustment(stcb, ND_IFINFO(ifp)->linkmtu, false);
				}
			}
#endif
		}
		return (ret);
	}
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		/* Userspace "connected socket" transport: no IP header at all. */
		char *buffer;
		struct sockaddr_conn *sconn;
		int len;

		sconn = (struct sockaddr_conn *)to;
		len = sizeof(struct sctphdr);
		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
		packet_length = sctp_calculate_len(m);
		m->m_pkthdr.len = packet_length;
		sctphdr = mtod(m, struct sctphdr *);
		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;
		if (use_zero_crc) {
			SCTP_STAT_INCR(sctps_sendzerocrc);
		} else if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
			sctphdr->checksum = sctp_calculate_cksum(m, 0);
			SCTP_STAT_INCR(sctps_sendswcrc);
		} else {
			SCTP_STAT_INCR(sctps_sendhwcrc);
		}
		if (tos_value == 0) {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		tos_value &= 0xfc;
		if (ecn_ok) {
			tos_value |= sctp_get_ect(stcb);
		}
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(packet_length)) != NULL) {
			m_copydata(m, 0, packet_length, buffer);
			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
			free(buffer);
		} else {
			ret = ENOMEM;
		}
		sctp_m_freem(m);
		return (ret);
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
			((struct sockaddr *)to)->sa_family);
		sctp_m_freem(m);
		/*
		 * NOTE(review): m is passed to the trace macro after being freed
		 * above — presumably the macro only records the pointer value;
		 * verify the macro never dereferences its packet argument.
		 */
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
}

void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked)
{
	struct mbuf *m, *m_last;
	struct sctp_nets *net;
	struct sctp_init_chunk *init;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_adaptation_layer_indication *ali;
	struct sctp_zero_checksum_acceptable *zero_chksum;
	struct sctp_supported_chunk_types_param *pr_supported;
5077 struct sctp_paramhdr *ph; 5078 int cnt_inits_to = 0; 5079 int error; 5080 uint16_t num_ext, chunk_len, padding_len, parameter_len; 5081 5082 #if defined(__APPLE__) && !defined(__Userspace__) 5083 if (so_locked) { 5084 sctp_lock_assert(SCTP_INP_SO(inp)); 5085 } else { 5086 sctp_unlock_assert(SCTP_INP_SO(inp)); 5087 } 5088 #endif 5089 /* INIT's always go to the primary (and usually ONLY address) */ 5090 net = stcb->asoc.primary_destination; 5091 if (net == NULL) { 5092 net = TAILQ_FIRST(&stcb->asoc.nets); 5093 if (net == NULL) { 5094 /* TSNH */ 5095 return; 5096 } 5097 /* we confirm any address we send an INIT to */ 5098 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 5099 (void)sctp_set_primary_addr(stcb, NULL, net); 5100 } else { 5101 /* we confirm any address we send an INIT to */ 5102 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 5103 } 5104 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); 5105 #ifdef INET6 5106 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 5107 /* 5108 * special hook, if we are sending to link local it will not 5109 * show up in our private address count. 5110 */ 5111 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr)) 5112 cnt_inits_to = 1; 5113 } 5114 #endif 5115 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5116 /* This case should not happen */ 5117 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); 5118 return; 5119 } 5120 /* start the INIT timer */ 5121 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 5122 5123 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA); 5124 if (m == NULL) { 5125 /* No memory, INIT timer will re-attempt. 
*/ 5126 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); 5127 return; 5128 } 5129 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk); 5130 padding_len = 0; 5131 /* Now lets put the chunk header in place */ 5132 init = mtod(m, struct sctp_init_chunk *); 5133 /* now the chunk header */ 5134 init->ch.chunk_type = SCTP_INITIATION; 5135 init->ch.chunk_flags = 0; 5136 /* fill in later from mbuf we build */ 5137 init->ch.chunk_length = 0; 5138 /* place in my tag */ 5139 init->init.initiate_tag = htonl(stcb->asoc.my_vtag); 5140 /* set up some of the credits. */ 5141 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0, 5142 SCTP_MINIMAL_RWND)); 5143 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 5144 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 5145 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number); 5146 5147 /* Adaptation layer indication parameter */ 5148 if (inp->sctp_ep.adaptation_layer_indicator_provided) { 5149 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); 5150 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len); 5151 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 5152 ali->ph.param_length = htons(parameter_len); 5153 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator); 5154 chunk_len += parameter_len; 5155 } 5156 5157 /* ECN parameter */ 5158 if (stcb->asoc.ecn_supported == 1) { 5159 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 5160 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 5161 ph->param_type = htons(SCTP_ECN_CAPABLE); 5162 ph->param_length = htons(parameter_len); 5163 chunk_len += parameter_len; 5164 } 5165 5166 /* PR-SCTP supported parameter */ 5167 if (stcb->asoc.prsctp_supported == 1) { 5168 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 5169 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 5170 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); 
5171 ph->param_length = htons(parameter_len); 5172 chunk_len += parameter_len; 5173 } 5174 5175 /* Zero checksum acceptable parameter */ 5176 if (stcb->asoc.rcv_edmid != SCTP_EDMID_NONE) { 5177 parameter_len = (uint16_t)sizeof(struct sctp_zero_checksum_acceptable); 5178 zero_chksum = (struct sctp_zero_checksum_acceptable *)(mtod(m, caddr_t) + chunk_len); 5179 zero_chksum->ph.param_type = htons(SCTP_ZERO_CHECKSUM_ACCEPTABLE); 5180 zero_chksum->ph.param_length = htons(parameter_len); 5181 zero_chksum->edmid = htonl(stcb->asoc.rcv_edmid); 5182 chunk_len += parameter_len; 5183 } 5184 5185 /* Add NAT friendly parameter. */ 5186 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) { 5187 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 5188 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 5189 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 5190 ph->param_length = htons(parameter_len); 5191 chunk_len += parameter_len; 5192 } 5193 5194 /* And now tell the peer which extensions we support */ 5195 num_ext = 0; 5196 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len); 5197 if (stcb->asoc.prsctp_supported == 1) { 5198 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 5199 if (stcb->asoc.idata_supported) { 5200 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN; 5201 } 5202 } 5203 if (stcb->asoc.auth_supported == 1) { 5204 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 5205 } 5206 if (stcb->asoc.asconf_supported == 1) { 5207 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 5208 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 5209 } 5210 if (stcb->asoc.reconfig_supported == 1) { 5211 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 5212 } 5213 if (stcb->asoc.idata_supported) { 5214 pr_supported->chunk_types[num_ext++] = SCTP_IDATA; 5215 } 5216 if (stcb->asoc.nrsack_supported == 1) { 5217 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 5218 } 5219 if 
(stcb->asoc.pktdrop_supported == 1) { 5220 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 5221 } 5222 if (num_ext > 0) { 5223 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; 5224 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 5225 pr_supported->ph.param_length = htons(parameter_len); 5226 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 5227 chunk_len += parameter_len; 5228 } 5229 /* add authentication parameters */ 5230 if (stcb->asoc.auth_supported) { 5231 /* attach RANDOM parameter, if available */ 5232 if (stcb->asoc.authinfo.random != NULL) { 5233 struct sctp_auth_random *randp; 5234 5235 if (padding_len > 0) { 5236 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 5237 chunk_len += padding_len; 5238 padding_len = 0; 5239 } 5240 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len); 5241 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len; 5242 /* random key already contains the header */ 5243 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len); 5244 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 5245 chunk_len += parameter_len; 5246 } 5247 /* add HMAC_ALGO parameter */ 5248 if (stcb->asoc.local_hmacs != NULL) { 5249 struct sctp_auth_hmac_algo *hmacs; 5250 5251 if (padding_len > 0) { 5252 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 5253 chunk_len += padding_len; 5254 padding_len = 0; 5255 } 5256 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len); 5257 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) + 5258 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t)); 5259 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 5260 hmacs->ph.param_length = htons(parameter_len); 5261 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids); 5262 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 5263 chunk_len += parameter_len; 5264 } 5265 /* add CHUNKS parameter 
*/ 5266 if (stcb->asoc.local_auth_chunks != NULL) { 5267 struct sctp_auth_chunk_list *chunks; 5268 5269 if (padding_len > 0) { 5270 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 5271 chunk_len += padding_len; 5272 padding_len = 0; 5273 } 5274 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len); 5275 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) + 5276 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks)); 5277 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 5278 chunks->ph.param_length = htons(parameter_len); 5279 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types); 5280 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 5281 chunk_len += parameter_len; 5282 } 5283 } 5284 5285 /* now any cookie time extensions */ 5286 if (stcb->asoc.cookie_preserve_req > 0) { 5287 struct sctp_cookie_perserve_param *cookie_preserve; 5288 5289 if (padding_len > 0) { 5290 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 5291 chunk_len += padding_len; 5292 padding_len = 0; 5293 } 5294 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param); 5295 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len); 5296 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 5297 cookie_preserve->ph.param_length = htons(parameter_len); 5298 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 5299 stcb->asoc.cookie_preserve_req = 0; 5300 chunk_len += parameter_len; 5301 } 5302 5303 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) { 5304 uint8_t i; 5305 5306 if (padding_len > 0) { 5307 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 5308 chunk_len += padding_len; 5309 padding_len = 0; 5310 } 5311 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 5312 if (stcb->asoc.scope.ipv4_addr_legal) { 5313 parameter_len += (uint16_t)sizeof(uint16_t); 5314 } 5315 if (stcb->asoc.scope.ipv6_addr_legal) { 5316 parameter_len += 
(uint16_t)sizeof(uint16_t); 5317 } 5318 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len); 5319 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 5320 sup_addr->ph.param_length = htons(parameter_len); 5321 i = 0; 5322 if (stcb->asoc.scope.ipv4_addr_legal) { 5323 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS); 5324 } 5325 if (stcb->asoc.scope.ipv6_addr_legal) { 5326 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS); 5327 } 5328 padding_len = 4 - 2 * i; 5329 chunk_len += parameter_len; 5330 } 5331 5332 SCTP_BUF_LEN(m) = chunk_len; 5333 /* now the addresses */ 5334 /* To optimize this we could put the scoping stuff 5335 * into a structure and remove the individual uint8's from 5336 * the assoc structure. Then we could just sifa in the 5337 * address within the stcb. But for now this is a quick 5338 * hack to get the address stuff teased apart. 5339 */ 5340 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, 5341 m, cnt_inits_to, 5342 &padding_len, &chunk_len); 5343 5344 init->ch.chunk_length = htons(chunk_len); 5345 if (padding_len > 0) { 5346 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) { 5347 sctp_m_freem(m); 5348 return; 5349 } 5350 } 5351 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); 5352 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 5353 (struct sockaddr *)&net->ro._l_addr, 5354 m, 0, NULL, 0, 0, 0, 0, 5355 inp->sctp_lport, stcb->rport, htonl(0), 5356 net->port, NULL, 5357 #if defined(__FreeBSD__) && !defined(__Userspace__) 5358 0, 0, 5359 #endif 5360 false, so_locked))) { 5361 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error); 5362 if (error == ENOBUFS) { 5363 stcb->asoc.ifp_had_enobuf = 1; 5364 SCTP_STAT_INCR(sctps_lowlevelerr); 5365 } 5366 } else { 5367 stcb->asoc.ifp_had_enobuf = 0; 5368 } 5369 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 5370 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 5371 } 5372 5373 struct mbuf * 5374 
/*
 * sctp_arethere_unrecognized_parameters():
 * Walk the parameter list of an INIT/INIT-ACK chunk starting at
 * param_offset and validate the length of every parameter type we
 * understand (fixed-size params must match their struct size exactly,
 * bounded-size params must not exceed their maximum).
 * For an unknown parameter whose report bit (0x4000) is set, an
 * UNRECOGNIZED_PARAMETER error cause (header + copy of the offending
 * parameter) is appended to an mbuf chain that is returned to the
 * caller; NULL is returned when there is nothing to report.
 * Side outputs: *abort_processing is set to 1 on a malformed length or
 * a (deprecated) hostname-address parameter; *nat_friendly on
 * SCTP_HAS_NAT_SUPPORT; *cookie_found (if non-NULL) when a state
 * cookie is seen; *edmid (if non-NULL) from a zero-checksum-acceptable
 * parameter. On invalid_size a PROTOCOL_VIOLATION cause replaces any
 * partially built chain.
 */
sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 5375 int param_offset, int *abort_processing, 5376 struct sctp_chunkhdr *cp, 5377 int *nat_friendly, 5378 int *cookie_found, 5379 uint32_t *edmid) 5380 { 5381 /* 5382 * Given a mbuf containing an INIT or INIT-ACK with the param_offset 5383 * being equal to the beginning of the params i.e. (iphlen + 5384 * sizeof(struct sctp_init_msg) parse through the parameters to the 5385 * end of the mbuf verifying that all parameters are known. 5386 * 5387 * For unknown parameters build and return a mbuf with 5388 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 5389 * processing this chunk stop, and set *abort_processing to 1. 5390 * 5391 * By having param_offset be pre-set to where parameters begin it is 5392 * hoped that this routine may be reused in the future by new 5393 * features. 5394 */ 5395 struct sctp_zero_checksum_acceptable zero_chksum, *zero_chksum_p; 5396 struct sctp_paramhdr *phdr, params; 5397 struct mbuf *mat, *m_tmp, *op_err, *op_err_last; 5398 int at, limit, pad_needed; 5399 uint16_t ptype, plen, padded_size; 5400 5401 *abort_processing = 0; 5402 if (cookie_found != NULL) { 5403 *cookie_found = 0; 5404 } 5405 if (edmid != NULL) { 5406 *edmid = SCTP_EDMID_NONE; 5407 } 5408 mat = in_initpkt; 5409 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 5410 at = param_offset; 5411 op_err = NULL; 5412 op_err_last = NULL; 5413 pad_needed = 0; 5414 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); 5415 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 5416 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 5417 ptype = ntohs(phdr->param_type); 5418 plen = ntohs(phdr->param_length); 5419 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 5420 /* wacked parameter */ 5421 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); 5422 goto invalid_size; 5423 } 5424 limit -= SCTP_SIZE32(plen); 5425 /*- 5426 * All
parameters for all chunks that we know/understand are 5427 * listed here. We process them other places and make 5428 * appropriate stop actions per the upper bits. However this 5429 * is the generic routine processor's can call to get back 5430 * an operr.. to either incorporate (init-ack) or send. 5431 */ 5432 padded_size = SCTP_SIZE32(plen); 5433 switch (ptype) { 5434 /* Param's with variable size */ 5435 case SCTP_HEARTBEAT_INFO: 5436 case SCTP_UNRECOG_PARAM: 5437 case SCTP_ERROR_CAUSE_IND: 5438 /* ok skip fwd */ 5439 at += padded_size; 5440 break; 5441 case SCTP_STATE_COOKIE: 5442 if (cookie_found != NULL) { 5443 *cookie_found = 1; 5444 } 5445 at += padded_size; 5446 break; 5447 /* Param's with variable size within a range */ 5448 case SCTP_CHUNK_LIST: 5449 case SCTP_SUPPORTED_CHUNK_EXT: 5450 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 5451 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); 5452 goto invalid_size; 5453 } 5454 at += padded_size; 5455 break; 5456 case SCTP_SUPPORTED_ADDRTYPE: 5457 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 5458 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); 5459 goto invalid_size; 5460 } 5461 at += padded_size; 5462 break; 5463 case SCTP_ZERO_CHECKSUM_ACCEPTABLE: 5464 if (padded_size != sizeof(struct sctp_zero_checksum_acceptable)) { 5465 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error checksum acceptable %d\n", plen); 5466 goto invalid_size; 5467 } 5468 if (edmid != NULL) { 5469 phdr = sctp_get_next_param(mat, at, 5470 (struct sctp_paramhdr *)&zero_chksum, 5471 sizeof(struct sctp_zero_checksum_acceptable)); 5472 if (phdr != NULL) { 5473 zero_chksum_p = (struct sctp_zero_checksum_acceptable *)phdr; 5474 *edmid = ntohl(zero_chksum_p->edmid); 5475 } 5476 } 5477 at += padded_size; 5478 break; 5479 case SCTP_RANDOM: 5480 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 5481
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); 5482 goto invalid_size; 5483 } 5484 at += padded_size; 5485 break; 5486 case SCTP_SET_PRIM_ADDR: 5487 case SCTP_DEL_IP_ADDRESS: 5488 case SCTP_ADD_IP_ADDRESS: 5489 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 5490 (padded_size != sizeof(struct sctp_asconf_addr_param))) { 5491 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); 5492 goto invalid_size; 5493 } 5494 at += padded_size; 5495 break; 5496 /* Param's with a fixed size */ 5497 case SCTP_IPV4_ADDRESS: 5498 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 5499 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); 5500 goto invalid_size; 5501 } 5502 at += padded_size; 5503 break; 5504 case SCTP_IPV6_ADDRESS: 5505 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 5506 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); 5507 goto invalid_size; 5508 } 5509 at += padded_size; 5510 break; 5511 case SCTP_COOKIE_PRESERVE: 5512 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 5513 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); 5514 goto invalid_size; 5515 } 5516 at += padded_size; 5517 break; 5518 case SCTP_HAS_NAT_SUPPORT: 5519 if (padded_size != sizeof(struct sctp_paramhdr)) { 5520 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error nat support %d\n", plen); 5521 goto invalid_size; 5522 } 5523 *nat_friendly = 1; 5524 at += padded_size; 5525 break; 5526 case SCTP_PRSCTP_SUPPORTED: 5527 if (padded_size != sizeof(struct sctp_paramhdr)) { 5528 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp %d\n", plen); 5529 goto invalid_size; 5530 } 5531 at += padded_size; 5532 break; 5533 case SCTP_ECN_CAPABLE: 5534 if (padded_size != sizeof(struct sctp_paramhdr)) { 5535 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); 5536 goto invalid_size; 5537 } 5538 at += padded_size; 5539 break; 5540 case
/* more fixed-size parameters: ULP adaptation indication, ASCONF success report */
SCTP_ULP_ADAPTATION: 5541 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 5542 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); 5543 goto invalid_size; 5544 } 5545 at += padded_size; 5546 break; 5547 case SCTP_SUCCESS_REPORT: 5548 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 5549 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); 5550 goto invalid_size; 5551 } 5552 at += padded_size; 5553 break; 5554 case SCTP_HOSTNAME_ADDRESS: 5555 { 5556 /* Hostname parameters are deprecated. */ 5557 struct sctp_gen_error_cause *cause; 5558 int l_len; 5559 5560 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n"); 5561 *abort_processing = 1; 5562 sctp_m_freem(op_err); 5563 op_err = NULL; 5564 op_err_last = NULL; 5565 #ifdef INET6 5566 l_len = SCTP_MIN_OVERHEAD; 5567 #else 5568 l_len = SCTP_MIN_V4_OVERHEAD; 5569 #endif 5570 l_len += sizeof(struct sctp_chunkhdr); 5571 l_len += sizeof(struct sctp_gen_error_cause); 5572 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5573 if (op_err != NULL) { 5574 /* 5575 * Pre-reserve space for IP, SCTP, and 5576 * chunk header. 
5577 */ 5578 #ifdef INET6 5579 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5580 #else 5581 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5582 #endif 5583 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5584 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5585 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 5586 cause = mtod(op_err, struct sctp_gen_error_cause *); 5587 cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 5588 cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen)); 5589 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT); 5590 if (SCTP_BUF_NEXT(op_err) == NULL) { 5591 sctp_m_freem(op_err); 5592 op_err = NULL; 5593 op_err_last = NULL; 5594 } 5595 } 5596 return (op_err); 5597 } 5598 default: 5599 /* 5600 * we do not recognize the parameter figure out what 5601 * we do. 5602 */ 5603 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); 5604 if ((ptype & 0x4000) == 0x4000) { 5605 /* Report bit is set?? */ 5606 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); 5607 if (op_err == NULL) { 5608 int l_len; 5609 /* Ok need to try to get an mbuf */ 5610 #ifdef INET6 5611 l_len = SCTP_MIN_OVERHEAD; 5612 #else 5613 l_len = SCTP_MIN_V4_OVERHEAD; 5614 #endif 5615 l_len += sizeof(struct sctp_chunkhdr); 5616 l_len += sizeof(struct sctp_paramhdr); 5617 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5618 if (op_err) { 5619 SCTP_BUF_LEN(op_err) = 0; 5620 #ifdef INET6 5621 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5622 #else 5623 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5624 #endif 5625 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5626 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5627 op_err_last = op_err; 5628 } 5629 } 5630 if (op_err != NULL) { 5631 /* If we have space */ 5632 struct sctp_paramhdr *param; 5633 5634 if (pad_needed > 0) { 5635 op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed); 5636 } 5637 if (op_err_last == NULL) { 5638
/* padding the previous error cause failed: drop the whole chain */
sctp_m_freem(op_err); 5639 op_err = NULL; 5640 op_err_last = NULL; 5641 goto more_processing; 5642 } 5643 if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) { 5644 m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); 5645 if (m_tmp == NULL) { 5646 sctp_m_freem(op_err); 5647 op_err = NULL; 5648 op_err_last = NULL; 5649 goto more_processing; 5650 } 5651 SCTP_BUF_LEN(m_tmp) = 0; 5652 SCTP_BUF_NEXT(m_tmp) = NULL; 5653 SCTP_BUF_NEXT(op_err_last) = m_tmp; 5654 op_err_last = m_tmp; 5655 } 5656 param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t) + SCTP_BUF_LEN(op_err_last)); 5657 param->param_type = htons(SCTP_UNRECOG_PARAM); 5658 param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen); 5659 SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr); 5660 SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT); 5661 if (SCTP_BUF_NEXT(op_err_last) == NULL) { 5662 sctp_m_freem(op_err); 5663 op_err = NULL; 5664 op_err_last = NULL; 5665 goto more_processing; 5666 } else { 5667 while (SCTP_BUF_NEXT(op_err_last) != NULL) { 5668 op_err_last = SCTP_BUF_NEXT(op_err_last); 5669 } 5670 } 5671 if (plen % 4 != 0) { 5672 pad_needed = 4 - (plen % 4); 5673 } else { 5674 pad_needed = 0; 5675 } 5676 } 5677 } 5678 more_processing: 5679 if ((ptype & 0x8000) == 0x0000) { 5680 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); 5681 return (op_err); 5682 } else { 5683 /* skip this chunk and continue processing */ 5684 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); 5685 at += SCTP_SIZE32(plen); 5686 } 5687 break; 5688 } 5689 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 5690 } 5691 return (op_err); 5692 invalid_size: 5693 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); 5694 *abort_processing = 1; 5695 sctp_m_freem(op_err); 5696 op_err = NULL; 5697 op_err_last = NULL; 5698 if (phdr != NULL) { 5699 struct sctp_paramhdr *param; 5700 int l_len; 5701 #ifdef INET6 5702 l_len = SCTP_MIN_OVERHEAD; 5703 #else 5704
/* no INET6 in this build: reserve only the smaller IPv4 overhead */
l_len = SCTP_MIN_V4_OVERHEAD; 5705 #endif 5706 l_len += sizeof(struct sctp_chunkhdr); 5707 l_len += (2 * sizeof(struct sctp_paramhdr)); 5708 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 5709 if (op_err) { 5710 SCTP_BUF_LEN(op_err) = 0; 5711 #ifdef INET6 5712 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 5713 #else 5714 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 5715 #endif 5716 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 5717 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 5718 SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr); 5719 param = mtod(op_err, struct sctp_paramhdr *); 5720 param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 5721 param->param_length = htons(2 * sizeof(struct sctp_paramhdr)); 5722 param++; 5723 param->param_type = htons(ptype); 5724 param->param_length = htons(plen); 5725 } 5726 } 5727 return (op_err); 5728 } 5729 5730 /* 5731 * Given a INIT chunk, look through the parameters to verify that there 5732 * are no new addresses. 5733 * Return true, if there is a new address or there is a problem parsing 5734 the parameters. Provide an optional error cause used when sending an ABORT. 5735 * Return false, if there are no new addresses and there is no problem in 5736 parameter processing. 
5737 */
/*
 * sctp_are_there_new_addresses():
 * Compare the source address of a received INIT packet and every
 * IPv4/IPv6 address parameter the chunk carries against the addresses
 * already known on the association (asoc->nets).
 * Returns true - and stores an error cause in *op_err - when an
 * unknown address shows up or a parameter is malformed (partial or
 * undersized); returns false when every address is already known.
 * Only families allowed by the association scope are checked; other
 * parameter types are skipped (sa_touse stays NULL).
 */
5738 static bool 5739 sctp_are_there_new_addresses(struct sctp_association *asoc, 5740 struct mbuf *in_initpkt, int offset, int limit, struct sockaddr *src, 5741 struct mbuf **op_err) 5742 { 5743 struct sockaddr *sa_touse; 5744 struct sockaddr *sa; 5745 struct sctp_paramhdr *phdr, params; 5746 struct sctp_nets *net; 5747 #ifdef INET 5748 struct sockaddr_in sin4, *sa4; 5749 #endif 5750 #ifdef INET6 5751 struct sockaddr_in6 sin6, *sa6; 5752 #endif 5753 #if defined(__Userspace__) 5754 struct sockaddr_conn *sac; 5755 #endif 5756 uint16_t ptype, plen; 5757 bool fnd, check_src; 5758 5759 *op_err = NULL; 5760 #ifdef INET 5761 memset(&sin4, 0, sizeof(sin4)); 5762 sin4.sin_family = AF_INET; 5763 #ifdef HAVE_SIN_LEN 5764 sin4.sin_len = sizeof(sin4); 5765 #endif 5766 #endif 5767 #ifdef INET6 5768 memset(&sin6, 0, sizeof(sin6)); 5769 sin6.sin6_family = AF_INET6; 5770 #ifdef HAVE_SIN6_LEN 5771 sin6.sin6_len = sizeof(sin6); 5772 #endif 5773 #endif 5774 /* First what about the src address of the pkt ? 
*/ 5775 check_src = false; 5776 switch (src->sa_family) { 5777 #ifdef INET 5778 case AF_INET: 5779 if (asoc->scope.ipv4_addr_legal) { 5780 check_src = true; 5781 } 5782 break; 5783 #endif 5784 #ifdef INET6 5785 case AF_INET6: 5786 if (asoc->scope.ipv6_addr_legal) { 5787 check_src = true; 5788 } 5789 break; 5790 #endif 5791 #if defined(__Userspace__) 5792 case AF_CONN: 5793 if (asoc->scope.conn_addr_legal) { 5794 check_src = true; 5795 } 5796 break; 5797 #endif 5798 default: 5799 /* TSNH */ 5800 break; 5801 } 5802 if (check_src) { 5803 fnd = false; 5804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5805 sa = (struct sockaddr *)&net->ro._l_addr; 5806 if (sa->sa_family == src->sa_family) { 5807 #ifdef INET 5808 if (sa->sa_family == AF_INET) { 5809 struct sockaddr_in *src4; 5810 5811 sa4 = (struct sockaddr_in *)sa; 5812 src4 = (struct sockaddr_in *)src; 5813 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { 5814 fnd = true; 5815 break; 5816 } 5817 } 5818 #endif 5819 #ifdef INET6 5820 if (sa->sa_family == AF_INET6) { 5821 struct sockaddr_in6 *src6; 5822 5823 sa6 = (struct sockaddr_in6 *)sa; 5824 src6 = (struct sockaddr_in6 *)src; 5825 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { 5826 fnd = true; 5827 break; 5828 } 5829 } 5830 #endif 5831 #if defined(__Userspace__) 5832 if (sa->sa_family == AF_CONN) { 5833 struct sockaddr_conn *srcc; 5834 5835 sac = (struct sockaddr_conn *)sa; 5836 srcc = (struct sockaddr_conn *)src; 5837 if (sac->sconn_addr == srcc->sconn_addr) { 5838 fnd = true; 5839 break; 5840 } 5841 } 5842 #endif 5843 } 5844 } 5845 if (!fnd) { 5846 /* 5847 * If sending an ABORT in case of an additional address, 5848 * don't use the new address error cause. 5849 * This looks no different than if no listener was 5850 * present. 
5851 */ 5852 *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); 5853 return (true); 5854 } 5855 } 5856 /* Ok so far lets munge through the rest of the packet */ 5857 offset += sizeof(struct sctp_init_chunk); 5858 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); 5859 while (phdr) { 5860 sa_touse = NULL; 5861 ptype = ntohs(phdr->param_type); 5862 plen = ntohs(phdr->param_length); 5863 if (offset + plen > limit) { 5864 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Partial parameter"); 5865 return (true); 5866 } 5867 if (plen < sizeof(struct sctp_paramhdr)) { 5868 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length too small"); 5869 return (true); 5870 } 5871 switch (ptype) { 5872 #ifdef INET 5873 case SCTP_IPV4_ADDRESS: 5874 { 5875 struct sctp_ipv4addr_param *p4, p4_buf; 5876 5877 if (plen != sizeof(struct sctp_ipv4addr_param)) { 5878 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); 5879 return (true); 5880 } 5881 phdr = sctp_get_next_param(in_initpkt, offset, 5882 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 5883 if (phdr == NULL) { 5884 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); 5885 return (true); 5886 } 5887 if (asoc->scope.ipv4_addr_legal) { 5888 p4 = (struct sctp_ipv4addr_param *)phdr; 5889 sin4.sin_addr.s_addr = p4->addr; 5890 sa_touse = (struct sockaddr *)&sin4; 5891 } 5892 break; 5893 } 5894 #endif 5895 #ifdef INET6 5896 case SCTP_IPV6_ADDRESS: 5897 { 5898 struct sctp_ipv6addr_param *p6, p6_buf; 5899 5900 if (plen != sizeof(struct sctp_ipv6addr_param)) { 5901 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "Parameter length illegal"); 5902 return (true); 5903 } 5904 phdr = sctp_get_next_param(in_initpkt, offset, 5905 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 5906 if (phdr == NULL) { 5907 *op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, ""); 5908 return (true); 
5909 } 5910 if (asoc->scope.ipv6_addr_legal) { 5911 p6 = (struct sctp_ipv6addr_param *)phdr; 5912 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 5913 sizeof(p6->addr)); 5914 sa_touse = (struct sockaddr *)&sin6; 5915 } 5916 break; 5917 } 5918 #endif 5919 default: 5920 sa_touse = NULL; 5921 break; 5922 } 5923 if (sa_touse) { 5924 /* ok, sa_touse points to one to check */ 5925 fnd = false; 5926 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5927 sa = (struct sockaddr *)&net->ro._l_addr; 5928 if (sa->sa_family != sa_touse->sa_family) { 5929 continue; 5930 } 5931 #ifdef INET 5932 if (sa->sa_family == AF_INET) { 5933 sa4 = (struct sockaddr_in *)sa; 5934 if (sa4->sin_addr.s_addr == 5935 sin4.sin_addr.s_addr) { 5936 fnd = true; 5937 break; 5938 } 5939 } 5940 #endif 5941 #ifdef INET6 5942 if (sa->sa_family == AF_INET6) { 5943 sa6 = (struct sockaddr_in6 *)sa; 5944 if (SCTP6_ARE_ADDR_EQUAL( 5945 sa6, &sin6)) { 5946 fnd = true; 5947 break; 5948 } 5949 } 5950 #endif 5951 } 5952 if (!fnd) { 5953 /* 5954 * If sending an ABORT in case of an additional 5955 * address, don't use the new address error 5956 * cause. 5957 * This looks no different than if no listener 5958 * was present. 5959 */ 5960 *op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), "Address added"); 5961 return (true); 5962 } 5963 } 5964 offset += SCTP_SIZE32(plen); 5965 if (offset >= limit) { 5966 break; 5967 } 5968 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); 5969 } 5970 return (false); 5971 } 5972 5973 /* 5974 * Given a MBUF chain that was sent into us containing an INIT. Build a 5975 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 5976 * a pullup to include IPv6/4header, SCTP header and initial part of INIT 5977 * message (i.e. the struct sctp_init_msg). 
5978 */ 5979 void 5980 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 5981 struct sctp_nets *src_net, struct mbuf *init_pkt, 5982 int iphlen, int offset, 5983 struct sockaddr *src, struct sockaddr *dst, 5984 struct sctphdr *sh, struct sctp_init_chunk *init_chk, 5985 #if defined(__FreeBSD__) && !defined(__Userspace__) 5986 uint8_t mflowtype, uint32_t mflowid, 5987 #endif 5988 uint32_t vrf_id, uint16_t port) 5989 { 5990 struct sctp_association *asoc; 5991 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err; 5992 struct sctp_init_ack_chunk *initack; 5993 struct sctp_adaptation_layer_indication *ali; 5994 struct sctp_zero_checksum_acceptable *zero_chksum; 5995 struct sctp_supported_chunk_types_param *pr_supported; 5996 struct sctp_paramhdr *ph; 5997 union sctp_sockstore *over_addr; 5998 struct sctp_scoping scp; 5999 struct timeval now; 6000 #ifdef INET 6001 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst; 6002 struct sockaddr_in *src4 = (struct sockaddr_in *)src; 6003 struct sockaddr_in *sin; 6004 #endif 6005 #ifdef INET6 6006 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst; 6007 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src; 6008 struct sockaddr_in6 *sin6; 6009 #endif 6010 #if defined(__Userspace__) 6011 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst; 6012 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src; 6013 struct sockaddr_conn *sconn; 6014 #endif 6015 struct sockaddr *to; 6016 struct sctp_state_cookie stc; 6017 struct sctp_nets *net = NULL; 6018 uint8_t *signature = NULL; 6019 int cnt_inits_to = 0; 6020 uint16_t his_limit, i_want; 6021 int abort_flag; 6022 int nat_friendly = 0; 6023 int error; 6024 struct socket *so; 6025 uint32_t edmid; 6026 uint16_t num_ext, chunk_len, padding_len, parameter_len; 6027 bool use_zero_crc; 6028 6029 if (stcb) { 6030 asoc = &stcb->asoc; 6031 } else { 6032 asoc = NULL; 6033 } 6034 if ((asoc != NULL) && 6035 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) { 6036 if 
(sctp_are_there_new_addresses(asoc, init_pkt, offset, offset + ntohs(init_chk->ch.chunk_length), src, &op_err)) { 6037 /* 6038 * new addresses, out of here in non-cookie-wait states 6039 */ 6040 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, 6041 #if defined(__FreeBSD__) && !defined(__Userspace__) 6042 mflowtype, mflowid, inp->fibnum, 6043 #endif 6044 vrf_id, port); 6045 return; 6046 } 6047 if (src_net != NULL && (src_net->port != port)) { 6048 /* 6049 * change of remote encapsulation port, out of here in 6050 * non-cookie-wait states 6051 * 6052 * Send an ABORT, without an specific error cause. 6053 * This looks no different than if no listener 6054 * was present. 6055 */ 6056 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 6057 "Remote encapsulation port changed"); 6058 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, 6059 #if defined(__FreeBSD__) && !defined(__Userspace__) 6060 mflowtype, mflowid, inp->fibnum, 6061 #endif 6062 vrf_id, port); 6063 return; 6064 } 6065 } 6066 abort_flag = 0; 6067 op_err = sctp_arethere_unrecognized_parameters(init_pkt, 6068 (offset + sizeof(struct sctp_init_chunk)), 6069 &abort_flag, 6070 (struct sctp_chunkhdr *)init_chk, 6071 &nat_friendly, NULL, &edmid); 6072 if (abort_flag) { 6073 do_a_abort: 6074 if (op_err == NULL) { 6075 char msg[SCTP_DIAG_INFO_LEN]; 6076 6077 SCTP_SNPRINTF(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__); 6078 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 6079 msg); 6080 } 6081 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 6082 init_chk->init.initiate_tag, op_err, 6083 #if defined(__FreeBSD__) && !defined(__Userspace__) 6084 mflowtype, mflowid, inp->fibnum, 6085 #endif 6086 vrf_id, port); 6087 return; 6088 } 6089 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 6090 if (m == NULL) { 6091 /* No memory, INIT timer will re-attempt. 
*/ 6092 sctp_m_freem(op_err); 6093 return; 6094 } 6095 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk); 6096 padding_len = 0; 6097 6098 /* 6099 * We might not overwrite the identification[] completely and on 6100 * some platforms time_entered will contain some padding. 6101 * Therefore zero out the cookie to avoid putting 6102 * uninitialized memory on the wire. 6103 */ 6104 memset(&stc, 0, sizeof(struct sctp_state_cookie)); 6105 6106 /* the time I built cookie */ 6107 (void)SCTP_GETTIME_TIMEVAL(&now); 6108 stc.time_entered.tv_sec = now.tv_sec; 6109 stc.time_entered.tv_usec = now.tv_usec; 6110 6111 /* populate any tie tags */ 6112 if (asoc != NULL) { 6113 /* unlock before tag selections */ 6114 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 6115 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 6116 stc.cookie_life = asoc->cookie_life; 6117 net = asoc->primary_destination; 6118 } else { 6119 stc.tie_tag_my_vtag = 0; 6120 stc.tie_tag_peer_vtag = 0; 6121 /* life I will award this cookie */ 6122 stc.cookie_life = inp->sctp_ep.def_cookie_life; 6123 } 6124 6125 /* copy in the ports for later check */ 6126 stc.myport = sh->dest_port; 6127 stc.peerport = sh->src_port; 6128 6129 /* 6130 * If we wanted to honor cookie life extensions, we would add to 6131 * stc.cookie_life. 
For now we should NOT honor any extension 6132 */ 6133 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 6134 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 6135 stc.ipv6_addr_legal = 1; 6136 if (SCTP_IPV6_V6ONLY(inp)) { 6137 stc.ipv4_addr_legal = 0; 6138 } else { 6139 stc.ipv4_addr_legal = 1; 6140 } 6141 #if defined(__Userspace__) 6142 stc.conn_addr_legal = 0; 6143 #endif 6144 } else { 6145 stc.ipv6_addr_legal = 0; 6146 #if defined(__Userspace__) 6147 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) { 6148 stc.conn_addr_legal = 1; 6149 stc.ipv4_addr_legal = 0; 6150 } else { 6151 stc.conn_addr_legal = 0; 6152 stc.ipv4_addr_legal = 1; 6153 } 6154 #else 6155 stc.ipv4_addr_legal = 1; 6156 #endif 6157 } 6158 stc.ipv4_scope = 0; 6159 if (net == NULL) { 6160 to = src; 6161 switch (dst->sa_family) { 6162 #ifdef INET 6163 case AF_INET: 6164 { 6165 /* lookup address */ 6166 stc.address[0] = src4->sin_addr.s_addr; 6167 stc.address[1] = 0; 6168 stc.address[2] = 0; 6169 stc.address[3] = 0; 6170 stc.addr_type = SCTP_IPV4_ADDRESS; 6171 /* local from address */ 6172 stc.laddress[0] = dst4->sin_addr.s_addr; 6173 stc.laddress[1] = 0; 6174 stc.laddress[2] = 0; 6175 stc.laddress[3] = 0; 6176 stc.laddr_type = SCTP_IPV4_ADDRESS; 6177 /* scope_id is only for v6 */ 6178 stc.scope_id = 0; 6179 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) || 6180 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) { 6181 stc.ipv4_scope = 1; 6182 } 6183 /* Must use the address in this case */ 6184 if (sctp_is_address_on_local_host(src, vrf_id)) { 6185 stc.loopback_scope = 1; 6186 stc.ipv4_scope = 1; 6187 stc.site_scope = 1; 6188 stc.local_scope = 0; 6189 } 6190 break; 6191 } 6192 #endif 6193 #ifdef INET6 6194 case AF_INET6: 6195 { 6196 stc.addr_type = SCTP_IPV6_ADDRESS; 6197 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); 6198 #if defined(__FreeBSD__) && !defined(__Userspace__) 6199 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr)); 6200 #else 6201 stc.scope_id = 0; 6202 #endif 6203 if 
(sctp_is_address_on_local_host(src, vrf_id)) { 6204 stc.loopback_scope = 1; 6205 stc.local_scope = 0; 6206 stc.site_scope = 1; 6207 stc.ipv4_scope = 1; 6208 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) || 6209 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) { 6210 /* 6211 * If the new destination or source is a 6212 * LINK_LOCAL we must have common both site and 6213 * local scope. Don't set local scope though 6214 * since we must depend on the source to be 6215 * added implicitly. We cannot assure just 6216 * because we share one link that all links are 6217 * common. 6218 */ 6219 #if defined(__APPLE__) && !defined(__Userspace__) 6220 /* Mac OS X currently doesn't have in6_getscope() */ 6221 stc.scope_id = src6->sin6_addr.s6_addr16[1]; 6222 #endif 6223 stc.local_scope = 0; 6224 stc.site_scope = 1; 6225 stc.ipv4_scope = 1; 6226 /* 6227 * we start counting for the private address 6228 * stuff at 1. since the link local we 6229 * source from won't show up in our scoped 6230 * count. 6231 */ 6232 cnt_inits_to = 1; 6233 /* pull out the scope_id from incoming pkt */ 6234 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) || 6235 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) { 6236 /* 6237 * If the new destination or source is 6238 * SITE_LOCAL then we must have site scope in 6239 * common. 
6240 */ 6241 stc.site_scope = 1; 6242 } 6243 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr)); 6244 stc.laddr_type = SCTP_IPV6_ADDRESS; 6245 break; 6246 } 6247 #endif 6248 #if defined(__Userspace__) 6249 case AF_CONN: 6250 { 6251 /* lookup address */ 6252 stc.address[0] = 0; 6253 stc.address[1] = 0; 6254 stc.address[2] = 0; 6255 stc.address[3] = 0; 6256 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *)); 6257 stc.addr_type = SCTP_CONN_ADDRESS; 6258 /* local from address */ 6259 stc.laddress[0] = 0; 6260 stc.laddress[1] = 0; 6261 stc.laddress[2] = 0; 6262 stc.laddress[3] = 0; 6263 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *)); 6264 stc.laddr_type = SCTP_CONN_ADDRESS; 6265 /* scope_id is only for v6 */ 6266 stc.scope_id = 0; 6267 break; 6268 } 6269 #endif 6270 default: 6271 /* TSNH */ 6272 goto do_a_abort; 6273 break; 6274 } 6275 } else { 6276 /* set the scope per the existing tcb */ 6277 6278 #ifdef INET6 6279 struct sctp_nets *lnet; 6280 #endif 6281 6282 stc.loopback_scope = asoc->scope.loopback_scope; 6283 stc.ipv4_scope = asoc->scope.ipv4_local_scope; 6284 stc.site_scope = asoc->scope.site_scope; 6285 stc.local_scope = asoc->scope.local_scope; 6286 #ifdef INET6 6287 /* Why do we not consider IPv4 LL addresses? */ 6288 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 6289 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 6290 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 6291 /* 6292 * if we have a LL address, start 6293 * counting at 1. 
6294 */ 6295 cnt_inits_to = 1; 6296 } 6297 } 6298 } 6299 #endif 6300 /* use the net pointer */ 6301 to = (struct sockaddr *)&net->ro._l_addr; 6302 switch (to->sa_family) { 6303 #ifdef INET 6304 case AF_INET: 6305 sin = (struct sockaddr_in *)to; 6306 stc.address[0] = sin->sin_addr.s_addr; 6307 stc.address[1] = 0; 6308 stc.address[2] = 0; 6309 stc.address[3] = 0; 6310 stc.addr_type = SCTP_IPV4_ADDRESS; 6311 if (net->src_addr_selected == 0) { 6312 /* 6313 * strange case here, the INIT should have 6314 * did the selection. 6315 */ 6316 net->ro._s_addr = sctp_source_address_selection(inp, 6317 stcb, (sctp_route_t *)&net->ro, 6318 net, 0, vrf_id); 6319 if (net->ro._s_addr == NULL) { 6320 sctp_m_freem(op_err); 6321 sctp_m_freem(m); 6322 return; 6323 } 6324 6325 net->src_addr_selected = 1; 6326 } 6327 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 6328 stc.laddress[1] = 0; 6329 stc.laddress[2] = 0; 6330 stc.laddress[3] = 0; 6331 stc.laddr_type = SCTP_IPV4_ADDRESS; 6332 /* scope_id is only for v6 */ 6333 stc.scope_id = 0; 6334 break; 6335 #endif 6336 #ifdef INET6 6337 case AF_INET6: 6338 sin6 = (struct sockaddr_in6 *)to; 6339 memcpy(&stc.address, &sin6->sin6_addr, 6340 sizeof(struct in6_addr)); 6341 stc.addr_type = SCTP_IPV6_ADDRESS; 6342 stc.scope_id = sin6->sin6_scope_id; 6343 if (net->src_addr_selected == 0) { 6344 /* 6345 * strange case here, the INIT should have 6346 * done the selection. 
6347 */ 6348 net->ro._s_addr = sctp_source_address_selection(inp, 6349 stcb, (sctp_route_t *)&net->ro, 6350 net, 0, vrf_id); 6351 if (net->ro._s_addr == NULL) { 6352 sctp_m_freem(op_err); 6353 sctp_m_freem(m); 6354 return; 6355 } 6356 6357 net->src_addr_selected = 1; 6358 } 6359 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 6360 sizeof(struct in6_addr)); 6361 stc.laddr_type = SCTP_IPV6_ADDRESS; 6362 break; 6363 #endif 6364 #if defined(__Userspace__) 6365 case AF_CONN: 6366 sconn = (struct sockaddr_conn *)to; 6367 stc.address[0] = 0; 6368 stc.address[1] = 0; 6369 stc.address[2] = 0; 6370 stc.address[3] = 0; 6371 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *)); 6372 stc.addr_type = SCTP_CONN_ADDRESS; 6373 stc.laddress[0] = 0; 6374 stc.laddress[1] = 0; 6375 stc.laddress[2] = 0; 6376 stc.laddress[3] = 0; 6377 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *)); 6378 stc.laddr_type = SCTP_CONN_ADDRESS; 6379 stc.scope_id = 0; 6380 break; 6381 #endif 6382 } 6383 } 6384 if (asoc != NULL) { 6385 stc.rcv_edmid = asoc->rcv_edmid; 6386 } else { 6387 stc.rcv_edmid = inp->rcv_edmid; 6388 } 6389 /* Now lets put the SCTP header in place */ 6390 initack = mtod(m, struct sctp_init_ack_chunk *); 6391 /* Save it off for quick ref */ 6392 stc.peers_vtag = ntohl(init_chk->init.initiate_tag); 6393 /* who are we */ 6394 memcpy(stc.identification, SCTP_VERSION_STRING, 6395 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 6396 memset(stc.reserved, 0, SCTP_RESERVE_SPACE); 6397 /* now the chunk header */ 6398 initack->ch.chunk_type = SCTP_INITIATION_ACK; 6399 initack->ch.chunk_flags = 0; 6400 /* fill in later from mbuf we build */ 6401 initack->ch.chunk_length = 0; 6402 /* place in my tag */ 6403 if ((asoc != NULL) && 6404 ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 6405 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) || 6406 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) { 6407 /* re-use the v-tags and init-seq here */ 6408 
initack->init.initiate_tag = htonl(asoc->my_vtag); 6409 initack->init.initial_tsn = htonl(asoc->init_seq_number); 6410 } else { 6411 uint32_t vtag, itsn; 6412 6413 if (asoc) { 6414 atomic_add_int(&asoc->refcnt, 1); 6415 SCTP_TCB_UNLOCK(stcb); 6416 new_tag: 6417 SCTP_INP_INFO_RLOCK(); 6418 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 6419 SCTP_INP_INFO_RUNLOCK(); 6420 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { 6421 /* Got a duplicate vtag on some guy behind a nat 6422 * make sure we don't use it. 6423 */ 6424 goto new_tag; 6425 } 6426 initack->init.initiate_tag = htonl(vtag); 6427 /* get a TSN to use too */ 6428 itsn = sctp_select_initial_TSN(&inp->sctp_ep); 6429 initack->init.initial_tsn = htonl(itsn); 6430 SCTP_TCB_LOCK(stcb); 6431 atomic_subtract_int(&asoc->refcnt, 1); 6432 } else { 6433 SCTP_INP_INCR_REF(inp); 6434 SCTP_INP_RUNLOCK(inp); 6435 SCTP_INP_INFO_RLOCK(); 6436 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 6437 SCTP_INP_INFO_RUNLOCK(); 6438 initack->init.initiate_tag = htonl(vtag); 6439 /* get a TSN to use too */ 6440 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 6441 SCTP_INP_RLOCK(inp); 6442 SCTP_INP_DECR_REF(inp); 6443 } 6444 } 6445 /* save away my tag to */ 6446 stc.my_vtag = initack->init.initiate_tag; 6447 6448 /* set up some of the credits. 
*/ 6449 so = inp->sctp_socket; 6450 if (so == NULL) { 6451 /* memory problem */ 6452 sctp_m_freem(op_err); 6453 sctp_m_freem(m); 6454 return; 6455 } else { 6456 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); 6457 } 6458 /* set what I want */ 6459 his_limit = ntohs(init_chk->init.num_inbound_streams); 6460 /* choose what I want */ 6461 if (asoc != NULL) { 6462 if (asoc->streamoutcnt > asoc->pre_open_streams) { 6463 i_want = asoc->streamoutcnt; 6464 } else { 6465 i_want = asoc->pre_open_streams; 6466 } 6467 } else { 6468 i_want = inp->sctp_ep.pre_open_stream_count; 6469 } 6470 if (his_limit < i_want) { 6471 /* I Want more :< */ 6472 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; 6473 } else { 6474 /* I can have what I want :> */ 6475 initack->init.num_outbound_streams = htons(i_want); 6476 } 6477 /* tell him his limit. */ 6478 initack->init.num_inbound_streams = 6479 htons(inp->sctp_ep.max_open_streams_intome); 6480 6481 /* adaptation layer indication parameter */ 6482 if (inp->sctp_ep.adaptation_layer_indicator_provided) { 6483 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); 6484 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len); 6485 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 6486 ali->ph.param_length = htons(parameter_len); 6487 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator); 6488 chunk_len += parameter_len; 6489 } 6490 6491 /* ECN parameter */ 6492 if (((asoc != NULL) && (asoc->ecn_supported == 1)) || 6493 ((asoc == NULL) && (inp->ecn_supported == 1))) { 6494 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6495 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6496 ph->param_type = htons(SCTP_ECN_CAPABLE); 6497 ph->param_length = htons(parameter_len); 6498 chunk_len += parameter_len; 6499 } 6500 6501 /* PR-SCTP supported parameter */ 6502 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || 6503 ((asoc == 
NULL) && (inp->prsctp_supported == 1))) { 6504 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6505 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6506 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); 6507 ph->param_length = htons(parameter_len); 6508 chunk_len += parameter_len; 6509 } 6510 6511 /* Zero checksum acceptable parameter */ 6512 if (((asoc != NULL) && (asoc->rcv_edmid != SCTP_EDMID_NONE)) || 6513 ((asoc == NULL) && (inp->rcv_edmid != SCTP_EDMID_NONE))) { 6514 parameter_len = (uint16_t)sizeof(struct sctp_zero_checksum_acceptable); 6515 zero_chksum = (struct sctp_zero_checksum_acceptable *)(mtod(m, caddr_t) + chunk_len); 6516 zero_chksum->ph.param_type = htons(SCTP_ZERO_CHECKSUM_ACCEPTABLE); 6517 zero_chksum->ph.param_length = htons(parameter_len); 6518 if (asoc != NULL) { 6519 zero_chksum->edmid = htonl(asoc->rcv_edmid); 6520 } else { 6521 zero_chksum->edmid = htonl(inp->rcv_edmid); 6522 } 6523 chunk_len += parameter_len; 6524 } 6525 6526 /* Add NAT friendly parameter */ 6527 if (nat_friendly) { 6528 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 6529 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 6530 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 6531 ph->param_length = htons(parameter_len); 6532 chunk_len += parameter_len; 6533 } 6534 6535 /* And now tell the peer which extensions we support */ 6536 num_ext = 0; 6537 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len); 6538 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) || 6539 ((asoc == NULL) && (inp->prsctp_supported == 1))) { 6540 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 6541 if (((asoc != NULL) && (asoc->idata_supported == 1)) || 6542 ((asoc == NULL) && (inp->idata_supported == 1))) { 6543 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN; 6544 } 6545 } 6546 if (((asoc != NULL) && (asoc->auth_supported == 1)) || 6547 ((asoc == NULL) && (inp->auth_supported == 1))) { 6548 
pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 6549 } 6550 if (((asoc != NULL) && (asoc->asconf_supported == 1)) || 6551 ((asoc == NULL) && (inp->asconf_supported == 1))) { 6552 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 6553 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 6554 } 6555 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) || 6556 ((asoc == NULL) && (inp->reconfig_supported == 1))) { 6557 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 6558 } 6559 if (((asoc != NULL) && (asoc->idata_supported == 1)) || 6560 ((asoc == NULL) && (inp->idata_supported == 1))) { 6561 pr_supported->chunk_types[num_ext++] = SCTP_IDATA; 6562 } 6563 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) || 6564 ((asoc == NULL) && (inp->nrsack_supported == 1))) { 6565 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 6566 } 6567 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) || 6568 ((asoc == NULL) && (inp->pktdrop_supported == 1))) { 6569 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 6570 } 6571 if (num_ext > 0) { 6572 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; 6573 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 6574 pr_supported->ph.param_length = htons(parameter_len); 6575 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6576 chunk_len += parameter_len; 6577 } 6578 6579 /* add authentication parameters */ 6580 if (((asoc != NULL) && (asoc->auth_supported == 1)) || 6581 ((asoc == NULL) && (inp->auth_supported == 1))) { 6582 struct sctp_auth_random *randp; 6583 struct sctp_auth_hmac_algo *hmacs; 6584 struct sctp_auth_chunk_list *chunks; 6585 6586 if (padding_len > 0) { 6587 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6588 chunk_len += padding_len; 6589 padding_len = 0; 6590 } 6591 /* generate and add RANDOM parameter */ 6592 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len); 6593 parameter_len = 
(uint16_t)sizeof(struct sctp_auth_random) + 6594 SCTP_AUTH_RANDOM_SIZE_DEFAULT; 6595 randp->ph.param_type = htons(SCTP_RANDOM); 6596 randp->ph.param_length = htons(parameter_len); 6597 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT); 6598 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6599 chunk_len += parameter_len; 6600 6601 if (padding_len > 0) { 6602 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6603 chunk_len += padding_len; 6604 padding_len = 0; 6605 } 6606 /* add HMAC_ALGO parameter */ 6607 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len); 6608 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) + 6609 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 6610 (uint8_t *)hmacs->hmac_ids); 6611 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 6612 hmacs->ph.param_length = htons(parameter_len); 6613 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6614 chunk_len += parameter_len; 6615 6616 if (padding_len > 0) { 6617 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6618 chunk_len += padding_len; 6619 padding_len = 0; 6620 } 6621 /* add CHUNKS parameter */ 6622 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len); 6623 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) + 6624 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 6625 chunks->chunk_types); 6626 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 6627 chunks->ph.param_length = htons(parameter_len); 6628 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6629 chunk_len += parameter_len; 6630 } 6631 SCTP_BUF_LEN(m) = chunk_len; 6632 m_last = m; 6633 /* now the addresses */ 6634 /* To optimize this we could put the scoping stuff 6635 * into a structure and remove the individual uint8's from 6636 * the stc structure. Then we could just sifa in the 6637 * address within the stc.. but for now this is a quick 6638 * hack to get the address stuff teased apart. 
6639 */ 6640 scp.ipv4_addr_legal = stc.ipv4_addr_legal; 6641 scp.ipv6_addr_legal = stc.ipv6_addr_legal; 6642 #if defined(__Userspace__) 6643 scp.conn_addr_legal = stc.conn_addr_legal; 6644 #endif 6645 scp.loopback_scope = stc.loopback_scope; 6646 scp.ipv4_local_scope = stc.ipv4_scope; 6647 scp.local_scope = stc.local_scope; 6648 scp.site_scope = stc.site_scope; 6649 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last, 6650 cnt_inits_to, 6651 &padding_len, &chunk_len); 6652 /* padding_len can only be positive, if no addresses have been added */ 6653 if (padding_len > 0) { 6654 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 6655 chunk_len += padding_len; 6656 SCTP_BUF_LEN(m) += padding_len; 6657 padding_len = 0; 6658 } 6659 6660 /* tack on the operational error if present */ 6661 if (op_err) { 6662 parameter_len = 0; 6663 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 6664 parameter_len += SCTP_BUF_LEN(m_tmp); 6665 } 6666 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6667 SCTP_BUF_NEXT(m_last) = op_err; 6668 while (SCTP_BUF_NEXT(m_last) != NULL) { 6669 m_last = SCTP_BUF_NEXT(m_last); 6670 } 6671 chunk_len += parameter_len; 6672 } 6673 if (padding_len > 0) { 6674 m_last = sctp_add_pad_tombuf(m_last, padding_len); 6675 if (m_last == NULL) { 6676 /* Houston we have a problem, no space */ 6677 sctp_m_freem(m); 6678 return; 6679 } 6680 chunk_len += padding_len; 6681 padding_len = 0; 6682 } 6683 /* Now we must build a cookie */ 6684 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature); 6685 if (m_cookie == NULL) { 6686 /* memory problem */ 6687 sctp_m_freem(m); 6688 return; 6689 } 6690 /* Now append the cookie to the end and update the space/size */ 6691 SCTP_BUF_NEXT(m_last) = m_cookie; 6692 parameter_len = 0; 6693 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 6694 parameter_len += SCTP_BUF_LEN(m_tmp); 6695 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 6696 m_last = m_tmp; 6697 } 6698 } 6699 
padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 6700 chunk_len += parameter_len; 6701 6702 /* Place in the size, but we don't include 6703 * the last pad (if any) in the INIT-ACK. 6704 */ 6705 initack->ch.chunk_length = htons(chunk_len); 6706 6707 /* Time to sign the cookie, we don't sign over the cookie 6708 * signature though thus we set trailer. 6709 */ 6710 (void)sctp_hmac_m(SCTP_HMAC, 6711 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 6712 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), 6713 (uint8_t *)signature, SCTP_SIGNATURE_SIZE); 6714 #if defined(__Userspace__) 6715 /* 6716 * Don't put AF_CONN addresses on the wire, in case this is critical 6717 * for the application. However, they are protected by the HMAC and 6718 * need to be reconstructed before checking the HMAC. 6719 * Clearing is only done in the mbuf chain, since the local stc is 6720 * not used anymore. 6721 */ 6722 if (stc.addr_type == SCTP_CONN_ADDRESS) { 6723 const void *p = NULL; 6724 6725 m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, address), 6726 (int)sizeof(void *), (caddr_t)&p); 6727 } 6728 if (stc.laddr_type == SCTP_CONN_ADDRESS) { 6729 const void *p = NULL; 6730 6731 m_copyback(m_cookie, sizeof(struct sctp_paramhdr) + offsetof(struct sctp_state_cookie, laddress), 6732 (int)sizeof(void *), (caddr_t)&p); 6733 } 6734 #endif 6735 /* 6736 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 6737 * here since the timer will drive a retranmission. 
	 */
	if (padding_len > 0) {
		/* Pad the trailing parameter out to a 32-bit boundary. */
		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
			sctp_m_freem(m);
			return;
		}
	}
	if (stc.loopback_scope) {
		over_addr = (union sctp_sockstore *)dst;
	} else {
		over_addr = NULL;
	}

	/*
	 * A zero checksum may only be used if we accept it (rcv_edmid set)
	 * and our EDMID matches the one extracted from the received INIT.
	 */
	if (asoc != NULL) {
		use_zero_crc = (asoc->rcv_edmid != SCTP_EDMID_NONE) && (asoc->rcv_edmid == edmid);
	} else {
		use_zero_crc = (inp->rcv_edmid != SCTP_EDMID_NONE) && (inp->rcv_edmid == edmid);
	}

	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
	                                        0, 0,
	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
	                                        port, over_addr,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	                                        mflowtype, mflowid,
#endif
	                                        use_zero_crc,
	                                        SCTP_SO_NOT_LOCKED))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
		if (error == ENOBUFS) {
			if (asoc != NULL) {
				asoc->ifp_had_enobuf = 1;
			}
			SCTP_STAT_INCR(sctps_lowlevelerr);
		}
	} else {
		if (asoc != NULL) {
			asoc->ifp_had_enobuf = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Try to free at least 'dataout' bytes of send-buffer space by dropping
 * already-queued PR-SCTP messages that use the buffer-space policy.
 * Chunks on the sent and send queues whose stored priority
 * (rec.data.timetodrop.tv_sec; a larger number means lower priority)
 * exceeds that of the new message (srcv->sinfo_timetolive) are released
 * until enough space has been freed.  Returns as soon as 'dataout'
 * bytes have been reclaimed; otherwise frees what it can.
 * Caller must hold the TCB lock (asserted below).
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nonpad_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((asoc->prsctp_supported) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority. So if the one we are
					 * looking at has a larger priority,
					 * we want to drop the data and NOT
					 * retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						uint8_t sent;

						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							sent = 1;
						else
							sent = 0;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    sent,
						    SCTP_SO_LOCKED);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					} /* if chunk was present */
				} /* if of sufficient priority */
			} /* if chunk has enabled */
		} /* tailqforeach */

		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec > (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    0, SCTP_SO_LOCKED);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					} /* end if chk->data */
				} /* end if right class */
			} /* end if chk pr-sctp */
		} /* tailqforeachsafe (chk) */
	} /* if enabled in asoc */
}

/*
 * Return the largest amount of user data that fits in a single DATA or
 * I-DATA chunk for this association: the smallest path MTU minus the
 * IP/SCTP common header, chunk header and (if required) AUTH overhead,
 * further capped by the SCTP_MAXSEG socket option
 * (asoc->sctp_frag_point).  The result is a multiple of 4.
 */
uint32_t
sctp_get_frag_point(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	uint32_t frag_point, overhead;

	asoc = &stcb->asoc;
	/* Consider IP header and SCTP common header.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		overhead = SCTP_MIN_OVERHEAD;
	} else {
#if defined(__Userspace__)
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
			/* AF_CONN carries no IP header, only the SCTP common header. */
			overhead = sizeof(struct sctphdr);
		} else {
			overhead = SCTP_MIN_V4_OVERHEAD;
		}
#else
		overhead = SCTP_MIN_V4_OVERHEAD;
#endif
	}
	/* Consider DATA/IDATA chunk header and AUTH header, if needed. */
	if (asoc->idata_supported) {
		overhead += sizeof(struct sctp_idata_chunk);
		if (sctp_auth_is_required_chunk(SCTP_IDATA, asoc->peer_auth_chunks)) {
			overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id);
		}
	} else {
		overhead += sizeof(struct sctp_data_chunk);
		if (sctp_auth_is_required_chunk(SCTP_DATA, asoc->peer_auth_chunks)) {
			overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id);
		}
	}
	KASSERT(overhead % 4 == 0,
	    ("overhead (%u) not a multiple of 4", overhead));
	/* Consider padding. */
	if (asoc->smallest_mtu % 4 > 0) {
		overhead += (asoc->smallest_mtu % 4);
	}
	KASSERT(asoc->smallest_mtu > overhead,
	    ("Association MTU (%u) too small for overhead (%u)",
	    asoc->smallest_mtu, overhead));
	frag_point = asoc->smallest_mtu - overhead;
	KASSERT(frag_point % 4 == 0,
	    ("frag_point (%u) not a multiple of 4", frag_point));
	/* Honor MAXSEG socket option. */
	if ((asoc->sctp_frag_point > 0) &&
	    (asoc->sctp_frag_point < frag_point)) {
		frag_point = asoc->sctp_frag_point;
	}
	return (frag_point);
}

/*
 * Map the user's PR-SCTP sinfo_flags onto sp->act_flags and initialize
 * sp->ts for the selected policy: a drop priority (BUF), an absolute
 * expiry time (TTL, timetolive in milliseconds added to now), or a
 * retransmission limit (RTX).  If no policy is requested and no
 * positive lifetime is given, sp is left untouched.
 */
static void
sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
{
	/*
	 * We assume that the user wants PR_SCTP_TTL if the user
	 * provides a positive lifetime but does not specify any
	 * PR_SCTP policy.
	 */
	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else if (sp->timetolive > 0) {
		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else {
		return;
	}
	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
	case CHUNK_FLAGS_PR_SCTP_BUF:
		/*
		 * Time to live is a priority stored in tv_sec when
		 * doing the buffer drop thing.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	case CHUNK_FLAGS_PR_SCTP_TTL:
	{
		struct timeval tv;

		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
		/* timetolive is in milliseconds; expiry = now + timetolive. */
		tv.tv_sec = sp->timetolive / 1000;
		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
		/*
		 * TODO sctp_constants.h needs alternative time macros when
		 * _KERNEL is undefined.
		 */
#if !(defined(__FreeBSD__) && !defined(__Userspace__))
		timeradd(&sp->ts, &tv, &sp->ts);
#else
		timevaladd(&sp->ts, &tv);
#endif
	}
		break;
	case CHUNK_FLAGS_PR_SCTP_RTX:
		/*
		 * Time to live is the number of retransmissions
		 * stored in tv_sec.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_USRREQ1,
		    "Unknown PR_SCTP policy %u.\n",
		    PR_SCTP_POLICY(sp->sinfo_flags));
		break;
	}
}

/*
 * Append a complete user message (mbuf chain 'm') to the outqueue of
 * the stream given by srcv->sinfo_stream and notify the stream
 * scheduler.  On success the chain is owned by the stream queue entry;
 * on any failure it is freed here.  Returns 0 on success or an errno
 * value (EINVAL, EAGAIN, EPIPE, ENOMEM).  Caller must hold the TCB
 * lock (asserted below).
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_nonpad_sndrcvinfo *srcv)
{
	int error = 0;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	SCTP_TCB_LOCK_ASSERT(stcb);

	/*
	 * Given an mbuf chain, put it
	 * into the association send queue and
	 * place it on the wheel
	 */
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	/* An explicit EOR lock on another stream forbids interleaving. */
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	if ((stcb->asoc.strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
	    (stcb->asoc.strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
		/*
		 * Can't queue any data while stream reset is underway.
		 */
		if (stcb->asoc.strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
			error = EAGAIN;
		} else {
			error = EINVAL;
		}
		goto out_now;
	}
	/* Now can we send this? */
	if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EPIPE);
		error = EPIPE;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->fsn = 0;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* User pinned the destination; hold a reference on it. */
		sp->net = net;
		atomic_add_int(&sp->net->ref_count, 1);
	} else {
		sp->net = NULL;
	}
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->sid = srcv->sinfo_stream;
	/* The whole message is queued in one go, so it is complete. */
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sctp_set_prsctp_policy(sp);
	/*
	 * We could in theory (for sendall) sifa the length
	 * in, but we would still have to hunt through the
	 * chain since we need to setup the tail_mbuf
	 */
	sp->length = 0;
	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
	}
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp);
	/* Ownership transferred to sp; don't free below. */
	m = NULL;
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}

/*
 * Append the data of 'clonechain' to 'outchain', tracking the last
 * mbuf of the result via '*endofchain'.  If 'can_take_mbuf' is set the
 * chain itself is linked in; otherwise small messages (unless
 * 'copy_by_ref') are copied into the trailing space of the existing
 * chain, and anything else is duplicated with SCTP_M_COPYM.  Returns
 * the (possibly newly allocated) output chain, or NULL on failure, in
 * which case 'outchain' has been freed.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
				} else {
					/* We really should not get a NULL in endofchain */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/* huh, TSNH XXX maybe we should panic */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = (int)M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end?
*/ 7141 len = (int)M_TRAILINGSPACE(*endofchain); 7142 } 7143 /* Find the end of the data, for appending */ 7144 cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain))); 7145 7146 /* Now lets copy it out */ 7147 if (len >= sizeofcpy) { 7148 /* It all fits, copy it in */ 7149 m_copydata(clonechain, 0, sizeofcpy, cp); 7150 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 7151 } else { 7152 /* fill up the end of the chain */ 7153 if (len > 0) { 7154 m_copydata(clonechain, 0, len, cp); 7155 SCTP_BUF_LEN((*endofchain)) += len; 7156 /* now we need another one */ 7157 sizeofcpy -= len; 7158 } 7159 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER); 7160 if (m == NULL) { 7161 /* We failed */ 7162 goto error_out; 7163 } 7164 SCTP_BUF_NEXT((*endofchain)) = m; 7165 *endofchain = m; 7166 cp = mtod((*endofchain), caddr_t); 7167 m_copydata(clonechain, len, sizeofcpy, cp); 7168 SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 7169 } 7170 return (outchain); 7171 } else { 7172 /* copy the old fashion way */ 7173 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT); 7174 #ifdef SCTP_MBUF_LOGGING 7175 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 7176 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY); 7177 } 7178 #endif 7179 } 7180 } 7181 if (appendchain == NULL) { 7182 /* error */ 7183 if (outchain) 7184 sctp_m_freem(outchain); 7185 return (NULL); 7186 } 7187 if (outchain) { 7188 /* tack on to the end */ 7189 if (*endofchain != NULL) { 7190 SCTP_BUF_NEXT(((*endofchain))) = appendchain; 7191 } else { 7192 m = outchain; 7193 while (m) { 7194 if (SCTP_BUF_NEXT(m) == NULL) { 7195 SCTP_BUF_NEXT(m) = appendchain; 7196 break; 7197 } 7198 m = SCTP_BUF_NEXT(m); 7199 } 7200 } 7201 /* 7202 * save off the end and update the end-chain 7203 * position 7204 */ 7205 m = appendchain; 7206 while (m) { 7207 if (SCTP_BUF_NEXT(m) == NULL) { 7208 *endofchain = m; 7209 break; 7210 } 7211 m = SCTP_BUF_NEXT(m); 7212 } 7213 return (outchain); 7214 } else { 7215 /* save off 
the end and update the end-chain position */ 7216 m = appendchain; 7217 while (m) { 7218 if (SCTP_BUF_NEXT(m) == NULL) { 7219 *endofchain = m; 7220 break; 7221 } 7222 m = SCTP_BUF_NEXT(m); 7223 } 7224 return (appendchain); 7225 } 7226 } 7227 7228 static int 7229 sctp_med_chunk_output(struct sctp_inpcb *inp, 7230 struct sctp_tcb *stcb, 7231 struct sctp_association *asoc, 7232 int *num_out, 7233 int *reason_code, 7234 int control_only, int from_where, 7235 struct timeval *now, int *now_filled, 7236 uint32_t frag_point, int so_locked); 7237 7238 static void 7239 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 7240 uint32_t val SCTP_UNUSED) 7241 { 7242 struct sctp_copy_all *ca; 7243 struct mbuf *m; 7244 int ret = 0; 7245 int added_control = 0; 7246 int un_sent, do_chunk_output = 1; 7247 struct sctp_association *asoc; 7248 struct sctp_nets *net; 7249 7250 ca = (struct sctp_copy_all *)ptr; 7251 if (ca->m == NULL) { 7252 return; 7253 } 7254 if (ca->inp != inp) { 7255 /* TSNH */ 7256 return; 7257 } 7258 if (ca->sndlen > 0) { 7259 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT); 7260 if (m == NULL) { 7261 /* can't copy so we are done */ 7262 ca->cnt_failed++; 7263 return; 7264 } 7265 #ifdef SCTP_MBUF_LOGGING 7266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 7267 sctp_log_mbc(m, SCTP_MBUF_ICOPY); 7268 } 7269 #endif 7270 } else { 7271 m = NULL; 7272 } 7273 SCTP_TCB_LOCK_ASSERT(stcb); 7274 if (stcb->asoc.alternate) { 7275 net = stcb->asoc.alternate; 7276 } else { 7277 net = stcb->asoc.primary_destination; 7278 } 7279 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 7280 /* Abort this assoc with m as the user defined reason */ 7281 if (m != NULL) { 7282 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT); 7283 } else { 7284 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 7285 0, M_NOWAIT, 1, MT_DATA); 7286 if (m != NULL) { 7287 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 7288 } 7289 } 7290 if (m != NULL) { 
			struct sctp_paramhdr *ph;

			/* Fill in the User-Initiated Abort cause header. */
			ph = mtod(m, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
		}
		/* We add one here to keep the assoc from
		 * dis-appearing on us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		sctp_abort_an_association(inp, stcb, m, false, SCTP_SO_NOT_LOCKED);
		/* sctp_abort_an_association calls sctp_free_asoc()
		 * free association will NOT free it since we
		 * incremented the refcnt .. we do this to prevent
		 * it being freed and things getting tricky since
		 * we could end up (from free_asoc) calling inpcb_free
		 * which would get a recursive lock call to the
		 * iterator lock.. But as a consequence of that the
		 * stcb will return to us un-locked.. since free_asoc
		 * returns with either no TCB or the TCB unlocked, we
		 * must relock.. to unlock in the iterator timer :-0
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		goto no_chunk_output;
	} else {
		if (m != NULL) {
			ret = sctp_msg_append(stcb, net, m, &ca->sndrcv);
		}
		asoc = &stcb->asoc;
		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
			/* shutdown this assoc */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue) &&
			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
					goto abort_anyway;
				}
				/* there is nothing queued to send, so I'm done... */
				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					/* only send SHUTDOWN the first time through */
					if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
					sctp_stop_timers_for_shutdown(stcb);
					sctp_send_shutdown(stcb, net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
					    net);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
					    NULL);
					added_control = 1;
					do_chunk_output = 0;
				}
			} else {
				/*
				 * we still got (or just got) data to send, so set
				 * SHUTDOWN_PENDING
				 */
				/*
				 * XXX sockets draft says that SCTP_EOF should be
				 * sent with no data. currently, we will allow user
				 * data to be sent first and move to
				 * SHUTDOWN-PENDING
				 */
				if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
				    (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
						SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
					}
					SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
					if (TAILQ_EMPTY(&asoc->send_queue) &&
					    TAILQ_EMPTY(&asoc->sent_queue) &&
					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
						struct mbuf *op_err;
						char msg[SCTP_DIAG_INFO_LEN];

					abort_anyway:
						SCTP_SNPRINTF(msg, sizeof(msg),
						    "%s:%d at %s", __FILE__, __LINE__, __func__);
						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
						    msg);
						/* Hold a reference across the abort; see comment above. */
						atomic_add_int(&stcb->asoc.refcnt, 1);
						sctp_abort_an_association(stcb->sctp_ep, stcb,
						    op_err, false, SCTP_SO_NOT_LOCKED);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
						goto no_chunk_output;
					}
				}
			}
		}
	}
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	    (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));

	/* Nagle-style check: with data in flight and less than an MTU queued,
	 * hold off output unless SCTP_NODELAY is set.
	 */
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
		do_chunk_output = 0;
	}
	if (do_chunk_output)
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
	else if (added_control) {
		/* Only control chunks (the SHUTDOWN queued above) to push. */
		struct timeval now;
		int num_out, reason, now_filled = 0;

		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		    &reason, 1, 1, &now, &now_filled,
		    sctp_get_frag_point(stcb),
		    SCTP_SO_NOT_LOCKED);
	}
no_chunk_output:
	if (ret) {
		ca->cnt_failed++;
	} else {
		ca->cnt_sent++;
	}
}

/*
 * Iterator-completion callback for SCTP_SENDALL: clear the in-progress
 * flag on the endpoint and release the copy-all cookie and its mbuf chain.
 */
static void
sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
{
	struct sctp_copy_all *ca;

	ca = (struct sctp_copy_all *)ptr;
	/*
	 * Do a notify here? Kacheong suggests that the notify be done at
	 * the send time.. so you would push up a notification if any send
	 * failed. Don't know if this is feasible since the only failures we
	 * have is "memory" related and if you cannot get an mbuf to send
	 * the data you surely can't get an mbuf to send up to notify the
	 * user you can't send the data :->
	 */

	/* now free everything */
	if (ca->inp) {
		/* Lets clear the flag to allow others to run.
		 */
		SCTP_INP_WLOCK(ca->inp);
		ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
		SCTP_INP_WUNLOCK(ca->inp);
	}
	sctp_m_freem(ca->m);
	SCTP_FREE(ca, SCTP_M_COPYAL);
}

/*
 * Copy len bytes from the user I/O vector uio into a freshly allocated
 * mbuf chain (cluster-sized mbufs, M_WAITOK).  Returns the chain head or
 * NULL on uiomove/allocation failure.
 *
 * NOTE(review): on the err_out_now path only the chain starting at 'at'
 * is freed; mbufs already filled before 'at' (from 'ret' onward) appear
 * to be leaked when the failure occurs after the first iteration —
 * verify against upstream.
 */
static struct mbuf *
sctp_copy_out_all(struct uio *uio, ssize_t len)
{
	struct mbuf *ret, *at;
	ssize_t left, willcpy, cancpy, error;

	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
	if (ret == NULL) {
		/* TSNH */
		return (NULL);
	}
	left = len;
	SCTP_BUF_LEN(ret) = 0;
	/* save space for the data chunk header */
	cancpy = (int)M_TRAILINGSPACE(ret);
	willcpy = min(cancpy, left);
	at = ret;
	while (left > 0) {
		/* Align data to the end */
		error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
		if (error) {
		err_out_now:
			sctp_m_freem(at);
			return (NULL);
		}
		SCTP_BUF_LEN(at) = (int)willcpy;
		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
		left -= willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
			if (SCTP_BUF_NEXT(at) == NULL) {
				goto err_out_now;
			}
			at = SCTP_BUF_NEXT(at);
			SCTP_BUF_LEN(at) = 0;
			cancpy = (int)M_TRAILINGSPACE(at);
			willcpy = min(cancpy, left);
		}
	}
	return (ret);
}

/*
 * Implement SCTP_SENDALL: snapshot the user message (from uio or m) into
 * a copy-all cookie and start a PCB iterator that appends a copy to every
 * association on the endpoint.  Only one sendall may be in progress per
 * endpoint (guarded by SCTP_PCB_FLAGS_SND_ITERATOR_UP).  Returns 0 or an
 * errno (EMSGSIZE, ENOMEM, EBUSY, EFAULT).
 */
static int
sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
    struct sctp_nonpad_sndrcvinfo *srcv)
{
	struct sctp_copy_all *ca;
	struct mbuf *mat;
	ssize_t sndlen;
	int ret;

	if (uio != NULL) {
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD)
		sndlen = uio->uio_resid;
#else
		sndlen = uio_resid(uio);
#endif
#else
		sndlen = uio->uio_resid;
#endif
	} else {
		/* No uio: total the supplied mbuf chain instead. */
		sndlen = 0;
		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
			sndlen += SCTP_BUF_LEN(mat);
		}
	}
	if (sndlen > (ssize_t)SCTP_BASE_SYSCTL(sctp_sendall_limit)) {
		/* You must not be larger than the limit! */
		return (EMSGSIZE);
	}
	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
	    SCTP_M_COPYAL);
	if (ca == NULL) {
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	memset(ca, 0, sizeof(struct sctp_copy_all));
	ca->inp = inp;
	if (srcv != NULL) {
		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
	}
	/* Serialize. */
	SCTP_INP_WLOCK(inp);
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) != 0) {
		SCTP_INP_WUNLOCK(inp);
		sctp_m_freem(m);
		SCTP_FREE(ca, SCTP_M_COPYAL);
		return (EBUSY);
	}
	inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
	SCTP_INP_WUNLOCK(inp);
	/*
	 * take off the sendall flag, it would be bad if we failed to do
	 * this :-0
	 */
	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
	/* get length and mbuf chain */
	ca->sndlen = sndlen;
	if (uio != NULL) {
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
#endif
		ca->m = sctp_copy_out_all(uio, ca->sndlen);
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
#endif
		if (ca->m == NULL) {
			SCTP_FREE(ca, SCTP_M_COPYAL);
			sctp_m_freem(m);
			SCTP_INP_WLOCK(inp);
			inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
			SCTP_INP_WUNLOCK(inp);
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
	} else {
		/* Take ownership of the caller's chain. */
		ca->m = m;
	}
	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
	    SCTP_ASOC_ANY_STATE,
	    (void *)ca, 0,
	    sctp_sendall_completes, inp, 1);
	if (ret != 0) {
		SCTP_INP_WLOCK(inp);
		inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
		SCTP_INP_WUNLOCK(inp);
		SCTP_FREE(ca, SCTP_M_COPYAL);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	return (0);
}

/*
 * Drop any queued (not yet sent) COOKIE-ECHO chunks from the control
 * send queue, e.g. when a fresh cookie supersedes them.
 */
void
sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
}

/*
 * Remove acknowledged ASCONF chunks from the ASCONF send queue.  Stops at
 * the first ASCONF whose serial number has not yet been acked (the queue
 * is in serial-number order).
 */
void
sctp_toss_old_asconf(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_asconf_chunk *acp;

	asoc = &stcb->asoc;
	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
		/* find SCTP_ASCONF chunk in queue */
		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
			if (chk->data) {
				acp = mtod(chk->data, struct sctp_asconf_chunk *);
				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
					/* Not Acked yet */
					break;
				}
			}
			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		}
	}
}

/*
 * After a packet of bundle_at DATA chunks has been handed to the lower
 * layer toward net: move each chunk from the send queue to the sent queue
 * (kept in TSN order), mark it SENT, and update flight size and the
 * peer's receive window accounting.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
		asoc->send_queue_cnt--;
		if (i > 0) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurement is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
		if (data_list[i]->whoTo == NULL) {
			data_list[i]->whoTo = net;
			atomic_add_int(&net->ref_count, 1);
		}
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
			struct sctp_tmit_chunk *tpp;

			/* need to move back */
		back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
	all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			    data_list[i]->whoTo->flight_size,
			    data_list[i]->book_size,
			    (uint32_t)(uintptr_t)data_list[i]->whoTo,
			    data_list[i]->rec.data.tsn);
		}
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			    asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
		}
		/* Charge the chunk (plus configured overhead) to the peer's rwnd. */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
	}
}

/*
 * Purge stray one-shot control chunks (SACKs, heartbeats, shutdown chunks,
 * etc.) left on the control send queue.  A STREAM_RESET chunk is kept only
 * if it is the association's current outstanding request (asoc->str_reset).
 */
static void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked)
{
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
		clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			asoc->ctrl_queue_cnt--;
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
				asoc->fwd_tsn_cnt--;
			}
			sctp_free_a_chunk(stcb, chk, so_locked);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if (chk != asoc->str_reset) {
				goto clean_up_anyway;
			}
		}
	}
}

/*
 * Decide how many bytes (if any) of an INCOMPLETE message of the given
 * length should be split off into a chunk, given space_left in the packet
 * and the association's frag_point.  Returns the number of bytes to take,
 * or 0 to wait for more data.
 */
static uint32_t
sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
    uint32_t space_left, uint32_t frag_point, int eeor_on)
{
	/* Make a decision on if I should split a
	 * msg into multiple parts. This is only asked of
	 * incomplete messages.
	 */
	if (eeor_on) {
		/* If we are doing EEOR we need to always send
		 * it if its the entire thing, since it might
		 * be all the guy is putting in the hopper.
		 */
		if (space_left >= length) {
			/*-
			 * If we have data outstanding,
			 * we get another chance when the sack
			 * arrives to transmit - wait for more data
			 */
			if (stcb->asoc.total_flight == 0) {
				/* If nothing is in flight, we zero
				 * the packet counter.
				 */
				return (length);
			}
			return (0);

		} else {
			/* You can fill the rest */
			return (space_left);
		}
	}
	/*-
	 * For those strange folk that make the send buffer
	 * smaller than our fragmentation point, we can't
	 * get a full msg in so we have to allow splitting.
	 */
	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
		return (length);
	}
	if ((length <= space_left) ||
	    ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
		/* Sub-optimal residual don't split in non-eeor mode. */
		return (0);
	}
	/* If we reach here length is larger
	 * than the space_left. Do we wish to split
	 * it for the sake of packet putting together?
7801 */ 7802 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) { 7803 /* Its ok to split it */ 7804 return (min(space_left, frag_point)); 7805 } 7806 /* Nope, can't split */ 7807 return (0); 7808 } 7809 7810 static uint32_t 7811 sctp_move_to_outqueue(struct sctp_tcb *stcb, 7812 struct sctp_nets *net, 7813 struct sctp_stream_out *strq, 7814 uint32_t space_left, 7815 uint32_t frag_point, 7816 int *giveup, 7817 int eeor_mode, 7818 int *bail, 7819 int so_locked) 7820 { 7821 /* Move from the stream to the send_queue keeping track of the total */ 7822 struct sctp_association *asoc; 7823 struct sctp_stream_queue_pending *sp; 7824 struct sctp_tmit_chunk *chk; 7825 struct sctp_data_chunk *dchkh=NULL; 7826 struct sctp_idata_chunk *ndchkh=NULL; 7827 uint32_t to_move, length; 7828 int leading; 7829 uint8_t rcv_flags = 0; 7830 uint8_t some_taken; 7831 7832 SCTP_TCB_LOCK_ASSERT(stcb); 7833 asoc = &stcb->asoc; 7834 one_more_time: 7835 /*sa_ignore FREED_MEMORY*/ 7836 sp = TAILQ_FIRST(&strq->outqueue); 7837 if (sp == NULL) { 7838 sp = TAILQ_FIRST(&strq->outqueue); 7839 if (sp) { 7840 goto one_more_time; 7841 } 7842 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) && 7843 (stcb->asoc.idata_supported == 0) && 7844 (strq->last_msg_incomplete)) { 7845 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", 7846 strq->sid, 7847 strq->last_msg_incomplete); 7848 strq->last_msg_incomplete = 0; 7849 } 7850 to_move = 0; 7851 goto out_of; 7852 } 7853 if ((sp->msg_is_complete) && (sp->length == 0)) { 7854 if (sp->sender_all_done) { 7855 /* We are doing deferred cleanup. Last 7856 * time through when we took all the data 7857 * the sender_all_done was not set. 
7858 */ 7859 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) { 7860 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 7861 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 7862 sp->sender_all_done, 7863 sp->length, 7864 sp->msg_is_complete, 7865 sp->put_last_out); 7866 } 7867 atomic_subtract_int(&asoc->stream_queue_cnt, 1); 7868 TAILQ_REMOVE(&strq->outqueue, sp, next); 7869 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp); 7870 if ((strq->state == SCTP_STREAM_RESET_PENDING) && 7871 (strq->chunks_on_queues == 0) && 7872 TAILQ_EMPTY(&strq->outqueue)) { 7873 stcb->asoc.trigger_reset = 1; 7874 } 7875 if (sp->net) { 7876 sctp_free_remote_addr(sp->net); 7877 sp->net = NULL; 7878 } 7879 if (sp->data) { 7880 sctp_m_freem(sp->data); 7881 sp->data = NULL; 7882 } 7883 sctp_free_a_strmoq(stcb, sp, so_locked); 7884 /* back to get the next msg */ 7885 goto one_more_time; 7886 } else { 7887 /* sender just finished this but 7888 * still holds a reference 7889 */ 7890 *giveup = 1; 7891 to_move = 0; 7892 goto out_of; 7893 } 7894 } else { 7895 /* is there some to get */ 7896 if (sp->length == 0) { 7897 /* no */ 7898 *giveup = 1; 7899 to_move = 0; 7900 goto out_of; 7901 } else if (sp->discard_rest) { 7902 /* Whack down the size */ 7903 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length); 7904 if ((stcb->sctp_socket != NULL) && 7905 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 7906 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 7907 SCTP_SB_DECR(&stcb->sctp_socket->so_snd, sp->length); 7908 } 7909 if (sp->data) { 7910 sctp_m_freem(sp->data); 7911 sp->data = NULL; 7912 sp->tail_mbuf = NULL; 7913 } 7914 sp->length = 0; 7915 sp->some_taken = 1; 7916 *giveup = 1; 7917 to_move = 0; 7918 goto out_of; 7919 } 7920 } 7921 some_taken = sp->some_taken; 7922 length = sp->length; 7923 if (sp->msg_is_complete) { 7924 /* The message is complete */ 7925 to_move = min(length, frag_point); 7926 if (to_move 
== length) { 7927 /* All of it fits in the MTU */ 7928 if (sp->some_taken) { 7929 rcv_flags |= SCTP_DATA_LAST_FRAG; 7930 } else { 7931 rcv_flags |= SCTP_DATA_NOT_FRAG; 7932 } 7933 sp->put_last_out = 1; 7934 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) { 7935 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY; 7936 } 7937 } else { 7938 /* Not all of it fits, we fragment */ 7939 if (sp->some_taken == 0) { 7940 rcv_flags |= SCTP_DATA_FIRST_FRAG; 7941 } 7942 sp->some_taken = 1; 7943 } 7944 } else { 7945 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode); 7946 if (to_move > 0) { 7947 if (to_move >= length) { 7948 to_move = length; 7949 } 7950 if (sp->some_taken == 0) { 7951 rcv_flags |= SCTP_DATA_FIRST_FRAG; 7952 sp->some_taken = 1; 7953 } 7954 } else { 7955 /* Nothing to take. */ 7956 *giveup = 1; 7957 to_move = 0; 7958 goto out_of; 7959 } 7960 } 7961 7962 /* If we reach here, we can copy out a chunk */ 7963 sctp_alloc_a_chunk(stcb, chk); 7964 if (chk == NULL) { 7965 /* No chunk memory */ 7966 *giveup = 1; 7967 to_move = 0; 7968 goto out_of; 7969 } 7970 /* Setup for unordered if needed by looking 7971 * at the user sent info flags. 
7972 */ 7973 if (sp->sinfo_flags & SCTP_UNORDERED) { 7974 rcv_flags |= SCTP_DATA_UNORDERED; 7975 } 7976 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && 7977 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) { 7978 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY; 7979 } 7980 /* clear out the chunk before setting up */ 7981 memset(chk, 0, sizeof(*chk)); 7982 chk->rec.data.rcv_flags = rcv_flags; 7983 7984 if (to_move >= length) { 7985 /* we think we can steal the whole thing */ 7986 if (to_move < sp->length) { 7987 /* bail, it changed */ 7988 goto dont_do_it; 7989 } 7990 chk->data = sp->data; 7991 chk->last_mbuf = sp->tail_mbuf; 7992 /* register the stealing */ 7993 sp->data = sp->tail_mbuf = NULL; 7994 } else { 7995 struct mbuf *m; 7996 dont_do_it: 7997 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT); 7998 chk->last_mbuf = NULL; 7999 if (chk->data == NULL) { 8000 sp->some_taken = some_taken; 8001 sctp_free_a_chunk(stcb, chk, so_locked); 8002 *bail = 1; 8003 to_move = 0; 8004 goto out_of; 8005 } 8006 #ifdef SCTP_MBUF_LOGGING 8007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 8008 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY); 8009 } 8010 #endif 8011 /* Pull off the data */ 8012 m_adj(sp->data, to_move); 8013 /* Now lets work our way down and compact it */ 8014 m = sp->data; 8015 while (m && (SCTP_BUF_LEN(m) == 0)) { 8016 sp->data = SCTP_BUF_NEXT(m); 8017 SCTP_BUF_NEXT(m) = NULL; 8018 if (sp->tail_mbuf == m) { 8019 /*- 8020 * Freeing tail? TSNH since 8021 * we supposedly were taking less 8022 * than the sp->length. 8023 */ 8024 #ifdef INVARIANTS 8025 panic("Huh, freeing tail? - TSNH"); 8026 #else 8027 SCTP_PRINTF("Huh, freeing tail? 
- TSNH\n"); 8028 sp->tail_mbuf = sp->data = NULL; 8029 sp->length = 0; 8030 #endif 8031 } 8032 sctp_m_free(m); 8033 m = sp->data; 8034 } 8035 } 8036 if (SCTP_BUF_IS_EXTENDED(chk->data)) { 8037 chk->copy_by_ref = 1; 8038 } else { 8039 chk->copy_by_ref = 0; 8040 } 8041 /* get last_mbuf and counts of mb usage 8042 * This is ugly but hopefully its only one mbuf. 8043 */ 8044 if (chk->last_mbuf == NULL) { 8045 chk->last_mbuf = chk->data; 8046 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 8047 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 8048 } 8049 } 8050 8051 if (to_move > length) { 8052 /*- This should not happen either 8053 * since we always lower to_move to the size 8054 * of sp->length if its larger. 8055 */ 8056 #ifdef INVARIANTS 8057 panic("Huh, how can to_move be larger?"); 8058 #else 8059 SCTP_PRINTF("Huh, how can to_move be larger?\n"); 8060 sp->length = 0; 8061 #endif 8062 } else { 8063 atomic_subtract_int(&sp->length, to_move); 8064 } 8065 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb); 8066 if (M_LEADINGSPACE(chk->data) < leading) { 8067 /* Not enough room for a chunk header, get some */ 8068 struct mbuf *m; 8069 8070 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA); 8071 if (m == NULL) { 8072 /* 8073 * we're in trouble here. _PREPEND below will free 8074 * all the data if there is no leading space, so we 8075 * must put the data back and restore. 
8076 */ 8077 if (sp->data == NULL) { 8078 /* unsteal the data */ 8079 sp->data = chk->data; 8080 sp->tail_mbuf = chk->last_mbuf; 8081 } else { 8082 struct mbuf *m_tmp; 8083 /* reassemble the data */ 8084 m_tmp = sp->data; 8085 sp->data = chk->data; 8086 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp; 8087 } 8088 sp->some_taken = some_taken; 8089 atomic_add_int(&sp->length, to_move); 8090 chk->data = NULL; 8091 *bail = 1; 8092 sctp_free_a_chunk(stcb, chk, so_locked); 8093 to_move = 0; 8094 goto out_of; 8095 } else { 8096 SCTP_BUF_LEN(m) = 0; 8097 SCTP_BUF_NEXT(m) = chk->data; 8098 chk->data = m; 8099 M_ALIGN(chk->data, 4); 8100 } 8101 } 8102 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT); 8103 if (chk->data == NULL) { 8104 /* HELP, TSNH since we assured it would not above? */ 8105 #ifdef INVARIANTS 8106 panic("prepend fails HELP?"); 8107 #else 8108 SCTP_PRINTF("prepend fails HELP?\n"); 8109 sctp_free_a_chunk(stcb, chk, so_locked); 8110 #endif 8111 *bail = 1; 8112 to_move = 0; 8113 goto out_of; 8114 } 8115 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb)); 8116 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb)); 8117 chk->book_size_scale = 0; 8118 chk->sent = SCTP_DATAGRAM_UNSENT; 8119 8120 chk->flags = 0; 8121 chk->asoc = &stcb->asoc; 8122 chk->pad_inplace = 0; 8123 chk->no_fr_allowed = 0; 8124 if (stcb->asoc.idata_supported == 0) { 8125 if (rcv_flags & SCTP_DATA_UNORDERED) { 8126 /* Just use 0. The receiver ignores the values. 
*/ 8127 chk->rec.data.mid = 0; 8128 } else { 8129 chk->rec.data.mid = strq->next_mid_ordered; 8130 if (rcv_flags & SCTP_DATA_LAST_FRAG) { 8131 strq->next_mid_ordered++; 8132 } 8133 } 8134 } else { 8135 if (rcv_flags & SCTP_DATA_UNORDERED) { 8136 chk->rec.data.mid = strq->next_mid_unordered; 8137 if (rcv_flags & SCTP_DATA_LAST_FRAG) { 8138 strq->next_mid_unordered++; 8139 } 8140 } else { 8141 chk->rec.data.mid = strq->next_mid_ordered; 8142 if (rcv_flags & SCTP_DATA_LAST_FRAG) { 8143 strq->next_mid_ordered++; 8144 } 8145 } 8146 } 8147 chk->rec.data.sid = sp->sid; 8148 chk->rec.data.ppid = sp->ppid; 8149 chk->rec.data.context = sp->context; 8150 chk->rec.data.doing_fast_retransmit = 0; 8151 8152 chk->rec.data.timetodrop = sp->ts; 8153 chk->flags = sp->act_flags; 8154 8155 if (sp->net) { 8156 chk->whoTo = sp->net; 8157 atomic_add_int(&chk->whoTo->ref_count, 1); 8158 } else 8159 chk->whoTo = NULL; 8160 8161 if (sp->holds_key_ref) { 8162 chk->auth_keyid = sp->auth_keyid; 8163 sctp_auth_key_acquire(stcb, chk->auth_keyid); 8164 chk->holds_key_ref = 1; 8165 } 8166 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, to_move); 8167 #if defined(__FreeBSD__) && !defined(__Userspace__) 8168 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1); 8169 #else 8170 chk->rec.data.tsn = asoc->sending_seq++; 8171 #endif 8172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) { 8173 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 8174 (uint32_t)(uintptr_t)stcb, sp->length, 8175 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)), 8176 chk->rec.data.tsn); 8177 } 8178 if (stcb->asoc.idata_supported == 0) { 8179 dchkh = mtod(chk->data, struct sctp_data_chunk *); 8180 } else { 8181 ndchkh = mtod(chk->data, struct sctp_idata_chunk *); 8182 } 8183 /* 8184 * Put the rest of the things in place now. Size was done 8185 * earlier in previous loop prior to padding. 
8186 */ 8187 8188 SCTP_TCB_LOCK_ASSERT(stcb); 8189 #ifdef SCTP_ASOCLOG_OF_TSNS 8190 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 8191 asoc->tsn_out_at = 0; 8192 asoc->tsn_out_wrapped = 1; 8193 } 8194 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn; 8195 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid; 8196 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid; 8197 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size; 8198 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags; 8199 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb; 8200 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at; 8201 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2; 8202 asoc->tsn_out_at++; 8203 #endif 8204 if (stcb->asoc.idata_supported == 0) { 8205 dchkh->ch.chunk_type = SCTP_DATA; 8206 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 8207 dchkh->dp.tsn = htonl(chk->rec.data.tsn); 8208 dchkh->dp.sid = htons(strq->sid); 8209 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid); 8210 dchkh->dp.ppid = chk->rec.data.ppid; 8211 dchkh->ch.chunk_length = htons(chk->send_size); 8212 } else { 8213 ndchkh->ch.chunk_type = SCTP_IDATA; 8214 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 8215 ndchkh->dp.tsn = htonl(chk->rec.data.tsn); 8216 ndchkh->dp.sid = htons(strq->sid); 8217 ndchkh->dp.reserved = htons(0); 8218 ndchkh->dp.mid = htonl(chk->rec.data.mid); 8219 if (sp->fsn == 0) 8220 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid; 8221 else 8222 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn); 8223 sp->fsn++; 8224 ndchkh->ch.chunk_length = htons(chk->send_size); 8225 } 8226 /* Now advance the chk->send_size by the actual pad needed. 
*/
	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
		/* need a pad */
		struct mbuf *lm;
		int pads;

		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
		if (lm != NULL) {
			chk->last_mbuf = lm;
			chk->pad_inplace = 1;
		}
		chk->send_size += pads;
	}
	if (PR_SCTP_ENABLED(chk->flags)) {
		asoc->pr_sctp_cnt++;
	}
	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done pull and kill the message */
		if (sp->put_last_out == 0) {
			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
			            sp->sender_all_done,
			            sp->length,
			            sp->msg_is_complete,
			            sp->put_last_out);
		}
		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
		TAILQ_REMOVE(&strq->outqueue, sp, next);
		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp);
		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
		    (strq->chunks_on_queues == 0) &&
		    TAILQ_EMPTY(&strq->outqueue)) {
			stcb->asoc.trigger_reset = 1;
		}
		if (sp->net) {
			sctp_free_remote_addr(sp->net);
			sp->net = NULL;
		}
		if (sp->data) {
			sctp_m_freem(sp->data);
			sp->data = NULL;
		}
		sctp_free_a_strmoq(stcb, sp, so_locked);
	}
	asoc->chunks_on_out_queue++;
	strq->chunks_on_queues++;
	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
	asoc->send_queue_cnt++;
out_of:
	return (to_move);
}

/*
 * Pull data from the stream queues onto asoc->send_queue for
 * destination `net`.
 *
 * The budget (space_left) starts as one packet's worth of payload:
 * net->mtu minus the per-address-family transport overhead, minus one
 * data chunk header, rounded down to a 4-byte boundary.  The stream
 * scheduler is then asked repeatedly for a stream, and
 * sctp_move_to_outqueue() moves data from that stream into the send
 * queue (assigning TSNs), until the budget is exhausted, the scheduler
 * returns no stream, the callee reports `giveup`, or it bails.
 *
 * *quit_now is set when the callee bailed (the caller treats this as a
 * memory allocation failure and stops filling further nets).
 */
static void
sctp_fill_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t frag_point, int eeor_mode, int *quit_now,
    int so_locked)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *strq;
	uint32_t space_left, moved, total_moved;
	int bail, giveup;

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
	total_moved = 0;
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		space_left = net->mtu - SCTP_MIN_OVERHEAD;
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		space_left = net->mtu - sizeof(struct sctphdr);
		break;
#endif
	default:
		/* TSNH */
		space_left = net->mtu;
		break;
	}
	/* Need an allowance for the data chunk header too */
	space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);

	/* must make even word boundary */
	space_left &= 0xfffffffc;
	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
	giveup = 0;
	bail = 0;
	while ((space_left > 0) && (strq != NULL)) {
		moved = sctp_move_to_outqueue(stcb, net, strq, space_left,
		    frag_point, &giveup, eeor_mode,
		    &bail, so_locked);
		if ((giveup != 0) || (bail != 0)) {
			break;
		}
		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
		total_moved += moved;
		/* Charge what was moved against the budget (unsigned-safe). */
		if (space_left >= moved) {
			space_left -= moved;
		} else {
			space_left = 0;
		}
		/* Reserve a data chunk header for the next chunk, too. */
		if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
			space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
		} else {
			space_left = 0;
		}
		/* keep the budget word-aligned */
		space_left &= 0xfffffffc;
	}
	if (bail != 0)
		*quit_now = 1;

	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);

	if (total_moved == 0) {
		/* Nothing could be moved: count why, for statistics. */
		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
		    (net == stcb->asoc.primary_destination)) {
			/* ran dry for primary network net */
			SCTP_STAT_INCR(sctps_primary_randry);
		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
			/* ran dry with CMT on */
			SCTP_STAT_INCR(sctps_cmt_randry);
		}
	}
}

/*
 * Mark every ECN-ECHO chunk on the control send queue as unsent so it
 * is picked up again by the next output pass.
 */
void
sctp_fix_ecn_echo(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk,
&asoc->control_send_queue, sctp_next) { 8365 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 8366 chk->sent = SCTP_DATAGRAM_UNSENT; 8367 } 8368 } 8369 } 8370 8371 void 8372 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net) 8373 { 8374 struct sctp_association *asoc; 8375 struct sctp_tmit_chunk *chk; 8376 struct sctp_stream_queue_pending *sp; 8377 unsigned int i; 8378 8379 if (net == NULL) { 8380 return; 8381 } 8382 asoc = &stcb->asoc; 8383 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 8384 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { 8385 if (sp->net == net) { 8386 sctp_free_remote_addr(sp->net); 8387 sp->net = NULL; 8388 } 8389 } 8390 } 8391 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 8392 if (chk->whoTo == net) { 8393 sctp_free_remote_addr(chk->whoTo); 8394 chk->whoTo = NULL; 8395 } 8396 } 8397 } 8398 8399 int 8400 sctp_med_chunk_output(struct sctp_inpcb *inp, 8401 struct sctp_tcb *stcb, 8402 struct sctp_association *asoc, 8403 int *num_out, 8404 int *reason_code, 8405 int control_only, int from_where, 8406 struct timeval *now, int *now_filled, 8407 uint32_t frag_point, int so_locked) 8408 { 8409 /** 8410 * Ok this is the generic chunk service queue. we must do the 8411 * following: 8412 * - Service the stream queue that is next, moving any 8413 * message (note I must get a complete message i.e. FIRST/MIDDLE and 8414 * LAST to the out queue in one pass) and assigning TSN's. This 8415 * only applies though if the peer does not support NDATA. For NDATA 8416 * chunks its ok to not send the entire message ;-) 8417 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and 8418 * formulate and send the low level chunks. Making sure to combine 8419 * any control in the control chunk queue also. 
8420 */ 8421 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; 8422 struct mbuf *outchain, *endoutchain; 8423 struct sctp_tmit_chunk *chk, *nchk; 8424 8425 /* temp arrays for unlinking */ 8426 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 8427 int no_fragmentflg, error; 8428 unsigned int max_rwnd_per_dest, max_send_per_dest; 8429 int one_chunk, hbflag, skip_data_for_this_net; 8430 int asconf, cookie, no_out_cnt; 8431 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; 8432 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 8433 int tsns_sent = 0; 8434 uint32_t auth_offset; 8435 struct sctp_auth_chunk *auth; 8436 uint16_t auth_keyid; 8437 int override_ok = 1; 8438 int skip_fill_up = 0; 8439 int data_auth_reqd = 0; 8440 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to 8441 the destination. */ 8442 int quit_now = 0; 8443 bool use_zero_crc; 8444 8445 #if defined(__APPLE__) && !defined(__Userspace__) 8446 if (so_locked) { 8447 sctp_lock_assert(SCTP_INP_SO(inp)); 8448 } else { 8449 sctp_unlock_assert(SCTP_INP_SO(inp)); 8450 } 8451 #endif 8452 *num_out = 0; 8453 *reason_code = 0; 8454 auth_keyid = stcb->asoc.authinfo.active_keyid; 8455 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 8456 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) || 8457 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 8458 eeor_mode = 1; 8459 } else { 8460 eeor_mode = 0; 8461 } 8462 ctl_cnt = no_out_cnt = asconf = cookie = 0; 8463 /* 8464 * First lets prime the pump. For each destination, if there is room 8465 * in the flight size, attempt to pull an MTU's worth out of the 8466 * stream queues into the general send_queue 8467 */ 8468 #ifdef SCTP_AUDITING_ENABLED 8469 sctp_audit_log(0xC2, 2); 8470 #endif 8471 SCTP_TCB_LOCK_ASSERT(stcb); 8472 hbflag = 0; 8473 if (control_only) 8474 no_data_chunks = 1; 8475 else 8476 no_data_chunks = 0; 8477 8478 /* Nothing to possible to send? 
*/ 8479 if ((TAILQ_EMPTY(&asoc->control_send_queue) || 8480 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && 8481 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8482 TAILQ_EMPTY(&asoc->send_queue) && 8483 sctp_is_there_unsent_data(stcb, so_locked) == 0) { 8484 nothing_to_send: 8485 *reason_code = 9; 8486 return (0); 8487 } 8488 if (asoc->peers_rwnd == 0) { 8489 /* No room in peers rwnd */ 8490 *reason_code = 1; 8491 if (asoc->total_flight > 0) { 8492 /* we are allowed one chunk in flight */ 8493 no_data_chunks = 1; 8494 } 8495 } 8496 if (stcb->asoc.ecn_echo_cnt_onq) { 8497 /* Record where a sack goes, if any */ 8498 if (no_data_chunks && 8499 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { 8500 /* Nothing but ECNe to send - we don't do that */ 8501 goto nothing_to_send; 8502 } 8503 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8504 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 8505 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 8506 sack_goes_to = chk->whoTo; 8507 break; 8508 } 8509 } 8510 } 8511 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); 8512 if (stcb->sctp_socket) 8513 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; 8514 else 8515 max_send_per_dest = 0; 8516 if (no_data_chunks == 0) { 8517 /* How many non-directed chunks are there? */ 8518 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 8519 if (chk->whoTo == NULL) { 8520 /* We already have non-directed 8521 * chunks on the queue, no need 8522 * to do a fill-up. 8523 */ 8524 skip_fill_up = 1; 8525 break; 8526 } 8527 } 8528 } 8529 if ((no_data_chunks == 0) && 8530 (skip_fill_up == 0) && 8531 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { 8532 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 8533 /* 8534 * This for loop we are in takes in 8535 * each net, if its's got space in cwnd and 8536 * has data sent to it (when CMT is off) then it 8537 * calls sctp_fill_outqueue for the net. 
This gets 8538 * data on the send queue for that network. 8539 * 8540 * In sctp_fill_outqueue TSN's are assigned and 8541 * data is copied out of the stream buffers. Note 8542 * mostly copy by reference (we hope). 8543 */ 8544 net->window_probe = 0; 8545 if ((net != stcb->asoc.alternate) && 8546 ((net->dest_state & SCTP_ADDR_PF) || 8547 ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) || 8548 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 8549 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8550 sctp_log_cwnd(stcb, net, 1, 8551 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8552 } 8553 continue; 8554 } 8555 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && 8556 (net->flight_size == 0)) { 8557 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net); 8558 } 8559 if (net->flight_size >= net->cwnd) { 8560 /* skip this network, no room - can't fill */ 8561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8562 sctp_log_cwnd(stcb, net, 3, 8563 SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8564 } 8565 continue; 8566 } 8567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 8568 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 8569 } 8570 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked); 8571 if (quit_now) { 8572 /* memory alloc failure */ 8573 no_data_chunks = 1; 8574 break; 8575 } 8576 } 8577 } 8578 /* now service each destination and send out what we can for it */ 8579 /* Nothing to send? 
*/ 8580 if (TAILQ_EMPTY(&asoc->control_send_queue) && 8581 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8582 TAILQ_EMPTY(&asoc->send_queue)) { 8583 *reason_code = 8; 8584 return (0); 8585 } 8586 8587 if (asoc->sctp_cmt_on_off > 0) { 8588 /* get the last start point */ 8589 start_at = asoc->last_net_cmt_send_started; 8590 if (start_at == NULL) { 8591 /* null so to beginning */ 8592 start_at = TAILQ_FIRST(&asoc->nets); 8593 } else { 8594 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); 8595 if (start_at == NULL) { 8596 start_at = TAILQ_FIRST(&asoc->nets); 8597 } 8598 } 8599 asoc->last_net_cmt_send_started = start_at; 8600 } else { 8601 start_at = TAILQ_FIRST(&asoc->nets); 8602 } 8603 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 8604 if (chk->whoTo == NULL) { 8605 if (asoc->alternate) { 8606 chk->whoTo = asoc->alternate; 8607 } else { 8608 chk->whoTo = asoc->primary_destination; 8609 } 8610 atomic_add_int(&chk->whoTo->ref_count, 1); 8611 } 8612 } 8613 old_start_at = NULL; 8614 again_one_more_time: 8615 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 8616 /* how much can we send? */ 8617 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 8618 if (old_start_at && (old_start_at == net)) { 8619 /* through list completely. */ 8620 break; 8621 } 8622 tsns_sent = 0xa; 8623 if (TAILQ_EMPTY(&asoc->control_send_queue) && 8624 TAILQ_EMPTY(&asoc->asconf_send_queue) && 8625 (net->flight_size >= net->cwnd)) { 8626 /* Nothing on control or asconf and flight is full, we can skip 8627 * even in the CMT case. 
8628 */ 8629 continue; 8630 } 8631 bundle_at = 0; 8632 endoutchain = outchain = NULL; 8633 auth = NULL; 8634 auth_offset = 0; 8635 no_fragmentflg = 1; 8636 one_chunk = 0; 8637 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 8638 skip_data_for_this_net = 1; 8639 } else { 8640 skip_data_for_this_net = 0; 8641 } 8642 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 8643 #ifdef INET 8644 case AF_INET: 8645 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 8646 break; 8647 #endif 8648 #ifdef INET6 8649 case AF_INET6: 8650 mtu = net->mtu - SCTP_MIN_OVERHEAD; 8651 break; 8652 #endif 8653 #if defined(__Userspace__) 8654 case AF_CONN: 8655 mtu = net->mtu - sizeof(struct sctphdr); 8656 break; 8657 #endif 8658 default: 8659 /* TSNH */ 8660 mtu = net->mtu; 8661 break; 8662 } 8663 mx_mtu = mtu; 8664 to_out = 0; 8665 if (mtu > asoc->peers_rwnd) { 8666 if (asoc->total_flight > 0) { 8667 /* We have a packet in flight somewhere */ 8668 r_mtu = asoc->peers_rwnd; 8669 } else { 8670 /* We are always allowed to send one MTU out */ 8671 one_chunk = 1; 8672 r_mtu = mtu; 8673 } 8674 } else { 8675 r_mtu = mtu; 8676 } 8677 error = 0; 8678 /************************/ 8679 /* ASCONF transmission */ 8680 /************************/ 8681 /* Now first lets go through the asconf queue */ 8682 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 8683 if (chk->rec.chunk_id.id != SCTP_ASCONF) { 8684 continue; 8685 } 8686 if (chk->whoTo == NULL) { 8687 if (asoc->alternate == NULL) { 8688 if (asoc->primary_destination != net) { 8689 break; 8690 } 8691 } else { 8692 if (asoc->alternate != net) { 8693 break; 8694 } 8695 } 8696 } else { 8697 if (chk->whoTo != net) { 8698 break; 8699 } 8700 } 8701 if (chk->data == NULL) { 8702 break; 8703 } 8704 if (chk->sent != SCTP_DATAGRAM_UNSENT && 8705 chk->sent != SCTP_DATAGRAM_RESEND) { 8706 break; 8707 } 8708 /* 8709 * if no AUTH is yet included and this chunk 8710 * requires it, make sure to account for it. 
We 8711 * don't apply the size until the AUTH chunk is 8712 * actually added below in case there is no room for 8713 * this chunk. NOTE: we overload the use of "omtu" 8714 * here 8715 */ 8716 if ((auth == NULL) && 8717 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8718 stcb->asoc.peer_auth_chunks)) { 8719 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 8720 } else 8721 omtu = 0; 8722 /* Here we do NOT factor the r_mtu */ 8723 if ((chk->send_size < (int)(mtu - omtu)) || 8724 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 8725 /* 8726 * We probably should glom the mbuf chain 8727 * from the chk->data for control but the 8728 * problem is it becomes yet one more level 8729 * of tracking to do if for some reason 8730 * output fails. Then I have got to 8731 * reconstruct the merged control chain.. el 8732 * yucko.. for now we take the easy way and 8733 * do the copy 8734 */ 8735 /* 8736 * Add an AUTH chunk, if chunk requires it 8737 * save the offset into the chain for AUTH 8738 */ 8739 if ((auth == NULL) && 8740 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8741 stcb->asoc.peer_auth_chunks))) { 8742 outchain = sctp_add_auth_chunk(outchain, 8743 &endoutchain, 8744 &auth, 8745 &auth_offset, 8746 stcb, 8747 chk->rec.chunk_id.id); 8748 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8749 } 8750 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 8751 (int)chk->rec.chunk_id.can_take_data, 8752 chk->send_size, chk->copy_by_ref); 8753 if (outchain == NULL) { 8754 *reason_code = 8; 8755 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8756 return (ENOMEM); 8757 } 8758 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8759 /* update our MTU size */ 8760 if (mtu > (chk->send_size + omtu)) 8761 mtu -= (chk->send_size + omtu); 8762 else 8763 mtu = 0; 8764 to_out += (chk->send_size + omtu); 8765 /* Do clear IP_DF ? 
*/ 8766 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8767 no_fragmentflg = 0; 8768 } 8769 if (chk->rec.chunk_id.can_take_data) 8770 chk->data = NULL; 8771 /* 8772 * set hb flag since we can 8773 * use these for RTO 8774 */ 8775 hbflag = 1; 8776 asconf = 1; 8777 /* 8778 * should sysctl this: don't 8779 * bundle data with ASCONF 8780 * since it requires AUTH 8781 */ 8782 no_data_chunks = 1; 8783 chk->sent = SCTP_DATAGRAM_SENT; 8784 if (chk->whoTo == NULL) { 8785 chk->whoTo = net; 8786 atomic_add_int(&net->ref_count, 1); 8787 } 8788 chk->snd_count++; 8789 if (mtu == 0) { 8790 /* 8791 * Ok we are out of room but we can 8792 * output without effecting the 8793 * flight size since this little guy 8794 * is a control only packet. 8795 */ 8796 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 8797 /* 8798 * do NOT clear the asconf 8799 * flag as it is used to do 8800 * appropriate source address 8801 * selection. 8802 */ 8803 if (*now_filled == 0) { 8804 (void)SCTP_GETTIME_TIMEVAL(now); 8805 *now_filled = 1; 8806 } 8807 net->last_sent_time = *now; 8808 hbflag = 0; 8809 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 8810 (struct sockaddr *)&net->ro._l_addr, 8811 outchain, auth_offset, auth, 8812 stcb->asoc.authinfo.active_keyid, 8813 no_fragmentflg, 0, asconf, 8814 inp->sctp_lport, stcb->rport, 8815 htonl(stcb->asoc.peer_vtag), 8816 net->port, NULL, 8817 #if defined(__FreeBSD__) && !defined(__Userspace__) 8818 0, 0, 8819 #endif 8820 false, so_locked))) { 8821 /* error, we could not output */ 8822 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 8823 if (from_where == 0) { 8824 SCTP_STAT_INCR(sctps_lowlevelerrusr); 8825 } 8826 if (error == ENOBUFS) { 8827 asoc->ifp_had_enobuf = 1; 8828 SCTP_STAT_INCR(sctps_lowlevelerr); 8829 } 8830 /* error, could not output */ 8831 if (error == EHOSTUNREACH) { 8832 /* 8833 * Destination went 8834 * unreachable 8835 * during this send 8836 */ 8837 sctp_move_chunks_from_net(stcb, net); 8838 } 8839 asconf = 0; 8840 
*reason_code = 7; 8841 break; 8842 } else { 8843 asoc->ifp_had_enobuf = 0; 8844 } 8845 /* 8846 * increase the number we sent, if a 8847 * cookie is sent we don't tell them 8848 * any was sent out. 8849 */ 8850 outchain = endoutchain = NULL; 8851 auth = NULL; 8852 auth_offset = 0; 8853 asconf = 0; 8854 if (!no_out_cnt) 8855 *num_out += ctl_cnt; 8856 /* recalc a clean slate and setup */ 8857 switch (net->ro._l_addr.sa.sa_family) { 8858 #ifdef INET 8859 case AF_INET: 8860 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 8861 break; 8862 #endif 8863 #ifdef INET6 8864 case AF_INET6: 8865 mtu = net->mtu - SCTP_MIN_OVERHEAD; 8866 break; 8867 #endif 8868 #if defined(__Userspace__) 8869 case AF_CONN: 8870 mtu = net->mtu - sizeof(struct sctphdr); 8871 break; 8872 #endif 8873 default: 8874 /* TSNH */ 8875 mtu = net->mtu; 8876 break; 8877 } 8878 to_out = 0; 8879 no_fragmentflg = 1; 8880 } 8881 } 8882 } 8883 if (error != 0) { 8884 /* try next net */ 8885 continue; 8886 } 8887 /************************/ 8888 /* Control transmission */ 8889 /************************/ 8890 /* Now first lets go through the control queue */ 8891 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 8892 if ((sack_goes_to) && 8893 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && 8894 (chk->whoTo != sack_goes_to)) { 8895 /* 8896 * if we have a sack in queue, and we are looking at an 8897 * ecn echo that is NOT queued to where the sack is going.. 
8898 */ 8899 if (chk->whoTo == net) { 8900 /* Don't transmit it to where its going (current net) */ 8901 continue; 8902 } else if (sack_goes_to == net) { 8903 /* But do transmit it to this address */ 8904 goto skip_net_check; 8905 } 8906 } 8907 if (chk->whoTo == NULL) { 8908 if (asoc->alternate == NULL) { 8909 if (asoc->primary_destination != net) { 8910 continue; 8911 } 8912 } else { 8913 if (asoc->alternate != net) { 8914 continue; 8915 } 8916 } 8917 } else { 8918 if (chk->whoTo != net) { 8919 continue; 8920 } 8921 } 8922 skip_net_check: 8923 if (chk->data == NULL) { 8924 continue; 8925 } 8926 if (chk->sent != SCTP_DATAGRAM_UNSENT) { 8927 /* 8928 * It must be unsent. Cookies and ASCONF's 8929 * hang around but there timers will force 8930 * when marked for resend. 8931 */ 8932 continue; 8933 } 8934 /* 8935 * if no AUTH is yet included and this chunk 8936 * requires it, make sure to account for it. We 8937 * don't apply the size until the AUTH chunk is 8938 * actually added below in case there is no room for 8939 * this chunk. NOTE: we overload the use of "omtu" 8940 * here 8941 */ 8942 if ((auth == NULL) && 8943 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8944 stcb->asoc.peer_auth_chunks)) { 8945 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 8946 } else 8947 omtu = 0; 8948 /* Here we do NOT factor the r_mtu */ 8949 if ((chk->send_size <= (int)(mtu - omtu)) || 8950 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 8951 /* 8952 * We probably should glom the mbuf chain 8953 * from the chk->data for control but the 8954 * problem is it becomes yet one more level 8955 * of tracking to do if for some reason 8956 * output fails. Then I have got to 8957 * reconstruct the merged control chain.. el 8958 * yucko.. 
for now we take the easy way and 8959 * do the copy 8960 */ 8961 /* 8962 * Add an AUTH chunk, if chunk requires it 8963 * save the offset into the chain for AUTH 8964 */ 8965 if ((auth == NULL) && 8966 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 8967 stcb->asoc.peer_auth_chunks))) { 8968 outchain = sctp_add_auth_chunk(outchain, 8969 &endoutchain, 8970 &auth, 8971 &auth_offset, 8972 stcb, 8973 chk->rec.chunk_id.id); 8974 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8975 } 8976 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 8977 (int)chk->rec.chunk_id.can_take_data, 8978 chk->send_size, chk->copy_by_ref); 8979 if (outchain == NULL) { 8980 *reason_code = 8; 8981 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 8982 return (ENOMEM); 8983 } 8984 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 8985 /* update our MTU size */ 8986 if (mtu > (chk->send_size + omtu)) 8987 mtu -= (chk->send_size + omtu); 8988 else 8989 mtu = 0; 8990 to_out += (chk->send_size + omtu); 8991 /* Do clear IP_DF ? 
*/ 8992 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 8993 no_fragmentflg = 0; 8994 } 8995 if (chk->rec.chunk_id.can_take_data) 8996 chk->data = NULL; 8997 /* Mark things to be removed, if needed */ 8998 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 8999 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ 9000 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 9001 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 9002 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 9003 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 9004 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 9005 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 9006 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 9007 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 9008 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 9009 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { 9010 hbflag = 1; 9011 } 9012 /* remove these chunks at the end */ 9013 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 9014 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 9015 /* turn off the timer */ 9016 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 9017 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 9018 inp, stcb, NULL, 9019 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1); 9020 } 9021 } 9022 ctl_cnt++; 9023 } else { 9024 /* 9025 * Other chunks, since they have 9026 * timers running (i.e. COOKIE) 9027 * we just "trust" that it 9028 * gets sent or retransmitted. 9029 */ 9030 ctl_cnt++; 9031 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 9032 cookie = 1; 9033 no_out_cnt = 1; 9034 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 9035 /* 9036 * Increment ecne send count here 9037 * this means we may be over-zealous in 9038 * our counting if the send fails, but its 9039 * the best place to do it (we used to do 9040 * it in the queue of the chunk, but that did 9041 * not tell how many times it was sent. 
9042 */ 9043 SCTP_STAT_INCR(sctps_sendecne); 9044 } 9045 chk->sent = SCTP_DATAGRAM_SENT; 9046 if (chk->whoTo == NULL) { 9047 chk->whoTo = net; 9048 atomic_add_int(&net->ref_count, 1); 9049 } 9050 chk->snd_count++; 9051 } 9052 if (mtu == 0) { 9053 /* 9054 * Ok we are out of room but we can 9055 * output without effecting the 9056 * flight size since this little guy 9057 * is a control only packet. 9058 */ 9059 switch (asoc->snd_edmid) { 9060 case SCTP_EDMID_LOWER_LAYER_DTLS: 9061 use_zero_crc = true; 9062 break; 9063 default: 9064 use_zero_crc = false; 9065 break; 9066 } 9067 if (asconf) { 9068 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 9069 use_zero_crc = false; 9070 /* 9071 * do NOT clear the asconf 9072 * flag as it is used to do 9073 * appropriate source address 9074 * selection. 9075 */ 9076 } 9077 if (cookie) { 9078 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 9079 use_zero_crc = false; 9080 cookie = 0; 9081 } 9082 /* Only HB or ASCONF advances time */ 9083 if (hbflag) { 9084 if (*now_filled == 0) { 9085 (void)SCTP_GETTIME_TIMEVAL(now); 9086 *now_filled = 1; 9087 } 9088 net->last_sent_time = *now; 9089 hbflag = 0; 9090 } 9091 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 9092 (struct sockaddr *)&net->ro._l_addr, 9093 outchain, 9094 auth_offset, auth, 9095 stcb->asoc.authinfo.active_keyid, 9096 no_fragmentflg, 0, asconf, 9097 inp->sctp_lport, stcb->rport, 9098 htonl(stcb->asoc.peer_vtag), 9099 net->port, NULL, 9100 #if defined(__FreeBSD__) && !defined(__Userspace__) 9101 0, 0, 9102 #endif 9103 use_zero_crc, so_locked))) { 9104 /* error, we could not output */ 9105 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 9106 if (from_where == 0) { 9107 SCTP_STAT_INCR(sctps_lowlevelerrusr); 9108 } 9109 if (error == ENOBUFS) { 9110 asoc->ifp_had_enobuf = 1; 9111 SCTP_STAT_INCR(sctps_lowlevelerr); 9112 } 9113 if (error == EHOSTUNREACH) { 9114 /* 9115 * Destination went 9116 * unreachable 9117 * during this send 9118 */ 9119 
sctp_move_chunks_from_net(stcb, net); 9120 } 9121 asconf = 0; 9122 *reason_code = 7; 9123 break; 9124 } else { 9125 asoc->ifp_had_enobuf = 0; 9126 } 9127 /* 9128 * increase the number we sent, if a 9129 * cookie is sent we don't tell them 9130 * any was sent out. 9131 */ 9132 outchain = endoutchain = NULL; 9133 auth = NULL; 9134 auth_offset = 0; 9135 asconf = 0; 9136 if (!no_out_cnt) 9137 *num_out += ctl_cnt; 9138 /* recalc a clean slate and setup */ 9139 switch (net->ro._l_addr.sa.sa_family) { 9140 #ifdef INET 9141 case AF_INET: 9142 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 9143 break; 9144 #endif 9145 #ifdef INET6 9146 case AF_INET6: 9147 mtu = net->mtu - SCTP_MIN_OVERHEAD; 9148 break; 9149 #endif 9150 #if defined(__Userspace__) 9151 case AF_CONN: 9152 mtu = net->mtu - sizeof(struct sctphdr); 9153 break; 9154 #endif 9155 default: 9156 /* TSNH */ 9157 mtu = net->mtu; 9158 break; 9159 } 9160 to_out = 0; 9161 no_fragmentflg = 1; 9162 } 9163 } 9164 } 9165 if (error != 0) { 9166 /* try next net */ 9167 continue; 9168 } 9169 /* JRI: if dest is in PF state, do not send data to it */ 9170 if ((asoc->sctp_cmt_on_off > 0) && 9171 (net != stcb->asoc.alternate) && 9172 (net->dest_state & SCTP_ADDR_PF)) { 9173 goto no_data_fill; 9174 } 9175 if (net->flight_size >= net->cwnd) { 9176 goto no_data_fill; 9177 } 9178 if ((asoc->sctp_cmt_on_off > 0) && 9179 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && 9180 (net->flight_size > max_rwnd_per_dest)) { 9181 goto no_data_fill; 9182 } 9183 /* 9184 * We need a specific accounting for the usage of the 9185 * send buffer. We also need to check the number of messages 9186 * per net. For now, this is better than nothing and it 9187 * disabled by default... 
9188 */ 9189 if ((asoc->sctp_cmt_on_off > 0) && 9190 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && 9191 (max_send_per_dest > 0) && 9192 (net->flight_size > max_send_per_dest)) { 9193 goto no_data_fill; 9194 } 9195 /*********************/ 9196 /* Data transmission */ 9197 /*********************/ 9198 /* 9199 * if AUTH for DATA is required and no AUTH has been added 9200 * yet, account for this in the mtu now... if no data can be 9201 * bundled, this adjustment won't matter anyways since the 9202 * packet will be going out... 9203 */ 9204 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, 9205 stcb->asoc.peer_auth_chunks); 9206 if (data_auth_reqd && (auth == NULL)) { 9207 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 9208 } 9209 /* now lets add any data within the MTU constraints */ 9210 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 9211 #ifdef INET 9212 case AF_INET: 9213 if (net->mtu > SCTP_MIN_V4_OVERHEAD) 9214 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 9215 else 9216 omtu = 0; 9217 break; 9218 #endif 9219 #ifdef INET6 9220 case AF_INET6: 9221 if (net->mtu > SCTP_MIN_OVERHEAD) 9222 omtu = net->mtu - SCTP_MIN_OVERHEAD; 9223 else 9224 omtu = 0; 9225 break; 9226 #endif 9227 #if defined(__Userspace__) 9228 case AF_CONN: 9229 if (net->mtu > sizeof(struct sctphdr)) { 9230 omtu = net->mtu - sizeof(struct sctphdr); 9231 } else { 9232 omtu = 0; 9233 } 9234 break; 9235 #endif 9236 default: 9237 /* TSNH */ 9238 omtu = 0; 9239 break; 9240 } 9241 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) || 9242 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 9243 (skip_data_for_this_net == 0)) || 9244 (cookie)) { 9245 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 9246 if (no_data_chunks) { 9247 /* let only control go out */ 9248 *reason_code = 1; 9249 break; 9250 } 9251 if (net->flight_size >= net->cwnd) { 9252 /* skip this net, no room for data */ 9253 *reason_code = 2; 9254 break; 9255 } 9256 if 
((chk->whoTo != NULL) && 9257 (chk->whoTo != net)) { 9258 /* Don't send the chunk on this net */ 9259 continue; 9260 } 9261 9262 if (asoc->sctp_cmt_on_off == 0) { 9263 if ((asoc->alternate) && 9264 (asoc->alternate != net) && 9265 (chk->whoTo == NULL)) { 9266 continue; 9267 } else if ((net != asoc->primary_destination) && 9268 (asoc->alternate == NULL) && 9269 (chk->whoTo == NULL)) { 9270 continue; 9271 } 9272 } 9273 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 9274 /*- 9275 * strange, we have a chunk that is 9276 * to big for its destination and 9277 * yet no fragment ok flag. 9278 * Something went wrong when the 9279 * PMTU changed...we did not mark 9280 * this chunk for some reason?? I 9281 * will fix it here by letting IP 9282 * fragment it for now and printing 9283 * a warning. This really should not 9284 * happen ... 9285 */ 9286 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 9287 chk->send_size, mtu); 9288 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 9289 } 9290 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && 9291 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 9292 struct sctp_data_chunk *dchkh; 9293 9294 dchkh = mtod(chk->data, struct sctp_data_chunk *); 9295 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; 9296 } 9297 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 9298 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 9299 /* ok we will add this one */ 9300 9301 /* 9302 * Add an AUTH chunk, if chunk 9303 * requires it, save the offset into 9304 * the chain for AUTH 9305 */ 9306 if (data_auth_reqd) { 9307 if (auth == NULL) { 9308 outchain = sctp_add_auth_chunk(outchain, 9309 &endoutchain, 9310 &auth, 9311 &auth_offset, 9312 stcb, 9313 SCTP_DATA); 9314 auth_keyid = chk->auth_keyid; 9315 override_ok = 0; 9316 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 9317 } else if (override_ok) { 9318 /* use this data's keyid */ 9319 auth_keyid = 
chk->auth_keyid; 9320 override_ok = 0; 9321 } else if (auth_keyid != chk->auth_keyid) { 9322 /* different keyid, so done bundling */ 9323 break; 9324 } 9325 } 9326 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 9327 chk->send_size, chk->copy_by_ref); 9328 if (outchain == NULL) { 9329 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 9330 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 9331 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9332 } 9333 *reason_code = 3; 9334 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 9335 return (ENOMEM); 9336 } 9337 /* update our MTU size */ 9338 /* Do clear IP_DF ? */ 9339 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 9340 no_fragmentflg = 0; 9341 } 9342 /* unsigned subtraction of mtu */ 9343 if (mtu > chk->send_size) 9344 mtu -= chk->send_size; 9345 else 9346 mtu = 0; 9347 /* unsigned subtraction of r_mtu */ 9348 if (r_mtu > chk->send_size) 9349 r_mtu -= chk->send_size; 9350 else 9351 r_mtu = 0; 9352 9353 to_out += chk->send_size; 9354 if ((to_out > mx_mtu) && no_fragmentflg) { 9355 #ifdef INVARIANTS 9356 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 9357 #else 9358 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 9359 mx_mtu, to_out); 9360 #endif 9361 } 9362 chk->window_probe = 0; 9363 data_list[bundle_at++] = chk; 9364 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 9365 break; 9366 } 9367 if (chk->sent == SCTP_DATAGRAM_UNSENT) { 9368 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 9369 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 9370 } else { 9371 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 9372 } 9373 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 9374 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 9375 /* Count number of user msg's that were fragmented 9376 * we do this by counting when we see a LAST fragment 9377 * only. 
9378 */ 9379 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 9380 } 9381 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 9382 if ((one_chunk) && (stcb->asoc.total_flight == 0)) { 9383 data_list[0]->window_probe = 1; 9384 net->window_probe = 1; 9385 } 9386 break; 9387 } 9388 } else { 9389 /* 9390 * Must be sent in order of the 9391 * TSN's (on a network) 9392 */ 9393 break; 9394 } 9395 } /* for (chunk gather loop for this net) */ 9396 } /* if asoc.state OPEN */ 9397 no_data_fill: 9398 /* Is there something to send for this destination? */ 9399 if (outchain) { 9400 switch (asoc->snd_edmid) { 9401 case SCTP_EDMID_LOWER_LAYER_DTLS: 9402 use_zero_crc = true; 9403 break; 9404 default: 9405 use_zero_crc = false; 9406 break; 9407 } 9408 /* We may need to start a control timer or two */ 9409 if (asconf) { 9410 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, 9411 stcb, net); 9412 use_zero_crc = false; 9413 /* 9414 * do NOT clear the asconf flag as it is used 9415 * to do appropriate source address selection. 9416 */ 9417 } 9418 if (cookie) { 9419 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 9420 use_zero_crc = false; 9421 cookie = 0; 9422 } 9423 /* must start a send timer if data is being sent */ 9424 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 9425 /* 9426 * no timer running on this destination 9427 * restart it. 
9428 */ 9429 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 9430 } 9431 if (bundle_at || hbflag) { 9432 /* For data/asconf and hb set time */ 9433 if (*now_filled == 0) { 9434 (void)SCTP_GETTIME_TIMEVAL(now); 9435 *now_filled = 1; 9436 } 9437 net->last_sent_time = *now; 9438 } 9439 /* Now send it, if there is anything to send :> */ 9440 if ((error = sctp_lowlevel_chunk_output(inp, 9441 stcb, 9442 net, 9443 (struct sockaddr *)&net->ro._l_addr, 9444 outchain, 9445 auth_offset, 9446 auth, 9447 auth_keyid, 9448 no_fragmentflg, 9449 bundle_at, 9450 asconf, 9451 inp->sctp_lport, stcb->rport, 9452 htonl(stcb->asoc.peer_vtag), 9453 net->port, NULL, 9454 #if defined(__FreeBSD__) && !defined(__Userspace__) 9455 0, 0, 9456 #endif 9457 use_zero_crc, 9458 so_locked))) { 9459 /* error, we could not output */ 9460 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 9461 if (from_where == 0) { 9462 SCTP_STAT_INCR(sctps_lowlevelerrusr); 9463 } 9464 if (error == ENOBUFS) { 9465 asoc->ifp_had_enobuf = 1; 9466 SCTP_STAT_INCR(sctps_lowlevelerr); 9467 } 9468 if (error == EHOSTUNREACH) { 9469 /* 9470 * Destination went unreachable 9471 * during this send 9472 */ 9473 sctp_move_chunks_from_net(stcb, net); 9474 } 9475 asconf = 0; 9476 *reason_code = 6; 9477 /*- 9478 * I add this line to be paranoid. As far as 9479 * I can tell the continue, takes us back to 9480 * the top of the for, but just to make sure 9481 * I will reset these again here. 9482 */ 9483 ctl_cnt = 0; 9484 continue; /* This takes us back to the for() for the nets. 
*/
			} else {
				asoc->ifp_had_enobuf = 0;
			}
			/* Reset per-packet bundling state before the next destination. */
			endoutchain = NULL;
			auth = NULL;
			auth_offset = 0;
			asconf = 0;
			if (!no_out_cnt) {
				*num_out += (ctl_cnt + bundle_at);
			}
			if (bundle_at) {
				/* setup for a RTO measurement */
				tsns_sent = data_list[0]->rec.data.tsn;
				/* fill time if not already filled */
				if (*now_filled == 0) {
					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
					*now_filled = 1;
					*now = asoc->time_last_sent;
				} else {
					asoc->time_last_sent = *now;
				}
				if (net->rto_needed) {
					/* only the first chunk of the bundle is timed */
					data_list[0]->do_rtt = 1;
					net->rto_needed = 0;
				}
				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
			}
			if (one_chunk) {
				break;
			}
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
		}
	}
	if (old_start_at == NULL) {
		old_start_at = start_at;
		start_at = TAILQ_FIRST(&asoc->nets);
		if (old_start_at)
			goto again_one_more_time;
	}

	/*
	 * At the end there should be no NON timed chunks hanging on this
	 * queue.
	 */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
	}
	if ((*num_out == 0) && (*reason_code == 0)) {
		*reason_code = 4;
	} else {
		*reason_code = 5;
	}
	sctp_clean_up_ctl(stcb, asoc, so_locked);
	return (0);
}

/*
 * Prepend an OPERATION-ERROR chunk header to op_err and append the result
 * to the association's control chunk queue.  Ownership of op_err passes to
 * this function: on any failure (prepend failure, oversized chunk, padding
 * failure, or chunk allocation failure) op_err is freed and nothing is
 * queued.  The chunk is padded out to a 4-byte boundary before queueing.
 * Caller must hold the TCB lock.
 */
void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
	/*-
	 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
	 * the control chunk queue.
	 */
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	struct mbuf *mat, *last_mbuf;
	uint32_t chunk_length;
	uint16_t padding_length;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* On allocation failure SCTP_BUF_PREPEND leaves op_err == NULL. */
	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
	if (op_err == NULL) {
		return;
	}
	/* Walk the chain once: total the length and remember the tail mbuf. */
	last_mbuf = NULL;
	chunk_length = 0;
	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
		chunk_length += SCTP_BUF_LEN(mat);
		if (SCTP_BUF_NEXT(mat) == NULL) {
			last_mbuf = mat;
		}
	}
	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
		sctp_m_freem(op_err);
		return;
	}
	/* Compute the padding needed to reach a multiple of 4 bytes. */
	padding_length = chunk_length % 4;
	if (padding_length != 0) {
		padding_length = 4 - padding_length;
	}
	if (padding_length != 0) {
		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
			sctp_m_freem(op_err);
			return;
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(op_err);
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	/* chunk_length <= SCTP_MAX_CHUNK_LENGTH here, so the cast is safe. */
	chk->send_size = (uint16_t)chunk_length;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = op_err;
	chk->whoTo = NULL;
	/* Fill in the chunk header that was prepended above. */
	hdr = mtod(op_err, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_OPERATION_ERROR;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Locate the STATE-COOKIE parameter in the parameter area of m (the
 * parameter list is taken to start sizeof(struct sctp_init_chunk) past
 * offset — presumably this is the peer's INIT-ACK; bounded by limit),
 * copy it, rewrite it into a COOKIE-ECHO chunk, and insert that chunk at
 * the FRONT of the control chunk queue.  Returns 0 on success or a
 * negative error code on failure.  Caller must hold the TCB lock.
 */
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset, int limit,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*-
	 * pull out the cookie and put it at the front of the control chunk
	 * queue.
 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr param, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* First find the cookie in the param area */
	cookie = NULL;
	at = offset + sizeof(struct sctp_init_chunk);
	for (;;) {
		phdr = sctp_get_next_param(m, at, &param, sizeof(param));
		if (phdr == NULL) {
			/* ran off the end of the parameters without a cookie */
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (plen < sizeof(struct sctp_paramhdr)) {
			/* malformed: parameter shorter than its own header */
			return (-6);
		}
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			if (at + plen > limit) {
				/* cookie claims to extend past the message */
				return (-7);
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
			/* pad the copied cookie out to a 4-byte boundary */
			if ((pad = (plen % 4)) > 0) {
				pad = 4 - pad;
			}
			if (pad > 0) {
				if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
					return (-8);
				}
			}
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
			}
#endif
			break;
		}
		at += SCTP_SIZE32(plen);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */
	/* first the change from param to cookie */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->send_size = SCTP_SIZE32(plen);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	chk->whoTo = net;
	/* the queued chunk holds a reference on its destination */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}

/*
 * Take a received HEARTBEAT request (chk_length bytes at offset in m),
 * copy it, rewrite the chunk type to HEARTBEAT-ACK, pad the copy to a
 * 4-byte boundary, and queue it on the control chunk queue for net.  A
 * reference on net is taken for the queued chunk.  Does nothing when net
 * is NULL or on allocation failure.
 */
void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
	/*
	 * take a HB request and make it into a HB ack and send it.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;

	if (net == NULL)
		/* must have a net pointer */
		return;

	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
	if (outchain == NULL) {
		/* gak out of memory */
		return;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
	}
#endif
	/* turn the copied request into an ack by flipping the chunk type */
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4 != 0) {
		sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(outchain);
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	chk->send_size = chk_length;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	chk->whoTo = net;
	/* the queued chunk holds a reference on its destination */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}

/*
 * Formulate a COOKIE-ACK chunk and queue it on the control chunk queue.
 * When the source of the last received control chunk is known, the ack is
 * targeted at that net (taking a reference); otherwise whoTo is left NULL.
 * Caller must hold the TCB lock.
 */
void
sctp_send_cookie_ack(struct sctp_tcb *stcb)
{
	/* formulate and queue a cookie-ack back to sender */
	struct mbuf *cookie_ack;
	struct sctp_chunkhdr *hdr;
struct sctp_tmit_chunk *chk;

	SCTP_TCB_LOCK_ASSERT(stcb);

	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
	if (cookie_ack == NULL) {
		/* no mbuf's */
		return;
	}
	/* leave room in front for the lower-layer headers */
	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie_ack);
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie_ack;
	/* reply toward the source of the last received control chunk, if known */
	if (chk->asoc->last_control_chunk_from != NULL) {
		chk->whoTo = chk->asoc->last_control_chunk_from;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else {
		chk->whoTo = NULL;
	}
	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Formulate a SHUTDOWN-ACK chunk destined for net and queue it on the
 * control chunk queue.  When net is non-NULL a reference on it is taken
 * for the queued chunk.  Silently does nothing on allocation failure.
 */
void
sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN-ACK back to the sender */
	struct mbuf *m_shutdown_ack;
	struct sctp_shutdown_ack_chunk *ack_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown_ack == NULL) {
		/* no mbuf's */
		return;
	}
	/* leave room in front for the lower-layer headers */
	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown_ack);
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown_ack;
	chk->whoTo = net;
	if (chk->whoTo) {
		atomic_add_int(&chk->whoTo->ref_count, 1);
	}
	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Formulate a SHUTDOWN chunk carrying the current cumulative TSN and
 * queue it for net on the control chunk queue.  If a SHUTDOWN is already
 * queued, it is reused: re-targeted at net, its cumulative TSN refreshed,
 * and moved to the tail of the queue.  When net is non-NULL a reference
 * on it is taken for the queued chunk.
 */
void
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* formulate and queue a SHUTDOWN to the sender */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
			/* We already have a SHUTDOWN queued. Reuse it.
 */
			if (chk->whoTo) {
				/* drop the old destination's reference */
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = NULL;
			}
			break;
		}
	}
	if (chk == NULL) {
		/* no queued SHUTDOWN found; build a fresh one */
		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
		if (m_shutdown == NULL) {
			/* no mbuf's */
			return;
		}
		/* leave room in front for the lower-layer headers */
		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
		sctp_alloc_a_chunk(stcb, chk);
		if (chk == NULL) {
			/* no memory */
			sctp_m_freem(m_shutdown);
			return;
		}
		chk->copy_by_ref = 0;
		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
		chk->rec.chunk_id.can_take_data = 1;
		chk->flags = 0;
		chk->send_size = sizeof(struct sctp_shutdown_chunk);
		chk->sent = SCTP_DATAGRAM_UNSENT;
		chk->snd_count = 0;
		chk->asoc = &stcb->asoc;
		chk->data = m_shutdown;
		chk->whoTo = net;
		if (chk->whoTo) {
			atomic_add_int(&chk->whoTo->ref_count, 1);
		}
		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
		shutdown_cp->ch.chunk_flags = 0;
		shutdown_cp->ch.chunk_length = htons(chk->send_size);
		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
		chk->asoc->ctrl_queue_cnt++;
	} else {
		/* reuse path: re-target, refresh the TSN, move to the tail */
		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
		chk->whoTo = net;
		if (chk->whoTo) {
			atomic_add_int(&chk->whoTo->ref_count, 1);
		}
		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	}
	return;
}

/*
 * Compose an ASCONF chunk from the queued ASCONF parameters (via
 * sctp_compose_asconf, limited to the PMTU) and place it on the
 * association's asconf_send_queue for net.  Unless multiple outstanding
 * ASCONFs are enabled (SCTP_PCB_FLAGS_MULTIPLE_ASCONFS), a new one is not
 * queued while another is still pending.  Caller must hold the TCB lock.
 */
void
sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
{
	/*
	 * formulate and queue an ASCONF to the peer.
	 * ASCONF parameters should be queued on the assoc queue.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;
	int len;

	SCTP_TCB_LOCK_ASSERT(stcb);

	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
		/* can't send a new one if there is one in flight already */
		return;
	}

	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
	if (m_asconf == NULL) {
		return;
	}

	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_asconf);
		return;
	}

	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->asoc = &stcb->asoc;
	chk->whoTo = net;
	if (chk->whoTo) {
		atomic_add_int(&chk->whoTo->ref_count, 1);
	}
	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return;
}

/*
 * Queue copies of the cached ASCONF-ACKs back to the sender.  The acks
 * themselves remain stored in (and owned by) the tcb; only copies are
 * placed on the control chunk queue.  Caller must hold the TCB lock.
 */
void
sctp_send_asconf_ack(struct sctp_tcb *stcb)
{
	/*
	 * formulate and queue a asconf-ack back to sender.
	 * the asconf-ack must be stored in the tcb.
9962 */ 9963 struct sctp_tmit_chunk *chk; 9964 struct sctp_asconf_ack *ack, *latest_ack; 9965 struct mbuf *m_ack; 9966 struct sctp_nets *net = NULL; 9967 9968 SCTP_TCB_LOCK_ASSERT(stcb); 9969 /* Get the latest ASCONF-ACK */ 9970 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); 9971 if (latest_ack == NULL) { 9972 return; 9973 } 9974 if (latest_ack->last_sent_to != NULL && 9975 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { 9976 /* we're doing a retransmission */ 9977 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); 9978 if (net == NULL) { 9979 /* no alternate */ 9980 if (stcb->asoc.last_control_chunk_from == NULL) { 9981 if (stcb->asoc.alternate) { 9982 net = stcb->asoc.alternate; 9983 } else { 9984 net = stcb->asoc.primary_destination; 9985 } 9986 } else { 9987 net = stcb->asoc.last_control_chunk_from; 9988 } 9989 } 9990 } else { 9991 /* normal case */ 9992 if (stcb->asoc.last_control_chunk_from == NULL) { 9993 if (stcb->asoc.alternate) { 9994 net = stcb->asoc.alternate; 9995 } else { 9996 net = stcb->asoc.primary_destination; 9997 } 9998 } else { 9999 net = stcb->asoc.last_control_chunk_from; 10000 } 10001 } 10002 latest_ack->last_sent_to = net; 10003 10004 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { 10005 if (ack->data == NULL) { 10006 continue; 10007 } 10008 10009 /* copy the asconf_ack */ 10010 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT); 10011 if (m_ack == NULL) { 10012 /* couldn't copy it */ 10013 return; 10014 } 10015 #ifdef SCTP_MBUF_LOGGING 10016 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 10017 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY); 10018 } 10019 #endif 10020 10021 sctp_alloc_a_chunk(stcb, chk); 10022 if (chk == NULL) { 10023 /* no memory */ 10024 if (m_ack) 10025 sctp_m_freem(m_ack); 10026 return; 10027 } 10028 chk->copy_by_ref = 0; 10029 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 10030 chk->rec.chunk_id.can_take_data = 1; 10031 
chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 10032 chk->whoTo = net; 10033 if (chk->whoTo) { 10034 atomic_add_int(&chk->whoTo->ref_count, 1); 10035 } 10036 chk->data = m_ack; 10037 chk->send_size = ack->len; 10038 chk->sent = SCTP_DATAGRAM_UNSENT; 10039 chk->snd_count = 0; 10040 chk->asoc = &stcb->asoc; 10041 10042 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 10043 chk->asoc->ctrl_queue_cnt++; 10044 } 10045 return; 10046 } 10047 10048 static int 10049 sctp_chunk_retransmission(struct sctp_inpcb *inp, 10050 struct sctp_tcb *stcb, 10051 struct sctp_association *asoc, 10052 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked) 10053 { 10054 /*- 10055 * send out one MTU of retransmission. If fast_retransmit is 10056 * happening we ignore the cwnd. Otherwise we obey the cwnd and 10057 * rwnd. For a Cookie or Asconf in the control chunk queue we 10058 * retransmit them by themselves. 10059 * 10060 * For data chunks we will pick out the lowest TSN's in the sent_queue 10061 * marked for resend and bundle them all together (up to a MTU of 10062 * destination). The address to send to should have been 10063 * selected/changed where the retransmission was marked (i.e. in FR 10064 * or t3-timeout routines). 
10065 */ 10066 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 10067 struct sctp_tmit_chunk *chk, *fwd; 10068 struct mbuf *m, *endofchain; 10069 struct sctp_nets *net = NULL; 10070 uint32_t tsns_sent = 0; 10071 int no_fragmentflg, bundle_at; 10072 unsigned int mtu; 10073 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; 10074 struct sctp_auth_chunk *auth = NULL; 10075 uint32_t auth_offset = 0; 10076 uint16_t auth_keyid; 10077 int override_ok = 1; 10078 int data_auth_reqd = 0; 10079 uint32_t dmtu = 0; 10080 bool use_zero_crc; 10081 10082 #if defined(__APPLE__) && !defined(__Userspace__) 10083 if (so_locked) { 10084 sctp_lock_assert(SCTP_INP_SO(inp)); 10085 } else { 10086 sctp_unlock_assert(SCTP_INP_SO(inp)); 10087 } 10088 #endif 10089 SCTP_TCB_LOCK_ASSERT(stcb); 10090 tmr_started = ctl_cnt = 0; 10091 no_fragmentflg = 1; 10092 fwd_tsn = 0; 10093 *cnt_out = 0; 10094 fwd = NULL; 10095 endofchain = m = NULL; 10096 auth_keyid = stcb->asoc.authinfo.active_keyid; 10097 #ifdef SCTP_AUDITING_ENABLED 10098 sctp_audit_log(0xC3, 1); 10099 #endif 10100 if ((TAILQ_EMPTY(&asoc->sent_queue)) && 10101 (TAILQ_EMPTY(&asoc->control_send_queue))) { 10102 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n", 10103 asoc->sent_queue_retran_cnt); 10104 asoc->sent_queue_cnt = 0; 10105 asoc->sent_queue_cnt_removeable = 0; 10106 /* send back 0/0 so we enter normal transmission */ 10107 *cnt_out = 0; 10108 return (0); 10109 } 10110 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 10111 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || 10112 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || 10113 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { 10114 if (chk->sent != SCTP_DATAGRAM_RESEND) { 10115 continue; 10116 } 10117 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 10118 if (chk != asoc->str_reset) { 10119 /* 10120 * not eligible for retran if its 10121 * not ours 10122 */ 10123 continue; 10124 } 10125 } 10126 ctl_cnt++; 10127 if 
(chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 10128 fwd_tsn = 1; 10129 } 10130 /* 10131 * Add an AUTH chunk, if chunk requires it save the 10132 * offset into the chain for AUTH 10133 */ 10134 if ((auth == NULL) && 10135 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 10136 stcb->asoc.peer_auth_chunks))) { 10137 m = sctp_add_auth_chunk(m, &endofchain, 10138 &auth, &auth_offset, 10139 stcb, 10140 chk->rec.chunk_id.id); 10141 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 10142 } 10143 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 10144 break; 10145 } 10146 } 10147 one_chunk = 0; 10148 /* do we have control chunks to retransmit? */ 10149 if (m != NULL) { 10150 /* Start a timer no matter if we succeed or fail */ 10151 switch (asoc->snd_edmid) { 10152 case SCTP_EDMID_LOWER_LAYER_DTLS: 10153 use_zero_crc = true; 10154 break; 10155 default: 10156 use_zero_crc = false; 10157 break; 10158 } 10159 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 10160 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); 10161 use_zero_crc = false; 10162 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) { 10163 /* XXXMT: Can this happen? 
*/ 10164 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); 10165 use_zero_crc = false; 10166 } 10167 chk->snd_count++; /* update our count */ 10168 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, 10169 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, 10170 auth_offset, auth, stcb->asoc.authinfo.active_keyid, 10171 no_fragmentflg, 0, 0, 10172 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 10173 chk->whoTo->port, NULL, 10174 #if defined(__FreeBSD__) && !defined(__Userspace__) 10175 0, 0, 10176 #endif 10177 use_zero_crc, 10178 so_locked))) { 10179 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 10180 if (error == ENOBUFS) { 10181 asoc->ifp_had_enobuf = 1; 10182 SCTP_STAT_INCR(sctps_lowlevelerr); 10183 } 10184 return (error); 10185 } else { 10186 asoc->ifp_had_enobuf = 0; 10187 } 10188 endofchain = NULL; 10189 auth = NULL; 10190 auth_offset = 0; 10191 /* 10192 * We don't want to mark the net->sent time here since this 10193 * we use this for HB and retrans cannot measure RTT 10194 */ 10195 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ 10196 *cnt_out += 1; 10197 chk->sent = SCTP_DATAGRAM_SENT; 10198 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 10199 if (fwd_tsn == 0) { 10200 return (0); 10201 } else { 10202 /* Clean up the fwd-tsn list */ 10203 sctp_clean_up_ctl(stcb, asoc, so_locked); 10204 return (0); 10205 } 10206 } 10207 /* 10208 * Ok, it is just data retransmission we need to do or that and a 10209 * fwd-tsn with it all. 
10210 */ 10211 if (TAILQ_EMPTY(&asoc->sent_queue)) { 10212 return (SCTP_RETRAN_DONE); 10213 } 10214 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) || 10215 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) { 10216 /* not yet open, resend the cookie and that is it */ 10217 return (1); 10218 } 10219 #ifdef SCTP_AUDITING_ENABLED 10220 sctp_auditing(20, inp, stcb, NULL); 10221 #endif 10222 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); 10223 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 10224 if (chk->sent != SCTP_DATAGRAM_RESEND) { 10225 /* No, not sent to this net or not ready for rtx */ 10226 continue; 10227 } 10228 if (chk->data == NULL) { 10229 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", 10230 chk->rec.data.tsn, chk->snd_count, chk->sent); 10231 continue; 10232 } 10233 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && 10234 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { 10235 struct mbuf *op_err; 10236 char msg[SCTP_DIAG_INFO_LEN]; 10237 10238 SCTP_SNPRINTF(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up", 10239 chk->rec.data.tsn, chk->snd_count); 10240 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 10241 msg); 10242 atomic_add_int(&stcb->asoc.refcnt, 1); 10243 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, 10244 false, so_locked); 10245 SCTP_TCB_LOCK(stcb); 10246 atomic_subtract_int(&stcb->asoc.refcnt, 1); 10247 return (SCTP_RETRAN_EXIT); 10248 } 10249 /* pick up the net */ 10250 net = chk->whoTo; 10251 switch (net->ro._l_addr.sa.sa_family) { 10252 #ifdef INET 10253 case AF_INET: 10254 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 10255 break; 10256 #endif 10257 #ifdef INET6 10258 case AF_INET6: 10259 mtu = net->mtu - SCTP_MIN_OVERHEAD; 10260 break; 10261 #endif 10262 #if defined(__Userspace__) 10263 case AF_CONN: 10264 mtu = net->mtu - sizeof(struct sctphdr); 10265 break; 10266 #endif 10267 default: 10268 /* TSNH */ 10269 mtu 
= net->mtu; 10270 break; 10271 } 10272 10273 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { 10274 /* No room in peers rwnd */ 10275 uint32_t tsn; 10276 10277 tsn = asoc->last_acked_seq + 1; 10278 if (tsn == chk->rec.data.tsn) { 10279 /* 10280 * we make a special exception for this 10281 * case. The peer has no rwnd but is missing 10282 * the lowest chunk.. which is probably what 10283 * is holding up the rwnd. 10284 */ 10285 goto one_chunk_around; 10286 } 10287 return (1); 10288 } 10289 one_chunk_around: 10290 if (asoc->peers_rwnd < mtu) { 10291 one_chunk = 1; 10292 if ((asoc->peers_rwnd == 0) && 10293 (asoc->total_flight == 0)) { 10294 chk->window_probe = 1; 10295 chk->whoTo->window_probe = 1; 10296 } 10297 } 10298 #ifdef SCTP_AUDITING_ENABLED 10299 sctp_audit_log(0xC3, 2); 10300 #endif 10301 bundle_at = 0; 10302 m = NULL; 10303 net->fast_retran_ip = 0; 10304 if (chk->rec.data.doing_fast_retransmit == 0) { 10305 /* 10306 * if no FR in progress skip destination that have 10307 * flight_size > cwnd. 10308 */ 10309 if (net->flight_size >= net->cwnd) { 10310 continue; 10311 } 10312 } else { 10313 /* 10314 * Mark the destination net to have FR recovery 10315 * limits put on it. 10316 */ 10317 *fr_done = 1; 10318 net->fast_retran_ip = 1; 10319 } 10320 10321 /* 10322 * if no AUTH is yet included and this chunk requires it, 10323 * make sure to account for it. We don't apply the size 10324 * until the AUTH chunk is actually added below in case 10325 * there is no room for this chunk. 
10326 */ 10327 if (data_auth_reqd && (auth == NULL)) { 10328 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 10329 } else 10330 dmtu = 0; 10331 10332 if ((chk->send_size <= (mtu - dmtu)) || 10333 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 10334 /* ok we will add this one */ 10335 if (data_auth_reqd) { 10336 if (auth == NULL) { 10337 m = sctp_add_auth_chunk(m, 10338 &endofchain, 10339 &auth, 10340 &auth_offset, 10341 stcb, 10342 SCTP_DATA); 10343 auth_keyid = chk->auth_keyid; 10344 override_ok = 0; 10345 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 10346 } else if (override_ok) { 10347 auth_keyid = chk->auth_keyid; 10348 override_ok = 0; 10349 } else if (chk->auth_keyid != auth_keyid) { 10350 /* different keyid, so done bundling */ 10351 break; 10352 } 10353 } 10354 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 10355 if (m == NULL) { 10356 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 10357 return (ENOMEM); 10358 } 10359 /* Do clear IP_DF ? */ 10360 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 10361 no_fragmentflg = 0; 10362 } 10363 /* update our MTU size */ 10364 if (mtu > (chk->send_size + dmtu)) 10365 mtu -= (chk->send_size + dmtu); 10366 else 10367 mtu = 0; 10368 data_list[bundle_at++] = chk; 10369 if (one_chunk && (asoc->total_flight <= 0)) { 10370 SCTP_STAT_INCR(sctps_windowprobed); 10371 } 10372 } 10373 if (one_chunk == 0) { 10374 /* 10375 * now are there anymore forward from chk to pick 10376 * up? 
10377 */ 10378 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) { 10379 if (fwd->sent != SCTP_DATAGRAM_RESEND) { 10380 /* Nope, not for retran */ 10381 continue; 10382 } 10383 if (fwd->whoTo != net) { 10384 /* Nope, not the net in question */ 10385 continue; 10386 } 10387 if (data_auth_reqd && (auth == NULL)) { 10388 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 10389 } else 10390 dmtu = 0; 10391 if (fwd->send_size <= (mtu - dmtu)) { 10392 if (data_auth_reqd) { 10393 if (auth == NULL) { 10394 m = sctp_add_auth_chunk(m, 10395 &endofchain, 10396 &auth, 10397 &auth_offset, 10398 stcb, 10399 SCTP_DATA); 10400 auth_keyid = fwd->auth_keyid; 10401 override_ok = 0; 10402 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 10403 } else if (override_ok) { 10404 auth_keyid = fwd->auth_keyid; 10405 override_ok = 0; 10406 } else if (fwd->auth_keyid != auth_keyid) { 10407 /* different keyid, so done bundling */ 10408 break; 10409 } 10410 } 10411 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); 10412 if (m == NULL) { 10413 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 10414 return (ENOMEM); 10415 } 10416 /* Do clear IP_DF ? */ 10417 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { 10418 no_fragmentflg = 0; 10419 } 10420 /* update our MTU size */ 10421 if (mtu > (fwd->send_size + dmtu)) 10422 mtu -= (fwd->send_size + dmtu); 10423 else 10424 mtu = 0; 10425 data_list[bundle_at++] = fwd; 10426 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 10427 break; 10428 } 10429 } else { 10430 /* can't fit so we are done */ 10431 break; 10432 } 10433 } 10434 } 10435 /* Is there something to send for this destination? */ 10436 if (m) { 10437 /* 10438 * No matter if we fail/or succeed we should start a 10439 * timer. A failure is like a lost IP packet :-) 10440 */ 10441 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 10442 /* 10443 * no timer running on this destination 10444 * restart it. 
10445 */ 10446 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 10447 tmr_started = 1; 10448 } 10449 switch (asoc->snd_edmid) { 10450 case SCTP_EDMID_LOWER_LAYER_DTLS: 10451 use_zero_crc = true; 10452 break; 10453 default: 10454 use_zero_crc = false; 10455 break; 10456 } 10457 /* Now lets send it, if there is anything to send :> */ 10458 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 10459 (struct sockaddr *)&net->ro._l_addr, m, 10460 auth_offset, auth, auth_keyid, 10461 no_fragmentflg, 0, 0, 10462 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 10463 net->port, NULL, 10464 #if defined(__FreeBSD__) && !defined(__Userspace__) 10465 0, 0, 10466 #endif 10467 use_zero_crc, 10468 so_locked))) { 10469 /* error, we could not output */ 10470 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 10471 if (error == ENOBUFS) { 10472 asoc->ifp_had_enobuf = 1; 10473 SCTP_STAT_INCR(sctps_lowlevelerr); 10474 } 10475 return (error); 10476 } else { 10477 asoc->ifp_had_enobuf = 0; 10478 } 10479 endofchain = NULL; 10480 auth = NULL; 10481 auth_offset = 0; 10482 /* For HB's */ 10483 /* 10484 * We don't want to mark the net->sent time here 10485 * since this we use this for HB and retrans cannot 10486 * measure RTT 10487 */ 10488 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ 10489 10490 /* For auto-close */ 10491 if (*now_filled == 0) { 10492 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 10493 *now = asoc->time_last_sent; 10494 *now_filled = 1; 10495 } else { 10496 asoc->time_last_sent = *now; 10497 } 10498 *cnt_out += bundle_at; 10499 #ifdef SCTP_AUDITING_ENABLED 10500 sctp_audit_log(0xC4, bundle_at); 10501 #endif 10502 if (bundle_at) { 10503 tsns_sent = data_list[0]->rec.data.tsn; 10504 } 10505 for (i = 0; i < bundle_at; i++) { 10506 SCTP_STAT_INCR(sctps_sendretransdata); 10507 data_list[i]->sent = SCTP_DATAGRAM_SENT; 10508 /* 10509 * When we have a revoked data, and we 10510 * retransmit it, then we clear the revoked 10511 * flag since 
this flag dictates if we 10512 * subtracted from the fs 10513 */ 10514 if (data_list[i]->rec.data.chunk_was_revoked) { 10515 /* Deflate the cwnd */ 10516 data_list[i]->whoTo->cwnd -= data_list[i]->book_size; 10517 data_list[i]->rec.data.chunk_was_revoked = 0; 10518 } 10519 data_list[i]->snd_count++; 10520 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 10521 /* record the time */ 10522 data_list[i]->sent_rcv_time = asoc->time_last_sent; 10523 if (data_list[i]->book_size_scale) { 10524 /* 10525 * need to double the book size on 10526 * this one 10527 */ 10528 data_list[i]->book_size_scale = 0; 10529 /* Since we double the booksize, we must 10530 * also double the output queue size, since this 10531 * get shrunk when we free by this amount. 10532 */ 10533 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size); 10534 data_list[i]->book_size *= 2; 10535 } else { 10536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 10537 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 10538 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 10539 } 10540 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 10541 (uint32_t) (data_list[i]->send_size + 10542 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); 10543 } 10544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 10545 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, 10546 data_list[i]->whoTo->flight_size, 10547 data_list[i]->book_size, 10548 (uint32_t)(uintptr_t)data_list[i]->whoTo, 10549 data_list[i]->rec.data.tsn); 10550 } 10551 sctp_flight_size_increase(data_list[i]); 10552 sctp_total_flight_increase(stcb, data_list[i]); 10553 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 10554 /* SWS sender side engages */ 10555 asoc->peers_rwnd = 0; 10556 } 10557 if ((i == 0) && 10558 (data_list[i]->rec.data.doing_fast_retransmit)) { 10559 SCTP_STAT_INCR(sctps_sendfastretrans); 10560 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && 10561 (tmr_started 
== 0)) { 10562 /*- 10563 * ok we just fast-retrans'd 10564 * the lowest TSN, i.e the 10565 * first on the list. In 10566 * this case we want to give 10567 * some more time to get a 10568 * SACK back without a 10569 * t3-expiring. 10570 */ 10571 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 10572 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2); 10573 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 10574 } 10575 } 10576 } 10577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10578 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); 10579 } 10580 #ifdef SCTP_AUDITING_ENABLED 10581 sctp_auditing(21, inp, stcb, NULL); 10582 #endif 10583 } else { 10584 /* None will fit */ 10585 return (1); 10586 } 10587 if (asoc->sent_queue_retran_cnt <= 0) { 10588 /* all done we have no more to retran */ 10589 asoc->sent_queue_retran_cnt = 0; 10590 break; 10591 } 10592 if (one_chunk) { 10593 /* No more room in rwnd */ 10594 return (1); 10595 } 10596 /* stop the for loop here. 
we sent out a packet */ 10597 break; 10598 } 10599 return (0); 10600 } 10601 10602 static void 10603 sctp_timer_validation(struct sctp_inpcb *inp, 10604 struct sctp_tcb *stcb, 10605 struct sctp_association *asoc) 10606 { 10607 struct sctp_nets *net; 10608 10609 /* Validate that a timer is running somewhere */ 10610 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 10611 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 10612 /* Here is a timer */ 10613 return; 10614 } 10615 } 10616 SCTP_TCB_LOCK_ASSERT(stcb); 10617 /* Gak, we did not have a timer somewhere */ 10618 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); 10619 if (asoc->alternate) { 10620 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate); 10621 } else { 10622 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); 10623 } 10624 return; 10625 } 10626 10627 void 10628 sctp_chunk_output(struct sctp_inpcb *inp, 10629 struct sctp_tcb *stcb, 10630 int from_where, 10631 int so_locked) 10632 { 10633 /*- 10634 * Ok this is the generic chunk service queue. we must do the 10635 * following: 10636 * - See if there are retransmits pending, if so we must 10637 * do these first. 10638 * - Service the stream queue that is next, moving any 10639 * message (note I must get a complete message i.e. 10640 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 10641 * TSN's 10642 * - Check to see if the cwnd/rwnd allows any output, if so we 10643 * go ahead and formulate and send the low level chunks. Making sure 10644 * to combine any control in the control chunk queue also. 
10645 */ 10646 struct sctp_association *asoc; 10647 struct sctp_nets *net; 10648 int error = 0, num_out, tot_out = 0, ret = 0, reason_code; 10649 unsigned int burst_cnt = 0; 10650 struct timeval now; 10651 int now_filled = 0; 10652 int nagle_on; 10653 uint32_t frag_point = sctp_get_frag_point(stcb); 10654 int un_sent = 0; 10655 int fr_done; 10656 unsigned int tot_frs = 0; 10657 10658 #if defined(__APPLE__) && !defined(__Userspace__) 10659 if (so_locked) { 10660 sctp_lock_assert(SCTP_INP_SO(inp)); 10661 } else { 10662 sctp_unlock_assert(SCTP_INP_SO(inp)); 10663 } 10664 #endif 10665 asoc = &stcb->asoc; 10666 do_it_again: 10667 /* The Nagle algorithm is only applied when handling a send call. */ 10668 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 10669 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 10670 nagle_on = 0; 10671 } else { 10672 nagle_on = 1; 10673 } 10674 } else { 10675 nagle_on = 0; 10676 } 10677 SCTP_TCB_LOCK_ASSERT(stcb); 10678 10679 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 10680 10681 if ((un_sent <= 0) && 10682 (TAILQ_EMPTY(&asoc->control_send_queue)) && 10683 (TAILQ_EMPTY(&asoc->asconf_send_queue)) && 10684 (asoc->sent_queue_retran_cnt == 0) && 10685 (asoc->trigger_reset == 0)) { 10686 /* Nothing to do unless there is something to be sent left */ 10687 return; 10688 } 10689 /* Do we have something to send, data or control AND 10690 * a sack timer running, if so piggy-back the sack. 10691 */ 10692 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 10693 sctp_send_sack(stcb, so_locked); 10694 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, 10695 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3); 10696 } 10697 while (asoc->sent_queue_retran_cnt) { 10698 /*- 10699 * Ok, it is retransmission time only, we send out only ONE 10700 * packet with a single call off to the retran code. 
10701 */ 10702 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 10703 /*- 10704 * Special hook for handling cookies discarded 10705 * by peer that carried data. Send cookie-ack only 10706 * and then the next call with get the retran's. 10707 */ 10708 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 10709 from_where, 10710 &now, &now_filled, frag_point, so_locked); 10711 return; 10712 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 10713 /* if its not from a HB then do it */ 10714 fr_done = 0; 10715 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked); 10716 if (fr_done) { 10717 tot_frs++; 10718 } 10719 } else { 10720 /* 10721 * its from any other place, we don't allow retran 10722 * output (only control) 10723 */ 10724 ret = 1; 10725 } 10726 if (ret > 0) { 10727 /* Can't send anymore */ 10728 /*- 10729 * now lets push out control by calling med-level 10730 * output once. this assures that we WILL send HB's 10731 * if queued too. 10732 */ 10733 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 10734 from_where, 10735 &now, &now_filled, frag_point, so_locked); 10736 #ifdef SCTP_AUDITING_ENABLED 10737 sctp_auditing(8, inp, stcb, NULL); 10738 #endif 10739 sctp_timer_validation(inp, stcb, asoc); 10740 return; 10741 } 10742 if (ret < 0) { 10743 /*- 10744 * The count was off.. retran is not happening so do 10745 * the normal retransmission. 
10746 */ 10747 #ifdef SCTP_AUDITING_ENABLED 10748 sctp_auditing(9, inp, stcb, NULL); 10749 #endif 10750 if (ret == SCTP_RETRAN_EXIT) { 10751 return; 10752 } 10753 break; 10754 } 10755 if (from_where == SCTP_OUTPUT_FROM_T3) { 10756 /* Only one transmission allowed out of a timeout */ 10757 #ifdef SCTP_AUDITING_ENABLED 10758 sctp_auditing(10, inp, stcb, NULL); 10759 #endif 10760 /* Push out any control */ 10761 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, 10762 &now, &now_filled, frag_point, so_locked); 10763 return; 10764 } 10765 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) { 10766 /* Hit FR burst limit */ 10767 return; 10768 } 10769 if ((num_out == 0) && (ret == 0)) { 10770 /* No more retrans to send */ 10771 break; 10772 } 10773 } 10774 #ifdef SCTP_AUDITING_ENABLED 10775 sctp_auditing(12, inp, stcb, NULL); 10776 #endif 10777 /* Check for bad destinations, if they exist move chunks around. */ 10778 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 10779 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) { 10780 /*- 10781 * if possible move things off of this address we 10782 * still may send below due to the dormant state but 10783 * we try to find an alternate address to send to 10784 * and if we have one we move all queued data on the 10785 * out wheel to this alternate address. 
10786 */ 10787 if (net->ref_count > 1) 10788 sctp_move_chunks_from_net(stcb, net); 10789 } else { 10790 /*- 10791 * if ((asoc->sat_network) || (net->addr_is_local)) 10792 * { burst_limit = asoc->max_burst * 10793 * SCTP_SAT_NETWORK_BURST_INCR; } 10794 */ 10795 if (asoc->max_burst > 0) { 10796 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) { 10797 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) { 10798 /* JRS - Use the congestion control given in the congestion control module */ 10799 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst); 10800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10801 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED); 10802 } 10803 SCTP_STAT_INCR(sctps_maxburstqueued); 10804 } 10805 net->fast_retran_ip = 0; 10806 } else { 10807 if (net->flight_size == 0) { 10808 /* Should be decaying the cwnd here */ 10809 ; 10810 } 10811 } 10812 } 10813 } 10814 } 10815 burst_cnt = 0; 10816 do { 10817 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 10818 &reason_code, 0, from_where, 10819 &now, &now_filled, frag_point, so_locked); 10820 if (error) { 10821 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); 10822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10823 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 10824 } 10825 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10826 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 10827 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 10828 } 10829 break; 10830 } 10831 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); 10832 10833 tot_out += num_out; 10834 burst_cnt++; 10835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10836 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 10837 if (num_out == 0) { 10838 
sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 10839 } 10840 } 10841 if (nagle_on) { 10842 /* 10843 * When the Nagle algorithm is used, look at how much 10844 * is unsent, then if its smaller than an MTU and we 10845 * have data in flight we stop, except if we are 10846 * handling a fragmented user message. 10847 */ 10848 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight; 10849 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 10850 (stcb->asoc.total_flight > 0)) { 10851 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/ 10852 break; 10853 } 10854 } 10855 if (TAILQ_EMPTY(&asoc->control_send_queue) && 10856 TAILQ_EMPTY(&asoc->send_queue) && 10857 sctp_is_there_unsent_data(stcb, so_locked) == 0) { 10858 /* Nothing left to send */ 10859 break; 10860 } 10861 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 10862 /* Nothing left to send */ 10863 break; 10864 } 10865 } while (num_out && 10866 ((asoc->max_burst == 0) || 10867 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) || 10868 (burst_cnt < asoc->max_burst))); 10869 10870 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) { 10871 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) { 10872 SCTP_STAT_INCR(sctps_maxburstqueued); 10873 asoc->burst_limit_applied = 1; 10874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 10875 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 10876 } 10877 } else { 10878 asoc->burst_limit_applied = 0; 10879 } 10880 } 10881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 10882 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 10883 } 10884 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", 10885 tot_out); 10886 10887 /*- 10888 * Now we need to clean up the control chunk chain if a ECNE is on 10889 * it. 
It must be marked as UNSENT again so next call will continue 10890 * to send it until such time that we get a CWR, to remove it. 10891 */ 10892 if (stcb->asoc.ecn_echo_cnt_onq) 10893 sctp_fix_ecn_echo(asoc); 10894 10895 if (stcb->asoc.trigger_reset) { 10896 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) { 10897 goto do_it_again; 10898 } 10899 } 10900 return; 10901 } 10902 10903 int 10904 sctp_output( 10905 struct sctp_inpcb *inp, 10906 struct mbuf *m, 10907 struct sockaddr *addr, 10908 struct mbuf *control, 10909 #if defined(__FreeBSD__) && !defined(__Userspace__) 10910 struct thread *p, 10911 #elif defined(_WIN32) && !defined(__Userspace__) 10912 PKTHREAD p, 10913 #else 10914 #if defined(__APPLE__) && !defined(__Userspace__) 10915 struct proc *p SCTP_UNUSED, 10916 #else 10917 struct proc *p, 10918 #endif 10919 #endif 10920 int flags) 10921 { 10922 if (inp == NULL) { 10923 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 10924 return (EINVAL); 10925 } 10926 10927 if (inp->sctp_socket == NULL) { 10928 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 10929 return (EINVAL); 10930 } 10931 return (sctp_sosend(inp->sctp_socket, 10932 addr, 10933 (struct uio *)NULL, 10934 m, 10935 control, 10936 #if defined(__APPLE__) && !defined(__Userspace__) 10937 flags 10938 #else 10939 flags, p 10940 #endif 10941 )); 10942 } 10943 10944 void 10945 send_forward_tsn(struct sctp_tcb *stcb, 10946 struct sctp_association *asoc) 10947 { 10948 struct sctp_tmit_chunk *chk, *at, *tp1, *last; 10949 struct sctp_forward_tsn_chunk *fwdtsn; 10950 struct sctp_strseq *strseq; 10951 struct sctp_strseq_mid *strseq_m; 10952 uint32_t advance_peer_ack_point; 10953 unsigned int cnt_of_space, i, ovh; 10954 unsigned int space_needed; 10955 unsigned int cnt_of_skipped = 0; 10956 10957 SCTP_TCB_LOCK_ASSERT(stcb); 10958 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 10959 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 
10960 /* mark it to unsent */ 10961 chk->sent = SCTP_DATAGRAM_UNSENT; 10962 chk->snd_count = 0; 10963 /* Do we correct its output location? */ 10964 if (chk->whoTo) { 10965 sctp_free_remote_addr(chk->whoTo); 10966 chk->whoTo = NULL; 10967 } 10968 goto sctp_fill_in_rest; 10969 } 10970 } 10971 /* Ok if we reach here we must build one */ 10972 sctp_alloc_a_chunk(stcb, chk); 10973 if (chk == NULL) { 10974 return; 10975 } 10976 asoc->fwd_tsn_cnt++; 10977 chk->copy_by_ref = 0; 10978 /* 10979 * We don't do the old thing here since 10980 * this is used not for on-wire but to 10981 * tell if we are sending a fwd-tsn by 10982 * the stack during output. And if its 10983 * a IFORWARD or a FORWARD it is a fwd-tsn. 10984 */ 10985 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 10986 chk->rec.chunk_id.can_take_data = 0; 10987 chk->flags = 0; 10988 chk->asoc = asoc; 10989 chk->whoTo = NULL; 10990 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 10991 if (chk->data == NULL) { 10992 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 10993 return; 10994 } 10995 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 10996 chk->sent = SCTP_DATAGRAM_UNSENT; 10997 chk->snd_count = 0; 10998 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 10999 asoc->ctrl_queue_cnt++; 11000 sctp_fill_in_rest: 11001 /*- 11002 * Here we go through and fill out the part that deals with 11003 * stream/seq of the ones we skip. 
11004 */ 11005 SCTP_BUF_LEN(chk->data) = 0; 11006 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 11007 if ((at->sent != SCTP_FORWARD_TSN_SKIP) && 11008 (at->sent != SCTP_DATAGRAM_NR_ACKED)) { 11009 /* no more to look at */ 11010 break; 11011 } 11012 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) { 11013 /* We don't report these */ 11014 continue; 11015 } 11016 cnt_of_skipped++; 11017 } 11018 if (asoc->idata_supported) { 11019 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 11020 (cnt_of_skipped * sizeof(struct sctp_strseq_mid))); 11021 } else { 11022 space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 11023 (cnt_of_skipped * sizeof(struct sctp_strseq))); 11024 } 11025 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data); 11026 11027 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 11028 ovh = SCTP_MIN_OVERHEAD; 11029 } else { 11030 ovh = SCTP_MIN_V4_OVERHEAD; 11031 } 11032 if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 11033 /* trim to a mtu size */ 11034 cnt_of_space = asoc->smallest_mtu - ovh; 11035 } 11036 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 11037 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 11038 0xff, 0, cnt_of_skipped, 11039 asoc->advanced_peer_ack_point); 11040 } 11041 advance_peer_ack_point = asoc->advanced_peer_ack_point; 11042 if (cnt_of_space < space_needed) { 11043 /*- 11044 * ok we must trim down the chunk by lowering the 11045 * advance peer ack point. 11046 */ 11047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 11048 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 11049 0xff, 0xff, cnt_of_space, 11050 space_needed); 11051 } 11052 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk); 11053 if (asoc->idata_supported) { 11054 cnt_of_skipped /= sizeof(struct sctp_strseq_mid); 11055 } else { 11056 cnt_of_skipped /= sizeof(struct sctp_strseq); 11057 } 11058 /*- 11059 * Go through and find the TSN that will be the one 11060 * we report. 
11061 */ 11062 at = TAILQ_FIRST(&asoc->sent_queue); 11063 if (at != NULL) { 11064 for (i = 0; i < cnt_of_skipped; i++) { 11065 tp1 = TAILQ_NEXT(at, sctp_next); 11066 if (tp1 == NULL) { 11067 break; 11068 } 11069 at = tp1; 11070 } 11071 } 11072 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 11073 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 11074 0xff, cnt_of_skipped, at->rec.data.tsn, 11075 asoc->advanced_peer_ack_point); 11076 } 11077 last = at; 11078 /*- 11079 * last now points to last one I can report, update 11080 * peer ack point 11081 */ 11082 if (last) { 11083 advance_peer_ack_point = last->rec.data.tsn; 11084 } 11085 if (asoc->idata_supported) { 11086 space_needed = sizeof(struct sctp_forward_tsn_chunk) + 11087 cnt_of_skipped * sizeof(struct sctp_strseq_mid); 11088 } else { 11089 space_needed = sizeof(struct sctp_forward_tsn_chunk) + 11090 cnt_of_skipped * sizeof(struct sctp_strseq); 11091 } 11092 } 11093 chk->send_size = space_needed; 11094 /* Setup the chunk */ 11095 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 11096 fwdtsn->ch.chunk_length = htons(chk->send_size); 11097 fwdtsn->ch.chunk_flags = 0; 11098 if (asoc->idata_supported) { 11099 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN; 11100 } else { 11101 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 11102 } 11103 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point); 11104 SCTP_BUF_LEN(chk->data) = chk->send_size; 11105 fwdtsn++; 11106 /*- 11107 * Move pointer to after the fwdtsn and transfer to the 11108 * strseq pointer. 11109 */ 11110 if (asoc->idata_supported) { 11111 strseq_m = (struct sctp_strseq_mid *)fwdtsn; 11112 strseq = NULL; 11113 } else { 11114 strseq = (struct sctp_strseq *)fwdtsn; 11115 strseq_m = NULL; 11116 } 11117 /*- 11118 * Now populate the strseq list. This is done blindly 11119 * without pulling out duplicate stream info. 
This is 11120 * inefficient but won't harm the process since the peer will 11121 * look at these in sequence and will thus release anything. 11122 * It could mean we exceed the PMTU and chop off some that 11123 * we could have included.. but this is unlikely (aka 1432/4 11124 * would mean 300+ stream seq's would have to be reported in 11125 * one FWD-TSN. With a bit of work we can later FIX this to 11126 * optimize and pull out duplicates.. but it does add more 11127 * overhead. So for now... not! 11128 */ 11129 i = 0; 11130 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 11131 if (i >= cnt_of_skipped) { 11132 break; 11133 } 11134 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) { 11135 /* We don't report these */ 11136 continue; 11137 } 11138 if (at->rec.data.tsn == advance_peer_ack_point) { 11139 at->rec.data.fwd_tsn_cnt = 0; 11140 } 11141 if (asoc->idata_supported) { 11142 strseq_m->sid = htons(at->rec.data.sid); 11143 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 11144 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG); 11145 } else { 11146 strseq_m->flags = 0; 11147 } 11148 strseq_m->mid = htonl(at->rec.data.mid); 11149 strseq_m++; 11150 } else { 11151 strseq->sid = htons(at->rec.data.sid); 11152 strseq->ssn = htons((uint16_t)at->rec.data.mid); 11153 strseq++; 11154 } 11155 i++; 11156 } 11157 return; 11158 } 11159 11160 void 11161 sctp_send_sack(struct sctp_tcb *stcb, int so_locked) 11162 { 11163 /*- 11164 * Queue up a SACK or NR-SACK in the control queue. 11165 * We must first check to see if a SACK or NR-SACK is 11166 * somehow on the control queue. 11167 * If so, we will take and and remove the old one. 
11168 */ 11169 struct sctp_association *asoc; 11170 struct sctp_tmit_chunk *chk, *a_chk; 11171 struct sctp_sack_chunk *sack; 11172 struct sctp_nr_sack_chunk *nr_sack; 11173 struct sctp_gap_ack_block *gap_descriptor; 11174 const struct sack_track *selector; 11175 int mergeable = 0; 11176 int offset; 11177 caddr_t limit; 11178 uint32_t *dup; 11179 int limit_reached = 0; 11180 unsigned int i, siz, j; 11181 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space; 11182 int num_dups = 0; 11183 int space_req; 11184 uint32_t highest_tsn; 11185 uint8_t flags; 11186 uint8_t type; 11187 uint8_t tsn_map; 11188 11189 if (stcb->asoc.nrsack_supported == 1) { 11190 type = SCTP_NR_SELECTIVE_ACK; 11191 } else { 11192 type = SCTP_SELECTIVE_ACK; 11193 } 11194 a_chk = NULL; 11195 asoc = &stcb->asoc; 11196 SCTP_TCB_LOCK_ASSERT(stcb); 11197 if (asoc->last_data_chunk_from == NULL) { 11198 /* Hmm we never received anything */ 11199 return; 11200 } 11201 sctp_slide_mapping_arrays(stcb); 11202 sctp_set_rwnd(stcb, asoc); 11203 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 11204 if (chk->rec.chunk_id.id == type) { 11205 /* Hmm, found a sack already on queue, remove it */ 11206 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 11207 asoc->ctrl_queue_cnt--; 11208 a_chk = chk; 11209 if (a_chk->data) { 11210 sctp_m_freem(a_chk->data); 11211 a_chk->data = NULL; 11212 } 11213 if (a_chk->whoTo) { 11214 sctp_free_remote_addr(a_chk->whoTo); 11215 a_chk->whoTo = NULL; 11216 } 11217 break; 11218 } 11219 } 11220 if (a_chk == NULL) { 11221 sctp_alloc_a_chunk(stcb, a_chk); 11222 if (a_chk == NULL) { 11223 /* No memory so we drop the idea, and set a timer */ 11224 if (stcb->asoc.delayed_ack) { 11225 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 11226 stcb->sctp_ep, stcb, NULL, 11227 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4); 11228 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 11229 stcb->sctp_ep, stcb, NULL); 11230 } else { 11231 stcb->asoc.send_sack = 1; 11232 } 11233 return; 11234 } 11235 
a_chk->copy_by_ref = 0; 11236 a_chk->rec.chunk_id.id = type; 11237 a_chk->rec.chunk_id.can_take_data = 1; 11238 } 11239 /* Clear our pkt counts */ 11240 asoc->data_pkts_seen = 0; 11241 11242 a_chk->flags = 0; 11243 a_chk->asoc = asoc; 11244 a_chk->snd_count = 0; 11245 a_chk->send_size = 0; /* fill in later */ 11246 a_chk->sent = SCTP_DATAGRAM_UNSENT; 11247 a_chk->whoTo = NULL; 11248 11249 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) == 0) { 11250 /*- 11251 * Ok, the destination for the SACK is unreachable, lets see if 11252 * we can select an alternate to asoc->last_data_chunk_from 11253 */ 11254 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); 11255 if (a_chk->whoTo == NULL) { 11256 /* Nope, no alternate */ 11257 a_chk->whoTo = asoc->last_data_chunk_from; 11258 } 11259 } else { 11260 a_chk->whoTo = asoc->last_data_chunk_from; 11261 } 11262 if (a_chk->whoTo) { 11263 atomic_add_int(&a_chk->whoTo->ref_count, 1); 11264 } 11265 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) { 11266 highest_tsn = asoc->highest_tsn_inside_map; 11267 } else { 11268 highest_tsn = asoc->highest_tsn_inside_nr_map; 11269 } 11270 if (highest_tsn == asoc->cumulative_tsn) { 11271 /* no gaps */ 11272 if (type == SCTP_SELECTIVE_ACK) { 11273 space_req = sizeof(struct sctp_sack_chunk); 11274 } else { 11275 space_req = sizeof(struct sctp_nr_sack_chunk); 11276 } 11277 } else { 11278 /* gaps get a cluster */ 11279 space_req = MCLBYTES; 11280 } 11281 /* Ok now lets formulate a MBUF with our sack */ 11282 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA); 11283 if ((a_chk->data == NULL) || 11284 (a_chk->whoTo == NULL)) { 11285 /* rats, no mbuf memory */ 11286 if (a_chk->data) { 11287 /* was a problem with the destination */ 11288 sctp_m_freem(a_chk->data); 11289 a_chk->data = NULL; 11290 } 11291 sctp_free_a_chunk(stcb, a_chk, so_locked); 11292 /* sa_ignore NO_NULL_CHK */ 11293 if 
(stcb->asoc.delayed_ack) { 11294 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 11295 stcb->sctp_ep, stcb, NULL, 11296 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); 11297 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 11298 stcb->sctp_ep, stcb, NULL); 11299 } else { 11300 stcb->asoc.send_sack = 1; 11301 } 11302 return; 11303 } 11304 /* ok, lets go through and fill it in */ 11305 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); 11306 space = (unsigned int)M_TRAILINGSPACE(a_chk->data); 11307 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { 11308 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); 11309 } 11310 limit = mtod(a_chk->data, caddr_t); 11311 limit += space; 11312 11313 flags = 0; 11314 11315 if ((asoc->sctp_cmt_on_off > 0) && 11316 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 11317 /*- 11318 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been 11319 * received, then set high bit to 1, else 0. Reset 11320 * pkts_rcvd. 11321 */ 11322 flags |= (asoc->cmt_dac_pkts_rcvd << 6); 11323 asoc->cmt_dac_pkts_rcvd = 0; 11324 } 11325 #ifdef SCTP_ASOCLOG_OF_TSNS 11326 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn; 11327 stcb->asoc.cumack_log_atsnt++; 11328 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) { 11329 stcb->asoc.cumack_log_atsnt = 0; 11330 } 11331 #endif 11332 /* reset the readers interpretation */ 11333 stcb->freed_by_sorcv_sincelast = 0; 11334 11335 if (type == SCTP_SELECTIVE_ACK) { 11336 sack = mtod(a_chk->data, struct sctp_sack_chunk *); 11337 nr_sack = NULL; 11338 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); 11339 if (highest_tsn > asoc->mapping_array_base_tsn) { 11340 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 11341 } else { 11342 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8; 11343 } 11344 } else { 11345 sack = NULL; 11346 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *); 11347 gap_descriptor = (struct sctp_gap_ack_block 
*)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk)); 11348 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) { 11349 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 11350 } else { 11351 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8; 11352 } 11353 } 11354 11355 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { 11356 offset = 1; 11357 } else { 11358 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 11359 } 11360 if (((type == SCTP_SELECTIVE_ACK) && 11361 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) || 11362 ((type == SCTP_NR_SELECTIVE_ACK) && 11363 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) { 11364 /* we have a gap .. maybe */ 11365 for (i = 0; i < siz; i++) { 11366 tsn_map = asoc->mapping_array[i]; 11367 if (type == SCTP_SELECTIVE_ACK) { 11368 tsn_map |= asoc->nr_mapping_array[i]; 11369 } 11370 if (i == 0) { 11371 /* 11372 * Clear all bits corresponding to TSNs 11373 * smaller or equal to the cumulative TSN. 11374 */ 11375 tsn_map &= (~0U << (1 - offset)); 11376 } 11377 selector = &sack_array[tsn_map]; 11378 if (mergeable && selector->right_edge) { 11379 /* 11380 * Backup, left and right edges were ok to 11381 * merge. 
11382 */ 11383 num_gap_blocks--; 11384 gap_descriptor--; 11385 } 11386 if (selector->num_entries == 0) 11387 mergeable = 0; 11388 else { 11389 for (j = 0; j < selector->num_entries; j++) { 11390 if (mergeable && selector->right_edge) { 11391 /* 11392 * do a merge by NOT setting 11393 * the left side 11394 */ 11395 mergeable = 0; 11396 } else { 11397 /* 11398 * no merge, set the left 11399 * side 11400 */ 11401 mergeable = 0; 11402 gap_descriptor->start = htons((selector->gaps[j].start + offset)); 11403 } 11404 gap_descriptor->end = htons((selector->gaps[j].end + offset)); 11405 num_gap_blocks++; 11406 gap_descriptor++; 11407 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 11408 /* no more room */ 11409 limit_reached = 1; 11410 break; 11411 } 11412 } 11413 if (selector->left_edge) { 11414 mergeable = 1; 11415 } 11416 } 11417 if (limit_reached) { 11418 /* Reached the limit stop */ 11419 break; 11420 } 11421 offset += 8; 11422 } 11423 } 11424 if ((type == SCTP_NR_SELECTIVE_ACK) && 11425 (limit_reached == 0)) { 11426 mergeable = 0; 11427 11428 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) { 11429 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 11430 } else { 11431 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8; 11432 } 11433 11434 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { 11435 offset = 1; 11436 } else { 11437 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 11438 } 11439 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) { 11440 /* we have a gap .. maybe */ 11441 for (i = 0; i < siz; i++) { 11442 tsn_map = asoc->nr_mapping_array[i]; 11443 if (i == 0) { 11444 /* 11445 * Clear all bits corresponding to TSNs 11446 * smaller or equal to the cumulative TSN. 
11447 */ 11448 tsn_map &= (~0U << (1 - offset)); 11449 } 11450 selector = &sack_array[tsn_map]; 11451 if (mergeable && selector->right_edge) { 11452 /* 11453 * Backup, left and right edges were ok to 11454 * merge. 11455 */ 11456 num_nr_gap_blocks--; 11457 gap_descriptor--; 11458 } 11459 if (selector->num_entries == 0) 11460 mergeable = 0; 11461 else { 11462 for (j = 0; j < selector->num_entries; j++) { 11463 if (mergeable && selector->right_edge) { 11464 /* 11465 * do a merge by NOT setting 11466 * the left side 11467 */ 11468 mergeable = 0; 11469 } else { 11470 /* 11471 * no merge, set the left 11472 * side 11473 */ 11474 mergeable = 0; 11475 gap_descriptor->start = htons((selector->gaps[j].start + offset)); 11476 } 11477 gap_descriptor->end = htons((selector->gaps[j].end + offset)); 11478 num_nr_gap_blocks++; 11479 gap_descriptor++; 11480 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 11481 /* no more room */ 11482 limit_reached = 1; 11483 break; 11484 } 11485 } 11486 if (selector->left_edge) { 11487 mergeable = 1; 11488 } 11489 } 11490 if (limit_reached) { 11491 /* Reached the limit stop */ 11492 break; 11493 } 11494 offset += 8; 11495 } 11496 } 11497 } 11498 /* now we must add any dups we are going to report. */ 11499 if ((limit_reached == 0) && (asoc->numduptsns)) { 11500 dup = (uint32_t *) gap_descriptor; 11501 for (i = 0; i < asoc->numduptsns; i++) { 11502 *dup = htonl(asoc->dup_tsns[i]); 11503 dup++; 11504 num_dups++; 11505 if (((caddr_t)dup + sizeof(uint32_t)) > limit) { 11506 /* no more room */ 11507 break; 11508 } 11509 } 11510 asoc->numduptsns = 0; 11511 } 11512 /* 11513 * now that the chunk is prepared queue it to the control chunk 11514 * queue. 
	 */
	if (type == SCTP_SELECTIVE_ACK) {
		/* Classic SACK: NR gap blocks (if any) were folded into the
		 * regular gap-block region above, so both counts contribute. */
		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t));
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
		sack->sack.num_dup_tsns = htons(num_dups);
		sack->ch.chunk_type = type;
		sack->ch.chunk_flags = flags;
		sack->ch.chunk_length = htons(a_chk->send_size);
	} else {
		/* NR-SACK: gap and nr-gap blocks are reported separately. */
		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
		    (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
		    num_dups * sizeof(int32_t));
		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
		nr_sack->nr_sack.reserved = 0;
		nr_sack->ch.chunk_type = type;
		nr_sack->ch.chunk_flags = flags;
		nr_sack->ch.chunk_length = htons(a_chk->send_size);
	}
	/* Queue the finished (NR-)SACK on the control chunk queue and
	 * remember the advertised rwnd so later code can detect changes. */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
	asoc->my_last_reported_rwnd = asoc->my_rwnd;
	asoc->ctrl_queue_cnt++;
	asoc->send_sack = 0;
	SCTP_STAT_INCR(sctps_sendsacks);
	return;
}

/*
 * Build an ABORT chunk for this association and send it immediately
 * (it is not queued on the control chunk queue).  Any error-cause mbuf
 * chain in 'operr' is appended to the chunk; ownership of 'operr' is
 * taken in all paths (it is freed on allocation failure).  An AUTH
 * chunk is prepended when the peer requires ABORT to be authenticated.
 * 'so_locked' indicates whether the caller holds the socket lock.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked)
{
	struct mbuf *m_abort, *m, *m_last;
	struct mbuf *m_out, *m_end = NULL;
	struct sctp_abort_chunk *abort;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_nets *net;
	uint32_t vtag;
	uint32_t auth_offset = 0;
	int error;
	uint16_t cause_len, chunk_len, padding_len;
	bool use_zero_crc;

#if defined(__APPLE__) && !defined(__Userspace__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
	}
#endif
	SCTP_TCB_LOCK_ASSERT(stcb);
	/*-
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	} else {
		m_out = NULL;
	}
	/* When the lower layer (DTLS) protects the packet, send a zero CRC. */
	switch (stcb->asoc.snd_edmid) {
	case SCTP_EDMID_LOWER_LAYER_DTLS:
		use_zero_crc = true;
		break;
	default:
		use_zero_crc = false;
		break;
	}
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* Out of mbufs: drop both the AUTH chain and the causes. */
		if (m_out) {
			sctp_m_freem(m_out);
		}
		if (operr) {
			sctp_m_freem(operr);
		}
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	/* Sum the cause lengths and remember the last mbuf for padding. */
	cause_len = 0;
	m_last = NULL;
	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
		cause_len += (uint16_t)SCTP_BUF_LEN(m);
		if (SCTP_BUF_NEXT(m) == NULL) {
			m_last = m;
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}
	/* Prefer the alternate path, if one has been selected. */
	if (stcb->asoc.alternate) {
		net = stcb->asoc.alternate;
	} else {
		net = stcb->asoc.primary_destination;
	}
	/* Fill in the ABORT chunk header. */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	if (stcb->asoc.peer_vtag == 0) {
		/* This happens iff the assoc is in COOKIE-WAIT state. */
		vtag = stcb->asoc.my_vtag;
		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
	} else {
		vtag = stcb->asoc.peer_vtag;
		abort->ch.chunk_flags = 0;
	}
	abort->ch.chunk_length = htons(chunk_len);
	/* Add padding, if necessary. */
	if (padding_len > 0) {
		if ((m_last == NULL) ||
		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
			sctp_m_freem(m_out);
			return;
		}
	}
	/*
	 * NOTE(review): the UDP encapsulation port passed below comes from
	 * the primary destination even when 'net' is the alternate -- confirm
	 * this is intended rather than net->port.
	 */
	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
	    stcb->asoc.primary_destination->port, NULL,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    0, 0,
#endif
	    use_zero_crc,
	    so_locked))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
		if (error == ENOBUFS) {
			stcb->asoc.ifp_had_enobuf = 1;
			SCTP_STAT_INCR(sctps_lowlevelerr);
		}
	} else {
		stcb->asoc.ifp_had_enobuf = 0;
	}
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}

/*
 * Build a SHUTDOWN-COMPLETE chunk and send it immediately to 'net'.
 * If 'reflect_vtag' is set, our own vtag is used and the T-bit
 * (SCTP_HAD_NO_TCB) is set in the chunk flags; otherwise the peer's
 * vtag is used with flags cleared.
 */
void
sctp_send_shutdown_complete(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int reflect_vtag)
{
	/* formulate and SEND a SHUTDOWN-COMPLETE */
	struct mbuf *m_shutdown_comp;
	struct sctp_shutdown_complete_chunk *shutdown_complete;
	uint32_t vtag;
	int error;
	uint8_t flags;
	bool use_zero_crc;

	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown_comp == NULL) {
		/* no mbuf's */
		return;
	}
	if (reflect_vtag) {
		flags = SCTP_HAD_NO_TCB;
		vtag = stcb->asoc.my_vtag;
	} else {
		flags = 0;
		vtag = stcb->asoc.peer_vtag;
	}
	/* When the lower layer (DTLS) protects the packet, send a zero CRC. */
	switch (stcb->asoc.snd_edmid) {
	case SCTP_EDMID_LOWER_LAYER_DTLS:
		use_zero_crc = true;
		break;
	default:
		use_zero_crc = false;
		break;
	}
	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
	shutdown_complete->ch.chunk_flags = flags;
	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
	    stcb->sctp_ep->sctp_lport, stcb->rport,
	    htonl(vtag),
	    net->port, NULL,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    0, 0,
#endif
	    use_zero_crc,
	    SCTP_SO_NOT_LOCKED))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
		if (error == ENOBUFS) {
			stcb->asoc.ifp_had_enobuf = 1;
			SCTP_STAT_INCR(sctps_lowlevelerr);
		}
	} else {
		stcb->asoc.ifp_had_enobuf = 0;
	}
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	return;
}

/*
 * Build and transmit a stand-alone response packet (no TCB involved):
 * an IP/IPv6 (optionally UDP-encapsulated) SCTP packet containing a
 * single chunk of 'type' plus an optional error-cause chain.  The
 * addresses from the received packet are swapped so the response goes
 * back to the sender.  Ownership of 'cause' is taken in all paths.
 */
#if defined(__FreeBSD__) && !defined(__Userspace__)
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
#else
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, uint32_t vtag,
    uint8_t type, struct mbuf
 *cause,
    uint32_t vrf_id SCTP_UNUSED, uint16_t port)
#endif
{
	struct mbuf *o_pak;
	struct mbuf *mout;
	struct sctphdr *shout;
	struct sctp_chunkhdr *ch;
#if defined(INET) || defined(INET6)
	struct udphdr *udp;
#endif
	int ret, len, cause_len, padding_len;
#ifdef INET
#if defined(__APPLE__) && !defined(__Userspace__)
	sctp_route_t ro;
#endif
	struct sockaddr_in *src_sin, *dst_sin;
	struct ip *ip;
#endif
#ifdef INET6
	struct sockaddr_in6 *src_sin6, *dst_sin6;
	struct ip6_hdr *ip6;
#endif

	/* Compute the length of the cause and add final padding. */
	cause_len = 0;
	if (cause != NULL) {
		struct mbuf *m_at, *m_last = NULL;

		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
			if (SCTP_BUF_NEXT(m_at) == NULL)
				m_last = m_at;
			cause_len += SCTP_BUF_LEN(m_at);
		}
		/* Pad the cause chain to a 4-byte boundary. */
		padding_len = cause_len % 4;
		if (padding_len != 0) {
			padding_len = 4 - padding_len;
		}
		if (padding_len != 0) {
			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
				sctp_m_freem(cause);
				return;
			}
		}
	} else {
		padding_len = 0;
	}
	/* Get an mbuf for the header. */
	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		len += sizeof(struct ip);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		len += sizeof(struct ip6_hdr);
		break;
#endif
	default:
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		/* Non-zero port means UDP encapsulation is in use. */
		len += sizeof(struct udphdr);
	}
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#else
	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
#endif
#else
	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#endif
	if (mout == NULL) {
		if (cause) {
			sctp_m_freem(cause);
		}
		return;
	}
#if defined(__APPLE__) && !defined(__Userspace__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#else
	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
#endif
#else
	SCTP_BUF_RESV_UF(mout, max_linkhdr);
#endif
	SCTP_BUF_LEN(mout) = len;
	SCTP_BUF_NEXT(mout) = cause;
#if defined(__FreeBSD__) && !defined(__Userspace__)
	M_SETFIB(mout, fibnum);
	mout->m_pkthdr.flowid = mflowid;
	M_HASHTYPE_SET(mout, mflowtype);
#endif
#ifdef INET
	ip = NULL;
#endif
#ifdef INET6
	ip6 = NULL;
#endif
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		src_sin = (struct sockaddr_in *)src;
		dst_sin = (struct sockaddr_in *)dst;
		ip = mtod(mout, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		ip->ip_tos = 0;
#if defined(__FreeBSD__) && !defined(__Userspace__)
		ip->ip_off = htons(IP_DF);
#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__)
		ip->ip_off = IP_DF;
#else
		ip->ip_off = htons(IP_DF);
#endif
#if defined(__Userspace__)
		ip->ip_id = htons(ip_id++);
#elif defined(__FreeBSD__)
		ip_fillid(ip);
#elif defined(__APPLE__)
#if RANDOM_IP_ID
		ip->ip_id = ip_randomid();
#else
		ip->ip_id = htons(ip_id++);
#endif
#else
		ip->ip_id = ip_id++;
#endif
		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		/* Swap addresses: the response goes back to the sender. */
		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
		ip->ip_sum = 0;
		len = sizeof(struct ip);
		shout = (struct sctphdr *)((caddr_t)ip + len);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		src_sin6 = (struct sockaddr_in6 *)src;
		dst_sin6 = (struct sockaddr_in6 *)dst;
		ip6 = mtod(mout, struct ip6_hdr *);
		ip6->ip6_flow = htonl(0x60000000);
#if defined(__FreeBSD__) && !defined(__Userspace__)
		if (V_ip6_auto_flowlabel) {
			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
		}
#endif
#if defined(__Userspace__)
		ip6->ip6_hlim = IPv6_HOP_LIMIT;
#else
		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
#endif
		if (port) {
			ip6->ip6_nxt = IPPROTO_UDP;
		} else {
			ip6->ip6_nxt = IPPROTO_SCTP;
		}
		/* Swap addresses: the response goes back to the sender. */
		ip6->ip6_src = dst_sin6->sin6_addr;
		ip6->ip6_dst = src_sin6->sin6_addr;
		len = sizeof(struct ip6_hdr);
		shout = (struct sctphdr *)((caddr_t)ip6 + len);
		break;
#endif
	default:
		len = 0;
		shout = mtod(mout, struct sctphdr *);
		break;
	}
#if defined(INET) || defined(INET6)
	if (port) {
		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
			/* UDP encapsulation requested but no tunneling port
			 * is configured: give up. */
			sctp_m_freem(mout);
			return;
		}
		udp = (struct udphdr *)shout;
		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
		udp->uh_dport = port;
		udp->uh_sum = 0;
		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
		    sizeof(struct sctphdr) +
		    sizeof(struct sctp_chunkhdr) +
		    cause_len + padding_len));
		len += sizeof(struct udphdr);
		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
	} else {
		udp = NULL;
	}
#endif
	/* SCTP common header: ports swapped, vtag from caller or echoed. */
	shout->src_port = sh->dest_port;
	shout->dest_port = sh->src_port;
	shout->checksum = 0;
	if (vtag) {
		shout->v_tag = htonl(vtag);
	} else {
		shout->v_tag = sh->v_tag;
	}
	len += sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
	ch->chunk_type = type;
	if (vtag) {
		ch->chunk_flags = 0;
	} else {
		/* No vtag supplied: mark that we had no TCB (T-bit). */
		ch->chunk_flags = SCTP_HAD_NO_TCB;
	}
	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
	len += sizeof(struct sctp_chunkhdr);
	len += cause_len + padding_len;

	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
		sctp_m_freem(mout);
		return;
	}
	SCTP_ATTACH_CHAIN(o_pak, mout, len);
	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
#if defined(__APPLE__) && !defined(__Userspace__)
		/* zap the stack pointer to the route */
		memset(&ro, 0, sizeof(sctp_route_t));
#endif
		if (port) {
#if !defined(_WIN32) && !defined(__Userspace__)
#if defined(__FreeBSD__)
			if (V_udp_cksum) {
				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			} else {
				udp->uh_sum = 0;
			}
#else
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
			udp->uh_sum = 0;
#endif
		}
#if defined(__FreeBSD__) && !defined(__Userspace__)
		ip->ip_len = htons(len);
#elif defined(__APPLE__) || defined(__Userspace__)
		ip->ip_len = len;
#else
		ip->ip_len = htons(len);
#endif
		if (port) {
			/* UDP encapsulated: compute the SCTP CRC in software. */
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#if !defined(_WIN32) && !defined(__Userspace__)
#if defined(__FreeBSD__)
			if (V_udp_cksum) {
				SCTP_ENABLE_UDP_CSUM(o_pak);
			}
#else
			SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
#endif
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			/* Let the NIC (or stack fallback) compute the CRC. */
			mout->m_pkthdr.csum_flags = CSUM_SCTP;
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
#if defined(__APPLE__) && !defined(__Userspace__)
		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
		/* Free the route if we got one back */
		if (ro.ro_rt) {
			RTFREE(ro.ro_rt);
			ro.ro_rt = NULL;
		}
#else
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
#endif
		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
		if (port) {
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#if !defined(__Userspace__)
#if defined(_WIN32)
			udp->uh_sum = 0;
#else
			/* A computed UDP checksum of 0 must be sent as 0xffff. */
			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
				udp->uh_sum = 0xffff;
			}
#endif
#endif
		} else {
#if defined(__FreeBSD__) && !defined(__Userspace__)
			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
			SCTP_STAT_INCR(sctps_sendhwcrc);
#else
			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
			SCTP_STAT_INCR(sctps_sendswcrc);
#endif
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(o_pak);
		}
#endif
#if defined(__FreeBSD__) && !defined(__Userspace__)
		SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
#endif
		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
	{
		char *buffer;
		struct sockaddr_conn *sconn;

		sconn = (struct sockaddr_conn *)src;
		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
			shout->checksum = sctp_calculate_cksum(o_pak, 0);
			SCTP_STAT_INCR(sctps_sendswcrc);
		} else {
			SCTP_STAT_INCR(sctps_sendhwcrc);
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
			sctp_packet_log(mout);
		}
#endif
		/* Don't alloc/free for each packet */
		if ((buffer = malloc(len)) != NULL) {
			m_copydata(o_pak, 0, len, buffer);
			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
			free(buffer);
		} else {
			ret = ENOMEM;
		}
		sctp_m_freem(o_pak);
		break;
	}
#endif
	default:
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
		    dst->sa_family);
		sctp_m_freem(mout);
		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
#if defined(__FreeBSD__) && !defined(__Userspace__)
	if (port) {
		UDPSTAT_INC(udps_opackets);
	}
#endif
	SCTP_STAT_INCR(sctps_sendpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	if (ret) {
		SCTP_STAT_INCR(sctps_senderrors);
	}
	return;
}

/*
 * Send an out-of-the-blue SHUTDOWN-COMPLETE (no TCB): thin wrapper
 * around sctp_send_resp_msg() with vtag 0 and no cause chain.
 */
void
sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh,
#if defined(__FreeBSD__) && !defined(__Userspace__)
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
#endif
    uint32_t vrf_id, uint16_t port)
{
	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
#if defined(__FreeBSD__) && !defined(__Userspace__)
	    mflowtype, mflowid, fibnum,
#endif
	    vrf_id, port);
}

/*
 * Build a HEARTBEAT chunk for 'net' and queue it on the association's
 * control chunk queue (it is not sent directly from here).
 */
void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (net == NULL) {
		return;
	}
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* Only address families we know how to fill in below. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		break;
#endif
#ifdef INET6
	case AF_INET6:
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		break;
#endif
	default:
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
		return;
	}

	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* Time stamp so the echo lets us measure the RTT for this path. */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
#ifdef HAVE_SA_LEN
	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
#else
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
		break;
#endif
	default:
		hb->heartbeat.hb_info.addr_len = 0;
		break;
	}
#endif
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	/* Copy the destination address into the heartbeat info. */
	switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sin.sin_addr,
		    sizeof(net->ro._l_addr.sin.sin_addr));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sin6.sin6_addr,
		    sizeof(net->ro._l_addr.sin6.sin6_addr));
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		memcpy(hb->heartbeat.hb_info.address,
		    &net->ro._l_addr.sconn.sconn_addr,
		    sizeof(net->ro._l_addr.sconn.sconn_addr));
		break;
#endif
	default:
		/* Unsupported family (checked earlier; should not happen). */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		return;
		break;
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	return;
}

/*
 * Queue an ECN-Echo (ECNE) chunk toward 'net' reporting congestion up
 * to 'high_tsn'.  If an ECNE for this net is already on the control
 * queue it is updated in place (TSN raised if needed, packet counter
 * bumped) instead of queueing a second one.
 */
void
sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn)
{
	struct sctp_association *asoc;
	struct sctp_ecne_chunk *ecne;
	struct sctp_tmit_chunk *chk;

	if (net == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
			/* found a previous ECN_ECHO update it if needed */
			uint32_t cnt, ctsn;

			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
			ctsn = ntohl(ecne->tsn);
			if (SCTP_TSN_GT(high_tsn, ctsn)) {
				ecne->tsn = htonl(high_tsn);
				SCTP_STAT_INCR(sctps_queue_upd_ecne);
			}
			cnt = ntohl(ecne->num_pkts_since_cwr);
			cnt++;
			ecne->num_pkts_since_cwr = htonl(cnt);
			return;
		}
	}
	/* nope could not find one to update so we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	SCTP_STAT_INCR(sctps_queue_upd_ecne);
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_ecne_chunk);
	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	stcb->asoc.ecn_echo_cnt_onq++;
	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
	ecne->ch.chunk_type = SCTP_ECN_ECHO;
	ecne->ch.chunk_flags = 0;
	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
	ecne->tsn = htonl(high_tsn);
	ecne->num_pkts_since_cwr = htonl(1);
	/* ECNE goes to the head so it is sent before queued data. */
	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}

/*
 * Queue a PACKET-DROPPED chunk echoing (a possibly truncated copy of)
 * the received packet 'm' back to the peer.  Only done if the peer
 * advertised PKTDROP support; see the validation below which refuses
 * to respond to ABORT, PKT-DROP and INIT-ACK chunks.
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int len, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int was_trunc = 0;
	int fullsz = 0;
	long spc;
/*
 * Queue a PACKET-DROPPED chunk reporting that the received packet "m"
 * (total length "len" including the IP header of "iphlen" bytes) was
 * dropped, echoing back as much of the dropped packet as fits in one
 * MTU.  "bad_crc" marks drops caused by a CRC failure.  The chunk is
 * only sent if the peer advertised PKTDROP support.  Caller must hold
 * the TCB lock.
 */
void
sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
    struct mbuf *m, int len, int iphlen, int bad_crc)
{
	struct sctp_association *asoc;
	struct sctp_pktdrop_chunk *drp;
	struct sctp_tmit_chunk *chk;
	uint8_t *datap;
	int was_trunc = 0;
	int fullsz = 0;
	long spc;
	int offset;
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	if (!stcb) {
		return;
	}
	asoc = &stcb->asoc;
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->pktdrop_supported == 0) {
		/*-
		 * peer must declare support before I send one.
		 */
		return;
	}
	if (stcb->sctp_socket == NULL) {
		return;
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return;
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
	chk->rec.chunk_id.can_take_data = 1;
	chk->flags = 0;
	/* From here on "len" is the SCTP portion only (IP header stripped). */
	len -= iphlen;
	chk->send_size = len;
	/* Validate that we do not have an ABORT in here. */
	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	/* Walk every chunk header in the dropped packet. */
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_PACKET_DROPPED:
		case SCTP_ABORT_ASSOCIATION:
		case SCTP_INITIATION_ACK:
			/**
			 * We don't respond with an PKT-DROP to an ABORT
			 * or PKT-DROP. We also do not respond to an
			 * INIT-ACK, because we can't know if the initiation
			 * tag is correct or not.
			 */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}

	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
		/* only send 1 mtu worth, trim off the
		 * excess on the end.
		 */
		fullsz = len;
		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
		was_trunc = 1;
	}
	chk->asoc = &stcb->asoc;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/* NB: label intentionally inside the if; also reached from
		 * the mtod() failure path below. */
jump_out:
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
	if (drp == NULL) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
		goto jump_out;
	}
	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
	chk->book_size_scale = 0;
	if (was_trunc) {
		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
		/* Report the original (untruncated) length to the peer. */
		drp->trunc_len = htons(fullsz);
		/* Len is already adjusted to size minus overhead above
		 * take out the pkt_drop chunk itself from it.
		 */
		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
		len = chk->send_size;
	} else {
		/* no truncation needed */
		drp->ch.chunk_flags = 0;
		drp->trunc_len = htons(0);
	}
	if (bad_crc) {
		drp->ch.chunk_flags |= SCTP_BADCRC;
	}
	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (net) {
		/* we should hit here */
		chk->whoTo = net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else {
		chk->whoTo = NULL;
	}
	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
	drp->ch.chunk_length = htons(chk->send_size);
	/* Advertise our receive-buffer limit as the "bottleneck bandwidth". */
	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
	if (spc < 0) {
		spc = 0;
	}
	drp->bottle_bw = htonl(spc);
	if (asoc->my_rwnd) {
		drp->current_onq = htonl(asoc->size_on_reasm_queue +
		    asoc->size_on_all_streams +
		    asoc->my_rwnd_control_len +
		    SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv));
	} else {
		/*-
		 * If my rwnd is 0, possibly from mbuf depletion as well as
		 * space used, tell the peer there is NO space aka onq == bw
		 */
		drp->current_onq = htonl(spc);
	}
	drp->reserved = 0;
	datap = drp->data;
	/* Copy the (possibly truncated) dropped packet into the chunk. */
	m_copydata(m, iphlen, len, (caddr_t)datap);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
}
SCTP_BUF_LEN(chk->data) = chk->send_size; 12536 chk->sent = SCTP_DATAGRAM_UNSENT; 12537 chk->snd_count = 0; 12538 chk->whoTo = net; 12539 atomic_add_int(&chk->whoTo->ref_count, 1); 12540 cwr = mtod(chk->data, struct sctp_cwr_chunk *); 12541 cwr->ch.chunk_type = SCTP_ECN_CWR; 12542 cwr->ch.chunk_flags = override; 12543 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk)); 12544 cwr->tsn = htonl(high_tsn); 12545 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 12546 asoc->ctrl_queue_cnt++; 12547 } 12548 12549 static int 12550 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, 12551 uint32_t seq, uint32_t resp_seq, uint32_t last_sent) 12552 { 12553 uint16_t len, old_len, i; 12554 struct sctp_stream_reset_out_request *req_out; 12555 struct sctp_chunkhdr *ch; 12556 int at; 12557 int number_entries=0; 12558 12559 ch = mtod(chk->data, struct sctp_chunkhdr *); 12560 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12561 /* get to new offset for the param. */ 12562 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len); 12563 /* now how long will this param be? 
*/ 12564 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 12565 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) && 12566 (stcb->asoc.strmout[i].chunks_on_queues == 0) && 12567 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 12568 number_entries++; 12569 } 12570 } 12571 if (number_entries == 0) { 12572 return (0); 12573 } 12574 if (number_entries == stcb->asoc.streamoutcnt) { 12575 number_entries = 0; 12576 } 12577 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) { 12578 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET; 12579 } 12580 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries)); 12581 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST); 12582 req_out->ph.param_length = htons(len); 12583 req_out->request_seq = htonl(seq); 12584 req_out->response_seq = htonl(resp_seq); 12585 req_out->send_reset_at_tsn = htonl(last_sent); 12586 at = 0; 12587 if (number_entries) { 12588 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 12589 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) && 12590 (stcb->asoc.strmout[i].chunks_on_queues == 0) && 12591 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 12592 req_out->list_of_streams[at] = htons(i); 12593 at++; 12594 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT; 12595 if (at >= number_entries) { 12596 break; 12597 } 12598 } 12599 } 12600 } else { 12601 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 12602 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT; 12603 } 12604 } 12605 if (SCTP_SIZE32(len) > len) { 12606 /*- 12607 * Need to worry about the pad we may end up adding to the 12608 * end. This is easy since the struct is either aligned to 4 12609 * bytes or 2 bytes off. 
12610 */ 12611 req_out->list_of_streams[number_entries] = 0; 12612 } 12613 /* now fix the chunk length */ 12614 ch->chunk_length = htons(len + old_len); 12615 chk->book_size = len + old_len; 12616 chk->book_size_scale = 0; 12617 chk->send_size = SCTP_SIZE32(chk->book_size); 12618 SCTP_BUF_LEN(chk->data) = chk->send_size; 12619 return (1); 12620 } 12621 12622 static void 12623 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, 12624 int number_entries, uint16_t *list, 12625 uint32_t seq) 12626 { 12627 uint16_t len, old_len, i; 12628 struct sctp_stream_reset_in_request *req_in; 12629 struct sctp_chunkhdr *ch; 12630 12631 ch = mtod(chk->data, struct sctp_chunkhdr *); 12632 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12633 12634 /* get to new offset for the param. */ 12635 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); 12636 /* now how long will this param be? */ 12637 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); 12638 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); 12639 req_in->ph.param_length = htons(len); 12640 req_in->request_seq = htonl(seq); 12641 if (number_entries) { 12642 for (i = 0; i < number_entries; i++) { 12643 req_in->list_of_streams[i] = htons(list[i]); 12644 } 12645 } 12646 if (SCTP_SIZE32(len) > len) { 12647 /*- 12648 * Need to worry about the pad we may end up adding to the 12649 * end. This is easy since the struct is either aligned to 4 12650 * bytes or 2 bytes off. 
12651 */ 12652 req_in->list_of_streams[number_entries] = 0; 12653 } 12654 /* now fix the chunk length */ 12655 ch->chunk_length = htons(len + old_len); 12656 chk->book_size = len + old_len; 12657 chk->book_size_scale = 0; 12658 chk->send_size = SCTP_SIZE32(chk->book_size); 12659 SCTP_BUF_LEN(chk->data) = chk->send_size; 12660 return; 12661 } 12662 12663 static void 12664 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, 12665 uint32_t seq) 12666 { 12667 uint16_t len, old_len; 12668 struct sctp_stream_reset_tsn_request *req_tsn; 12669 struct sctp_chunkhdr *ch; 12670 12671 ch = mtod(chk->data, struct sctp_chunkhdr *); 12672 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12673 12674 /* get to new offset for the param. */ 12675 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); 12676 /* now how long will this param be? */ 12677 len = sizeof(struct sctp_stream_reset_tsn_request); 12678 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 12679 req_tsn->ph.param_length = htons(len); 12680 req_tsn->request_seq = htonl(seq); 12681 12682 /* now fix the chunk length */ 12683 ch->chunk_length = htons(len + old_len); 12684 chk->send_size = len + old_len; 12685 chk->book_size = SCTP_SIZE32(chk->send_size); 12686 chk->book_size_scale = 0; 12687 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 12688 return; 12689 } 12690 12691 void 12692 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 12693 uint32_t resp_seq, uint32_t result) 12694 { 12695 uint16_t len, old_len; 12696 struct sctp_stream_reset_response *resp; 12697 struct sctp_chunkhdr *ch; 12698 12699 ch = mtod(chk->data, struct sctp_chunkhdr *); 12700 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12701 12702 /* get to new offset for the param. */ 12703 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 12704 /* now how long will this param be? 
*/ 12705 len = sizeof(struct sctp_stream_reset_response); 12706 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 12707 resp->ph.param_length = htons(len); 12708 resp->response_seq = htonl(resp_seq); 12709 resp->result = ntohl(result); 12710 12711 /* now fix the chunk length */ 12712 ch->chunk_length = htons(len + old_len); 12713 chk->book_size = len + old_len; 12714 chk->book_size_scale = 0; 12715 chk->send_size = SCTP_SIZE32(chk->book_size); 12716 SCTP_BUF_LEN(chk->data) = chk->send_size; 12717 return; 12718 } 12719 12720 void 12721 sctp_send_deferred_reset_response(struct sctp_tcb *stcb, 12722 struct sctp_stream_reset_list *ent, 12723 int response) 12724 { 12725 struct sctp_association *asoc; 12726 struct sctp_tmit_chunk *chk; 12727 struct sctp_chunkhdr *ch; 12728 12729 asoc = &stcb->asoc; 12730 12731 /* 12732 * Reset our last reset action to the new one IP -> response 12733 * (PERFORMED probably). This assures that if we fail to send, a 12734 * retran from the peer will get the new response. 
12735 */ 12736 asoc->last_reset_action[0] = response; 12737 if (asoc->stream_reset_outstanding) { 12738 return; 12739 } 12740 sctp_alloc_a_chunk(stcb, chk); 12741 if (chk == NULL) { 12742 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 12743 return; 12744 } 12745 chk->copy_by_ref = 0; 12746 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 12747 chk->rec.chunk_id.can_take_data = 0; 12748 chk->flags = 0; 12749 chk->asoc = &stcb->asoc; 12750 chk->book_size = sizeof(struct sctp_chunkhdr); 12751 chk->send_size = SCTP_SIZE32(chk->book_size); 12752 chk->book_size_scale = 0; 12753 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 12754 if (chk->data == NULL) { 12755 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED); 12756 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 12757 return; 12758 } 12759 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 12760 /* setup chunk parameters */ 12761 chk->sent = SCTP_DATAGRAM_UNSENT; 12762 chk->snd_count = 0; 12763 if (stcb->asoc.alternate) { 12764 chk->whoTo = stcb->asoc.alternate; 12765 } else { 12766 chk->whoTo = stcb->asoc.primary_destination; 12767 } 12768 ch = mtod(chk->data, struct sctp_chunkhdr *); 12769 ch->chunk_type = SCTP_STREAM_RESET; 12770 ch->chunk_flags = 0; 12771 ch->chunk_length = htons(chk->book_size); 12772 atomic_add_int(&chk->whoTo->ref_count, 1); 12773 SCTP_BUF_LEN(chk->data) = chk->send_size; 12774 sctp_add_stream_reset_result(chk, ent->seq, response); 12775 /* insert the chunk for sending */ 12776 TAILQ_INSERT_TAIL(&asoc->control_send_queue, 12777 chk, 12778 sctp_next); 12779 asoc->ctrl_queue_cnt++; 12780 } 12781 12782 void 12783 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 12784 uint32_t resp_seq, uint32_t result, 12785 uint32_t send_una, uint32_t recv_next) 12786 { 12787 uint16_t len, old_len; 12788 struct sctp_stream_reset_response_tsn *resp; 12789 struct sctp_chunkhdr *ch; 12790 12791 ch = mtod(chk->data, struct sctp_chunkhdr *); 12792 old_len 
= len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12793 12794 /* get to new offset for the param. */ 12795 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 12796 /* now how long will this param be? */ 12797 len = sizeof(struct sctp_stream_reset_response_tsn); 12798 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 12799 resp->ph.param_length = htons(len); 12800 resp->response_seq = htonl(resp_seq); 12801 resp->result = htonl(result); 12802 resp->senders_next_tsn = htonl(send_una); 12803 resp->receivers_next_tsn = htonl(recv_next); 12804 12805 /* now fix the chunk length */ 12806 ch->chunk_length = htons(len + old_len); 12807 chk->book_size = len + old_len; 12808 chk->send_size = SCTP_SIZE32(chk->book_size); 12809 chk->book_size_scale = 0; 12810 SCTP_BUF_LEN(chk->data) = chk->send_size; 12811 return; 12812 } 12813 12814 static void 12815 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk, 12816 uint32_t seq, 12817 uint16_t adding) 12818 { 12819 uint16_t len, old_len; 12820 struct sctp_chunkhdr *ch; 12821 struct sctp_stream_reset_add_strm *addstr; 12822 12823 ch = mtod(chk->data, struct sctp_chunkhdr *); 12824 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12825 12826 /* get to new offset for the param. */ 12827 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); 12828 /* now how long will this param be? */ 12829 len = sizeof(struct sctp_stream_reset_add_strm); 12830 12831 /* Fill it out. 
*/ 12832 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS); 12833 addstr->ph.param_length = htons(len); 12834 addstr->request_seq = htonl(seq); 12835 addstr->number_of_streams = htons(adding); 12836 addstr->reserved = 0; 12837 12838 /* now fix the chunk length */ 12839 ch->chunk_length = htons(len + old_len); 12840 chk->send_size = len + old_len; 12841 chk->book_size = SCTP_SIZE32(chk->send_size); 12842 chk->book_size_scale = 0; 12843 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 12844 return; 12845 } 12846 12847 static void 12848 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk, 12849 uint32_t seq, 12850 uint16_t adding) 12851 { 12852 uint16_t len, old_len; 12853 struct sctp_chunkhdr *ch; 12854 struct sctp_stream_reset_add_strm *addstr; 12855 12856 ch = mtod(chk->data, struct sctp_chunkhdr *); 12857 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 12858 12859 /* get to new offset for the param. */ 12860 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); 12861 /* now how long will this param be? */ 12862 len = sizeof(struct sctp_stream_reset_add_strm); 12863 /* Fill it out. 
*/ 12864 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS); 12865 addstr->ph.param_length = htons(len); 12866 addstr->request_seq = htonl(seq); 12867 addstr->number_of_streams = htons(adding); 12868 addstr->reserved = 0; 12869 12870 /* now fix the chunk length */ 12871 ch->chunk_length = htons(len + old_len); 12872 chk->send_size = len + old_len; 12873 chk->book_size = SCTP_SIZE32(chk->send_size); 12874 chk->book_size_scale = 0; 12875 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 12876 return; 12877 } 12878 12879 int 12880 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked) 12881 { 12882 struct sctp_association *asoc; 12883 struct sctp_tmit_chunk *chk; 12884 struct sctp_chunkhdr *ch; 12885 uint32_t seq; 12886 12887 asoc = &stcb->asoc; 12888 asoc->trigger_reset = 0; 12889 if (asoc->stream_reset_outstanding) { 12890 return (EALREADY); 12891 } 12892 sctp_alloc_a_chunk(stcb, chk); 12893 if (chk == NULL) { 12894 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 12895 return (ENOMEM); 12896 } 12897 chk->copy_by_ref = 0; 12898 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 12899 chk->rec.chunk_id.can_take_data = 0; 12900 chk->flags = 0; 12901 chk->asoc = &stcb->asoc; 12902 chk->book_size = sizeof(struct sctp_chunkhdr); 12903 chk->send_size = SCTP_SIZE32(chk->book_size); 12904 chk->book_size_scale = 0; 12905 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 12906 if (chk->data == NULL) { 12907 sctp_free_a_chunk(stcb, chk, so_locked); 12908 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 12909 return (ENOMEM); 12910 } 12911 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 12912 12913 /* setup chunk parameters */ 12914 chk->sent = SCTP_DATAGRAM_UNSENT; 12915 chk->snd_count = 0; 12916 if (stcb->asoc.alternate) { 12917 chk->whoTo = stcb->asoc.alternate; 12918 } else { 12919 chk->whoTo = stcb->asoc.primary_destination; 12920 } 12921 ch = mtod(chk->data, struct sctp_chunkhdr *); 
12922 ch->chunk_type = SCTP_STREAM_RESET; 12923 ch->chunk_flags = 0; 12924 ch->chunk_length = htons(chk->book_size); 12925 atomic_add_int(&chk->whoTo->ref_count, 1); 12926 SCTP_BUF_LEN(chk->data) = chk->send_size; 12927 seq = stcb->asoc.str_reset_seq_out; 12928 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) { 12929 seq++; 12930 asoc->stream_reset_outstanding++; 12931 } else { 12932 m_freem(chk->data); 12933 chk->data = NULL; 12934 sctp_free_a_chunk(stcb, chk, so_locked); 12935 return (ENOENT); 12936 } 12937 asoc->str_reset = chk; 12938 /* insert the chunk for sending */ 12939 TAILQ_INSERT_TAIL(&asoc->control_send_queue, 12940 chk, 12941 sctp_next); 12942 asoc->ctrl_queue_cnt++; 12943 12944 if (stcb->asoc.send_sack) { 12945 sctp_send_sack(stcb, so_locked); 12946 } 12947 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 12948 return (0); 12949 } 12950 12951 int 12952 sctp_send_str_reset_req(struct sctp_tcb *stcb, 12953 uint16_t number_entries, uint16_t *list, 12954 uint8_t send_in_req, 12955 uint8_t send_tsn_req, 12956 uint8_t add_stream, 12957 uint16_t adding_o, 12958 uint16_t adding_i, uint8_t peer_asked) 12959 { 12960 struct sctp_association *asoc; 12961 struct sctp_tmit_chunk *chk; 12962 struct sctp_chunkhdr *ch; 12963 int can_send_out_req=0; 12964 uint32_t seq; 12965 12966 SCTP_TCB_LOCK_ASSERT(stcb); 12967 12968 asoc = &stcb->asoc; 12969 if (asoc->stream_reset_outstanding) { 12970 /*- 12971 * Already one pending, must get ACK back to clear the flag. 
/*
 * Build and queue a STREAM-RESET (re-configuration) chunk carrying any
 * combination of: an outgoing reset request (implied by send_in_req),
 * an incoming reset request for the streams in "list", an SSN/TSN
 * reset request, and add-outgoing/add-incoming stream requests
 * (add_stream bit 0 = out / bit 1 = in).  Grows the local outgoing
 * stream array first if adding_o exceeds the spare capacity.  Returns
 * 0 on success or an errno (EBUSY while a previous request is
 * unacknowledged, EINVAL for bad combinations, ENOMEM).  Caller must
 * hold the TCB lock.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    uint16_t number_entries, uint16_t *list,
    uint8_t send_in_req,
    uint8_t send_tsn_req,
    uint8_t add_stream,
    uint16_t adding_o,
    uint16_t adding_i, uint8_t peer_asked)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	int can_send_out_req = 0;
	uint32_t seq;

	SCTP_TCB_LOCK_ASSERT(stcb);

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*-
		 * Already one pending, must get ACK back to clear the flag.
		 */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
		return (EBUSY);
	}
	if ((send_in_req == 0) && (send_tsn_req == 0) &&
	    (add_stream == 0)) {
		/* nothing to do */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	if (send_tsn_req && send_in_req) {
		/* error, can't do that */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	} else if (send_in_req) {
		can_send_out_req = 1;
	}
	/* Reject lists that could not fit into a single cluster mbuf. */
	if (number_entries > (MCLBYTES -
	    SCTP_MIN_OVERHEAD -
	    sizeof(struct sctp_chunkhdr) -
	    sizeof(struct sctp_stream_reset_out_request)) /
	    sizeof(uint16_t)) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	if (stcb->asoc.alternate) {
		chk->whoTo = stcb->asoc.alternate;
	} else {
		chk->whoTo = stcb->asoc.primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	seq = stcb->asoc.str_reset_seq_out;
	if (can_send_out_req) {
		int ret;

		/* May add nothing if no stream is currently eligible. */
		ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
		if (ret) {
			seq++;
			asoc->stream_reset_outstanding++;
		}
	}
	if ((add_stream & 1) &&
	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
		/* Need to allocate more */
		struct sctp_stream_out *oldstream;
		struct sctp_stream_queue_pending *sp, *nsp;
		int i;
#if defined(SCTP_DETAILED_STR_STATS)
		int j;
#endif

		oldstream = stcb->asoc.strmout;
		/* get some more */
		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
		    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
		    SCTP_M_STRMO);
		if (stcb->asoc.strmout == NULL) {
			uint8_t x;
			/* Allocation failed: keep the old array and drop the
			 * add-out request bit so we proceed without it. */
			stcb->asoc.strmout = oldstream;
			/* Turn off the bit */
			x = add_stream & 0xfe;
			add_stream = x;
			goto skip_stuff;
		}
		/* Ok now we proceed with copying the old out stuff and
		 * initializing the new stuff.
		 */
		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, false);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			/* FIX ME FIX ME */
			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
#if defined(SCTP_DETAILED_STR_STATS)
			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
				stcb->asoc.strmout[i].abandoned_sent[j] = oldstream[i].abandoned_sent[j];
				stcb->asoc.strmout[i].abandoned_unsent[j] = oldstream[i].abandoned_unsent[j];
			}
#else
			stcb->asoc.strmout[i].abandoned_sent[0] = oldstream[i].abandoned_sent[0];
			stcb->asoc.strmout[i].abandoned_unsent[0] = oldstream[i].abandoned_unsent[0];
#endif
			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
			stcb->asoc.strmout[i].sid = i;
			stcb->asoc.strmout[i].state = oldstream[i].state;
			/* now anything on those queues? */
			/* Migrate pending sends to the new array's queues. */
			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
			}
		}
		/* now the new streams */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc);
		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
			stcb->asoc.strmout[i].chunks_on_queues = 0;
#if defined(SCTP_DETAILED_STR_STATS)
			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
			}
#else
			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
#endif
			stcb->asoc.strmout[i].next_mid_ordered = 0;
			stcb->asoc.strmout[i].next_mid_unordered = 0;
			stcb->asoc.strmout[i].sid = i;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
		}
		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
		SCTP_FREE(oldstream, SCTP_M_STRMO);
	}
skip_stuff:
	if ((add_stream & 1) && (adding_o > 0)) {
		asoc->strm_pending_add_size = adding_o;
		asoc->peer_req_out = peer_asked;
		sctp_add_an_out_stream(chk, seq, adding_o);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if ((add_stream & 2) && (adding_i > 0)) {
		sctp_add_an_in_stream(chk, seq, adding_i);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	asoc->str_reset = chk;
	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	if (stcb->asoc.send_sack) {
		sctp_send_sack(stcb, SCTP_SO_LOCKED);
	}
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
asoc->str_reset = chk; 13143 /* insert the chunk for sending */ 13144 TAILQ_INSERT_TAIL(&asoc->control_send_queue, 13145 chk, 13146 sctp_next); 13147 asoc->ctrl_queue_cnt++; 13148 if (stcb->asoc.send_sack) { 13149 sctp_send_sack(stcb, SCTP_SO_LOCKED); 13150 } 13151 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 13152 return (0); 13153 } 13154 13155 void 13156 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, 13157 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, 13158 #if defined(__FreeBSD__) && !defined(__Userspace__) 13159 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 13160 #endif 13161 uint32_t vrf_id, uint16_t port) 13162 { 13163 /* Don't respond to an ABORT with an ABORT. */ 13164 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { 13165 if (cause) 13166 sctp_m_freem(cause); 13167 return; 13168 } 13169 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause, 13170 #if defined(__FreeBSD__) && !defined(__Userspace__) 13171 mflowtype, mflowid, fibnum, 13172 #endif 13173 vrf_id, port); 13174 return; 13175 } 13176 13177 void 13178 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, 13179 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, 13180 #if defined(__FreeBSD__) && !defined(__Userspace__) 13181 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum, 13182 #endif 13183 uint32_t vrf_id, uint16_t port) 13184 { 13185 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause, 13186 #if defined(__FreeBSD__) && !defined(__Userspace__) 13187 mflowtype, mflowid, fibnum, 13188 #endif 13189 vrf_id, port); 13190 return; 13191 } 13192 13193 static struct mbuf * 13194 sctp_copy_resume(struct uio *uio, 13195 int max_send_len, 13196 #if defined(__FreeBSD__) || defined(__Userspace__) 13197 int user_marks_eor, 13198 #endif 13199 int *error, 13200 uint32_t *sndout, 13201 struct mbuf **new_tail) 13202 { 13203 #if defined(__FreeBSD__) || defined(__Userspace__) 
13204 struct mbuf *m; 13205 13206 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, 13207 (M_PKTHDR | (user_marks_eor ? M_EOR : 0))); 13208 if (m == NULL) { 13209 /* The only possible error is EFAULT. */ 13210 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 13211 *error = EFAULT; 13212 } else { 13213 *sndout = m_length(m, NULL); 13214 *new_tail = m_last(m); 13215 } 13216 return (m); 13217 #else 13218 int left, cancpy, willcpy; 13219 struct mbuf *m, *head; 13220 13221 #if defined(__APPLE__) && !defined(__Userspace__) 13222 #if defined(APPLE_LEOPARD) 13223 left = (int)min(uio->uio_resid, max_send_len); 13224 #else 13225 left = (int)min(uio_resid(uio), max_send_len); 13226 #endif 13227 #else 13228 left = (int)min(uio->uio_resid, max_send_len); 13229 #endif 13230 /* Always get a header just in case */ 13231 head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); 13232 if (head == NULL) { 13233 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); 13234 *error = ENOBUFS; 13235 return (NULL); 13236 } 13237 cancpy = (int)M_TRAILINGSPACE(head); 13238 willcpy = min(cancpy, left); 13239 *error = uiomove(mtod(head, caddr_t), willcpy, uio); 13240 if (*error != 0) { 13241 sctp_m_freem(head); 13242 return (NULL); 13243 } 13244 *sndout += willcpy; 13245 left -= willcpy; 13246 SCTP_BUF_LEN(head) = willcpy; 13247 m = head; 13248 *new_tail = head; 13249 while (left > 0) { 13250 /* move in user data */ 13251 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); 13252 if (SCTP_BUF_NEXT(m) == NULL) { 13253 sctp_m_freem(head); 13254 *new_tail = NULL; 13255 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS); 13256 *error = ENOBUFS; 13257 return (NULL); 13258 } 13259 m = SCTP_BUF_NEXT(m); 13260 cancpy = (int)M_TRAILINGSPACE(m); 13261 willcpy = min(cancpy, left); 13262 *error = uiomove(mtod(m, caddr_t), willcpy, uio); 13263 if (*error != 0) { 13264 sctp_m_freem(head); 13265 *new_tail = NULL; 13266 
/*
 * Copy sp->length bytes of user data from "uio" into sp->data,
 * reserving "resv_upfront" bytes of leading space in the first mbuf
 * for the DATA chunk header.  Sets sp->tail_mbuf; returns 0 or an
 * errno.  On FreeBSD/userspace this is a single m_uiotombuf(); other
 * platforms build the chain by hand with uiomove().
 */
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    int resv_upfront)
{
#if defined(__FreeBSD__) || defined(__Userspace__)
	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, resv_upfront, 0);
	if (sp->data == NULL) {
		/* The only possible error is EFAULT. */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	sp->tail_mbuf = m_last(sp->data);
	return (0);
#else
	int left;
	int cancpy, willcpy, error;
	struct mbuf *m, *head;
	int cpsz = 0;

	/* First one gets a header */
	left = sp->length;
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
	if (m == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
		return (ENOBUFS);
	}
	/*-
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
	 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = (int)M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	/* Fill mbuf after mbuf until the full length has been copied. */
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		cpsz += willcpy;
		if (left > 0) {
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
				return (ENOBUFS);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = (int)M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		} else {
			sp->tail_mbuf = m;
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	sp->data = head;
	/* sp->length reflects the bytes actually copied. */
	sp->length = cpsz;
	return (0);
#endif
}
13367 */ 13368 struct sctp_stream_queue_pending *sp; 13369 int resv_in_first; 13370 13371 *error = 0; 13372 sctp_alloc_a_strmoq(stcb, sp); 13373 if (sp == NULL) { 13374 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 13375 *error = ENOMEM; 13376 goto out_now; 13377 } 13378 sp->act_flags = 0; 13379 sp->sender_all_done = 0; 13380 sp->sinfo_flags = srcv->sinfo_flags; 13381 sp->timetolive = srcv->sinfo_timetolive; 13382 sp->ppid = srcv->sinfo_ppid; 13383 sp->context = srcv->sinfo_context; 13384 sp->fsn = 0; 13385 (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 13386 sp->sid = srcv->sinfo_stream; 13387 #if defined(__APPLE__) && !defined(__Userspace__) 13388 #if defined(APPLE_LEOPARD) 13389 sp->length = (uint32_t)min(uio->uio_resid, max_send_len); 13390 #else 13391 sp->length = (uint32_t)min(uio_resid(uio), max_send_len); 13392 #endif 13393 #else 13394 sp->length = (uint32_t)min(uio->uio_resid, max_send_len); 13395 #endif 13396 #if defined(__APPLE__) && !defined(__Userspace__) 13397 #if defined(APPLE_LEOPARD) 13398 if ((sp->length == (uint32_t)uio->uio_resid) && 13399 #else 13400 if ((sp->length == (uint32_t)uio_resid(uio)) && 13401 #endif 13402 #else 13403 if ((sp->length == (uint32_t)uio->uio_resid) && 13404 #endif 13405 ((user_marks_eor == 0) || 13406 (srcv->sinfo_flags & SCTP_EOF) || 13407 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 13408 sp->msg_is_complete = 1; 13409 } else { 13410 sp->msg_is_complete = 0; 13411 } 13412 sp->sender_all_done = 0; 13413 sp->some_taken = 0; 13414 sp->put_last_out = 0; 13415 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb); 13416 sp->data = sp->tail_mbuf = NULL; 13417 if (sp->length == 0) { 13418 goto skip_copy; 13419 } 13420 if (srcv->sinfo_keynumber_valid) { 13421 sp->auth_keyid = srcv->sinfo_keynumber; 13422 } else { 13423 sp->auth_keyid = stcb->asoc.authinfo.active_keyid; 13424 } 13425 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { 13426 sctp_auth_key_acquire(stcb, sp->auth_keyid); 
13427 sp->holds_key_ref = 1; 13428 } 13429 #if defined(__APPLE__) && !defined(__Userspace__) 13430 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0); 13431 #endif 13432 *error = sctp_copy_one(sp, uio, resv_in_first); 13433 #if defined(__APPLE__) && !defined(__Userspace__) 13434 SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0); 13435 #endif 13436 skip_copy: 13437 if (*error) { 13438 #if defined(__Userspace__) 13439 SCTP_TCB_LOCK(stcb); 13440 #endif 13441 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); 13442 #if defined(__Userspace__) 13443 SCTP_TCB_UNLOCK(stcb); 13444 #endif 13445 sp = NULL; 13446 } else { 13447 if (sp->sinfo_flags & SCTP_ADDR_OVER) { 13448 sp->net = net; 13449 atomic_add_int(&sp->net->ref_count, 1); 13450 } else { 13451 sp->net = NULL; 13452 } 13453 sctp_set_prsctp_policy(sp); 13454 } 13455 out_now: 13456 return (sp); 13457 } 13458 13459 int 13460 sctp_sosend(struct socket *so, 13461 struct sockaddr *addr, 13462 struct uio *uio, 13463 struct mbuf *top, 13464 struct mbuf *control, 13465 #if defined(__APPLE__) && !defined(__Userspace__) 13466 int flags) 13467 #else 13468 int flags, 13469 #if defined(__FreeBSD__) && !defined(__Userspace__) 13470 struct thread *p) 13471 #elif defined(_WIN32) && !defined(__Userspace__) 13472 PKTHREAD p) 13473 #else 13474 #if defined(__Userspace__) 13475 /* 13476 * proc is a dummy in __Userspace__ and will not be passed 13477 * to sctp_lower_sosend 13478 */ 13479 #endif 13480 struct proc *p) 13481 #endif 13482 #endif 13483 { 13484 struct sctp_sndrcvinfo sndrcvninfo; 13485 #if defined(INET) && defined(INET6) 13486 struct sockaddr_in sin; 13487 #endif 13488 struct sockaddr *addr_to_use; 13489 #if defined(__APPLE__) && !defined(__Userspace__) 13490 struct proc *p = current_proc(); 13491 #endif 13492 int error; 13493 bool use_sndinfo; 13494 13495 if (control != NULL) { 13496 /* process cmsg snd/rcv info (maybe a assoc-id) */ 13497 use_sndinfo = sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control, sizeof(sndrcvninfo)); 
13498 } else { 13499 use_sndinfo = false; 13500 } 13501 #if defined(INET) && defined(INET6) 13502 if ((addr != NULL) && (addr->sa_family == AF_INET6)) { 13503 struct sockaddr_in6 *sin6; 13504 13505 #ifdef HAVE_SA_LEN 13506 if (addr->sa_len != sizeof(struct sockaddr_in6)) { 13507 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 13508 return (EINVAL); 13509 } 13510 #endif 13511 sin6 = (struct sockaddr_in6 *)addr; 13512 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 13513 in6_sin6_2_sin(&sin, sin6); 13514 addr_to_use = (struct sockaddr *)&sin; 13515 } else { 13516 addr_to_use = addr; 13517 } 13518 } else { 13519 addr_to_use = addr; 13520 } 13521 #else 13522 addr_to_use = addr; 13523 #endif 13524 #if defined(__APPLE__) && !defined(__Userspace__) 13525 SCTP_SOCKET_LOCK(so, 1); 13526 #endif 13527 error = sctp_lower_sosend(so, addr_to_use, uio, top, control, flags, 13528 #if defined(__Userspace__) 13529 use_sndinfo ? &sndrcvninfo : NULL); 13530 #else 13531 use_sndinfo ? &sndrcvninfo : NULL, p); 13532 #endif 13533 #if defined(__APPLE__) && !defined(__Userspace__) 13534 SCTP_SOCKET_UNLOCK(so, 1); 13535 #endif 13536 return (error); 13537 } 13538 13539 int 13540 sctp_lower_sosend(struct socket *so, 13541 struct sockaddr *addr, 13542 struct uio *uio, 13543 struct mbuf *top, 13544 struct mbuf *control, 13545 int flags, 13546 #if defined(__Userspace__) 13547 struct sctp_sndrcvinfo *srcv) 13548 #else 13549 struct sctp_sndrcvinfo *srcv, 13550 #if defined(__FreeBSD__) 13551 struct thread *p) 13552 #elif defined(_WIN32) 13553 PKTHREAD p) 13554 #else 13555 struct proc *p) 13556 #endif 13557 #endif 13558 { 13559 struct sctp_nonpad_sndrcvinfo sndrcvninfo_buf; 13560 #if defined(__FreeBSD__) && !defined(__Userspace__) 13561 struct epoch_tracker et; 13562 #endif 13563 struct timeval now; 13564 struct sctp_block_entry be; 13565 struct sctp_inpcb *inp; 13566 struct sctp_tcb *stcb = NULL; 13567 struct sctp_nets *net; 13568 struct sctp_association *asoc; 13569 struct 
sctp_inpcb *t_inp; 13570 struct sctp_nonpad_sndrcvinfo *sndrcvninfo; 13571 ssize_t sndlen = 0, max_len, local_add_more; 13572 ssize_t local_soresv = 0; 13573 sctp_assoc_t sinfo_assoc_id; 13574 int user_marks_eor; 13575 int nagle_applies = 0; 13576 int error; 13577 int queue_only = 0, queue_only_for_init = 0; 13578 int un_sent; 13579 int now_filled = 0; 13580 unsigned int inqueue_bytes = 0; 13581 uint16_t port; 13582 uint16_t sinfo_flags; 13583 uint16_t sinfo_stream; 13584 bool create_lock_applied = false; 13585 bool free_cnt_applied = false; 13586 bool some_on_control; 13587 bool got_all_of_the_send = false; 13588 bool non_blocking = false; 13589 13590 error = 0; 13591 net = NULL; 13592 stcb = NULL; 13593 13594 #if defined(__APPLE__) && !defined(__Userspace__) 13595 sctp_lock_assert(so); 13596 #endif 13597 if ((uio == NULL) && (top == NULL)) { 13598 error = EINVAL; 13599 goto out_unlocked; 13600 } 13601 if (addr != NULL) { 13602 union sctp_sockstore *raddr = (union sctp_sockstore *)addr; 13603 13604 switch (raddr->sa.sa_family) { 13605 #ifdef INET 13606 case AF_INET: 13607 #ifdef HAVE_SIN_LEN 13608 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { 13609 error = EINVAL; 13610 goto out_unlocked; 13611 } 13612 #endif 13613 port = raddr->sin.sin_port; 13614 break; 13615 #endif 13616 #ifdef INET6 13617 case AF_INET6: 13618 #ifdef HAVE_SIN6_LEN 13619 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { 13620 error = EINVAL; 13621 goto out_unlocked; 13622 } 13623 #endif 13624 port = raddr->sin6.sin6_port; 13625 break; 13626 #endif 13627 #if defined(__Userspace__) 13628 case AF_CONN: 13629 #ifdef HAVE_SCONN_LEN 13630 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) { 13631 error = EINVAL; 13632 goto out_unlocked; 13633 } 13634 #endif 13635 port = raddr->sconn.sconn_port; 13636 break; 13637 #endif 13638 default: 13639 error = EAFNOSUPPORT; 13640 goto out_unlocked; 13641 } 13642 } else { 13643 port = 0; 13644 } 13645 if (uio != NULL) { 13646 #if 
defined(__APPLE__) && !defined(__Userspace__) 13647 #if defined(APPLE_LEOPARD) 13648 if (uio->uio_resid < 0) { 13649 #else 13650 if (uio_resid(uio) < 0) { 13651 #endif 13652 #else 13653 if (uio->uio_resid < 0) { 13654 #endif 13655 error = EINVAL; 13656 goto out_unlocked; 13657 } 13658 #if defined(__APPLE__) && !defined(__Userspace__) 13659 #if defined(APPLE_LEOPARD) 13660 sndlen = uio->uio_resid; 13661 #else 13662 sndlen = uio_resid(uio); 13663 #endif 13664 #else 13665 sndlen = uio->uio_resid; 13666 #endif 13667 } else { 13668 sndlen = SCTP_HEADER_LEN(top); 13669 } 13670 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zd\n", 13671 (void *)addr, sndlen); 13672 13673 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 13674 if (inp == NULL) { 13675 error = EINVAL; 13676 goto out_unlocked; 13677 } 13678 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 13679 if ((uio == NULL) && (user_marks_eor != 0)) { 13680 /*- 13681 * We do not support eeor mode for 13682 * sending with mbuf chains (like sendfile). 13683 */ 13684 error = EINVAL; 13685 goto out_unlocked; 13686 } 13687 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 13688 SCTP_IS_LISTENING(inp)) { 13689 /* The listener can NOT send. 
*/ 13690 error = EINVAL; 13691 goto out_unlocked; 13692 } 13693 atomic_add_int(&inp->total_sends, 1); 13694 13695 if (srcv != NULL) { 13696 sndrcvninfo = (struct sctp_nonpad_sndrcvinfo *)srcv; 13697 sinfo_assoc_id = sndrcvninfo->sinfo_assoc_id; 13698 sinfo_flags = sndrcvninfo->sinfo_flags; 13699 if (INVALID_SINFO_FLAG(sinfo_flags) || 13700 PR_SCTP_INVALID_POLICY(sinfo_flags)) { 13701 error = EINVAL; 13702 goto out_unlocked; 13703 } 13704 if (sinfo_flags != 0) { 13705 SCTP_STAT_INCR(sctps_sends_with_flags); 13706 } 13707 } else { 13708 sndrcvninfo = NULL; 13709 sinfo_flags = inp->def_send.sinfo_flags; 13710 sinfo_assoc_id = inp->def_send.sinfo_assoc_id; 13711 } 13712 #if defined(__FreeBSD__) && !defined(__Userspace__) 13713 if (flags & MSG_EOR) { 13714 sinfo_flags |= SCTP_EOR; 13715 } 13716 if (flags & MSG_EOF) { 13717 sinfo_flags |= SCTP_EOF; 13718 } 13719 #endif 13720 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { 13721 error = EINVAL; 13722 goto out_unlocked; 13723 } 13724 SCTP_INP_RLOCK(inp); 13725 if ((sinfo_flags & SCTP_SENDALL) && 13726 (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 13727 SCTP_INP_RUNLOCK(inp); 13728 error = sctp_sendall(inp, uio, top, sndrcvninfo); 13729 top = NULL; 13730 goto out_unlocked; 13731 } 13732 /* Now we must find the association. */ 13733 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 13734 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 13735 stcb = LIST_FIRST(&inp->sctp_asoc_list); 13736 if (stcb != NULL) { 13737 SCTP_TCB_LOCK(stcb); 13738 } 13739 SCTP_INP_RUNLOCK(inp); 13740 } else if (sinfo_assoc_id > SCTP_ALL_ASSOC) { 13741 stcb = sctp_findasoc_ep_asocid_locked(inp, sinfo_assoc_id, 1); 13742 SCTP_INP_RUNLOCK(inp); 13743 if (stcb != NULL) { 13744 SCTP_TCB_LOCK_ASSERT(stcb); 13745 } 13746 } else if (addr != NULL) { 13747 /*- 13748 * Since we did not use findep we must 13749 * increment it, and if we don't find a tcb 13750 * decrement it. 
13751 */ 13752 SCTP_INP_INCR_REF(inp); 13753 SCTP_INP_RUNLOCK(inp); 13754 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 13755 if (stcb == NULL) { 13756 SCTP_INP_WLOCK(inp); 13757 SCTP_INP_DECR_REF(inp); 13758 SCTP_INP_WUNLOCK(inp); 13759 } else { 13760 SCTP_TCB_LOCK_ASSERT(stcb); 13761 } 13762 } else { 13763 SCTP_INP_RUNLOCK(inp); 13764 } 13765 13766 #ifdef INVARIANTS 13767 if (stcb != NULL) { 13768 SCTP_TCB_LOCK_ASSERT(stcb); 13769 } 13770 #endif 13771 13772 if ((stcb == NULL) && (addr != NULL)) { 13773 /* Possible implicit send? */ 13774 SCTP_ASOC_CREATE_LOCK(inp); 13775 create_lock_applied = true; 13776 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 13777 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 13778 error = EINVAL; 13779 goto out_unlocked; 13780 } 13781 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 13782 (addr->sa_family == AF_INET6)) { 13783 error = EINVAL; 13784 goto out_unlocked; 13785 } 13786 SCTP_INP_WLOCK(inp); 13787 SCTP_INP_INCR_REF(inp); 13788 SCTP_INP_WUNLOCK(inp); 13789 /* With the lock applied look again */ 13790 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 13791 #if defined(INET) || defined(INET6) 13792 if ((stcb == NULL) && (control != NULL) && (port > 0)) { 13793 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error); 13794 } 13795 #endif 13796 if (stcb == NULL) { 13797 SCTP_INP_WLOCK(inp); 13798 SCTP_INP_DECR_REF(inp); 13799 SCTP_INP_WUNLOCK(inp); 13800 } else { 13801 SCTP_TCB_LOCK_ASSERT(stcb); 13802 SCTP_ASOC_CREATE_UNLOCK(inp); 13803 create_lock_applied = false; 13804 } 13805 if (error != 0) { 13806 goto out_unlocked; 13807 } 13808 if (t_inp != inp) { 13809 error = ENOTCONN; 13810 goto out_unlocked; 13811 } 13812 } 13813 if (stcb == NULL) { 13814 if (addr == NULL) { 13815 error = ENOENT; 13816 goto out_unlocked; 13817 } else { 13818 /* We must go ahead and start the INIT process */ 13819 uint32_t vrf_id; 13820 13821 if ((sinfo_flags & SCTP_ABORT) || 
13822 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { 13823 /*- 13824 * User asks to abort a non-existent assoc, 13825 * or EOF a non-existent assoc with no data 13826 */ 13827 error = ENOENT; 13828 goto out_unlocked; 13829 } 13830 /* get an asoc/stcb struct */ 13831 vrf_id = inp->def_vrf_id; 13832 KASSERT(create_lock_applied, ("create_lock_applied is false")); 13833 stcb = sctp_aloc_assoc_connected(inp, addr, &error, 0, 0, vrf_id, 13834 inp->sctp_ep.pre_open_stream_count, 13835 inp->sctp_ep.port, 13836 #if !defined(__Userspace__) 13837 p, 13838 #else 13839 (struct proc *)NULL, 13840 #endif 13841 SCTP_INITIALIZE_AUTH_PARAMS); 13842 if (stcb == NULL) { 13843 /* error is setup for us in the call. */ 13844 KASSERT(error != 0, ("error is 0 although stcb is NULL")); 13845 goto out_unlocked; 13846 } 13847 SCTP_TCB_LOCK_ASSERT(stcb); 13848 SCTP_ASOC_CREATE_UNLOCK(inp); 13849 create_lock_applied = false; 13850 /* Turn on queue only flag to prevent data from being sent */ 13851 queue_only = 1; 13852 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); 13853 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 13854 if (control != NULL) { 13855 if (sctp_process_cmsgs_for_init(stcb, control, &error)) { 13856 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 13857 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); 13858 stcb = NULL; 13859 KASSERT(error != 0, 13860 ("error is 0 although sctp_process_cmsgs_for_init() indicated an error")); 13861 goto out_unlocked; 13862 } 13863 } 13864 /* out with the INIT */ 13865 queue_only_for_init = 1; 13866 /*- 13867 * we may want to dig in after this call and adjust the MTU 13868 * value. It defaulted to 1500 (constant) but the ro 13869 * structure may now have an update and thus we may need to 13870 * change it BEFORE we append the message. 
13871 */ 13872 } 13873 } 13874 13875 KASSERT(!create_lock_applied, ("create_lock_applied is true")); 13876 KASSERT(stcb != NULL, ("stcb is NULL")); 13877 SCTP_TCB_LOCK_ASSERT(stcb); 13878 13879 asoc = &stcb->asoc; 13880 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 13881 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 13882 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 13883 /* XXX: Could also be ECONNABORTED, not enough info. */ 13884 error = ECONNRESET; 13885 } else { 13886 error = ENOTCONN; 13887 } 13888 goto out_unlocked; 13889 } 13890 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 13891 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 13892 queue_only = 1; 13893 } 13894 /* Keep the stcb from being freed under our feet. */ 13895 atomic_add_int(&asoc->refcnt, 1); 13896 free_cnt_applied = true; 13897 if (sndrcvninfo == NULL) { 13898 /* Use a local copy to have a consistent view. */ 13899 sndrcvninfo_buf = asoc->def_send; 13900 sndrcvninfo = &sndrcvninfo_buf; 13901 sinfo_flags = sndrcvninfo->sinfo_flags; 13902 #if defined(__FreeBSD__) && !defined(__Userspace__) 13903 if (flags & MSG_EOR) { 13904 sinfo_flags |= SCTP_EOR; 13905 } 13906 if (flags & MSG_EOF) { 13907 sinfo_flags |= SCTP_EOF; 13908 } 13909 #endif 13910 } 13911 /* Are we aborting? */ 13912 if (sinfo_flags & SCTP_ABORT) { 13913 struct mbuf *mm; 13914 struct sctp_paramhdr *ph; 13915 ssize_t tot_demand, tot_out = 0, max_out; 13916 13917 SCTP_STAT_INCR(sctps_sends_with_abort); 13918 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) || 13919 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) { 13920 /* It has to be up before we abort. */ 13921 error = EINVAL; 13922 goto out_unlocked; 13923 } 13924 /* How big is the user initiated abort? 
*/ 13925 if (top != NULL) { 13926 struct mbuf *cntm; 13927 13928 if (sndlen != 0) { 13929 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) { 13930 tot_out += SCTP_BUF_LEN(cntm); 13931 } 13932 } 13933 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA); 13934 } else { 13935 /* Must fit in a MTU */ 13936 tot_out = sndlen; 13937 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 13938 if (tot_demand > SCTP_DEFAULT_ADD_MORE) { 13939 error = EMSGSIZE; 13940 goto out_unlocked; 13941 } 13942 mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_NOWAIT, 1, MT_DATA); 13943 } 13944 if (mm == NULL) { 13945 error = ENOMEM; 13946 goto out_unlocked; 13947 } 13948 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 13949 max_out -= sizeof(struct sctp_abort_msg); 13950 if (tot_out > max_out) { 13951 tot_out = max_out; 13952 } 13953 ph = mtod(mm, struct sctp_paramhdr *); 13954 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 13955 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out)); 13956 ph++; 13957 SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr)); 13958 if (top == NULL) { 13959 SCTP_TCB_UNLOCK(stcb); 13960 #if defined(__APPLE__) && !defined(__Userspace__) 13961 SCTP_SOCKET_UNLOCK(so, 0); 13962 #endif 13963 error = uiomove((caddr_t)ph, (int)tot_out, uio); 13964 #if defined(__APPLE__) && !defined(__Userspace__) 13965 SCTP_SOCKET_LOCK(so, 0); 13966 #endif 13967 SCTP_TCB_LOCK(stcb); 13968 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 13969 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 13970 sctp_m_freem(mm); 13971 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 13972 /* XXX: Could also be ECONNABORTED, not enough info. 
*/ 13973 error = ECONNRESET; 13974 } else { 13975 error = ENOTCONN; 13976 } 13977 goto out_unlocked; 13978 } 13979 if (error != 0) { 13980 /*- 13981 * Here if we can't get his data we 13982 * still abort we just don't get to 13983 * send the users note :-0 13984 */ 13985 sctp_m_freem(mm); 13986 mm = NULL; 13987 error = 0; 13988 } 13989 } else { 13990 if (sndlen != 0) { 13991 SCTP_BUF_NEXT(mm) = top; 13992 } 13993 } 13994 atomic_subtract_int(&asoc->refcnt, 1); 13995 free_cnt_applied = false; 13996 /* release this lock, otherwise we hang on ourselves */ 13997 #if defined(__FreeBSD__) && !defined(__Userspace__) 13998 NET_EPOCH_ENTER(et); 13999 #endif 14000 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, false, SCTP_SO_LOCKED); 14001 #if defined(__FreeBSD__) && !defined(__Userspace__) 14002 NET_EPOCH_EXIT(et); 14003 #endif 14004 stcb = NULL; 14005 /* In this case top is already chained to mm 14006 * avoid double free, since we free it below if 14007 * top != NULL and driver would free it after sending 14008 * the packet out 14009 */ 14010 if (sndlen != 0) { 14011 top = NULL; 14012 } 14013 goto out_unlocked; 14014 } 14015 14016 KASSERT(stcb != NULL, ("stcb is NULL")); 14017 SCTP_TCB_LOCK_ASSERT(stcb); 14018 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14019 ("Association about to be freed")); 14020 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14021 ("Association was aborted")); 14022 14023 if (sinfo_flags & SCTP_ADDR_OVER) { 14024 if (addr != NULL) { 14025 net = sctp_findnet(stcb, addr); 14026 } else { 14027 net = NULL; 14028 } 14029 if ((net == NULL) || 14030 ((port != 0) && (port != stcb->rport))) { 14031 error = EINVAL; 14032 goto out_unlocked; 14033 } 14034 } else { 14035 if (asoc->alternate != NULL) { 14036 net = asoc->alternate; 14037 } else { 14038 net = asoc->primary_destination; 14039 } 14040 } 14041 if (sndlen == 0) { 14042 if (sinfo_flags & SCTP_EOF) { 14043 got_all_of_the_send = true; 14044 goto dataless_eof; 14045 } else { 14046 
error = EINVAL; 14047 goto out_unlocked; 14048 } 14049 } 14050 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 14051 if (sndlen > (ssize_t)asoc->smallest_mtu) { 14052 error = EMSGSIZE; 14053 goto out_unlocked; 14054 } 14055 } 14056 sinfo_stream = sndrcvninfo->sinfo_stream; 14057 /* Is the stream no. valid? */ 14058 if (sinfo_stream >= asoc->streamoutcnt) { 14059 /* Invalid stream number */ 14060 error = EINVAL; 14061 goto out_unlocked; 14062 } 14063 if ((asoc->strmout[sinfo_stream].state != SCTP_STREAM_OPEN) && 14064 (asoc->strmout[sinfo_stream].state != SCTP_STREAM_OPENING)) { 14065 /* 14066 * Can't queue any data while stream reset is underway. 14067 */ 14068 if (asoc->strmout[sinfo_stream].state > SCTP_STREAM_OPEN) { 14069 error = EAGAIN; 14070 } else { 14071 error = EINVAL; 14072 } 14073 goto out_unlocked; 14074 } 14075 atomic_add_int(&stcb->total_sends, 1); 14076 #if defined(__Userspace__) 14077 if (inp->recv_callback != NULL) { 14078 non_blocking = true; 14079 } 14080 #endif 14081 #if defined(__FreeBSD__) && !defined(__Userspace__) 14082 if (SCTP_SO_IS_NBIO(so) || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) { 14083 #else 14084 if (SCTP_SO_IS_NBIO(so)) { 14085 #endif 14086 non_blocking = true; 14087 } 14088 if (non_blocking) { 14089 ssize_t amount; 14090 14091 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14092 if (user_marks_eor == 0) { 14093 amount = sndlen; 14094 } else { 14095 amount = 1; 14096 } 14097 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + asoc->sb_send_resv)) || 14098 (asoc->chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 14099 if ((sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so)) && 14100 (user_marks_eor == 0)) { 14101 error = EMSGSIZE; 14102 } else { 14103 error = EWOULDBLOCK; 14104 } 14105 goto out_unlocked; 14106 } 14107 } 14108 atomic_add_int(&asoc->sb_send_resv, (int)sndlen); 14109 local_soresv = sndlen; 14110 14111 KASSERT(stcb != NULL, ("stcb is 
NULL")); 14112 SCTP_TCB_LOCK_ASSERT(stcb); 14113 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14114 ("Association about to be freed")); 14115 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14116 ("Association was aborted")); 14117 14118 /* Ok, we will attempt a msgsnd :> */ 14119 #if !(defined(_WIN32) || defined(__Userspace__)) 14120 if (p != NULL) { 14121 #if defined(__FreeBSD__) 14122 p->td_ru.ru_msgsnd++; 14123 #else 14124 p->p_stats->p_ru.ru_msgsnd++; 14125 #endif 14126 } 14127 #endif 14128 /* Calculate the maximum we can send */ 14129 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14130 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 14131 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 14132 } else { 14133 max_len = 0; 14134 } 14135 /* Unless E_EOR mode is on, we must make a send FIT in one call. */ 14136 if ((user_marks_eor == 0) && 14137 (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 14138 /* It will NEVER fit. */ 14139 error = EMSGSIZE; 14140 goto out_unlocked; 14141 } 14142 if (user_marks_eor != 0) { 14143 local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); 14144 } else { 14145 /*- 14146 * For non-eeor the whole message must fit in 14147 * the socket send buffer. 14148 */ 14149 local_add_more = sndlen; 14150 } 14151 if (non_blocking) { 14152 goto skip_preblock; 14153 } 14154 if (((max_len <= local_add_more) && ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) || 14155 (max_len == 0) || 14156 ((asoc->chunks_on_out_queue + asoc->stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 14157 /* No room right now! 
*/ 14158 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14159 SOCKBUF_LOCK(&so->so_snd); 14160 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || 14161 ((asoc->stream_queue_cnt + asoc->chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 14162 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n", 14163 (unsigned int)SCTP_SB_LIMIT_SND(so), 14164 inqueue_bytes, 14165 local_add_more, 14166 asoc->stream_queue_cnt, 14167 asoc->chunks_on_out_queue, 14168 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); 14169 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14170 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); 14171 } 14172 be.error = 0; 14173 #if !(defined(_WIN32) && !defined(__Userspace__)) 14174 stcb->block_entry = &be; 14175 #endif 14176 SCTP_TCB_UNLOCK(stcb); 14177 #if defined(__FreeBSD__) && !defined(__Userspace__) 14178 error = sbwait(so, SO_SND); 14179 #else 14180 error = sbwait(&so->so_snd); 14181 #endif 14182 if (error == 0) { 14183 if (so->so_error != 0) { 14184 error = so->so_error; 14185 } 14186 if (be.error != 0) { 14187 error = be.error; 14188 } 14189 } 14190 SOCKBUF_UNLOCK(&so->so_snd); 14191 SCTP_TCB_LOCK(stcb); 14192 stcb->block_entry = NULL; 14193 if (error != 0) { 14194 goto out_unlocked; 14195 } 14196 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 14197 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 14198 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 14199 /* XXX: Could also be ECONNABORTED, not enough info. 
*/ 14200 error = ECONNRESET; 14201 } else { 14202 error = ENOTCONN; 14203 } 14204 goto out_unlocked; 14205 } 14206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14207 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 14208 asoc, asoc->total_output_queue_size); 14209 } 14210 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14211 SOCKBUF_LOCK(&so->so_snd); 14212 } 14213 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 14214 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 14215 } else { 14216 max_len = 0; 14217 } 14218 SOCKBUF_UNLOCK(&so->so_snd); 14219 } 14220 14221 skip_preblock: 14222 KASSERT(stcb != NULL, ("stcb is NULL")); 14223 SCTP_TCB_LOCK_ASSERT(stcb); 14224 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14225 ("Association about to be freed")); 14226 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14227 ("Association was aborted")); 14228 14229 #if defined(__APPLE__) && !defined(__Userspace__) 14230 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 14231 if (error != 0) { 14232 goto out_unlocked; 14233 } 14234 #endif 14235 /* sndlen covers for mbuf case 14236 * uio_resid covers for the non-mbuf case 14237 * NOTE: uio will be null when top/mbuf is passed 14238 */ 14239 if (top == NULL) { 14240 struct sctp_stream_queue_pending *sp; 14241 struct sctp_stream_out *strm; 14242 uint32_t sndout; 14243 14244 if ((asoc->stream_locked) && 14245 (asoc->stream_locked_on != sinfo_stream)) { 14246 error = EINVAL; 14247 goto out; 14248 } 14249 strm = &asoc->strmout[sinfo_stream]; 14250 if (strm->last_msg_incomplete == 0) { 14251 do_a_copy_in: 14252 SCTP_TCB_UNLOCK(stcb); 14253 sp = sctp_copy_it_in(stcb, asoc, sndrcvninfo, uio, net, max_len, user_marks_eor, &error); 14254 SCTP_TCB_LOCK(stcb); 14255 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 14256 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 14257 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 14258 /* XXX: Could also be ECONNABORTED, 
not enough info. */ 14259 error = ECONNRESET; 14260 } else { 14261 error = ENOTCONN; 14262 } 14263 goto out; 14264 } 14265 if (error != 0) { 14266 goto out; 14267 } 14268 /* 14269 * Reject the sending of a new user message, if the 14270 * association is about to be shut down. 14271 */ 14272 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) || 14273 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) || 14274 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 14275 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 14276 if (sp->data != 0) { 14277 sctp_m_freem(sp->data); 14278 sp->data = NULL; 14279 sp->tail_mbuf = NULL; 14280 sp->length = 0; 14281 } 14282 if (sp->net != NULL) { 14283 sctp_free_remote_addr(sp->net); 14284 sp->net = NULL; 14285 } 14286 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); 14287 error = EPIPE; 14288 goto out_unlocked; 14289 } 14290 /* The out streams might be reallocated. */ 14291 strm = &asoc->strmout[sinfo_stream]; 14292 if (sp->msg_is_complete) { 14293 strm->last_msg_incomplete = 0; 14294 asoc->stream_locked = 0; 14295 } else { 14296 /* Just got locked to this guy in 14297 * case of an interrupt. 14298 */ 14299 strm->last_msg_incomplete = 1; 14300 if (asoc->idata_supported == 0) { 14301 asoc->stream_locked = 1; 14302 asoc->stream_locked_on = sinfo_stream; 14303 } 14304 sp->sender_all_done = 0; 14305 } 14306 sctp_snd_sb_alloc(stcb, sp->length); 14307 atomic_add_int(&asoc->stream_queue_cnt, 1); 14308 if (sinfo_flags & SCTP_UNORDERED) { 14309 SCTP_STAT_INCR(sctps_sends_with_unord); 14310 } 14311 sp->processing = 1; 14312 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 14313 asoc->ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp); 14314 } else { 14315 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 14316 if (sp == NULL) { 14317 /* ???? Huh ??? 
last msg is gone */ 14318 #ifdef INVARIANTS 14319 panic("Warning: Last msg marked incomplete, yet nothing left?"); 14320 #else 14321 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 14322 strm->last_msg_incomplete = 0; 14323 #endif 14324 goto do_a_copy_in; 14325 } 14326 if (sp->processing != 0) { 14327 error = EINVAL; 14328 goto out; 14329 } else { 14330 sp->processing = 1; 14331 } 14332 } 14333 14334 KASSERT(stcb != NULL, ("stcb is NULL")); 14335 SCTP_TCB_LOCK_ASSERT(stcb); 14336 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14337 ("Association about to be freed")); 14338 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14339 ("Association was aborted")); 14340 14341 #if defined(__APPLE__) && !defined(__Userspace__) 14342 #if defined(APPLE_LEOPARD) 14343 while (uio->uio_resid > 0) { 14344 #else 14345 while (uio_resid(uio) > 0) { 14346 #endif 14347 #else 14348 while (uio->uio_resid > 0) { 14349 #endif 14350 /* How much room do we have? */ 14351 struct mbuf *new_tail, *mm; 14352 14353 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14354 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 14355 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 14356 } else { 14357 max_len = 0; 14358 } 14359 if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || 14360 ((max_len > 0 ) && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || 14361 #if defined(__APPLE__) && !defined(__Userspace__) 14362 #if defined(APPLE_LEOPARD) 14363 (uio->uio_resid <= max_len)) { 14364 #else 14365 (uio_resid(uio) <= max_len)) { 14366 #endif 14367 #else 14368 (uio->uio_resid <= max_len)) { 14369 #endif 14370 SCTP_TCB_UNLOCK(stcb); 14371 #if defined(__APPLE__) && !defined(__Userspace__) 14372 SCTP_SOCKET_UNLOCK(so, 0); 14373 #endif 14374 sndout = 0; 14375 new_tail = NULL; 14376 #if defined(__FreeBSD__) || defined(__Userspace__) 14377 mm = sctp_copy_resume(uio, (int)max_len, 
user_marks_eor, &error, &sndout, &new_tail); 14378 #else 14379 mm = sctp_copy_resume(uio, (int)max_len, &error, &sndout, &new_tail); 14380 #endif 14381 #if defined(__APPLE__) && !defined(__Userspace__) 14382 SCTP_SOCKET_LOCK(so, 0); 14383 #endif 14384 SCTP_TCB_LOCK(stcb); 14385 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 14386 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 14387 /* We need to get out. 14388 * Peer probably aborted. 14389 */ 14390 sctp_m_freem(mm); 14391 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 14392 /* XXX: Could also be ECONNABORTED, not enough info. */ 14393 error = ECONNRESET; 14394 } else { 14395 error = ENOTCONN; 14396 } 14397 goto out; 14398 } 14399 if ((mm == NULL) || (error != 0)) { 14400 if (mm != NULL) { 14401 sctp_m_freem(mm); 14402 } 14403 if (sp != NULL) { 14404 sp->processing = 0; 14405 } 14406 goto out; 14407 } 14408 /* Update the mbuf and count */ 14409 if (sp->tail_mbuf != NULL) { 14410 /* Tack it to the end. */ 14411 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 14412 } else { 14413 /* A stolen mbuf. */ 14414 sp->data = mm; 14415 } 14416 sp->tail_mbuf = new_tail; 14417 sctp_snd_sb_alloc(stcb, sndout); 14418 atomic_add_int(&sp->length, sndout); 14419 if (sinfo_flags & SCTP_SACK_IMMEDIATELY) { 14420 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY; 14421 } 14422 14423 /* Did we reach EOR? 
*/ 14424 #if defined(__APPLE__) && !defined(__Userspace__) 14425 #if defined(APPLE_LEOPARD) 14426 if ((uio->uio_resid == 0) && 14427 #else 14428 if ((uio_resid(uio) == 0) && 14429 #endif 14430 #else 14431 if ((uio->uio_resid == 0) && 14432 #endif 14433 ((user_marks_eor == 0) || 14434 (sinfo_flags & SCTP_EOF) || 14435 (user_marks_eor && (sinfo_flags & SCTP_EOR)))) { 14436 sp->msg_is_complete = 1; 14437 } else { 14438 sp->msg_is_complete = 0; 14439 } 14440 } 14441 14442 KASSERT(stcb != NULL, ("stcb is NULL")); 14443 SCTP_TCB_LOCK_ASSERT(stcb); 14444 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14445 ("Association about to be freed")); 14446 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14447 ("Association was aborted")); 14448 14449 #if defined(__APPLE__) && !defined(__Userspace__) 14450 #if defined(APPLE_LEOPARD) 14451 if (uio->uio_resid == 0) { 14452 #else 14453 if (uio_resid(uio) == 0) { 14454 #endif 14455 #else 14456 if (uio->uio_resid == 0) { 14457 #endif 14458 /* got it all? */ 14459 continue; 14460 } 14461 /* PR-SCTP? */ 14462 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) { 14463 /* This is ugly but we must assure locking order */ 14464 sctp_prune_prsctp(stcb, asoc, sndrcvninfo, (int)sndlen); 14465 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14466 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) 14467 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 14468 else 14469 max_len = 0; 14470 if (max_len > 0) { 14471 continue; 14472 } 14473 } 14474 /* wait for space now */ 14475 if (non_blocking) { 14476 /* Non-blocking io in place out */ 14477 if (sp != NULL) { 14478 sp->processing = 0; 14479 } 14480 goto skip_out_eof; 14481 } 14482 /* What about the INIT, send it maybe */ 14483 if (queue_only_for_init) { 14484 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { 14485 /* a collision took us forward? 
*/ 14486 queue_only = 0; 14487 } else { 14488 #if defined(__FreeBSD__) && !defined(__Userspace__) 14489 NET_EPOCH_ENTER(et); 14490 #endif 14491 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 14492 #if defined(__FreeBSD__) && !defined(__Userspace__) 14493 NET_EPOCH_EXIT(et); 14494 #endif 14495 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); 14496 queue_only = 1; 14497 } 14498 } 14499 if ((net->flight_size > net->cwnd) && 14500 (asoc->sctp_cmt_on_off == 0)) { 14501 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 14502 queue_only = 1; 14503 } else if (asoc->ifp_had_enobuf) { 14504 SCTP_STAT_INCR(sctps_ifnomemqueued); 14505 if (net->flight_size > (2 * net->mtu)) { 14506 queue_only = 1; 14507 } 14508 asoc->ifp_had_enobuf = 0; 14509 } 14510 un_sent = asoc->total_output_queue_size - asoc->total_flight; 14511 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 14512 (asoc->total_flight > 0) && 14513 (asoc->stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 14514 (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) { 14515 /*- 14516 * Ok, Nagle is set on and we have data outstanding. 14517 * Don't send anything and let SACKs drive out the 14518 * data unless we have a "full" segment to send. 
14519 */ 14520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14521 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 14522 } 14523 SCTP_STAT_INCR(sctps_naglequeued); 14524 nagle_applies = 1; 14525 } else { 14526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14527 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 14528 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 14529 } 14530 SCTP_STAT_INCR(sctps_naglesent); 14531 nagle_applies = 0; 14532 } 14533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14534 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 14535 nagle_applies, un_sent); 14536 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, asoc->total_output_queue_size, 14537 asoc->total_flight, 14538 asoc->chunks_on_out_queue, asoc->total_flight_count); 14539 } 14540 if (queue_only_for_init) { 14541 queue_only_for_init = 0; 14542 } 14543 if ((queue_only == 0) && (nagle_applies == 0)) { 14544 /*- 14545 * need to start chunk output 14546 * before blocking.. note that if 14547 * a lock is already applied, then 14548 * the input via the net is happening 14549 * and I don't need to start output :-D 14550 */ 14551 #if defined(__FreeBSD__) && !defined(__Userspace__) 14552 NET_EPOCH_ENTER(et); 14553 #endif 14554 sctp_chunk_output(inp, stcb, 14555 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14556 #if defined(__FreeBSD__) && !defined(__Userspace__) 14557 NET_EPOCH_EXIT(et); 14558 #endif 14559 } 14560 /*- 14561 * This is a bit strange, but I think it will 14562 * work. The total_output_queue_size is locked and 14563 * protected by the TCB_LOCK, which we just released. 14564 * There is a race that can occur between releasing it 14565 * above, and me getting the socket lock, where sacks 14566 * come in but we have not put the SB_WAIT on the 14567 * so_snd buffer to get the wakeup. 
After the LOCK 14568 * is applied the sack_processing will also need to 14569 * LOCK the so->so_snd to do the actual sowwakeup(). So 14570 * once we have the socket buffer lock if we recheck the 14571 * size we KNOW we will get to sleep safely with the 14572 * wakeup flag in place. 14573 */ 14574 inqueue_bytes = asoc->total_output_queue_size - (asoc->chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb)); 14575 SOCKBUF_LOCK(&so->so_snd); 14576 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes + 14577 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { 14578 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14579 #if defined(__APPLE__) && !defined(__Userspace__) 14580 #if defined(APPLE_LEOPARD) 14581 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14582 asoc, uio->uio_resid); 14583 #else 14584 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14585 asoc, uio_resid(uio)); 14586 #endif 14587 #else 14588 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 14589 asoc, uio->uio_resid); 14590 #endif 14591 } 14592 be.error = 0; 14593 #if !(defined(_WIN32) && !defined(__Userspace__)) 14594 stcb->block_entry = &be; 14595 #endif 14596 SCTP_TCB_UNLOCK(stcb); 14597 #if defined(__APPLE__) && !defined(__Userspace__) 14598 sbunlock(&so->so_snd, 1); 14599 #endif 14600 #if defined(__FreeBSD__) && !defined(__Userspace__) 14601 error = sbwait(so, SO_SND); 14602 #else 14603 error = sbwait(&so->so_snd); 14604 #endif 14605 if (error == 0) { 14606 if (so->so_error != 0) 14607 error = so->so_error; 14608 if (be.error != 0) { 14609 error = be.error; 14610 } 14611 } 14612 SOCKBUF_UNLOCK(&so->so_snd); 14613 SCTP_TCB_LOCK(stcb); 14614 stcb->block_entry = NULL; 14615 if ((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) || 14616 (asoc->state & SCTP_STATE_WAS_ABORTED)) { 14617 if (asoc->state & SCTP_STATE_WAS_ABORTED) { 14618 /* XXX: Could also be ECONNABORTED, not enough info. 
*/ 14619 error = ECONNRESET; 14620 } else { 14621 error = ENOTCONN; 14622 } 14623 goto out_unlocked; 14624 } 14625 if (error != 0) { 14626 if (sp != NULL) { 14627 sp->processing = 0; 14628 } 14629 goto out_unlocked; 14630 } 14631 #if defined(__APPLE__) && !defined(__Userspace__) 14632 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 14633 if (error != 0) { 14634 goto out_unlocked; 14635 } 14636 #endif 14637 } else { 14638 SOCKBUF_UNLOCK(&so->so_snd); 14639 } 14640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14641 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 14642 asoc, asoc->total_output_queue_size); 14643 } 14644 } 14645 14646 KASSERT(stcb != NULL, ("stcb is NULL")); 14647 SCTP_TCB_LOCK_ASSERT(stcb); 14648 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14649 ("Association about to be freed")); 14650 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14651 ("Association was aborted")); 14652 14653 /* The out streams might be reallocated. */ 14654 strm = &asoc->strmout[sinfo_stream]; 14655 if (sp != NULL) { 14656 if (sp->msg_is_complete == 0) { 14657 strm->last_msg_incomplete = 1; 14658 if (asoc->idata_supported == 0) { 14659 asoc->stream_locked = 1; 14660 asoc->stream_locked_on = sinfo_stream; 14661 } 14662 } else { 14663 sp->sender_all_done = 1; 14664 strm->last_msg_incomplete = 0; 14665 asoc->stream_locked = 0; 14666 } 14667 sp->processing = 0; 14668 } else { 14669 SCTP_PRINTF("Huh no sp TSNH?\n"); 14670 strm->last_msg_incomplete = 0; 14671 asoc->stream_locked = 0; 14672 } 14673 #if defined(__APPLE__) && !defined(__Userspace__) 14674 #if defined(APPLE_LEOPARD) 14675 if (uio->uio_resid == 0) { 14676 #else 14677 if (uio_resid(uio) == 0) { 14678 #endif 14679 #else 14680 if (uio->uio_resid == 0) { 14681 #endif 14682 got_all_of_the_send = true; 14683 } 14684 } else { 14685 error = sctp_msg_append(stcb, net, top, sndrcvninfo); 14686 top = NULL; 14687 if ((sinfo_flags & SCTP_EOF) != 0) { 14688 got_all_of_the_send = true; 14689 } 14690 
} 14691 if (error != 0) { 14692 goto out; 14693 } 14694 14695 dataless_eof: 14696 KASSERT(stcb != NULL, ("stcb is NULL")); 14697 SCTP_TCB_LOCK_ASSERT(stcb); 14698 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14699 ("Association about to be freed")); 14700 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14701 ("Association was aborted")); 14702 14703 /* EOF thing ? */ 14704 if ((sinfo_flags & SCTP_EOF) && got_all_of_the_send) { 14705 SCTP_STAT_INCR(sctps_sends_with_eof); 14706 error = 0; 14707 if (TAILQ_EMPTY(&asoc->send_queue) && 14708 TAILQ_EMPTY(&asoc->sent_queue) && 14709 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) { 14710 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) { 14711 goto abort_anyway; 14712 } 14713 /* there is nothing queued to send, so I'm done... */ 14714 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && 14715 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && 14716 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 14717 struct sctp_nets *netp; 14718 14719 /* only send SHUTDOWN the first time through */ 14720 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { 14721 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 14722 } 14723 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT); 14724 sctp_stop_timers_for_shutdown(stcb); 14725 if (asoc->alternate != NULL) { 14726 netp = asoc->alternate; 14727 } else { 14728 netp = asoc->primary_destination; 14729 } 14730 sctp_send_shutdown(stcb, netp); 14731 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 14732 netp); 14733 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 14734 NULL); 14735 } 14736 } else { 14737 /*- 14738 * we still got (or just got) data to send, so set 14739 * SHUTDOWN_PENDING 14740 */ 14741 /*- 14742 * XXX sockets draft says that SCTP_EOF should be 14743 * sent with no data. 
currently, we will allow user 14744 * data to be sent first and move to 14745 * SHUTDOWN-PENDING 14746 */ 14747 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) && 14748 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) && 14749 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 14750 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) { 14751 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT); 14752 } 14753 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING); 14754 if (TAILQ_EMPTY(&asoc->send_queue) && 14755 TAILQ_EMPTY(&asoc->sent_queue) && 14756 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 14757 struct mbuf *op_err; 14758 char msg[SCTP_DIAG_INFO_LEN]; 14759 14760 abort_anyway: 14761 if (free_cnt_applied) { 14762 atomic_subtract_int(&asoc->refcnt, 1); 14763 free_cnt_applied = false; 14764 } 14765 SCTP_SNPRINTF(msg, sizeof(msg), 14766 "%s:%d at %s", __FILE__, __LINE__, __func__); 14767 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 14768 msg); 14769 #if defined(__FreeBSD__) && !defined(__Userspace__) 14770 NET_EPOCH_ENTER(et); 14771 #endif 14772 sctp_abort_an_association(stcb->sctp_ep, stcb, 14773 op_err, false, SCTP_SO_LOCKED); 14774 #if defined(__FreeBSD__) && !defined(__Userspace__) 14775 NET_EPOCH_EXIT(et); 14776 #endif 14777 stcb = NULL; 14778 error = ECONNABORTED; 14779 goto out; 14780 } 14781 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); 14782 } 14783 } 14784 } 14785 14786 skip_out_eof: 14787 KASSERT(stcb != NULL, ("stcb is NULL")); 14788 SCTP_TCB_LOCK_ASSERT(stcb); 14789 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14790 ("Association about to be freed")); 14791 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14792 ("Association was aborted")); 14793 14794 some_on_control = !TAILQ_EMPTY(&asoc->control_send_queue); 14795 if (queue_only_for_init) { 14796 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) { 14797 /* a collision took us forward? 
*/ 14798 queue_only = 0; 14799 } else { 14800 #if defined(__FreeBSD__) && !defined(__Userspace__) 14801 NET_EPOCH_ENTER(et); 14802 #endif 14803 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 14804 #if defined(__FreeBSD__) && !defined(__Userspace__) 14805 NET_EPOCH_EXIT(et); 14806 #endif 14807 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT); 14808 queue_only = 1; 14809 } 14810 } 14811 14812 KASSERT(stcb != NULL, ("stcb is NULL")); 14813 SCTP_TCB_LOCK_ASSERT(stcb); 14814 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14815 ("Association about to be freed")); 14816 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14817 ("Association was aborted")); 14818 14819 if ((net->flight_size > net->cwnd) && 14820 (asoc->sctp_cmt_on_off == 0)) { 14821 SCTP_STAT_INCR(sctps_send_cwnd_avoid); 14822 queue_only = 1; 14823 } else if (asoc->ifp_had_enobuf) { 14824 SCTP_STAT_INCR(sctps_ifnomemqueued); 14825 if (net->flight_size > (2 * net->mtu)) { 14826 queue_only = 1; 14827 } 14828 asoc->ifp_had_enobuf = 0; 14829 } 14830 un_sent = asoc->total_output_queue_size - asoc->total_flight; 14831 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 14832 (asoc->total_flight > 0) && 14833 (asoc->stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 14834 (un_sent < (int)(asoc->smallest_mtu - SCTP_MIN_OVERHEAD))) { 14835 /*- 14836 * Ok, Nagle is set on and we have data outstanding. 14837 * Don't send anything and let SACKs drive out the 14838 * data unless wen have a "full" segment to send. 
14839 */ 14840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14841 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 14842 } 14843 SCTP_STAT_INCR(sctps_naglequeued); 14844 nagle_applies = 1; 14845 } else { 14846 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 14847 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 14848 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 14849 } 14850 SCTP_STAT_INCR(sctps_naglesent); 14851 nagle_applies = 0; 14852 } 14853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 14854 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 14855 nagle_applies, un_sent); 14856 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, asoc->total_output_queue_size, 14857 asoc->total_flight, 14858 asoc->chunks_on_out_queue, asoc->total_flight_count); 14859 } 14860 14861 KASSERT(stcb != NULL, ("stcb is NULL")); 14862 SCTP_TCB_LOCK_ASSERT(stcb); 14863 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14864 ("Association about to be freed")); 14865 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14866 ("Association was aborted")); 14867 14868 #if defined(__FreeBSD__) && !defined(__Userspace__) 14869 NET_EPOCH_ENTER(et); 14870 #endif 14871 if ((queue_only == 0) && (nagle_applies == 0) && (asoc->peers_rwnd && un_sent)) { 14872 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14873 } else if ((queue_only == 0) && 14874 (asoc->peers_rwnd == 0) && 14875 (asoc->total_flight == 0)) { 14876 /* We get to have a probe outstanding */ 14877 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 14878 } else if (some_on_control) { 14879 int num_out, reason; 14880 14881 /* Here we do control only */ 14882 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, 14883 &reason, 1, 1, &now, &now_filled, 14884 sctp_get_frag_point(stcb), 14885 SCTP_SO_LOCKED); 14886 } 14887 #if defined(__FreeBSD__) && !defined(__Userspace__) 14888 NET_EPOCH_EXIT(et); 
14889 #endif 14890 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", 14891 queue_only, asoc->peers_rwnd, un_sent, 14892 asoc->total_flight, asoc->chunks_on_out_queue, 14893 asoc->total_output_queue_size, error); 14894 14895 KASSERT(stcb != NULL, ("stcb is NULL")); 14896 SCTP_TCB_LOCK_ASSERT(stcb); 14897 KASSERT((asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0, 14898 ("Association about to be freed")); 14899 KASSERT((asoc->state & SCTP_STATE_WAS_ABORTED) == 0, 14900 ("Association was aborted")); 14901 14902 out: 14903 #if defined(__APPLE__) && !defined(__Userspace__) 14904 sbunlock(&so->so_snd, 1); 14905 #endif 14906 out_unlocked: 14907 if (create_lock_applied) { 14908 SCTP_ASOC_CREATE_UNLOCK(inp); 14909 } 14910 if (stcb != NULL) { 14911 if (local_soresv) { 14912 atomic_subtract_int(&asoc->sb_send_resv, (int)sndlen); 14913 } 14914 if (free_cnt_applied) { 14915 atomic_subtract_int(&asoc->refcnt, 1); 14916 } 14917 SCTP_TCB_UNLOCK(stcb); 14918 } 14919 if (top != NULL) { 14920 sctp_m_freem(top); 14921 } 14922 if (control != NULL) { 14923 sctp_m_freem(control); 14924 } 14925 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error); 14926 return (error); 14927 } 14928 14929 /* 14930 * generate an AUTHentication chunk, if required 14931 */ 14932 struct mbuf * 14933 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, 14934 struct sctp_auth_chunk **auth_ret, uint32_t * offset, 14935 struct sctp_tcb *stcb, uint8_t chunk) 14936 { 14937 struct mbuf *m_auth; 14938 struct sctp_auth_chunk *auth; 14939 int chunk_len; 14940 struct mbuf *cn; 14941 14942 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) || 14943 (stcb == NULL)) 14944 return (m); 14945 14946 if (stcb->asoc.auth_supported == 0) { 14947 return (m); 14948 } 14949 /* does the requested chunk require auth? 
*/ 14950 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) { 14951 return (m); 14952 } 14953 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER); 14954 if (m_auth == NULL) { 14955 /* no mbuf's */ 14956 return (m); 14957 } 14958 /* reserve some space if this will be the first mbuf */ 14959 if (m == NULL) 14960 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD); 14961 /* fill in the AUTH chunk details */ 14962 auth = mtod(m_auth, struct sctp_auth_chunk *); 14963 memset(auth, 0, sizeof(*auth)); 14964 auth->ch.chunk_type = SCTP_AUTHENTICATION; 14965 auth->ch.chunk_flags = 0; 14966 chunk_len = sizeof(*auth) + 14967 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id); 14968 auth->ch.chunk_length = htons(chunk_len); 14969 auth->hmac_id = htons(stcb->asoc.peer_hmac_id); 14970 /* key id and hmac digest will be computed and filled in upon send */ 14971 14972 /* save the offset where the auth was inserted into the chain */ 14973 *offset = 0; 14974 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) { 14975 *offset += SCTP_BUF_LEN(cn); 14976 } 14977 14978 /* update length and return pointer to the auth chunk */ 14979 SCTP_BUF_LEN(m_auth) = chunk_len; 14980 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0); 14981 if (auth_ret != NULL) 14982 *auth_ret = auth; 14983 14984 return (m); 14985 } 14986 14987 #if (defined(__FreeBSD__) || defined(__APPLE__)) && !defined(__Userspace__) 14988 #ifdef INET6 14989 int 14990 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) 14991 { 14992 struct nd_prefix *pfx = NULL; 14993 struct nd_pfxrouter *pfxrtr = NULL; 14994 struct sockaddr_in6 gw6; 14995 14996 #if defined(__FreeBSD__) 14997 if (ro == NULL || ro->ro_nh == NULL || src6->sin6_family != AF_INET6) 14998 #else 14999 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6) 15000 #endif 15001 return (0); 15002 15003 /* get prefix entry of address */ 15004 #if defined(__FreeBSD__) 15005 ND6_RLOCK(); 15006 #endif 15007 
LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) { 15008 if (pfx->ndpr_stateflags & NDPRF_DETACHED) 15009 continue; 15010 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr, 15011 &src6->sin6_addr, &pfx->ndpr_mask)) 15012 break; 15013 } 15014 /* no prefix entry in the prefix list */ 15015 if (pfx == NULL) { 15016 #if defined(__FreeBSD__) 15017 ND6_RUNLOCK(); 15018 #endif 15019 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for "); 15020 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); 15021 return (0); 15022 } 15023 15024 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is "); 15025 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); 15026 15027 /* search installed gateway from prefix entry */ 15028 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) { 15029 memset(&gw6, 0, sizeof(struct sockaddr_in6)); 15030 gw6.sin6_family = AF_INET6; 15031 #ifdef HAVE_SIN6_LEN 15032 gw6.sin6_len = sizeof(struct sockaddr_in6); 15033 #endif 15034 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr, 15035 sizeof(struct in6_addr)); 15036 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is "); 15037 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6); 15038 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is "); 15039 #if defined(__FreeBSD__) 15040 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa); 15041 #else 15042 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); 15043 #endif 15044 #if defined(__FreeBSD__) 15045 if (sctp_cmpaddr((struct sockaddr *)&gw6, &ro->ro_nh->gw_sa)) { 15046 ND6_RUNLOCK(); 15047 #else 15048 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) { 15049 #endif 15050 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n"); 15051 return (1); 15052 } 15053 } 15054 #if defined(__FreeBSD__) 15055 ND6_RUNLOCK(); 15056 #endif 15057 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n"); 15058 return (0); 15059 } 15060 #endif 15061 15062 int 15063 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, 
sctp_route_t *ro) 15064 { 15065 #ifdef INET 15066 struct sockaddr_in *sin, *mask; 15067 struct ifaddr *ifa; 15068 struct in_addr srcnetaddr, gwnetaddr; 15069 15070 #if defined(__FreeBSD__) 15071 if (ro == NULL || ro->ro_nh == NULL || 15072 #else 15073 if (ro == NULL || ro->ro_rt == NULL || 15074 #endif 15075 sifa->address.sa.sa_family != AF_INET) { 15076 return (0); 15077 } 15078 ifa = (struct ifaddr *)sifa->ifa; 15079 mask = (struct sockaddr_in *)(ifa->ifa_netmask); 15080 sin = &sifa->address.sin; 15081 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); 15082 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is "); 15083 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 15084 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr); 15085 15086 #if defined(__FreeBSD__) 15087 sin = &ro->ro_nh->gw4_sa; 15088 #else 15089 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway; 15090 #endif 15091 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); 15092 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is "); 15093 #if defined(__FreeBSD__) 15094 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ro->ro_nh->gw_sa); 15095 #else 15096 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); 15097 #endif 15098 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr); 15099 if (srcnetaddr.s_addr == gwnetaddr.s_addr) { 15100 return (1); 15101 } 15102 #endif 15103 return (0); 15104 } 15105 #elif defined(__Userspace__) 15106 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */ 15107 int 15108 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) 15109 { 15110 return (0); 15111 } 15112 int 15113 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) 15114 { 15115 return (0); 15116 } 15117 15118 #endif