tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

user_mbuf.c (38732B)


      1 /*-
      2 * Copyright (c) 1982, 1986, 1988, 1993
      3 *      The Regents of the University of California.
      4 * All rights reserved.
      5 *
      6 * Redistribution and use in source and binary forms, with or without
      7 * modification, are permitted provided that the following conditions
      8 * are met:
      9 * 1. Redistributions of source code must retain the above copyright
     10 *    notice, this list of conditions and the following disclaimer.
     11 * 2. Redistributions in binary form must reproduce the above copyright
     12 *    notice, this list of conditions and the following disclaimer in the
     13 *    documentation and/or other materials provided with the distribution.
     14 * 3. Neither the name of the University nor the names of its contributors
     15 *    may be used to endorse or promote products derived from this software
     16 *    without specific prior written permission.
     17 *
     18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28 * SUCH DAMAGE.
     29 *
     30 */
     31 
     32 /*
     33 *  __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
     34 *  We are initializing two zones for Mbufs and Clusters.
     35 *
     36 */
     37 
     38 #if defined(_WIN32) && !defined(_MSC_VER)
     39 #include <minmax.h>
     40 #endif
     41 
     42 #include <stdio.h>
     43 #include <string.h>
     44 /* #include <sys/param.h> This defines MSIZE 256 */
     45 #if !defined(SCTP_SIMPLE_ALLOCATOR)
     46 #include "umem.h"
     47 #endif
     48 #include "user_mbuf.h"
     49 #include "user_environment.h"
     50 #include "user_atomic.h"
     51 #include "netinet/sctp_pcb.h"
     52 
     53 #define KIPC_MAX_LINKHDR        4       /* int: max length of link header (see sys/sysclt.h) */
     54 #define KIPC_MAX_PROTOHDR	5	/* int: max length of network header (see sys/sysclt.h)*/
     55 int max_linkhdr = KIPC_MAX_LINKHDR;
     56 int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
     57 
     58 /*
     59 * Zones from which we allocate.
     60 */
     61 sctp_zone_t	zone_mbuf;
     62 sctp_zone_t	zone_clust;
     63 sctp_zone_t	zone_ext_refcnt;
     64 
     65 /* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
     66 * and mb_dtor_clust.
     67 * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
     68 * struct mbuf * clust_mb_args; does not work.
     69 */
     70 struct clust_args clust_mb_args;
     71 
     72 
     73 /* __Userspace__
     74 * Local prototypes.
     75 */
     76 static int	mb_ctor_mbuf(void *, void *, int);
     77 static int      mb_ctor_clust(void *, void *, int);
     78 static void	mb_dtor_mbuf(void *,  void *);
     79 static void	mb_dtor_clust(void *, void *);
     80 
     81 
     82 /***************** Functions taken from user_mbuf.h *************/
     83 
     84 static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
     85 {
     86 int flags = pkthdr;
     87 
     88 m->m_next = NULL;
     89 m->m_nextpkt = NULL;
     90 m->m_len = 0;
     91 m->m_flags = flags;
     92 m->m_type = type;
     93 if (flags & M_PKTHDR) {
     94 	m->m_data = m->m_pktdat;
     95 	m->m_pkthdr.rcvif = NULL;
     96 	m->m_pkthdr.len = 0;
     97 	m->m_pkthdr.header = NULL;
     98 	m->m_pkthdr.csum_flags = 0;
     99 	m->m_pkthdr.csum_data = 0;
    100 	m->m_pkthdr.tso_segsz = 0;
    101 	m->m_pkthdr.ether_vtag = 0;
    102 	SLIST_INIT(&m->m_pkthdr.tags);
    103 } else
    104 	m->m_data = m->m_dat;
    105 
    106 return (0);
    107 }
    108 
    109 /* __Userspace__ */
    110 struct mbuf *
    111 m_get(int how, short type)
    112 {
    113 struct mbuf *mret;
    114 #if defined(SCTP_SIMPLE_ALLOCATOR)
    115 struct mb_args mbuf_mb_args;
    116 
    117 /* The following setter function is not yet being enclosed within
    118  * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
    119  * mb_dtor_mbuf. See comment there
    120  */
    121 mbuf_mb_args.flags = 0;
    122 mbuf_mb_args.type = type;
    123 #endif
    124 /* Mbuf master zone, zone_mbuf, has already been
    125  * created in mbuf_initialize() */
    126 mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
    127 #if defined(SCTP_SIMPLE_ALLOCATOR)
    128 mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
    129 #endif
    130 /*mret =  ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
    131 
    132 /* There are cases when an object available in the current CPU's
    133  * loaded magazine and in those cases the object's constructor is not applied.
    134  * If that is the case, then we are duplicating constructor initialization here,
    135  * so that the mbuf is properly constructed before returning it.
    136  */
    137 if (mret) {
    138 #if USING_MBUF_CONSTRUCTOR
    139 	if (! (mret->m_type == type) ) {
    140 		mbuf_constructor_dup(mret, 0, type);
    141 	}
    142 #else
    143 	mbuf_constructor_dup(mret, 0, type);
    144 #endif
    145 
    146 }
    147 return mret;
    148 }
    149 
    150 
    151 /* __Userspace__ */
    152 struct mbuf *
    153 m_gethdr(int how, short type)
    154 {
    155 struct mbuf *mret;
    156 #if defined(SCTP_SIMPLE_ALLOCATOR)
    157 struct mb_args mbuf_mb_args;
    158 
    159 /* The following setter function is not yet being enclosed within
    160  * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
    161  * mb_dtor_mbuf. See comment there
    162  */
    163 mbuf_mb_args.flags = M_PKTHDR;
    164 mbuf_mb_args.type = type;
    165 #endif
    166 mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
    167 #if defined(SCTP_SIMPLE_ALLOCATOR)
    168 mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
    169 #endif
    170 /*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
    171 /* There are cases when an object available in the current CPU's
    172  * loaded magazine and in those cases the object's constructor is not applied.
    173  * If that is the case, then we are duplicating constructor initialization here,
    174  * so that the mbuf is properly constructed before returning it.
    175  */
    176 if (mret) {
    177 #if USING_MBUF_CONSTRUCTOR
    178 	if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) {
    179 		mbuf_constructor_dup(mret, M_PKTHDR, type);
    180 	}
    181 #else
    182 	mbuf_constructor_dup(mret, M_PKTHDR, type);
    183 #endif
    184 }
    185 return mret;
    186 }
    187 
    188 /* __Userspace__ */
    189 struct mbuf *
    190 m_free(struct mbuf *m)
    191 {
    192 
    193 struct mbuf *n = m->m_next;
    194 
    195 if (m->m_flags & M_EXT)
    196 	mb_free_ext(m);
    197 else if ((m->m_flags & M_NOFREE) == 0) {
    198 #if defined(SCTP_SIMPLE_ALLOCATOR)
    199 	mb_dtor_mbuf(m, NULL);
    200 #endif
    201 	SCTP_ZONE_FREE(zone_mbuf, m);
    202 }
    203 	/*umem_cache_free(zone_mbuf, m);*/
    204 return (n);
    205 }
    206 
    207 
    208 static void
    209 clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
    210 {
    211 u_int *refcnt;
    212 int type, size;
    213 
    214 if (m == NULL) {
    215 	return;
    216 }
    217 /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
    218 type = EXT_CLUSTER;
    219 size = MCLBYTES;
    220 
    221 refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
    222 /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
    223 #if !defined(SCTP_SIMPLE_ALLOCATOR)
    224 if (refcnt == NULL) {
    225 	umem_reap();
    226 	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
    227 	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
    228 }
    229 #endif
    230 *refcnt = 1;
    231 m->m_ext.ext_buf = (caddr_t)m_clust;
    232 m->m_data = m->m_ext.ext_buf;
    233 m->m_flags |= M_EXT;
    234 m->m_ext.ext_free = NULL;
    235 m->m_ext.ext_args = NULL;
    236 m->m_ext.ext_size = size;
    237 m->m_ext.ext_type = type;
    238 m->m_ext.ref_cnt = refcnt;
    239 return;
    240 }
    241 
    242 
    243 /* __Userspace__ */
    244 void
    245 m_clget(struct mbuf *m, int how)
    246 {
    247 caddr_t mclust_ret;
    248 #if defined(SCTP_SIMPLE_ALLOCATOR)
    249 struct clust_args clust_mb_args_l;
    250 #endif
    251 if (m->m_flags & M_EXT) {
    252 	SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
    253 }
    254 m->m_ext.ext_buf = (char *)NULL;
    255 #if defined(SCTP_SIMPLE_ALLOCATOR)
    256 clust_mb_args_l.parent_mbuf = m;
    257 #endif
    258 mclust_ret = SCTP_ZONE_GET(zone_clust, char);
    259 #if defined(SCTP_SIMPLE_ALLOCATOR)
    260 mb_ctor_clust(mclust_ret, &clust_mb_args_l, 0);
    261 #endif
    262 /*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
    263 /*
    264  On a cluster allocation failure, call umem_reap() and retry.
    265  */
    266 
    267 if (mclust_ret == NULL) {
    268 #if !defined(SCTP_SIMPLE_ALLOCATOR)
    269 /*	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
    270 	mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
    271 #else*/
    272 	umem_reap();
    273 	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
    274 #endif
    275 	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
    276 	/* if (NULL == mclust_ret) { */
    277 	SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
    278 	/* } */
    279 }
    280 
    281 #if USING_MBUF_CONSTRUCTOR
    282 if ((m->m_ext.ext_buf == NULL)) {
    283 	clust_constructor_dup(mclust_ret, m);
    284 }
    285 #else
    286 clust_constructor_dup(mclust_ret, m);
    287 #endif
    288 }
    289 
/*
 * __Userspace__ version of m_getm2(): allocate an mbuf chain holding at
 * least 'len' bytes and append it to 'm' (or return the new chain
 * directly when 'm' is NULL).
 *
 *   how       - allocation wait flag (unused by the userspace zones)
 *   type      - mbuf type (e.g. MT_DATA)
 *   flags     - only M_PKTHDR and M_EOR are honored
 *   allonebuf - if non-zero, the whole request must fit in a single
 *               buffer (one mbuf or one cluster); otherwise fail
 *
 * Returns the head of the (possibly extended) chain, or NULL on
 * allocation failure (any partial new chain is freed).
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags, int allonebuf)
{
struct mbuf *mb, *nm = NULL, *mtail = NULL;
int size, mbuf_threshold, space_needed = len;

KASSERT(len >= 0, ("%s: len is < 0", __func__));

/* Validate flags. */
flags &= (M_PKTHDR | M_EOR);

/* Packet header mbuf must be first in chain. */
if ((flags & M_PKTHDR) && m != NULL) {
	flags &= ~M_PKTHDR;
}

/* The sysctl bounds how many chained plain mbufs are preferred before
 * switching to a cluster; allonebuf forces a single-buffer decision. */
if (allonebuf == 0)
	mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
else
	mbuf_threshold = 1;

/* Loop and append maximum sized mbufs to the chain tail. */
while (len > 0) {
	if ((!allonebuf && len >= MCLBYTES) || (len > (int)(((mbuf_threshold - 1) * MLEN) + MHLEN))) {
		/* Large remainder: use a cluster-backed mbuf. */
		mb = m_gethdr(how, type);
		MCLGET(mb, how);
		size = MCLBYTES;
		/* SCTP_BUF_LEN(mb) = MCLBYTES; */
	} else if (flags & M_PKTHDR) {
		mb = m_gethdr(how, type);
		if (len < MHLEN) {
			size = len;
		} else {
			size = MHLEN;
		}
	} else {
		mb = m_get(how, type);
		if (len < MLEN) {
			size = len;
		} else {
			size = MLEN;
		}
	}

	/* Fail the whole operation if one mbuf can't be allocated. */
	if (mb == NULL) {
		if (nm != NULL)
			m_freem(nm);
		return (NULL);
	}

	/* allonebuf: the single buffer just chosen must cover the whole
	 * original request, otherwise give up. */
	if (allonebuf != 0 && size < space_needed) {
		m_freem(mb);
		return (NULL);
	}

	/* Book keeping. */
	len -= size;
	if (mtail != NULL)
		mtail->m_next = mb;
	else
		nm = mb;
	mtail = mb;
	flags &= ~M_PKTHDR;     /* Only valid on the first mbuf. */
}
if (flags & M_EOR) {
	mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */
}

/* If mbuf was supplied, append new chain to the end of it. */
if (m != NULL) {
	for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	mtail->m_next = nm;
	mtail->m_flags &= ~M_EOR;
} else {
	m = nm;
}

return (m);
}
    370 
    371 /*
    372 * Copy the contents of uio into a properly sized mbuf chain.
    373 */
/*
 * Copy uio data into a new mbuf chain sized by m_getm2().
 *   how   - allocation wait flag (unused by the userspace zones)
 *   len   - cap on bytes to copy; 0 means "all of uio_resid"
 *   align - leading bytes to skip in the first mbuf; must be < MHLEN
 *   flags - passed to m_getm2() (M_PKTHDR keeps pkthdr.len updated)
 * Returns the chain, or NULL on bad alignment, allocation failure or
 * uiomove error.
 */
struct mbuf *
m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
{
struct mbuf *m, *mb;
int error, length;
ssize_t total;
int progress = 0;

/*
 * len can be zero or an arbitrary large value bound by
 * the total data supplied by the uio.
 */
if (len > 0)
	total = min(uio->uio_resid, len);
else
	total = uio->uio_resid;
/*
 * The smallest unit returned by m_getm2() is a single mbuf
 * with pkthdr.  We can't align past it.
 */
if (align >= MHLEN)
	return (NULL);
/*
 * Give us the full allocation or nothing.
 * If len is zero return the smallest empty mbuf.
 */
m = m_getm2(NULL, (int)max(total + align, 1), how, MT_DATA, flags, 0);
if (m == NULL)
	return (NULL);
m->m_data += align;

/* Fill all mbufs with uio data and update header information. */
for (mb = m; mb != NULL; mb = mb->m_next) {
	/* Copy at most the remaining bytes that fit in this mbuf. */
	length = (int)min(M_TRAILINGSPACE(mb), total - progress);
	error = uiomove(mtod(mb, void *), length, uio);
	if (error) {
		m_freem(m);
		return (NULL);
	}

	mb->m_len = length;
	progress += length;
	/* Keep the packet-header length on the head mbuf in sync. */
	if (flags & M_PKTHDR)
		m->m_pkthdr.len += length;
}
KASSERT(progress == total, ("%s: progress != total", __func__));

return (m);
}
    423 
    424 u_int
    425 m_length(struct mbuf *m0, struct mbuf **last)
    426 {
    427 struct mbuf *m;
    428 u_int len;
    429 
    430 len = 0;
    431 for (m = m0; m != NULL; m = m->m_next) {
    432 	len += m->m_len;
    433 	if (m->m_next == NULL)
    434 		break;
    435 }
    436 if (last != NULL)
    437 *last = m;
    438 return (len);
    439 }
    440 
    441 struct mbuf *
    442 m_last(struct mbuf *m)
    443 {
    444 while (m->m_next) {
    445 	m = m->m_next;
    446 }
    447 return (m);
    448 }
    449 
    450 /*
    451 * Unlink a tag from the list of tags associated with an mbuf.
    452 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{

/* O(n): SLIST_REMOVE walks the list to find the predecessor of t. */
SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}
    459 
    460 /*
    461 * Reclaim resources associated with a tag.
    462 */
static __inline void
m_tag_free(struct m_tag *t)
{

/* Dispatch to the tag's own free routine, set by whoever created it. */
(*t->m_tag_free)(t);
}
    469 
    470 /*
    471 * Set up the contents of a tag.  Note that this does not fill in the free
    472 * method; the caller is expected to do that.
    473 *
    474 * XXX probably should be called m_tag_init, but that was already taken.
    475 */
    476 static __inline void
    477 m_tag_setup(struct m_tag *t, uint32_t cookie, int type, int len)
    478 {
    479 
    480 t->m_tag_id = type;
    481 t->m_tag_len = len;
    482 t->m_tag_cookie = cookie;
    483 }
    484 
    485 /************ End functions from user_mbuf.h  ******************/
    486 
    487 
    488 
    489 /************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
    490 
/* __Userspace__
 * Create the three zones used by this file: zone_mbuf (MSIZE objects),
 * zone_clust (MCLBYTES clusters) and zone_ext_refcnt (u_int external
 * reference counts).  'dummy' is unused.
 */
void
mbuf_initialize(void *dummy)
{

/*
 * __Userspace__Configure UMA zones for Mbufs and Clusters.
 * (TODO: m_getcl() - using packet secondary zone).
 * There is no provision for trash_init and trash_fini in umem.
 *
 */
/* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
			mb_ctor_mbuf, mb_dtor_mbuf, NULL,
			&mbuf_mb_args,
			NULL, 0);
zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
#else
/* umem path: the zone runs mb_ctor_mbuf/mb_dtor_mbuf itself. */
zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
                              NULL,
                              NULL, 0);
#endif
/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
			NULL, NULL, NULL,
			NULL,
			NULL, 0);*/
/* The refcount zone needs no constructor/destructor under either
 * allocator, so it is created the same way for both. */
SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);

 /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
			 mb_ctor_clust, mb_dtor_clust, NULL,
			 &clust_mb_args,
			 NULL, 0);
zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
#else
zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
							   mb_ctor_clust, mb_dtor_clust, NULL,
							   &clust_mb_args,
							   NULL, 0);
#endif

/* uma_prealloc() goes here... */

/* __Userspace__ Add umem_reap here for low memory situation?
 *
 */

}
    541 
    542 
    543 
    544 /*
    545 * __Userspace__
    546 *
    547 * Constructor for Mbuf master zone. We have a different constructor
    548 * for allocating the cluster.
    549 *
    550 * The 'arg' pointer points to a mb_args structure which
    551 * contains call-specific information required to support the
    552 * mbuf allocation API.  See user_mbuf.h.
    553 *
    554 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
    555 * was passed when umem_cache_alloc was called.
    556 * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
    557 * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines
    558 * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc
    559 * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL
    560 * flag.
    561 *
    562 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
    563 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
    564 * It also mentions that umem_nofail_callback is Evolving.
    565 *
    566 */
    567 static int
    568 mb_ctor_mbuf(void *mem, void *arg, int flgs)
    569 {
    570 #if USING_MBUF_CONSTRUCTOR
    571 struct mbuf *m;
    572 struct mb_args *args;
    573 
    574 int flags;
    575 short type;
    576 
    577 m = (struct mbuf *)mem;
    578 args = (struct mb_args *)arg;
    579 flags = args->flags;
    580 type = args->type;
    581 
    582 m->m_next = NULL;
    583 m->m_nextpkt = NULL;
    584 m->m_len = 0;
    585 m->m_flags = flags;
    586 m->m_type = type;
    587 if (flags & M_PKTHDR) {
    588 	m->m_data = m->m_pktdat;
    589 	m->m_pkthdr.rcvif = NULL;
    590 	m->m_pkthdr.len = 0;
    591 	m->m_pkthdr.header = NULL;
    592 	m->m_pkthdr.csum_flags = 0;
    593 	m->m_pkthdr.csum_data = 0;
    594 	m->m_pkthdr.tso_segsz = 0;
    595 	m->m_pkthdr.ether_vtag = 0;
    596 	SLIST_INIT(&m->m_pkthdr.tags);
    597 } else
    598 	m->m_data = m->m_dat;
    599 #endif
    600 return (0);
    601 }
    602 
    603 
    604 /*
    605 * __Userspace__
    606 * The Mbuf master zone destructor.
    607 * This would be called in response to umem_cache_destroy
    608 * TODO: Recheck if this is what we want to do in this destructor.
    609 * (Note: the number of times mb_dtor_mbuf is called is equal to the
    610 * number of individual mbufs allocated from zone_mbuf.
    611 */
    612 static void
    613 mb_dtor_mbuf(void *mem, void *arg)
    614 {
    615 struct mbuf *m;
    616 
    617 m = (struct mbuf *)mem;
    618 if ((m->m_flags & M_PKTHDR) != 0) {
    619 	m_tag_delete_chain(m, NULL);
    620 }
    621 }
    622 
    623 
    624 /* __Userspace__
    625 * The Cluster zone constructor.
    626 *
    627 * Here the 'arg' pointer points to the Mbuf which we
    628 * are configuring cluster storage for.  If 'arg' is
    629 * empty we allocate just the cluster without setting
    630 * the mbuf to it.  See mbuf.h.
    631 */
    632 static int
    633 mb_ctor_clust(void *mem, void *arg, int flgs)
    634 {
    635 
    636 #if USING_MBUF_CONSTRUCTOR
    637 struct mbuf *m;
    638 struct clust_args * cla;
    639 u_int *refcnt;
    640 int type, size;
    641 sctp_zone_t zone;
    642 
    643 /* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
    644 type = EXT_CLUSTER;
    645 zone = zone_clust;
    646 size = MCLBYTES;
    647 
    648 cla = (struct clust_args *)arg;
    649 m = cla->parent_mbuf;
    650 
    651 refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
    652 /*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
    653 *refcnt = 1;
    654 
    655 if (m != NULL) {
    656 	m->m_ext.ext_buf = (caddr_t)mem;
    657 	m->m_data = m->m_ext.ext_buf;
    658 	m->m_flags |= M_EXT;
    659 	m->m_ext.ext_free = NULL;
    660 	m->m_ext.ext_args = NULL;
    661 	m->m_ext.ext_size = size;
    662 	m->m_ext.ext_type = type;
    663 	m->m_ext.ref_cnt = refcnt;
    664 }
    665 #endif
    666 return (0);
    667 }
    668 
    669 /* __Userspace__ */
/* Cluster zone destructor: intentionally a no-op (see comment below). */
static void
mb_dtor_clust(void *mem, void *arg)
{

 /* mem is of type caddr_t.  In sys/types.h we have typedef char * caddr_t;  */
 /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
  * mb_dtor_clust is called is equal to the number of individual mbufs allocated
  * from zone_clust. Similarly for mb_dtor_mbuf).
  * At this point the following:
  *  struct mbuf *m;
  *   m = (struct mbuf *)arg;
  *  assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since  m->m_ext.ref_cnt = NULL;
  *  has been done in mb_free_ext().
  */

}
    686 
    687 
    688 
    689 
    690 /* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
m_tag_unlink(m, t);
/* m_tag_free() dispatches to the tag's registered free routine. */
m_tag_free(t);
}
    698 
    699 
    700 /* Unlink and free a packet tag chain, starting from given tag. */
    701 void
    702 m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
    703 {
    704 
    705 struct m_tag *p, *q;
    706 
    707 KASSERT(m, ("m_tag_delete_chain: null mbuf"));
    708 if (t != NULL)
    709 	p = t;
    710 else
    711 	p = SLIST_FIRST(&m->m_pkthdr.tags);
    712 if (p == NULL)
    713 	return;
    714 while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
    715 	m_tag_delete(m, q);
    716 m_tag_delete(m, p);
    717 }
    718 
#if 0
/* Debug helper (currently compiled out): dump each mbuf of a chain.
 * NOTE(review): SCTP_DEBUG_USR is used here as if it were the logging
 * macro itself, while the rest of this file uses
 * SCTPDBG(SCTP_DEBUG_USR, ...) — confirm before re-enabling.
 */
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
SCTP_DEBUG_USR(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
for(; m; m=m->m_next) {
	SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
	if (m->m_flags & M_EXT)
		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
}
}
#endif
    731 
    732 /*
    733 * Free an entire chain of mbufs and associated external buffers, if
    734 * applicable.
    735 */
    736 void
    737 m_freem(struct mbuf *mb)
    738 {
    739 while (mb != NULL)
    740 	mb = m_free(mb);
    741 }
    742 
    743 /*
    744 * __Userspace__
    745 * clean mbufs with M_EXT storage attached to them
    746 * if the reference count hits 1.
    747 */
/* __Userspace__
 * Drop one reference to m's external storage; free the cluster and its
 * refcount when this was the last reference, then return the mbuf
 * itself to zone_mbuf unless M_NOFREE is set.
 */
void
mb_free_ext(struct mbuf *m)
{

int skipmbuf;

KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

/*
 * check if the header is embedded in the cluster
 */
skipmbuf = (m->m_flags & M_NOFREE);

/* Free the external attached storage if this
 * mbuf is the only reference to it.
 *__Userspace__ TODO: jumbo frames
 *
*/
/* NOTE: We had the same code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
         reduces to here before but the IPHONE malloc commit had changed
         this to compare to 0 instead of 1 (see next line).  Why?
        . .. this caused a huge memory leak in Linux.
*/
#ifdef IPHONE
if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
#else
if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
#endif
{
	if (m->m_ext.ext_type == EXT_CLUSTER){
#if defined(SCTP_SIMPLE_ALLOCATOR)
		mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
#endif
		SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
		SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
		m->m_ext.ref_cnt = NULL;
	}
}

/* M_NOFREE: the mbuf header itself lives elsewhere; stop here. */
if (skipmbuf)
	return;


/* __Userspace__ Also freeing the storage for ref_cnt
 * Free this mbuf back to the mbuf zone with all m_ext
 * information purged.
 */
m->m_ext.ext_buf = NULL;
m->m_ext.ext_free = NULL;
m->m_ext.ext_args = NULL;
m->m_ext.ref_cnt = NULL;
m->m_ext.ext_size = 0;
m->m_ext.ext_type = 0;
m->m_flags &= ~M_EXT;
#if defined(SCTP_SIMPLE_ALLOCATOR)
mb_dtor_mbuf(m, NULL);
#endif
SCTP_ZONE_FREE(zone_mbuf, m);

/*umem_cache_free(zone_mbuf, m);*/
}
    810 
    811 /*
    812 * "Move" mbuf pkthdr from "from" to "to".
    813 * "from" must have M_PKTHDR set, and "to" must be empty.
    814 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

/* Take all copyable flags from 'from' but preserve 'to's M_EXT state;
 * the flags must be settled before choosing the data pointer below. */
to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
if ((to->m_flags & M_EXT) == 0)
	to->m_data = to->m_pktdat;
to->m_pkthdr = from->m_pkthdr;		/* especially tags */
SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
from->m_flags &= ~M_PKTHDR;
}
    826 
    827 
    828 /*
    829 * Rearange an mbuf chain so that len bytes are contiguous
    830 * and in the data area of an mbuf (so that mtod and dtom
    831 * will work for a structure of size len).  Returns the resulting
    832 * mbuf chain on success, frees it and returns null on failure.
    833 * If there is room, it will add up to max_protohdr-len extra bytes to the
    834 * contiguous region in an attempt to avoid being called next time.
    835 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
struct mbuf *m;
int count;
int space;

/*
 * If first mbuf has no cluster, and has room for len bytes
 * without shifting current data, pullup into it,
 * otherwise allocate a new mbuf to prepend to the chain.
 */
if ((n->m_flags & M_EXT) == 0 &&
    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
	if (n->m_len >= len)
		return (n);
	m = n;
	n = n->m_next;
	len -= m->m_len;
} else {
	/* A fresh (non-cluster) mbuf can hold at most MHLEN bytes. */
	if (len > MHLEN)
		goto bad;
	MGET(m, M_NOWAIT, n->m_type);
	if (m == NULL)
		goto bad;
	m->m_len = 0;
	if (n->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(m, n);
}
space = (int)(&m->m_dat[MLEN] - (m->m_data + m->m_len));
do {
	/* Copy up to max_protohdr bytes beyond 'len' when space allows,
	 * to reduce the chance of another pullup soon after. */
	count = min(min(max(len, max_protohdr), space), n->m_len);
	memcpy(mtod(m, caddr_t) + m->m_len,mtod(n, caddr_t), (u_int)count);
	len -= count;
	m->m_len += count;
	n->m_len -= count;
	space -= count;
	/* Advance within n, or free it once fully drained. */
	if (n->m_len)
		n->m_data += count;
	else
		n = m_free(n);
} while (len > 0 && n);
if (len > 0) {
	(void) m_free(m);
	goto bad;
}
m->m_next = n;
return (m);
bad:
m_freem(n);
return (NULL);
}
    888 
    889 
    890 static struct mbuf *
    891 m_dup1(struct mbuf *m, int off, int len, int wait)
    892 {
    893 struct mbuf *n = NULL;
    894 int copyhdr;
    895 
    896 if (len > MCLBYTES)
    897 	return NULL;
    898 if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
    899 	copyhdr = 1;
    900 else
    901 	copyhdr = 0;
    902 if (len >= MINCLSIZE) {
    903 	if (copyhdr == 1) {
    904 		m_clget(n, wait); /* TODO: include code for copying the header */
    905 		m_dup_pkthdr(n, m, wait);
    906 	} else
    907 		m_clget(n, wait);
    908 } else {
    909 	if (copyhdr == 1)
    910 		n = m_gethdr(wait, m->m_type);
    911 	else
    912 		n = m_get(wait, m->m_type);
    913 }
    914 if (!n)
    915 	return NULL; /* ENOBUFS */
    916 
    917 if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
    918 	m_free(n);
    919 	return NULL;
    920 }
    921 m_copydata(m, off, len, mtod(n, caddr_t));
    922 n->m_len = len;
    923 return n;
    924 }
    925 
    926 
    927 /* Taken from sys/kern/uipc_mbuf2.c */
    928 struct mbuf *
    929 m_pulldown(struct mbuf *m, int off, int len, int *offp)
    930 {
    931 struct mbuf *n, *o;
    932 int hlen, tlen, olen;
    933 int writable;
    934 
    935 /* check invalid arguments. */
    936 KASSERT(m, ("m == NULL in m_pulldown()"));
    937 if (len > MCLBYTES) {
    938 	m_freem(m);
    939 	return NULL;    /* impossible */
    940 }
    941 
    942 #ifdef PULLDOWN_DEBUG
    943 {
    944 	struct mbuf *t;
    945 	SCTP_DEBUG_USR(SCTP_DEBUG_USR, "before:");
    946 	for (t = m; t; t = t->m_next)
    947 		SCTP_DEBUG_USR(SCTP_DEBUG_USR, " %d", t->m_len);
    948 	SCTP_DEBUG_USR(SCTP_DEBUG_USR, "\n");
    949 }
    950 #endif
    951 n = m;
    952 while (n != NULL && off > 0) {
    953 	if (n->m_len > off)
    954 		break;
    955 	off -= n->m_len;
    956 	n = n->m_next;
    957 }
    958 /* be sure to point non-empty mbuf */
    959 while (n != NULL && n->m_len == 0)
    960 	n = n->m_next;
    961 if (!n) {
    962 	m_freem(m);
    963 	return NULL;    /* mbuf chain too short */
    964 }
    965 
    966 writable = 0;
    967 if ((n->m_flags & M_EXT) == 0 ||
    968     (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
    969 	writable = 1;
    970 
    971 /*
    972  * the target data is on <n, off>.
    973  * if we got enough data on the mbuf "n", we're done.
    974  */
    975 if ((off == 0 || offp) && len <= n->m_len - off && writable)
    976 	goto ok;
    977 
    978 /*
    979  * when len <= n->m_len - off and off != 0, it is a special case.
    980  * len bytes from <n, off> sits in single mbuf, but the caller does
    981  * not like the starting position (off).
    982  * chop the current mbuf into two pieces, set off to 0.
    983  */
    984 if (len <= n->m_len - off) {
    985 	o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
    986 	if (o == NULL) {
    987 		m_freem(m);
    988 	return NULL;    /* ENOBUFS */
    989 	}
    990 	n->m_len = off;
    991 	o->m_next = n->m_next;
    992 	n->m_next = o;
    993 	n = n->m_next;
    994 	off = 0;
    995 	goto ok;
    996 }
    997 /*
    998  * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
    999  * and construct contiguous mbuf with m_len == len.
   1000  * note that hlen + tlen == len, and tlen > 0.
   1001  */
   1002 hlen = n->m_len - off;
   1003 tlen = len - hlen;
   1004 
   1005 /*
   1006  * ensure that we have enough trailing data on mbuf chain.
   1007  * if not, we can do nothing about the chain.
   1008  */
   1009 olen = 0;
   1010 for (o = n->m_next; o != NULL; o = o->m_next)
   1011 	olen += o->m_len;
   1012 if (hlen + olen < len) {
   1013 	m_freem(m);
   1014 	return NULL;    /* mbuf chain too short */
   1015 }
   1016 
   1017 /*
   1018  * easy cases first.
   1019  * we need to use m_copydata() to get data from <n->m_next, 0>.
   1020  */
   1021 if ((off == 0 || offp) && (M_TRAILINGSPACE(n) >= tlen) && writable) {
   1022 	m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
   1023 	n->m_len += tlen;
   1024 	m_adj(n->m_next, tlen);
   1025 	goto ok;
   1026 }
   1027 
   1028 if ((off == 0 || offp) && (M_LEADINGSPACE(n->m_next) >= hlen) && writable) {
   1029 	n->m_next->m_data -= hlen;
   1030 	n->m_next->m_len += hlen;
   1031 	memcpy( mtod(n->m_next, caddr_t), mtod(n, caddr_t) + off,hlen);
   1032 	n->m_len -= hlen;
   1033 	n = n->m_next;
   1034 	off = 0;
   1035 	goto ok;
   1036 }
   1037 
   1038 /*
   1039  * now, we need to do the hard way.  don't m_copy as there's no room
   1040  * on both end.
   1041  */
   1042 if (len > MLEN)
   1043 	m_clget(o, M_NOWAIT);
   1044 	/* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
   1045 else
   1046 	o = m_get(M_NOWAIT, m->m_type);
   1047 if (!o) {
   1048 	m_freem(m);
   1049 	return NULL;    /* ENOBUFS */
   1050 }
   1051 /* get hlen from <n, off> into <o, 0> */
   1052 o->m_len = hlen;
   1053 memcpy(mtod(o, caddr_t), mtod(n, caddr_t) + off, hlen);
   1054 n->m_len -= hlen;
   1055 /* get tlen from <n->m_next, 0> into <o, hlen> */
   1056 m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
   1057 o->m_len += tlen;
   1058 m_adj(n->m_next, tlen);
   1059 o->m_next = n->m_next;
   1060 n->m_next = o;
   1061 n = o;
   1062 off = 0;
   1063 ok:
   1064 #ifdef PULLDOWN_DEBUG
   1065 {
   1066 	struct mbuf *t;
   1067 	SCTP_DEBUG_USR(SCTP_DEBUG_USR, "after:");
   1068 	for (t = m; t; t = t->m_next)
   1069 		SCTP_DEBUG_USR(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
   1070 	SCTP_DEBUG_USR(SCTP_DEBUG_USR, " (off=%d)\n", off);
   1071 }
   1072 #endif
   1073 if (offp)
   1074 	*offp = off;
   1075 return n;
   1076 }
   1077 
/*
* Attach the cluster from *m to *n, set up m_ext in *n
* and bump the refcount of the cluster.
*/
   1082 static void
   1083 mb_dupcl(struct mbuf *n, struct mbuf *m)
   1084 {
   1085 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
   1086 KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
   1087 KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
   1088 
   1089 if (*(m->m_ext.ref_cnt) == 1)
   1090 	*(m->m_ext.ref_cnt) += 1;
   1091 else
   1092 	atomic_add_int(m->m_ext.ref_cnt, 1);
   1093 n->m_ext.ext_buf = m->m_ext.ext_buf;
   1094 n->m_ext.ext_free = m->m_ext.ext_free;
   1095 n->m_ext.ext_args = m->m_ext.ext_args;
   1096 n->m_ext.ext_size = m->m_ext.ext_size;
   1097 n->m_ext.ref_cnt = m->m_ext.ref_cnt;
   1098 n->m_ext.ext_type = m->m_ext.ext_type;
   1099 n->m_flags |= M_EXT;
   1100 }
   1101 
   1102 
   1103 /*
   1104 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
   1105 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
   1106 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
   1107 * Note that the copy is read-only, because clusters are not copied,
   1108 * only their reference counts are incremented.
   1109 */
   1110 
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
struct mbuf *n, **np;
int off = off0;
struct mbuf *top;
int copyhdr = 0;

KASSERT(off >= 0, ("m_copym, negative off %d", off));
KASSERT(len >= 0, ("m_copym, negative len %d", len));
KASSERT(m != NULL, ("m_copym, m is NULL"));

/* Without INVARIANTS the KASSERT above compiles away, so re-check. */
#if !defined(INVARIANTS)
if (m == NULL) {
	return (NULL);
}
#endif
/* Copy the packet header only when copying from the very start. */
if (off == 0 && m->m_flags & M_PKTHDR)
	copyhdr = 1;
/* Skip to the mbuf holding offset off0; off becomes mbuf-local. */
while (off > 0) {
	KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
	if (off < m->m_len)
		break;
	off -= m->m_len;
	m = m->m_next;
}
np = &top;
top = 0;
while (len > 0) {
	if (m == NULL) {
		/* Running off the end is only legal for M_COPYALL. */
		KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
		break;
	}
	if (copyhdr)
		MGETHDR(n, wait, m->m_type);
	else
		MGET(n, wait, m->m_type);
	*np = n;
	if (n == NULL)
		goto nospace;
	if (copyhdr) {
		/* Deep-copy the packet header (including tags) exactly once. */
		if (!m_dup_pkthdr(n, m, wait))
			goto nospace;
		if (len == M_COPYALL)
			n->m_pkthdr.len -= off0;
		else
			n->m_pkthdr.len = len;
		copyhdr = 0;
	}
	n->m_len = min(len, m->m_len - off);
	if (m->m_flags & M_EXT) {
		/* Clusters are shared (refcounted), not copied: read-only result. */
		n->m_data = m->m_data + off;
		mb_dupcl(n, m);
	} else
		memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off, (u_int)n->m_len);
	if (len != M_COPYALL)
		len -= n->m_len;
	off = 0;
	m = m->m_next;
	np = &n->m_next;
}

return (top);
nospace:
/* Allocation failed part-way through: release the partial copy. */
m_freem(top);
return (NULL);
}
   1178 
   1179 
   1180 int
   1181 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
   1182 {
   1183 struct m_tag *p, *t, *tprev = NULL;
   1184 
   1185 KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
   1186 m_tag_delete_chain(to, NULL);
   1187 SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
   1188 	t = m_tag_copy(p, how);
   1189 	if (t == NULL) {
   1190 		m_tag_delete_chain(to, NULL);
   1191 		return 0;
   1192 	}
   1193 	if (tprev == NULL)
   1194 		SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
   1195 	else
   1196 		SLIST_INSERT_AFTER(tprev, t, m_tag_link);
   1197 	tprev = t;
   1198 }
   1199 return 1;
   1200 }
   1201 
   1202 /*
   1203 * Duplicate "from"'s mbuf pkthdr in "to".
   1204 * "from" must have M_PKTHDR set, and "to" must be empty.
   1205 * In particular, this does a deep copy of the packet tags.
   1206 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

KASSERT(to, ("m_dup_pkthdr: to is NULL"));
KASSERT(from, ("m_dup_pkthdr: from is NULL"));
/* Take the copyable flags from "from" but keep "to"'s own M_EXT bit. */
to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
/* Without external storage, data must live in the internal pktdat area. */
if ((to->m_flags & M_EXT) == 0)
	to->m_data = to->m_pktdat;
/* Struct-copy the header; this aliases "from"'s tag list head, so the
 * list is re-initialized before the tags are deep-copied below. */
to->m_pkthdr = from->m_pkthdr;
SLIST_INIT(&to->m_pkthdr.tags);
return (m_tag_copy_chain(to, from, MBTOM(how)));
}
   1220 
   1221 /* Copy a single tag. */
   1222 struct m_tag *
   1223 m_tag_copy(struct m_tag *t, int how)
   1224 {
   1225 struct m_tag *p;
   1226 
   1227 KASSERT(t, ("m_tag_copy: null tag"));
   1228 p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
   1229 if (p == NULL)
   1230 	return (NULL);
   1231 memcpy(p + 1, t + 1, t->m_tag_len); /* Copy the data */
   1232 return p;
   1233 }
   1234 
   1235 /* Get a packet tag structure along with specified data following. */
   1236 struct m_tag *
   1237 m_tag_alloc(uint32_t cookie, int type, int len, int wait)
   1238 {
   1239 struct m_tag *t;
   1240 
   1241 if (len < 0)
   1242 	return NULL;
   1243 t = malloc(len + sizeof(struct m_tag));
   1244 if (t == NULL)
   1245 	return NULL;
   1246 m_tag_setup(t, cookie, type, len);
   1247 t->m_tag_free = m_tag_free_default;
   1248 return t;
   1249 }
   1250 
   1251 /* Free a packet tag. */
void
m_tag_free_default(struct m_tag *t)
{
 /* Default destructor: the tag was obtained via malloc() in m_tag_alloc(). */
 free(t);
}
   1257 
   1258 /*
   1259 * Copy data from a buffer back into the indicated mbuf chain,
   1260 * starting "off" bytes from the beginning, extending the mbuf
   1261 * chain if necessary.
   1262 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
int mlen;
struct mbuf *m = m0, *n;
int totlen = 0;

if (m0 == NULL)
	return;
/* Advance to the mbuf containing offset "off", appending zero-filled
 * mbufs if the existing chain is shorter than the offset. */
while (off > (mlen = m->m_len)) {
	off -= mlen;
	totlen += mlen;
	if (m->m_next == NULL) {
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			goto out;
		memset(mtod(n, caddr_t), 0, MLEN);
		n->m_len = min(MLEN, len + off);
		m->m_next = n;
	}
	m = m->m_next;
}
/* Overwrite the chain with the caller's data, extending as needed. */
while (len > 0) {
	mlen = min (m->m_len - off, len);
	memcpy(off + mtod(m, caddr_t), cp, (u_int)mlen);
	cp += mlen;
	len -= mlen;
	mlen += off;
	off = 0;
	totlen += mlen;
	if (len == 0)
		break;
	if (m->m_next == NULL) {
		/* Best effort: stop extending if allocation fails. */
		n = m_get(M_NOWAIT, m->m_type);
		if (n == NULL)
			break;
		n->m_len = min(MLEN, len);
		m->m_next = n;
	}
	m = m->m_next;
}
/* If the chain grew, keep the packet-header length in sync. */
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
	m->m_pkthdr.len = totlen;
}
   1307 
   1308 /*
   1309 * Apply function f to the data in an mbuf chain starting "off" bytes from
   1310 * the beginning, continuing for "len" bytes.
   1311 */
   1312 int
   1313 m_apply(struct mbuf *m, int off, int len,
   1314        int (*f)(void *, void *, u_int), void *arg)
   1315 {
   1316 u_int count;
   1317 int rval;
   1318 
   1319 KASSERT(off >= 0, ("m_apply, negative off %d", off));
   1320 KASSERT(len >= 0, ("m_apply, negative len %d", len));
   1321 while (off > 0) {
   1322 	KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
   1323 	if (off < m->m_len)
   1324 		break;
   1325 	off -= m->m_len;
   1326 	m = m->m_next;
   1327 }
   1328 while (len > 0) {
   1329 	KASSERT(m != NULL, ("m_apply, offset > size of mbuf chain"));
   1330 	count = min(m->m_len - off, len);
   1331 	rval = (*f)(arg, mtod(m, caddr_t) + off, count);
   1332 	if (rval)
   1333 		return (rval);
   1334 	len -= count;
   1335 	off = 0;
   1336 	m = m->m_next;
   1337 }
   1338 return (0);
   1339 }
   1340 
   1341 /*
   1342 * Lesser-used path for M_PREPEND:
   1343 * allocate new mbuf to prepend to chain,
   1344 * copy junk along.
   1345 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
struct mbuf *mn;

/* Allocate a new head mbuf of the same kind as the current head. */
if (m->m_flags & M_PKTHDR)
	MGETHDR(mn, how, m->m_type);
else
	MGET(mn, how, m->m_type);
if (mn == NULL) {
	m_freem(m);
	return (NULL);
}
/* Transfer the packet header (and its flag) to the new head. */
if (m->m_flags & M_PKTHDR)
	M_MOVE_PKTHDR(mn, m);
mn->m_next = m;
m = mn;
/* Position m_data so the "len" prepended bytes end flush with the
 * buffer when they fit (the flag check is on the NEW head here). */
if (m->m_flags & M_PKTHDR) {
	if (len < MHLEN)
		MH_ALIGN(m, len);
} else {
	if (len < MLEN)
		M_ALIGN(m, len);
}
m->m_len = len;
return (m);
}
   1373 
   1374 /*
   1375 * Copy data from an mbuf chain starting "off" bytes from the beginning,
   1376 * continuing for "len" bytes, into the indicated buffer.
   1377 */
   1378 void
   1379 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
   1380 {
   1381 u_int count;
   1382 
   1383 KASSERT(off >= 0, ("m_copydata, negative off %d", off));
   1384 KASSERT(len >= 0, ("m_copydata, negative len %d", len));
   1385 while (off > 0) {
   1386 	KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
   1387 	if (off < m->m_len)
   1388 		break;
   1389 	off -= m->m_len;
   1390 	m = m->m_next;
   1391 }
   1392 while (len > 0) {
   1393 	KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
   1394 	count = min(m->m_len - off, len);
   1395 	memcpy(cp, mtod(m, caddr_t) + off, count);
   1396 	len -= count;
   1397 	cp += count;
   1398 	off = 0;
   1399 	m = m->m_next;
   1400 }
   1401 }
   1402 
   1403 
   1404 /*
   1405 * Concatenate mbuf chain n to m.
   1406 * Both chains must be of the same type (e.g. MT_DATA).
   1407 * Any m_pkthdr is not updated.
   1408 */
   1409 void
   1410 m_cat(struct mbuf *m, struct mbuf *n)
   1411 {
   1412 while (m->m_next)
   1413 	m = m->m_next;
   1414 while (n) {
   1415 	if (m->m_flags & M_EXT ||
   1416 	    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
   1417 		/* just join the two chains */
   1418 		m->m_next = n;
   1419 		return;
   1420 	}
   1421 	/* splat the data from one into the other */
   1422 	memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t), (u_int)n->m_len);
   1423 	m->m_len += n->m_len;
   1424 	n = m_free(n);
   1425 }
   1426 }
   1427 
   1428 
void
m_adj(struct mbuf *mp, int req_len)
{
int len = req_len;
struct mbuf *m;
int count;

if ((m = mp) == NULL)
	return;
if (len >= 0) {
	/*
	 * Trim from head.
	 */
	while (m != NULL && len > 0) {
		if (m->m_len <= len) {
			/* Whole mbuf consumed: empty it but keep it linked. */
			len -= m->m_len;
			m->m_len = 0;
			m = m->m_next;
		} else {
			/* Partial trim: advance the data pointer. */
			m->m_len -= len;
			m->m_data += len;
			len = 0;
		}
	}
	m = mp;
	/* Charge only the bytes actually removed to the packet header. */
	if (mp->m_flags & M_PKTHDR)
		m->m_pkthdr.len -= (req_len - len);
} else {
	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	len = -len;
	count = 0;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len >= len) {
		/* Fast path: the whole trim fits inside the last mbuf. */
		m->m_len -= len;
		if (mp->m_flags & M_PKTHDR)
			mp->m_pkthdr.len -= len;
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	m = mp;
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len = count;
	for (; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (m->m_next != NULL) {
				/* Everything past this point is surplus. */
				m_freem(m->m_next);
				m->m_next = NULL;
			}
			break;
		}
		count -= m->m_len;
	}
}
}
   1502 
   1503 
   1504 /* m_split is used within sctp_handle_cookie_echo. */
   1505 
   1506 /*
   1507 * Partition an mbuf chain in two pieces, returning the tail --
   1508 * all but the first len0 bytes.  In case of failure, it returns NULL and
   1509 * attempts to restore the chain to its original state.
   1510 *
   1511 * Note that the resulting mbufs might be read-only, because the new
   1512 * mbuf can end up sharing an mbuf cluster with the original mbuf if
   1513 * the "breaking point" happens to lie within a cluster mbuf. Use the
   1514 * M_WRITABLE() macro to check for this case.
   1515 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
struct mbuf *m, *n;
u_int len = len0, remain;

/* MBUF_CHECKSLEEP(wait); */
/* Find the mbuf containing the split point; len becomes mbuf-local. */
for (m = m0; m && (int)len > m->m_len; m = m->m_next)
	len -= m->m_len;
if (m == NULL)
	return (NULL);
/* Bytes of "m" that belong to the tail chain. */
remain = m->m_len - len;
if (m0->m_flags & M_PKTHDR) {
	/* The tail gets its own packet header with the remaining length. */
	MGETHDR(n, wait, m0->m_type);
	if (n == NULL)
		return (NULL);
	n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
	n->m_pkthdr.len = m0->m_pkthdr.len - len0;
	m0->m_pkthdr.len = len0;
	if (m->m_flags & M_EXT)
		goto extpacket;
	if (remain > MHLEN) {
		/* m can't be the lead packet */
		MH_ALIGN(n, 0);
		/* Recurse: split inside m, hang the result off the header. */
		n->m_next = m_split(m, len, wait);
		if (n->m_next == NULL) {
			(void) m_free(n);
			return (NULL);
		} else {
			n->m_len = 0;
			return (n);
		}
	} else
		MH_ALIGN(n, remain);
} else if (remain == 0) {
	/* Clean cut at an mbuf boundary: just unlink the tail. */
	n = m->m_next;
	m->m_next = NULL;
	return (n);
} else {
	MGET(n, wait, m->m_type);
	if (n == NULL)
		return (NULL);
	M_ALIGN(n, remain);
}
extpacket:
/* Share the cluster (read-only result) or copy the tail bytes. */
if (m->m_flags & M_EXT) {
	n->m_data = m->m_data + len;
	mb_dupcl(n, m);
} else {
	memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + len, remain);
}
n->m_len = remain;
m->m_len = len;
n->m_next = m->m_next;
m->m_next = NULL;
return (n);
}
   1573 
   1574 
   1575 
   1576 
   1577 int
   1578 pack_send_buffer(caddr_t buffer, struct mbuf* mb){
   1579 
   1580 int count_to_copy;
   1581 int total_count_copied = 0;
   1582 int offset = 0;
   1583 
   1584 do {
   1585 	count_to_copy = mb->m_len;
   1586 	memcpy(buffer+offset, mtod(mb, caddr_t), count_to_copy);
   1587 	offset += count_to_copy;
   1588 	total_count_copied += count_to_copy;
   1589 	mb = mb->m_next;
   1590 } while(mb);
   1591 
   1592 return (total_count_copied);
   1593 }