netwerk/sctp/src/user_mbuf.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Fri, 16 Jan 2015 04:50:19 +0100
branch       TOR_BUG_9701
changeset    13:44a2da4a2ab2
permissions  -rwxr-xr-x

Replace accessor implementation with direct member state manipulation, by
request https://trac.torproject.org/projects/tor/ticket/9701#comment:32
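
For illustration, the requested pattern has roughly this shape (SCTP_BUF_LEN
is a hypothetical stand-in for the branch's accessor macros):

    SCTP_BUF_LEN(m) = 0;    /* before: accessor macro */
    m->m_len = 0;           /* after: direct member state manipulation */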

     1 /*-
     2  * Copyright (c) 1982, 1986, 1988, 1993
     3  *      The Regents of the University of California.
     4  * All rights reserved.
     5  *
     6  * Redistribution and use in source and binary forms, with or without
     7  * modification, are permitted provided that the following conditions
     8  * are met:
     9  * 1. Redistributions of source code must retain the above copyright
    10  *    notice, this list of conditions and the following disclaimer.
    11  * 2. Redistributions in binary form must reproduce the above copyright
    12  *    notice, this list of conditions and the following disclaimer in the
    13  *    documentation and/or other materials provided with the distribution.
    14  * 3. Neither the name of the University nor the names of its contributors
    15  *    may be used to endorse or promote products derived from this software
    16  *    without specific prior written permission.
    17  *
    18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
    19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
    22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
    24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
    27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
    28  * SUCH DAMAGE.
    29  *
    30  */
    32 /*
    33  *  __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
    34  *  We are initializing two zones for Mbufs and Clusters.
    35  *
    36  */
    38 #include <stdio.h>
    39 #include <string.h>
    40 /* #include <sys/param.h> This defines MSIZE 256 */
    41 #if !defined(SCTP_SIMPLE_ALLOCATOR)
    42 #include "umem.h"
    43 #endif
    44 #include "user_mbuf.h"
    45 #include "user_environment.h"
    46 #include "user_atomic.h"
    47 #include "netinet/sctp_pcb.h"
    49 struct mbstat mbstat;
    50 #define KIPC_MAX_LINKHDR        4       /* int: max length of link header (see sys/sysctl.h) */
    51 #define KIPC_MAX_PROTOHDR	5	/* int: max length of network header (see sys/sysctl.h)*/
    52 int max_linkhdr = KIPC_MAX_LINKHDR;
    53 int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
    55 /*
    56  * Zones from which we allocate.
    57  */
    58 sctp_zone_t	zone_mbuf;
    59 sctp_zone_t	zone_clust;
    60 sctp_zone_t	zone_ext_refcnt;
    62 /* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
    63  * and mb_dtor_clust.
    64  * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
    65  * struct mbuf * clust_mb_args; does not work.
    66  */
    67 struct clust_args clust_mb_args;
    70 /* __Userspace__
    71  * Local prototypes.
    72  */
    73 static int	mb_ctor_mbuf(void *, void *, int);
    74 static int      mb_ctor_clust(void *, void *, int);
    75 static void	mb_dtor_mbuf(void *,  void *);
    76 static void	mb_dtor_clust(void *, void *);
    79 /***************** Functions taken from user_mbuf.h *************/
    81 static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
    82 {
    83 	int flags = pkthdr;
    84 	if (type == MT_NOINIT)
    85 		return (0);
    87 	m->m_next = NULL;
    88 	m->m_nextpkt = NULL;
    89 	m->m_len = 0;
    90 	m->m_flags = flags;
    91 	m->m_type = type;
    92 	if (flags & M_PKTHDR) {
    93 		m->m_data = m->m_pktdat;
    94 		m->m_pkthdr.rcvif = NULL;
    95 		m->m_pkthdr.len = 0;
    96 		m->m_pkthdr.header = NULL;
    97 		m->m_pkthdr.csum_flags = 0;
    98 		m->m_pkthdr.csum_data = 0;
    99 		m->m_pkthdr.tso_segsz = 0;
   100 		m->m_pkthdr.ether_vtag = 0;
   101 		SLIST_INIT(&m->m_pkthdr.tags);
   102 	} else
   103 		m->m_data = m->m_dat;
   105 	return (0);
   106 }
   108 /* __Userspace__ */
   109 struct mbuf *
   110 m_get(int how, short type)
   111 {
   112 	struct mbuf *mret;
   113 #if defined(SCTP_SIMPLE_ALLOCATOR)
   114 	struct mb_args mbuf_mb_args;
   116 	/* The following initialization is not yet enclosed within
   117 	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
   118 	 * thoroughly tested. See the comment there.
   119 	 */
   120 	mbuf_mb_args.flags = 0;
   121 	mbuf_mb_args.type = type;
   122 #endif
   123 	/* Mbuf master zone, zone_mbuf, has already been
   124 	 * created in mbuf_init() */
   125 	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
   126 #if defined(SCTP_SIMPLE_ALLOCATOR)
   127 	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
   128 #endif
   129 	/*mret =  ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
   131 	/* There are cases when an object is available in the current CPU's
   132 	 * loaded magazine, and in those cases the object's constructor is not applied.
   133 	 * If that is the case, then we are duplicating constructor initialization here,
   134 	 * so that the mbuf is properly constructed before returning it.
   135 	 */
   136 	if (mret) {
   137 #if USING_MBUF_CONSTRUCTOR
   138 		if (! (mret->m_type == type) ) {
   139 			mbuf_constructor_dup(mret, 0, type);
   140 		}
   141 #else
   142 		mbuf_constructor_dup(mret, 0, type);
   143 #endif
   145 	}
   146 	return mret;
   147 }
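
/*
 * Usage sketch (the example_* helper is hypothetical; MT_DATA is assumed
 * to be one of the mbuf types from user_mbuf.h): allocate, fill and
 * release a single mbuf.
 */
#if 0
static void
example_mget(void)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m != NULL) {
		memcpy(mtod(m, caddr_t), "abc", 3);
		m->m_len = 3;
		(void) m_free(m);	/* returns m->m_next, NULL here */
	}
}
#endif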
   150 /* __Userspace__ */
   151 struct mbuf *
   152 m_gethdr(int how, short type)
   153 {
   154 	struct mbuf *mret;
   155 #if defined(SCTP_SIMPLE_ALLOCATOR)
   156 	struct mb_args mbuf_mb_args;
   158 	/* The following initialization is not yet enclosed within
   159 	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
   160 	 * thoroughly tested. See the comment there.
   161 	 */
   162 	mbuf_mb_args.flags = M_PKTHDR;
   163 	mbuf_mb_args.type = type;
   164 #endif
   165 	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
   166 #if defined(SCTP_SIMPLE_ALLOCATOR)
   167 	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
   168 #endif
   169 	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
   170 	/* There are cases when an object is available in the current CPU's
   171 	 * loaded magazine, and in those cases the object's constructor is not applied.
   172 	 * If that is the case, then we are duplicating constructor initialization here,
   173 	 * so that the mbuf is properly constructed before returning it.
   174 	 */
   175 	if (mret) {
   176 #if USING_MBUF_CONSTRUCTOR
   177 		if (! ((mret->m_flags & M_PKTHDR) && (mret->m_type == type)) ) {
   178 			mbuf_constructor_dup(mret, M_PKTHDR, type);
   179 		}
   180 #else
   181 		mbuf_constructor_dup(mret, M_PKTHDR, type);
   182 #endif
   183 	}
   184 	return mret;
   185 }
   187 /* __Userspace__ */
   188 struct mbuf *
   189 m_free(struct mbuf *m)
   190 {
   192 	struct mbuf *n = m->m_next;
   194 	if (m->m_flags & M_EXT)
   195 		mb_free_ext(m);
   196 	else if ((m->m_flags & M_NOFREE) == 0) {
   197 #if defined(SCTP_SIMPLE_ALLOCATOR)
   198 		mb_dtor_mbuf(m, NULL);
   199 #endif
   200 		SCTP_ZONE_FREE(zone_mbuf, m);
   201 	}
   202 		/*umem_cache_free(zone_mbuf, m);*/
   203 	return (n);
   204 }
   207 static int clust_constructor_dup(caddr_t m_clust, struct mbuf* m)
   208 {
   209 	u_int *refcnt;
   210 	int type, size;
   212 	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
   213 	type = EXT_CLUSTER;
   214 	size = MCLBYTES;
   216 	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
   217 	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
   218 	if (refcnt == NULL) {
   219 #if !defined(SCTP_SIMPLE_ALLOCATOR)
   220 		umem_reap();
   221 #endif
   222 		refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
   223 		/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
   224 	}
   225 	*refcnt = 1;
   226 	if (m != NULL) {
   227 		m->m_ext.ext_buf = (caddr_t)m_clust;
   228 		m->m_data = m->m_ext.ext_buf;
   229 		m->m_flags |= M_EXT;
   230 		m->m_ext.ext_free = NULL;
   231 		m->m_ext.ext_args = NULL;
   232 		m->m_ext.ext_size = size;
   233 		m->m_ext.ext_type = type;
   234 		m->m_ext.ref_cnt = refcnt;
   235 	}
   237 	return (0);
   238 }
   242 /* __Userspace__ */
   243 void
   244 m_clget(struct mbuf *m, int how)
   245 {
   246 	caddr_t mclust_ret;
   247 #if defined(SCTP_SIMPLE_ALLOCATOR)
   248 	struct clust_args clust_mb_args;
   249 #endif
   250 	if (m->m_flags & M_EXT) {
   251 		SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
   252 	}
   253 	m->m_ext.ext_buf = (char *)NULL;
   254 #if defined(SCTP_SIMPLE_ALLOCATOR)
   255 	clust_mb_args.parent_mbuf = m;
   256 #endif
   257 	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
   258 #if defined(SCTP_SIMPLE_ALLOCATOR)
   259 	mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
   260 #endif
   261 	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
   262 	/*
   263 	 On a cluster allocation failure, call umem_reap() and retry.
   264 	 */
   266 	if (mclust_ret == NULL) {
   267 #if !defined(SCTP_SIMPLE_ALLOCATOR)
   268 	/*	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
   269 		mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
   270 #else*/
   271 		umem_reap();
   272 		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
   273 #endif
   274 		/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
   275 		if (NULL == mclust_ret) {
   276 			SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
   277 		}
   278 	}
   280 #if USING_MBUF_CONSTRUCTOR
   281 	if ((m->m_ext.ext_buf == NULL)) {
   282 		clust_constructor_dup(mclust_ret, m);
   283 	}
   284 #else
   285 	clust_constructor_dup(mclust_ret, m);
   286 #endif
   287 }
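
/*
 * Usage sketch (hypothetical helper): attach cluster storage before
 * writing more than MLEN bytes. m_clget() reports failure only through
 * the absent M_EXT flag, so check it afterwards.
 */
#if 0
static void
example_clget(void)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		(void) m_free(m);	/* no cluster could be attached */
		return;
	}
	/* up to MCLBYTES may now be written at mtod(m, caddr_t) */
	(void) m_free(m);
}
#endif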
   289 /*
   290  * Unlink a tag from the list of tags associated with an mbuf.
   291  */
   292 static __inline void
   293 m_tag_unlink(struct mbuf *m, struct m_tag *t)
   294 {
   296 	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
   297 }
   299 /*
   300  * Reclaim resources associated with a tag.
   301  */
   302 static __inline void
   303 m_tag_free(struct m_tag *t)
   304 {
   306 	(*t->m_tag_free)(t);
   307 }
   309 /*
   310  * Set up the contents of a tag.  Note that this does not fill in the free
   311  * method; the caller is expected to do that.
   312  *
   313  * XXX probably should be called m_tag_init, but that was already taken.
   314  */
   315 static __inline void
   316 m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
   317 {
   319 	t->m_tag_id = type;
   320 	t->m_tag_len = len;
   321 	t->m_tag_cookie = cookie;
   322 }
   324 /************ End functions from user_mbuf.h  ******************/
   328 /************ End functions to substitute umem_cache_alloc and umem_cache_free **************/
   330 /* __Userspace__
   331  * TODO: mbuf_init must be called in the initialization routines
   332  * of the userspace stack.
   333  */
   334 void
   335 mbuf_init(void *dummy)
   336 {
   338 	/*
   339 	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
   340 	 * (TODO: m_getcl() - using packet secondary zone).
   341 	 * There is no provision for trash_init and trash_fini in umem.
   342 	 *
   343 	 */
   344  /* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
   345 				mb_ctor_mbuf, mb_dtor_mbuf, NULL,
   346 				&mbuf_mb_args,
   347 				NULL, 0);
   348 	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
   349 #if defined(SCTP_SIMPLE_ALLOCATOR)
   350 	SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
   351 #else
   352 	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
   353 	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
   354 	                              NULL,
   355 	                              NULL, 0);
   356 #endif
   357 	/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
   358 				NULL, NULL, NULL,
   359 				NULL,
   360 				NULL, 0);*/
   361 	SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);
   363   /*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
   364 				 mb_ctor_clust, mb_dtor_clust, NULL,
   365 				 &clust_mb_args,
   366 				 NULL, 0);
   367 	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL,0);*/
   368 #if defined(SCTP_SIMPLE_ALLOCATOR)
   369 	SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
   370 #else
   371 	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
   372 								   mb_ctor_clust, mb_dtor_clust, NULL,
   373 								   &clust_mb_args,
   374 								   NULL, 0);
   375 #endif
   377 	/* uma_prealloc() goes here... */
   379 	/* __Userspace__ Add umem_reap here for low memory situation?
   380 	 *
   381 	 */
   384 	/*
   385 	 * [Re]set counters and local statistics knobs.
   386 	 *
   387 	 */
   389 	mbstat.m_mbufs = 0;
   390 	mbstat.m_mclusts = 0;
   391 	mbstat.m_drain = 0;
   392 	mbstat.m_msize = MSIZE;
   393 	mbstat.m_mclbytes = MCLBYTES;
   394 	mbstat.m_minclsize = MINCLSIZE;
   395 	mbstat.m_mlen = MLEN;
   396 	mbstat.m_mhlen = MHLEN;
   397 	mbstat.m_numtypes = MT_NTYPES;
   399 	mbstat.m_mcfail = mbstat.m_mpfail = 0;
   400 	mbstat.sf_iocnt = 0;
   401 	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
   403 }
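
/*
 * Usage sketch: mbuf_init() must run exactly once, before the first
 * m_get()/m_gethdr()/m_clget() call, e.g. from the stack bring-up path.
 */
#if 0
	mbuf_init(NULL);	/* the 'dummy' argument is unused */
#endif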
   407 /*
   408  * __Userspace__
   409  *
   410  * Constructor for Mbuf master zone. We have a different constructor
   411  * for allocating the cluster.
   412  *
   413  * The 'arg' pointer points to a mb_args structure which
   414  * contains call-specific information required to support the
   415  * mbuf allocation API.  See user_mbuf.h.
   416  *
   417  * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
   418  * was passed when umem_cache_alloc was called.
   419  * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
   420  * and call umem_nofail_callback(my_failure_handler) in the stack initialization routines
   421  * The advantage of using UMEM_NOFAIL is that we don't have to check if umem_cache_alloc
   422  * was successful or not. The failure handler would take care of it, if we use the UMEM_NOFAIL
   423  * flag.
   424  *
   425  * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc)
   426  * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
   427  * It also mentions that umem_nofail_callback is Evolving.
   428  *
   429  */
   430 static int
   431 mb_ctor_mbuf(void *mem, void *arg, int flgs)
   432 {
   433 #if USING_MBUF_CONSTRUCTOR
   434 	struct mbuf *m;
   435 	struct mb_args *args;
   437 	int flags;
   438 	short type;
   440 	m = (struct mbuf *)mem;
   441 	args = (struct mb_args *)arg;
   442 	flags = args->flags;
   443 	type = args->type;
   445 	/*
   446 	 * The mbuf is initialized later.
   447 	 *
   448 	 */
   449 	if (type == MT_NOINIT)
   450 		return (0);
   452 	m->m_next = NULL;
   453 	m->m_nextpkt = NULL;
   454 	m->m_len = 0;
   455 	m->m_flags = flags;
   456 	m->m_type = type;
   457 	if (flags & M_PKTHDR) {
   458 		m->m_data = m->m_pktdat;
   459 		m->m_pkthdr.rcvif = NULL;
   460 		m->m_pkthdr.len = 0;
   461 		m->m_pkthdr.header = NULL;
   462 		m->m_pkthdr.csum_flags = 0;
   463 		m->m_pkthdr.csum_data = 0;
   464 		m->m_pkthdr.tso_segsz = 0;
   465 		m->m_pkthdr.ether_vtag = 0;
   466 		SLIST_INIT(&m->m_pkthdr.tags);
   467 	} else
   468 		m->m_data = m->m_dat;
   469 #endif
   470 	return (0);
   471 }
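
/*
 * Sketch of the UMEM_NOFAIL approach from the TODO above. The callback
 * names follow the libumem documentation referenced there; verify them
 * against the bundled umem.h before relying on this.
 */
#if 0
static int
my_failure_handler(void)
{
	/* release cached memory here, then ask umem to retry */
	return (UMEM_CALLBACK_RETRY);
}
/* ... and during stack initialization: umem_nofail_callback(my_failure_handler); */
#endif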
   474 /*
   475  * __Userspace__
   476  * The Mbuf master zone destructor.
   477  * This would be called in response to umem_cache_destroy
   478  * TODO: Recheck if this is what we want to do in this destructor.
   479  * (Note: the number of times mb_dtor_mbuf is called is equal to the
   480  * number of individual mbufs allocated from zone_mbuf.)
   481  */
   482 static void
   483 mb_dtor_mbuf(void *mem, void *arg)
   484 {
   485 	struct mbuf *m;
   487 	m = (struct mbuf *)mem;
   488 	if ((m->m_flags & M_PKTHDR) != 0) {
   489 		m_tag_delete_chain(m, NULL);
   490 	}
   491 }
   494 /* __Userspace__
   495  * The Cluster zone constructor.
   496  *
   497  * Here the 'arg' pointer points to the Mbuf which we
   498  * are configuring cluster storage for.  If 'arg' is
   499  * empty we allocate just the cluster without setting
   500  * the mbuf to it.  See mbuf.h.
   501  */
   502 static int
   503 mb_ctor_clust(void *mem, void *arg, int flgs)
   504 {
   506 #if USING_MBUF_CONSTRUCTOR
   507 	struct mbuf *m;
   508 	struct clust_args * cla;
   509 	u_int *refcnt;
   510 	int type, size;
   511 	sctp_zone_t zone;
   513 	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
   514 	type = EXT_CLUSTER;
   515 	zone = zone_clust;
   516 	size = MCLBYTES;
   518 	cla = (struct clust_args *)arg;
   519 	m = cla->parent_mbuf;
   521 	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
   522 	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
   523 	*refcnt = 1;
   525 	if (m != NULL) {
   526 		m->m_ext.ext_buf = (caddr_t)mem;
   527 		m->m_data = m->m_ext.ext_buf;
   528 		m->m_flags |= M_EXT;
   529 		m->m_ext.ext_free = NULL;
   530 		m->m_ext.ext_args = NULL;
   531 		m->m_ext.ext_size = size;
   532 		m->m_ext.ext_type = type;
   533 		m->m_ext.ref_cnt = refcnt;
   534 	}
   535 #endif
   536 	return (0);
   537 }
   539 /* __Userspace__ */
   540 static void
   541 mb_dtor_clust(void *mem, void *arg)
   542 {
   544   /* mem is of type caddr_t.  In sys/types.h we have typedef char * caddr_t;  */
   545   /* mb_dtor_clust is called at time of umem_cache_destroy() (the number of times
   546    * mb_dtor_clust is called is equal to the number of individual mbufs allocated
   547    * from zone_clust. Similarly for mb_dtor_mbuf).
   548    * At this point the following:
   549    *  struct mbuf *m;
   550    *   m = (struct mbuf *)arg;
   551    *  assert (*(m->m_ext.ref_cnt) == 0); is not meaningful since  m->m_ext.ref_cnt = NULL;
   552    *  has been done in mb_free_ext().
   553    */
   555 }
   560 /* Unlink and free a packet tag. */
   561 void
   562 m_tag_delete(struct mbuf *m, struct m_tag *t)
   563 {
   564 	KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
   565 	m_tag_unlink(m, t);
   566 	m_tag_free(t);
   567 }
   570 /* Unlink and free a packet tag chain, starting from given tag. */
   571 void
   572 m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
   573 {
   575 	struct m_tag *p, *q;
   577 	KASSERT(m, ("m_tag_delete_chain: null mbuf"));
   578 	if (t != NULL)
   579 		p = t;
   580 	else
   581 		p = SLIST_FIRST(&m->m_pkthdr.tags);
   582 	if (p == NULL)
   583 		return;
   584 	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
   585 		m_tag_delete(m, q);
   586 	m_tag_delete(m, p);
   587 }
   589 #if 0
   590 static void
   591 sctp_print_mbuf_chain(struct mbuf *m)
   592 {
   593 	SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
   594 	for(; m; m=m->m_next) {
   595 		SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
   596 		if (m->m_flags & M_EXT)
   597 			SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
   598 	}
   599 }
   600 #endif
   602 /*
   603  * Free an entire chain of mbufs and associated external buffers, if
   604  * applicable.
   605  */
   606 void
   607 m_freem(struct mbuf *mb)
   608 {
   609 	while (mb != NULL)
   610 		mb = m_free(mb);
   611 }
   613 /*
   614  * __Userspace__
   615  * clean mbufs with M_EXT storage attached to them
   616  * if the reference count hits 1.
   617  */
   618 void
   619 mb_free_ext(struct mbuf *m)
   620 {
   622 	int skipmbuf;
   624 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
   625 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
   627 	/*
   628 	 * check if the header is embedded in the cluster
   629 	 */
   630 	skipmbuf = (m->m_flags & M_NOFREE);
   632 	/* Free the external attached storage if this
   633 	 * mbuf is the only reference to it.
   634 	 *__Userspace__ TODO: jumbo frames
   635 	 *
   636 	*/
   637 	/* NOTE: We previously had the same code that
   638 	         SCTP_DECREMENT_AND_CHECK_REFCOUNT reduces to, but the IPHONE
   639 	         malloc commit changed the comparison to 0 instead of 1 (see
   640 	         next line). Why? That change caused a huge memory leak on Linux.
   641 	*/
   642 #ifdef IPHONE
   643 	if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
   644 #else
   645 	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
   646 #endif
   647 	{
   648 		if (m->m_ext.ext_type == EXT_CLUSTER){
   649 #if defined(SCTP_SIMPLE_ALLOCATOR)
   650 			mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
   651 #endif
   652 			SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
   653 			SCTP_ZONE_FREE(zone_ext_refcnt, (u_int*)m->m_ext.ref_cnt);
   654 			m->m_ext.ref_cnt = NULL;
   655 		}
   656 	}
   658 	if (skipmbuf)
   659 		return;
   662 	/* __Userspace__ Also freeing the storage for ref_cnt
   663 	 * Free this mbuf back to the mbuf zone with all m_ext
   664 	 * information purged.
   665 	 */
   666 	m->m_ext.ext_buf = NULL;
   667 	m->m_ext.ext_free = NULL;
   668 	m->m_ext.ext_args = NULL;
   669 	m->m_ext.ref_cnt = NULL;
   670 	m->m_ext.ext_size = 0;
   671 	m->m_ext.ext_type = 0;
   672 	m->m_flags &= ~M_EXT;
   673 #if defined(SCTP_SIMPLE_ALLOCATOR)
   674 	mb_dtor_mbuf(m, NULL);
   675 #endif
   676 	SCTP_ZONE_FREE(zone_mbuf, m);
   678 	/*umem_cache_free(zone_mbuf, m);*/
   679 }
   681 /*
   682  * "Move" mbuf pkthdr from "from" to "to".
   683  * "from" must have M_PKTHDR set, and "to" must be empty.
   684  */
   685 void
   686 m_move_pkthdr(struct mbuf *to, struct mbuf *from)
   687 {
   689 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
   690 	if ((to->m_flags & M_EXT) == 0)
   691 		to->m_data = to->m_pktdat;
   692 	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
   693 	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
   694 	from->m_flags &= ~M_PKTHDR;
   695 }
   698 /*
   699  * Rearrange an mbuf chain so that len bytes are contiguous
   700  * and in the data area of an mbuf (so that mtod and dtom
   701  * will work for a structure of size len).  Returns the resulting
   702  * mbuf chain on success, frees it and returns null on failure.
   703  * If there is room, it will add up to max_protohdr-len extra bytes to the
   704  * contiguous region in an attempt to avoid being called next time.
   705  */
   706 struct mbuf *
   707 m_pullup(struct mbuf *n, int len)
   708 {
   709 	struct mbuf *m;
   710 	int count;
   711 	int space;
   713 	/*
   714 	 * If first mbuf has no cluster, and has room for len bytes
   715 	 * without shifting current data, pullup into it,
   716 	 * otherwise allocate a new mbuf to prepend to the chain.
   717 	 */
   718 	if ((n->m_flags & M_EXT) == 0 &&
   719 	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
   720 		if (n->m_len >= len)
   721 			return (n);
   722 		m = n;
   723 		n = n->m_next;
   724 		len -= m->m_len;
   725 	} else {
   726 		if (len > MHLEN)
   727 			goto bad;
   728 		MGET(m, M_NOWAIT, n->m_type);
   729 		if (m == NULL)
   730 			goto bad;
   731 		m->m_len = 0;
   732 		if (n->m_flags & M_PKTHDR)
   733 			M_MOVE_PKTHDR(m, n);
   734 	}
   735 	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
   736 	do {
   737 		count = min(min(max(len, max_protohdr), space), n->m_len);
   738 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
   739 		  (u_int)count);
   740 		len -= count;
   741 		m->m_len += count;
   742 		n->m_len -= count;
   743 		space -= count;
   744 		if (n->m_len)
   745 			n->m_data += count;
   746 		else
   747 			n = m_free(n);
   748 	} while (len > 0 && n);
   749 	if (len > 0) {
   750 		(void) m_free(m);
   751 		goto bad;
   752 	}
   753 	m->m_next = n;
   754 	return (m);
   755 bad:
   756 	m_freem(n);
   757 	mbstat.m_mpfail++;	/* XXX: No consistency. */
   758 	return (NULL);
   759 }
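
/*
 * Usage sketch (hypothetical helper): make the first bytes contiguous
 * before reading through mtod(); on failure the chain has already been
 * freed by m_pullup().
 */
#if 0
static void
example_pullup(struct mbuf *m)
{
	u_int32_t value;

	if ((m = m_pullup(m, (int)sizeof(u_int32_t))) == NULL)
		return;		/* chain already freed */
	value = *mtod(m, u_int32_t *);
	(void)value;
	m_freem(m);
}
#endif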
   762 static struct mbuf *
   763 m_dup1(struct mbuf *m, int off, int len, int wait)
   764 {
   765 	struct mbuf *n = NULL;
   766 	int copyhdr;
   768 	if (len > MCLBYTES)
   769 		return NULL;
   770 	if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
   771 		copyhdr = 1;
   772 	else
   773 		copyhdr = 0;
   774 	if (len >= MINCLSIZE) {
   775 		if (copyhdr == 1)
   776 			n = m_gethdr(wait, m->m_type);
   777 		else
   778 			n = m_get(wait, m->m_type);
   779 		if (n != NULL) m_clget(n, wait); /* attach a cluster; the header is copied below */
   780 	} else {
   781 		if (copyhdr == 1)
   782 			n = m_gethdr(wait, m->m_type);
   783 		else
   784 			n = m_get(wait, m->m_type);
   785 	}
   786 	if (!n)
   787 		return NULL; /* ENOBUFS */
   789 	if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
   790 		m_free(n);
   791 		return NULL;
   792 	}
   793 	m_copydata(m, off, len, mtod(n, caddr_t));
   794 	n->m_len = len;
   795 	return n;
   796 }
   799 /* Taken from sys/kern/uipc_mbuf2.c */
   800 struct mbuf *
   801 m_pulldown(struct mbuf *m, int off, int len, int *offp)
   802 {
   803 	struct mbuf *n, *o;
   804 	int hlen, tlen, olen;
   805 	int writable;
   807 	/* check invalid arguments. */
   808 	KASSERT(m, ("m == NULL in m_pulldown()"));
   809 	if (len > MCLBYTES) {
   810 		m_freem(m);
   811 		return NULL;    /* impossible */
   812 	}
   814 #ifdef PULLDOWN_DEBUG
   815 	{
   816 		struct mbuf *t;
   817 		SCTPDBG(SCTP_DEBUG_USR, "before:");
   818 		for (t = m; t; t = t->m_next)
   819 			SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len);
   820 		SCTPDBG(SCTP_DEBUG_USR, "\n");
   821 	}
   822 #endif
   823 	n = m;
   824 	while (n != NULL && off > 0) {
   825 		if (n->m_len > off)
   826 			break;
   827 		off -= n->m_len;
   828 		n = n->m_next;
   829 	}
   830 	/* be sure to point non-empty mbuf */
   831 	while (n != NULL && n->m_len == 0)
   832 		n = n->m_next;
   833 	if (!n) {
   834 		m_freem(m);
   835 		return NULL;    /* mbuf chain too short */
   836 	}
   838 	writable = 0;
   839 	if ((n->m_flags & M_EXT) == 0 ||
   840 	    (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
   841 		writable = 1;
   843 	/*
   844 	 * the target data is on <n, off>.
   845 	 * if we got enough data on the mbuf "n", we're done.
   846 	 */
   847 	if ((off == 0 || offp) && len <= n->m_len - off && writable)
   848 		goto ok;
   850 	/*
   851 	 * when len <= n->m_len - off and off != 0, it is a special case.
   852 	 * len bytes from <n, off> sits in single mbuf, but the caller does
   853 	 * not like the starting position (off).
   854 	 * chop the current mbuf into two pieces, set off to 0.
   855 	 */
   856 	if (len <= n->m_len - off) {
   857 		o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
   858 		if (o == NULL) {
   859 			m_freem(m);
   860 		return NULL;    /* ENOBUFS */
   861 		}
   862 		n->m_len = off;
   863 		o->m_next = n->m_next;
   864 		n->m_next = o;
   865 		n = n->m_next;
   866 		off = 0;
   867 		goto ok;
   868 	}
   869 	/*
   870 	 * we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
   871 	 * and construct contiguous mbuf with m_len == len.
   872 	 * note that hlen + tlen == len, and tlen > 0.
   873 	 */
   874 	hlen = n->m_len - off;
   875 	tlen = len - hlen;
   877 	/*
   878 	 * ensure that we have enough trailing data on mbuf chain.
   879 	 * if not, we can do nothing about the chain.
   880 	 */
   881 	olen = 0;
   882 	for (o = n->m_next; o != NULL; o = o->m_next)
   883 		olen += o->m_len;
   884 	if (hlen + olen < len) {
   885 		m_freem(m);
   886 		return NULL;    /* mbuf chain too short */
   887 	}
   889 	/*
   890 	 * easy cases first.
   891 	 * we need to use m_copydata() to get data from <n->m_next, 0>.
   892 	 */
   893 	if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
   894 	    && writable) {
   895 		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
   896 		n->m_len += tlen;
   897 		m_adj(n->m_next, tlen);
   898 		goto ok;
   899 	}
   901 	if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
   902 	    && writable) {
   903 		n->m_next->m_data -= hlen;
   904 		n->m_next->m_len += hlen;
   905 		bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
   906 		n->m_len -= hlen;
   907 		n = n->m_next;
   908 		off = 0;
   909 		goto ok;
   910 	}
   912 	/*
   913 	 * now, we need to do the hard way.  don't m_copy as there's no room
   914 	 * on both end.
   915 	 */
   916 	/* get an mbuf and, for lengths beyond MLEN, attach cluster storage */
   917 	o = m_get(M_NOWAIT, m->m_type);
   918 	if (o != NULL && len > MLEN)
   919 		m_clget(o, M_NOWAIT);
   920 		/* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
   921 	if (!o) {
   922 		m_freem(m);
   923 		return NULL;    /* ENOBUFS */
   924 	}
   925 	/* get hlen from <n, off> into <o, 0> */
   926 	o->m_len = hlen;
   927 	bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
   928 	n->m_len -= hlen;
   929 	/* get tlen from <n->m_next, 0> into <o, hlen> */
   930 	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
   931 	o->m_len += tlen;
   932 	m_adj(n->m_next, tlen);
   933 	o->m_next = n->m_next;
   934 	n->m_next = o;
   935 	n = o;
   936 	off = 0;
   937 ok:
   938 #ifdef PULLDOWN_DEBUG
   939 	{
   940 		struct mbuf *t;
   941 		SCTPDBG(SCTP_DEBUG_USR, "after:");
   942 		for (t = m; t; t = t->m_next)
   943 			SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
   944 		SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off);
   945 	}
   946 #endif
   947 	if (offp)
   948 		*offp = off;
   949 	return n;
   950 }
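
/*
 * Usage sketch (hypothetical helper): unlike m_pullup(), m_pulldown()
 * exposes contiguous bytes at an arbitrary offset; the data starts
 * 'off2' bytes into the returned mbuf, not at its head.
 */
#if 0
static u_int32_t
example_pulldown(struct mbuf *m, int off)
{
	struct mbuf *n;
	int off2;

	n = m_pulldown(m, off, (int)sizeof(u_int32_t), &off2);
	if (n == NULL)
		return 0;	/* chain already freed */
	return *(u_int32_t *)(mtod(n, caddr_t) + off2);
}
#endif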
   952 /*
   953  * Attach the cluster from *m to *n, set up m_ext in *n
   954  * and bump the refcount of the cluster.
   955  */
   956 static void
   957 mb_dupcl(struct mbuf *n, struct mbuf *m)
   958 {
   959 	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
   960 	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
   961 	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
   963 	if (*(m->m_ext.ref_cnt) == 1)
   964 		*(m->m_ext.ref_cnt) += 1;
   965 	else
   966 		atomic_add_int(m->m_ext.ref_cnt, 1);
   967 	n->m_ext.ext_buf = m->m_ext.ext_buf;
   968 	n->m_ext.ext_free = m->m_ext.ext_free;
   969 	n->m_ext.ext_args = m->m_ext.ext_args;
   970 	n->m_ext.ext_size = m->m_ext.ext_size;
   971 	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
   972 	n->m_ext.ext_type = m->m_ext.ext_type;
   973 	n->m_flags |= M_EXT;
   974 }
   977 /*
   978  * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
   979  * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
   980  * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
   981  * Note that the copy is read-only, because clusters are not copied,
   982  * only their reference counts are incremented.
   983  */
   985 struct mbuf *
   986 m_copym(struct mbuf *m, int off0, int len, int wait)
   987 {
   988 	struct mbuf *n, **np;
   988 	struct mbuf *n, **np;
   989 	int off = off0;
   990 	struct mbuf *top;
   991 	int copyhdr = 0;
   993 	KASSERT(off >= 0, ("m_copym, negative off %d", off));
   994 	KASSERT(len >= 0, ("m_copym, negative len %d", len));
   996 	if (off == 0 && m->m_flags & M_PKTHDR)
   997 		copyhdr = 1;
   998 	while (off > 0) {
   999 		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
  1000 		if (off < m->m_len)
  1001 			break;
  1002 		off -= m->m_len;
  1003 		m = m->m_next;
  1004 	}
  1005 	np = &top;
  1006 	top = 0;
  1007 	while (len > 0) {
  1008 		if (m == NULL) {
  1009 			KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
  1010 			break;
  1011 		}
  1012 		if (copyhdr)
  1013 			MGETHDR(n, wait, m->m_type);
  1014 		else
  1015 			MGET(n, wait, m->m_type);
  1016 		*np = n;
  1017 		if (n == NULL)
  1018 			goto nospace;
  1019 		if (copyhdr) {
  1020 			if (!m_dup_pkthdr(n, m, wait))
  1021 				goto nospace;
  1022 			if (len == M_COPYALL)
  1023 				n->m_pkthdr.len -= off0;
  1024 			else
  1025 				n->m_pkthdr.len = len;
  1026 			copyhdr = 0;
  1027 		}
  1028 		n->m_len = min(len, m->m_len - off);
  1029 		if (m->m_flags & M_EXT) {
  1030 			n->m_data = m->m_data + off;
  1031 			mb_dupcl(n, m);
  1032 		} else
  1033 			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
  1034 			    (u_int)n->m_len);
  1035 		if (len != M_COPYALL)
  1036 			len -= n->m_len;
  1037 		off = 0;
  1038 		m = m->m_next;
  1039 		np = &n->m_next;
  1040 	}
  1041 	if (top == NULL)
  1042 		mbstat.m_mcfail++;	/* XXX: No consistency. */
  1044 	return (top);
  1045 nospace:
  1046 	m_freem(top);
  1047 	mbstat.m_mcfail++;	/* XXX: No consistency. */
  1048 	return (NULL);
  1049 }
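
/*
 * Usage sketch (hypothetical helper): a read-only copy of a whole chain;
 * cluster-backed data is shared via mb_dupcl()/ref_cnt rather than
 * duplicated, hence the read-only caveat above.
 */
#if 0
static void
example_copym(struct mbuf *m)
{
	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
	if (copy != NULL) {
		/* inspect or transmit copy; check M_WRITABLE() before writing */
		m_freem(copy);
	}
}
#endif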
  1052 int
  1053 m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
  1054 {
  1055 	struct m_tag *p, *t, *tprev = NULL;
  1057 	KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
  1058 	m_tag_delete_chain(to, NULL);
  1059 	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
  1060 		t = m_tag_copy(p, how);
  1061 		if (t == NULL) {
  1062 			m_tag_delete_chain(to, NULL);
  1063 			return 0;
  1064 		}
  1065 		if (tprev == NULL)
  1066 			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
  1067 		else
  1068 			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
  1069 		tprev = t;
  1070 	}
  1071 	return 1;
  1072 }
  1074 /*
  1075  * Duplicate "from"'s mbuf pkthdr in "to".
  1076  * "from" must have M_PKTHDR set, and "to" must be empty.
  1077  * In particular, this does a deep copy of the packet tags.
  1078  */
  1079 int
  1080 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
  1081 {
  1083 	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
  1084 	if ((to->m_flags & M_EXT) == 0)
  1085 		to->m_data = to->m_pktdat;
  1086 	to->m_pkthdr = from->m_pkthdr;
  1087 	SLIST_INIT(&to->m_pkthdr.tags);
  1088 	return (m_tag_copy_chain(to, from, MBTOM(how)));
  1089 }
  1091 /* Copy a single tag. */
  1092 struct m_tag *
  1093 m_tag_copy(struct m_tag *t, int how)
  1094 {
  1095 	struct m_tag *p;
  1097 	KASSERT(t, ("m_tag_copy: null tag"));
  1098 	p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
  1099 	if (p == NULL)
  1100 		return (NULL);
  1101 	bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
  1102 	return p;
  1103 }
  1105 /* Get a packet tag structure along with specified data following. */
  1106 struct m_tag *
  1107 m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
  1108 {
  1109 	struct m_tag *t;
  1111 	if (len < 0)
  1112 		return NULL;
  1113 	t = malloc(len + sizeof(struct m_tag));
  1114 	if (t == NULL)
  1115 		return NULL;
  1116 	m_tag_setup(t, cookie, type, len);
  1117 	t->m_tag_free = m_tag_free_default;
  1118 	return t;
  1119 }
  1121 /* Free a packet tag. */
  1122 void
  1123 m_tag_free_default(struct m_tag *t)
  1124 {
  1125 	free(t);
  1126 }
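
/*
 * Usage sketch for the tag API (hypothetical helper; 'cookie' and the tag
 * type 0 are placeholder values): allocate a tag, link it into a packet
 * header the same way m_tag_copy_chain() does above, and let
 * m_tag_delete_chain() reclaim everything.
 */
#if 0
static void
example_tags(struct mbuf *m, u_int32_t cookie)
{
	struct m_tag *t;

	t = m_tag_alloc(cookie, 0, (int)sizeof(u_int32_t), M_NOWAIT);
	if (t != NULL) {
		*(u_int32_t *)(t + 1) = 42;	/* payload follows the tag header */
		SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
	}
	m_tag_delete_chain(m, NULL);		/* unlinks and frees every tag */
}
#endif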
  1128 /*
  1129  * Copy data from a buffer back into the indicated mbuf chain,
  1130  * starting "off" bytes from the beginning, extending the mbuf
  1131  * chain if necessary.
  1132  */
  1133 void
  1134 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
  1135 {
  1136 	int mlen;
  1137 	struct mbuf *m = m0, *n;
  1138 	int totlen = 0;
  1140 	if (m0 == NULL)
  1141 		return;
  1142 	while (off > (mlen = m->m_len)) {
  1143 		off -= mlen;
  1144 		totlen += mlen;
  1145 		if (m->m_next == NULL) {
  1146 			n = m_get(M_NOWAIT, m->m_type);
  1147 			if (n == NULL)
  1148 				goto out;
  1149 			bzero(mtod(n, caddr_t), MLEN);
  1150 			n->m_len = min(MLEN, len + off);
  1151 			m->m_next = n;
  1152 		}
  1153 		m = m->m_next;
  1154 	}
  1155 	while (len > 0) {
  1156 		mlen = min (m->m_len - off, len);
  1157 		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
  1158 		cp += mlen;
  1159 		len -= mlen;
  1160 		mlen += off;
  1161 		off = 0;
  1162 		totlen += mlen;
  1163 		if (len == 0)
  1164 			break;
  1165 		if (m->m_next == NULL) {
  1166 			n = m_get(M_NOWAIT, m->m_type);
  1167 			if (n == NULL)
  1168 				break;
  1169 			n->m_len = min(MLEN, len);
  1170 			m->m_next = n;
  1171 		}
  1172 		m = m->m_next;
  1173 	}
  1174 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
  1175 		m->m_pkthdr.len = totlen;
  1176 }
  1179 /*
  1180  * Lesser-used path for M_PREPEND:
  1181  * allocate new mbuf to prepend to chain,
  1182  * copy junk along.
  1183  */
  1184 struct mbuf *
  1185 m_prepend(struct mbuf *m, int len, int how)
  1186 {
  1187 	struct mbuf *mn;
  1189 	if (m->m_flags & M_PKTHDR)
  1190 		MGETHDR(mn, how, m->m_type);
  1191 	else
  1192 		MGET(mn, how, m->m_type);
  1193 	if (mn == NULL) {
  1194 		m_freem(m);
  1195 		return (NULL);
  1196 	}
  1197 	if (m->m_flags & M_PKTHDR)
  1198 		M_MOVE_PKTHDR(mn, m);
  1199 	mn->m_next = m;
  1200 	m = mn;
  1201 	if(m->m_flags & M_PKTHDR) {
  1202 		if (len < MHLEN)
  1203 			MH_ALIGN(m, len);
  1204 	} else {
  1205 		if (len < MLEN)
  1206 			M_ALIGN(m, len);
  1207 	}
  1208 	m->m_len = len;
  1209 	return (m);
  1210 }
  1212 /*
  1213  * Copy data from an mbuf chain starting "off" bytes from the beginning,
  1214  * continuing for "len" bytes, into the indicated buffer.
  1215  */
  1216 void
  1217 m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
  1218 {
  1219 	u_int count;
  1221 	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
  1222 	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
  1223 	while (off > 0) {
  1224 		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
  1225 		if (off < m->m_len)
  1226 			break;
  1227 		off -= m->m_len;
  1228 		m = m->m_next;
  1229 	}
  1230 	while (len > 0) {
  1231 		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
  1232 		count = min(m->m_len - off, len);
  1233 		bcopy(mtod(m, caddr_t) + off, cp, count);
  1234 		len -= count;
  1235 		cp += count;
  1236 		off = 0;
  1237 		m = m->m_next;
  1238 	}
  1239 }
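
/*
 * Usage sketch (hypothetical helper, assuming the chain holds at least
 * sizeof(buf) bytes): m_copydata() flattens part of a chain into a caller
 * buffer, m_copyback() writes it back in the other direction.
 */
#if 0
static void
example_copy(struct mbuf *m)
{
	char buf[64];

	m_copydata(m, 0, (int)sizeof(buf), (caddr_t)buf);	/* chain -> buf */
	m_copyback(m, 0, (int)sizeof(buf), (caddr_t)buf);	/* buf -> chain */
}
#endif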
  1242 /*
  1243  * Concatenate mbuf chain n to m.
  1244  * Both chains must be of the same type (e.g. MT_DATA).
  1245  * Any m_pkthdr is not updated.
  1246  */
  1247 void
  1248 m_cat(struct mbuf *m, struct mbuf *n)
  1249 {
  1250 	while (m->m_next)
  1251 		m = m->m_next;
  1252 	while (n) {
  1253 		if (m->m_flags & M_EXT ||
  1254 		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
  1255 			/* just join the two chains */
  1256 			m->m_next = n;
  1257 			return;
  1258 		}
  1259 		/* splat the data from one into the other */
  1260 		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, (u_int)n->m_len);
  1261 		m->m_len += n->m_len;
  1262 		n = m_free(n);
  1263 	}
  1264 }
  1267 void
  1268 m_adj(struct mbuf *mp, int req_len)
  1269 {
  1270 	int len = req_len;
  1271 	struct mbuf *m;
  1272 	int count;
  1274 	if ((m = mp) == NULL)
  1275 		return;
  1276 	if (len >= 0) {
  1277 		/*
  1278 		 * Trim from head.
  1279 		 */
  1280 		while (m != NULL && len > 0) {
  1281 			if (m->m_len <= len) {
  1282 				len -= m->m_len;
  1283 				m->m_len = 0;
  1284 				m = m->m_next;
  1285 			} else {
  1286 				m->m_len -= len;
  1287 				m->m_data += len;
  1288 				len = 0;
  1289 			}
  1290 		}
  1291 		m = mp;
  1292 		if (mp->m_flags & M_PKTHDR)
  1293 			m->m_pkthdr.len -= (req_len - len);
  1294 	} else {
  1295 		/*
  1296 		 * Trim from tail.  Scan the mbuf chain,
  1297 		 * calculating its length and finding the last mbuf.
  1298 		 * If the adjustment only affects this mbuf, then just
  1299 		 * adjust and return.  Otherwise, rescan and truncate
  1300 		 * after the remaining size.
  1301 		 */
  1302 		len = -len;
  1303 		count = 0;
  1304 		for (;;) {
  1305 			count += m->m_len;
  1306 			if (m->m_next == (struct mbuf *)0)
  1307 				break;
  1308 			m = m->m_next;
  1309 		}
  1310 		if (m->m_len >= len) {
  1311 			m->m_len -= len;
  1312 			if (mp->m_flags & M_PKTHDR)
  1313 				mp->m_pkthdr.len -= len;
  1314 			return;
  1315 		}
  1316 		count -= len;
  1317 		if (count < 0)
  1318 			count = 0;
  1319 		/*
  1320 		 * Correct length for chain is "count".
  1321 		 * Find the mbuf with last data, adjust its length,
  1322 		 * and toss data from remaining mbufs on chain.
  1323 		 */
  1324 		m = mp;
  1325 		if (m->m_flags & M_PKTHDR)
  1326 			m->m_pkthdr.len = count;
  1327 		for (; m; m = m->m_next) {
  1328 			if (m->m_len >= count) {
  1329 				m->m_len = count;
  1330 				if (m->m_next != NULL) {
  1331 					m_freem(m->m_next);
  1332 					m->m_next = NULL;
  1333 				}
  1334 				break;
  1335 			}
  1336 			count -= m->m_len;
  1337 		}
  1338 	}
  1339 }
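
/*
 * Usage sketch: positive lengths trim from the head, negative lengths
 * from the tail; m_pkthdr.len is kept in sync when M_PKTHDR is set.
 */
#if 0
	m_adj(m, 4);	/* drop the first 4 bytes of the chain */
	m_adj(m, -4);	/* drop the last 4 bytes of the chain */
#endif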
  1342 /* m_split is used within sctp_handle_cookie_echo. */
  1344 /*
  1345  * Partition an mbuf chain in two pieces, returning the tail --
  1346  * all but the first len0 bytes.  In case of failure, it returns NULL and
  1347  * attempts to restore the chain to its original state.
  1348  *
  1349  * Note that the resulting mbufs might be read-only, because the new
  1350  * mbuf can end up sharing an mbuf cluster with the original mbuf if
  1351  * the "breaking point" happens to lie within a cluster mbuf. Use the
  1352  * M_WRITABLE() macro to check for this case.
  1353  */
  1354 struct mbuf *
  1355 m_split(struct mbuf *m0, int len0, int wait)
  1356 {
  1357 	struct mbuf *m, *n;
  1358 	u_int len = len0, remain;
  1360 	/* MBUF_CHECKSLEEP(wait); */
  1361 	for (m = m0; m && (int)len > m->m_len; m = m->m_next)
  1362 		len -= m->m_len;
  1363 	if (m == NULL)
  1364 		return (NULL);
  1365 	remain = m->m_len - len;
  1366 	if (m0->m_flags & M_PKTHDR) {
  1367 		MGETHDR(n, wait, m0->m_type);
  1368 		if (n == NULL)
  1369 			return (NULL);
  1370 		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
  1371 		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
  1372 		m0->m_pkthdr.len = len0;
  1373 		if (m->m_flags & M_EXT)
  1374 			goto extpacket;
  1375 		if (remain > MHLEN) {
  1376 			/* m can't be the lead packet */
  1377 			MH_ALIGN(n, 0);
  1378 			n->m_next = m_split(m, len, wait);
  1379 			if (n->m_next == NULL) {
  1380 				(void) m_free(n);
  1381 				return (NULL);
  1382 			} else {
  1383 				n->m_len = 0;
  1384 				return (n);
  1385 			}
  1386 		} else
  1387 			MH_ALIGN(n, remain);
  1388 	} else if (remain == 0) {
  1389 		n = m->m_next;
  1390 		m->m_next = NULL;
  1391 		return (n);
  1392 	} else {
  1393 		MGET(n, wait, m->m_type);
  1394 		if (n == NULL)
  1395 			return (NULL);
  1396 		M_ALIGN(n, remain);
  1397 	}
  1398 extpacket:
  1399 	if (m->m_flags & M_EXT) {
  1400 		n->m_data = m->m_data + len;
  1401 		mb_dupcl(n, m);
  1402 	} else {
  1403 		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
  1404 	}
  1405 	n->m_len = remain;
  1406 	m->m_len = len;
  1407 	n->m_next = m->m_next;
  1408 	m->m_next = NULL;
  1409 	return (n);
  1410 }
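
/*
 * Usage sketch (hypothetical helper; the split point 100 is arbitrary):
 * the head keeps len0 bytes, the returned tail holds the rest. Both
 * pieces may share a cluster afterwards, hence the M_WRITABLE() caveat
 * above.
 */
#if 0
static void
example_split(struct mbuf *m)
{
	struct mbuf *tail;

	tail = m_split(m, 100, M_NOWAIT);
	if (tail == NULL)
		return;		/* m is left intact on failure */
	m_freem(tail);
	m_freem(m);
}
#endif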
  1415 int
  1416 pack_send_buffer(caddr_t buffer, struct mbuf* mb){
  1418 	int count_to_copy;
  1419 	int total_count_copied = 0;
  1420 	int offset = 0;
  1422 	do {
  1423 		count_to_copy = mb->m_len;
  1424 		bcopy(mtod(mb, caddr_t), buffer+offset, count_to_copy);
  1425 		offset += count_to_copy;
  1426 		total_count_copied += count_to_copy;
  1427 		mb = mb->m_next;
  1428 	} while(mb);
  1430 	return (total_count_copied);
  1431 }
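
/*
 * Usage sketch (hypothetical helper): pack_send_buffer() copies every
 * mbuf unconditionally, so the destination must be at least as large as
 * the whole chain (m_pkthdr.len assumes an M_PKTHDR chain).
 */
#if 0
static void
example_pack(struct mbuf *mb)
{
	char *buf;

	buf = malloc(mb->m_pkthdr.len);
	if (buf != NULL) {
		int copied = pack_send_buffer((caddr_t)buf, mb);

		(void)copied;
		free(buf);
	}
}
#endif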
