--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/netwerk/sctp/src/user_mbuf.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,1431 @@
/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
 * We are initializing two zones for Mbufs and Clusters.
 *
 */

#include <stdio.h>
#include <string.h>
/* #include <sys/param.h> This defines MSIZE 256 */
#if !defined(SCTP_SIMPLE_ALLOCATOR)
#include "umem.h"
#endif
#include "user_mbuf.h"
#include "user_environment.h"
#include "user_atomic.h"
#include "netinet/sctp_pcb.h"

struct mbstat mbstat;
#define KIPC_MAX_LINKHDR	4	/* int: max length of link header (see sys/sysctl.h) */
#define KIPC_MAX_PROTOHDR	5	/* int: max length of network header (see sys/sysctl.h) */
int max_linkhdr = KIPC_MAX_LINKHDR;
int max_protohdr = KIPC_MAX_PROTOHDR;	/* Size of largest protocol layer header. */

/*
 * Zones from which we allocate.
 */
sctp_zone_t	zone_mbuf;
sctp_zone_t	zone_clust;
sctp_zone_t	zone_ext_refcnt;

/* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
 * and mb_dtor_clust.
 * Note: I had to use struct clust_args as an encapsulation for an mbuf pointer.
 * struct mbuf *clust_mb_args; does not work.
 */
struct clust_args clust_mb_args;

/* __Userspace__
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, void *, int);
static int	mb_ctor_clust(void *, void *, int);
static void	mb_dtor_mbuf(void *, void *);
static void	mb_dtor_clust(void *, void *);


/***************** Functions taken from user_mbuf.h *************/

static int
mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
{
	int flags = pkthdr;

	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	} else
		m->m_data = m->m_dat;

	return (0);
}

/* __Userspace__ */
struct mbuf *
m_get(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following setter function is not yet being enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
	 * mb_dtor_mbuf. See comment there.
	 */
	mbuf_mb_args.flags = 0;
	mbuf_mb_args.type = type;
#endif
	/* The mbuf master zone, zone_mbuf, has already been
	 * created in mbuf_init(). */
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/

	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is
	 * not applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before returning it.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (mret->m_type != type) {
			mbuf_constructor_dup(mret, 0, type);
		}
#else
		mbuf_constructor_dup(mret, 0, type);
#endif
	}
	return (mret);
}


/* __Userspace__ */
struct mbuf *
m_gethdr(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following setter function is not yet being enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until I have thoroughly tested
	 * mb_dtor_mbuf. See comment there.
	 */
	mbuf_mb_args.flags = M_PKTHDR;
	mbuf_mb_args.type = type;
#endif
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is
	 * not applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before returning it.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (!((mret->m_flags & M_PKTHDR) && (mret->m_type == type))) {
			mbuf_constructor_dup(mret, M_PKTHDR, type);
		}
#else
		mbuf_constructor_dup(mret, M_PKTHDR, type);
#endif
	}
	return (mret);
}
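
/* __Userspace__ Illustration only (not part of the original file): a minimal
 * sketch of how callers are expected to use m_get()/m_gethdr(). Both can
 * return NULL, so the result must always be checked. Disabled with #if 0.
 */
#if 0
static void
example_mbuf_alloc(void)
{
	struct mbuf *m, *mh;

	m = m_get(M_NOWAIT, MT_DATA);		/* plain data mbuf */
	mh = m_gethdr(M_NOWAIT, MT_DATA);	/* mbuf with M_PKTHDR set */
	if (m != NULL)
		m_free(m);
	if (mh != NULL)
		m_free(mh);
}
#endif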

/* __Userspace__ */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else if ((m->m_flags & M_NOFREE) == 0) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
		mb_dtor_mbuf(m, NULL);
#endif
		SCTP_ZONE_FREE(zone_mbuf, m);
	}
	/*umem_cache_free(zone_mbuf, m);*/
	return (n);
}


static int
clust_constructor_dup(caddr_t m_clust, struct mbuf *m)
{
	u_int *refcnt;
	int type, size;

	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	size = MCLBYTES;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	if (refcnt == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
		umem_reap();
#endif
		refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
		/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	}
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)m_clust;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}



/* __Userspace__ */
void
m_clget(struct mbuf *m, int how)
{
	caddr_t mclust_ret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct clust_args clust_mb_args;
#endif
	if (m->m_flags & M_EXT) {
		SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
	}
	m->m_ext.ext_buf = (char *)NULL;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	clust_mb_args.parent_mbuf = m;
#endif
	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
#endif
	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
	/*
	 * On a cluster allocation failure, call umem_reap() and retry.
	 */
	if (mclust_ret == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
		umem_reap();
		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#endif
		/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
		if (mclust_ret == NULL) {
			SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
		}
	}

#if USING_MBUF_CONSTRUCTOR
	if (m->m_ext.ext_buf == NULL) {
		clust_constructor_dup(mclust_ret, m);
	}
#else
	clust_constructor_dup(mclust_ret, m);
#endif
}
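
/* __Userspace__ Illustration only (not part of the original file): a minimal
 * sketch of attaching cluster storage with m_clget(). Note that in this
 * implementation clust_constructor_dup() sets M_EXT even when the cluster
 * allocation failed, so ext_buf must be checked as well before use.
 */
#if 0
static void
example_cluster_attach(void)
{
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);
	if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_buf == NULL) {
		m_free(m);	/* no cluster could be attached */
		return;
	}
	memset(mtod(m, caddr_t), 0, MCLBYTES);
	m->m_len = MCLBYTES;
	m_freem(m);		/* frees the mbuf and its cluster */
}
#endif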

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{

	(*t->m_tag_free)(t);
}

/*
 * Set up the contents of a tag.  Note that this does not fill in the free
 * method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
{

	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}

/************ End functions from user_mbuf.h ******************/



/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/

/* __Userspace__
 * TODO: mbuf_init must be called in the initialization routines
 * of the userspace stack.
 */
void
mbuf_init(void *dummy)
{

	/*
	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
	 * (TODO: m_getcl() - using packet secondary zone.)
	 * There is no provision for trash_init and trash_fini in umem.
	 *
	 */
	/* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                                 mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	                                 &mbuf_mb_args,
	                                 NULL, 0);
	   zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
#else
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	                              NULL,
	                              NULL, 0);
#endif
	/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
	                                      NULL, NULL, NULL,
	                                      NULL,
	                                      NULL, 0);*/
	SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);

	/*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                                 mb_ctor_clust, mb_dtor_clust, NULL,
	                                 &clust_mb_args,
	                                 NULL, 0);
	  zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
#else
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                               mb_ctor_clust, mb_dtor_clust, NULL,
	                               &clust_mb_args,
	                               NULL, 0);
#endif

	/* uma_prealloc() goes here... */

	/* __Userspace__ Add umem_reap here for low memory situations?
	 *
	 */


	/*
	 * [Re]set counters and local statistics knobs.
	 *
	 */

	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}



/*
 * __Userspace__
 *
 * Constructor for the Mbuf master zone. We have a different constructor
 * for allocating the cluster.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See user_mbuf.h.
 *
 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on what
 * was passed when umem_cache_alloc was called.
 * TODO: Use UMEM_NOFAIL in umem_cache_alloc and also define a failure handler
 * and call umem_nofail_callback(my_failure_handler) in the stack initialization
 * routines. The advantage of using UMEM_NOFAIL is that we don't have to check
 * whether umem_cache_alloc was successful or not. The failure handler takes
 * care of it, if we use the UMEM_NOFAIL flag.
 *
 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc
 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL callback.
 * It also mentions that umem_nofail_callback is Evolving.
 *
 */
static int
mb_ctor_mbuf(void *mem, void *arg, int flgs)
{
#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct mb_args *args;

	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.
	 *
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	} else
		m->m_data = m->m_dat;
#endif
	return (0);
}


/*
 * __Userspace__
 * The Mbuf master zone destructor.
 * This would be called in response to umem_cache_destroy().
 * TODO: Recheck if this is what we want to do in this destructor.
 * (Note: the number of times mb_dtor_mbuf is called is equal to the
 * number of individual mbufs allocated from zone_mbuf.)
 */
static void
mb_dtor_mbuf(void *mem, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0) {
		m_tag_delete_chain(m, NULL);
	}
}


/* __Userspace__
 * The Cluster zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, void *arg, int flgs)
{

#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct clust_args *cla;
	u_int *refcnt;
	int type, size;
	sctp_zone_t zone;

	/* Assigning cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	zone = zone_clust;
	size = MCLBYTES;

	cla = (struct clust_args *)arg;
	m = cla->parent_mbuf;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	*refcnt = 1;

	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}
#endif
	return (0);
}

/* __Userspace__ */
static void
mb_dtor_clust(void *mem, void *arg)
{

	/* mem is of type caddr_t. In sys/types.h we have typedef char *caddr_t; */
	/* mb_dtor_clust is called at the time of umem_cache_destroy(). (The
	 * number of times mb_dtor_clust is called is equal to the number of
	 * individual mbufs allocated from zone_clust; similarly for
	 * mb_dtor_mbuf.)  At this point, a check such as:
	 *	struct mbuf *m;
	 *	m = (struct mbuf *)arg;
	 *	assert(*(m->m_ext.ref_cnt) == 0);
	 * is not meaningful, since m->m_ext.ref_cnt = NULL has already been
	 * done in mb_free_ext().
	 */
}




/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
	m_tag_unlink(m, t);
	m_tag_free(t);
}


/* Unlink and free a packet tag chain, starting from given tag. */
void
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
{
	struct m_tag *p, *q;

	KASSERT(m, ("m_tag_delete_chain: null mbuf"));
	if (t != NULL)
		p = t;
	else
		p = SLIST_FIRST(&m->m_pkthdr.tags);
	if (p == NULL)
		return;
	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
		m_tag_delete(m, q);
	m_tag_delete(m, p);
}

#if 0
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
	for (; m; m = m->m_next) {
		SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %d, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
		if (m->m_flags & M_EXT)
			SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
	}
}
#endif
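
/* __Userspace__ Illustration only (not part of the original file): per the
 * TODO on mbuf_init() above, the userspace stack is expected to call it
 * exactly once during initialization, before any mbuf allocation. A
 * hypothetical init routine might look like this.
 */
#if 0
static void
example_stack_init(void)
{
	mbuf_init(NULL);	/* the argument is unused */
	/* ... zone_mbuf, zone_clust and zone_ext_refcnt are now usable ... */
}
#endif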
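
/* __Userspace__ Illustration only (not part of the original file): a sketch
 * of the UMEM_NOFAIL scheme mentioned in the mb_ctor_mbuf comment above,
 * assuming the libumem API (umem_nofail_callback(3MALLOC) and
 * UMEM_CALLBACK_RETRY). The handler name is hypothetical.
 */
#if 0
static int
example_nofail_handler(void)
{
	umem_reap();			/* try to reclaim memory ... */
	return (UMEM_CALLBACK_RETRY);	/* ... and retry the allocation */
}

static void
example_register_nofail(void)
{
	/* process-wide handler; umem_cache_alloc(..., UMEM_NOFAIL)
	 * would then never return NULL */
	umem_nofail_callback(example_nofail_handler);
}
#endif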

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = m_free(mb);
}

/*
 * __Userspace__
 * Clean mbufs with M_EXT storage attached to them
 * if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free the externally attached storage if this
	 * mbuf is the only reference to it.
	 *__Userspace__ TODO: jumbo frames
	 *
	 */
	/* NOTE: We had the same code that SCTP_DECREMENT_AND_CHECK_REFCOUNT
	 * reduces to here before, but the IPHONE malloc commit had changed
	 * this to compare to 0 instead of 1 (see next line). Why?
	 * ... this caused a huge memory leak on Linux.
	 */
#ifdef IPHONE
	if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
#else
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
#endif
	{
		if (m->m_ext.ext_type == EXT_CLUSTER) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
			mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
#endif
			SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
			SCTP_ZONE_FREE(zone_ext_refcnt, (u_int *)m->m_ext.ref_cnt);
			m->m_ext.ref_cnt = NULL;
		}
	}

	if (skipmbuf)
		return;


	/* __Userspace__ Also freeing the storage for ref_cnt.
	 * Free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_dtor_mbuf(m, NULL);
#endif
	SCTP_ZONE_FREE(zone_mbuf, m);

	/*umem_cache_free(zone_mbuf, m);*/
}

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}
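
/* __Userspace__ Illustration only (not part of the original file): a sketch
 * of the reference counting that mb_free_ext() implements. Copies made by
 * m_copym() (below) share the cluster and bump *ref_cnt; each free drops it,
 * and the cluster returns to zone_clust only when the count reaches zero.
 */
#if 0
static void
example_shared_cluster(void)
{
	struct mbuf *m, *copy;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);
	if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_buf == NULL) {
		m_free(m);
		return;
	}
	m->m_len = 10;
	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);	/* *ref_cnt -> 2 */
	if (copy != NULL)
		m_freem(copy);				/* *ref_cnt -> 1 */
	m_freem(m);		/* *ref_cnt -> 0, cluster freed */
}
#endif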

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		      (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void)m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}


static struct mbuf *
m_dup1(struct mbuf *m, int off, int len, int wait)
{
	struct mbuf *n = NULL;
	int copyhdr;

	if (len > MCLBYTES)
		return (NULL);
	if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
		copyhdr = 1;
	else
		copyhdr = 0;
	/* Allocate the mbuf first; the original code called m_clget() on a
	 * NULL pointer here, which would dereference NULL. */
	if (copyhdr == 1)
		n = m_gethdr(wait, m->m_type);
	else
		n = m_get(wait, m->m_type);
	if (!n)
		return (NULL);	/* ENOBUFS */
	if (len >= MINCLSIZE) {
		/* attach cluster storage for the larger copies */
		m_clget(n, wait);
		if ((n->m_flags & M_EXT) == 0 || n->m_ext.ext_buf == NULL) {
			m_free(n);
			return (NULL);
		}
	}

	if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
		m_free(n);
		return (NULL);
	}
	m_copydata(m, off, len, mtod(n, caddr_t));
	n->m_len = len;
	return (n);
}
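
/* __Userspace__ Illustration only (not part of the original file): the
 * classic m_pullup() pattern - make the first sizeof(hdr) bytes of a chain
 * contiguous before casting with mtod(). The header struct is hypothetical.
 */
#if 0
struct example_hdr {
	u_int32_t field1;
	u_int32_t field2;
};

static struct example_hdr *
example_parse_hdr(struct mbuf **mp)
{
	if ((*mp)->m_len < (int)sizeof(struct example_hdr)) {
		*mp = m_pullup(*mp, sizeof(struct example_hdr));
		if (*mp == NULL)
			return (NULL);	/* chain was freed by m_pullup() */
	}
	return (mtod(*mp, struct example_hdr *));
}
#endif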

/* Taken from sys/kern/uipc_mbuf2.c */
struct mbuf *
m_pulldown(struct mbuf *m, int off, int len, int *offp)
{
	struct mbuf *n, *o;
	int hlen, tlen, olen;
	int writable;

	/* Check invalid arguments. */
	KASSERT(m, ("m == NULL in m_pulldown()"));
	if (len > MCLBYTES) {
		m_freem(m);
		return (NULL);	/* impossible */
	}

#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;

		SCTPDBG(SCTP_DEBUG_USR, "before:");
		for (t = m; t; t = t->m_next)
			SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len);
		SCTPDBG(SCTP_DEBUG_USR, "\n");
	}
#endif
	n = m;
	while (n != NULL && off > 0) {
		if (n->m_len > off)
			break;
		off -= n->m_len;
		n = n->m_next;
	}
	/* Be sure to point at a non-empty mbuf. */
	while (n != NULL && n->m_len == 0)
		n = n->m_next;
	if (!n) {
		m_freem(m);
		return (NULL);	/* mbuf chain too short */
	}

	writable = 0;
	if ((n->m_flags & M_EXT) == 0 ||
	    (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
		writable = 1;

	/*
	 * The target data is on <n, off>.
	 * If we got enough data on the mbuf "n", we're done.
	 */
	if ((off == 0 || offp) && len <= n->m_len - off && writable)
		goto ok;

	/*
	 * When len <= n->m_len - off and off != 0, it is a special case.
	 * len bytes from <n, off> sit in a single mbuf, but the caller does
	 * not like the starting position (off).
	 * Chop the current mbuf into two pieces, set off to 0.
	 */
	if (len <= n->m_len - off) {
		o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
		if (o == NULL) {
			m_freem(m);
			return (NULL);	/* ENOBUFS */
		}
		n->m_len = off;
		o->m_next = n->m_next;
		n->m_next = o;
		n = n->m_next;
		off = 0;
		goto ok;
	}
	/*
	 * We need to take hlen from <n, off> and tlen from <n->m_next, 0>,
	 * and construct a contiguous mbuf with m_len == len.
	 * Note that hlen + tlen == len, and tlen > 0.
	 */
	hlen = n->m_len - off;
	tlen = len - hlen;

	/*
	 * Ensure that we have enough trailing data on the mbuf chain.
	 * If not, we can do nothing about the chain.
	 */
	olen = 0;
	for (o = n->m_next; o != NULL; o = o->m_next)
		olen += o->m_len;
	if (hlen + olen < len) {
		m_freem(m);
		return (NULL);	/* mbuf chain too short */
	}

	/*
	 * Easy cases first.
	 * We need to use m_copydata() to get data from <n->m_next, 0>.
	 */
	if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen &&
	    writable) {
		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
		n->m_len += tlen;
		m_adj(n->m_next, tlen);
		goto ok;
	}

	if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
	    writable) {
		n->m_next->m_data -= hlen;
		n->m_next->m_len += hlen;
		bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
		n->m_len -= hlen;
		n = n->m_next;
		off = 0;
		goto ok;
	}

	/*
	 * Now we need to do it the hard way.  Don't m_copy as there's no
	 * room on either end.
	 */
	if (len > MLEN) {
		/* Allocate an mbuf and attach cluster storage to it; the
		 * original code called m_clget() on an uninitialized
		 * pointer here. */
		o = m_get(M_NOWAIT, m->m_type);
		if (o != NULL) {
			m_clget(o, M_NOWAIT);
			if ((o->m_flags & M_EXT) == 0 ||
			    o->m_ext.ext_buf == NULL) {
				m_free(o);
				o = NULL;
			}
		}
		/* o = m_getcl(M_NOWAIT, m->m_type, 0); */
	} else {
		o = m_get(M_NOWAIT, m->m_type);
	}
	if (!o) {
		m_freem(m);
		return (NULL);	/* ENOBUFS */
	}
	/* get hlen from <n, off> into <o, 0> */
	o->m_len = hlen;
	bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
	n->m_len -= hlen;
	/* get tlen from <n->m_next, 0> into <o, hlen> */
	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
	o->m_len += tlen;
	m_adj(n->m_next, tlen);
	o->m_next = n->m_next;
	n->m_next = o;
	n = o;
	off = 0;
ok:
#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;

		SCTPDBG(SCTP_DEBUG_USR, "after:");
		for (t = m; t; t = t->m_next)
			SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
		SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off);
	}
#endif
	if (offp)
		*offp = off;
	return (n);
}

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_args = m->m_ext.ext_args;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}
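
/* __Userspace__ Illustration only (not part of the original file): typical
 * KAME-style use of m_pulldown() - obtain a pointer to len contiguous bytes
 * at offset off anywhere in a chain. Unlike m_pullup(), the contiguous
 * region may start in the middle of the returned mbuf, at *offp.
 */
#if 0
static caddr_t
example_pulldown(struct mbuf **mp, int off, int len)
{
	struct mbuf *n;
	int newoff;

	n = m_pulldown(*mp, off, len, &newoff);
	if (n == NULL) {
		*mp = NULL;	/* the chain was already freed on failure */
		return (NULL);
	}
	return (mtod(n, caddr_t) + newoff);
}
#endif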

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			      (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}


int
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
{
	struct m_tag *p, *t, *tprev = NULL;

	KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
	m_tag_delete_chain(to, NULL);
	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
		t = m_tag_copy(p, how);
		if (t == NULL) {
			m_tag_delete_chain(to, NULL);
			return 0;
		}
		if (tprev == NULL)
			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
		else
			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
		tprev = t;
	}
	return 1;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}
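
/* __Userspace__ Illustration only (not part of the original file): m_copym()
 * yields a read-only copy; cluster data is shared via mb_dupcl(), not
 * duplicated, so a writer must check M_WRITABLE() first.
 */
#if 0
static void
example_copym(struct mbuf *m)
{
	struct mbuf *copy;

	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);
	if (copy == NULL)
		return;
	/* treat 'copy' as read-only: its clusters are shared with 'm' */
	m_freem(copy);
}
#endif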

/* Copy a single tag. */
struct m_tag *
m_tag_copy(struct m_tag *t, int how)
{
	struct m_tag *p;

	KASSERT(t, ("m_tag_copy: null tag"));
	p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
	if (p == NULL)
		return (NULL);
	bcopy(t + 1, p + 1, t->m_tag_len);	/* Copy the data */
	return (p);
}

/* Get a packet tag structure along with specified data following. */
struct m_tag *
m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
{
	struct m_tag *t;

	if (len < 0)
		return (NULL);
	t = malloc(len + sizeof(struct m_tag));
	if (t == NULL)
		return (NULL);
	m_tag_setup(t, cookie, type, len);
	t->m_tag_free = m_tag_free_default;
	return (t);
}

/* Free a packet tag. */
void
m_tag_free_default(struct m_tag *t)
{
	free(t);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}


/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}
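
/* __Userspace__ Illustration only (not part of the original file): a sketch
 * of allocating a tag and linking it onto a packet header the same way
 * m_tag_copy_chain() does. The cookie and type values are hypothetical.
 */
#if 0
static void
example_tags(void)
{
	struct mbuf *m;
	struct m_tag *t;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	t = m_tag_alloc(0x12345678, 1, 4, M_NOWAIT);	/* 4 bytes of data follow the tag */
	if (t != NULL)
		SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
	m_tag_delete_chain(m, NULL);	/* unlinks and frees every tag */
	m_freem(m);
}
#endif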

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}


/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}


void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
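
/* __Userspace__ Illustration only (not part of the original file): reading
 * bytes out of a chain with m_copydata() and then trimming them off the
 * front with m_adj(); a negative length would trim from the tail instead.
 */
#if 0
static void
example_copy_and_trim(struct mbuf *m, int off, int len, caddr_t buf)
{
	m_copydata(m, off, len, buf);	/* caller guarantees off+len bytes exist */
	m_adj(m, off + len);		/* drop the consumed bytes */
}
#endif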

/* m_split is used within sctp_handle_cookie_echo. */

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	/* MBUF_CHECKSLEEP(wait); */
	for (m = m0; m && (int)len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void)m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}




int
pack_send_buffer(caddr_t buffer, struct mbuf *mb)
{
	int count_to_copy;
	int total_count_copied = 0;
	int offset = 0;

	do {
		count_to_copy = mb->m_len;
		bcopy(mtod(mb, caddr_t), buffer + offset, count_to_copy);
		offset += count_to_copy;
		total_count_copied += count_to_copy;
		mb = mb->m_next;
	} while (mb);

	return (total_count_copied);
}
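
/* __Userspace__ Illustration only (not part of the original file):
 * pack_send_buffer() flattens a whole chain into a caller-provided buffer
 * and performs no bounds checking, so the buffer must be at least as large
 * as the chain (m_pkthdr.len for a packet-header chain).
 */
#if 0
static void
example_pack(struct mbuf *m)
{
	char buffer[MCLBYTES];
	int copied;

	if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.len <= (int)sizeof(buffer)) {
		copied = pack_send_buffer(buffer, m);
		SCTPDBG(SCTP_DEBUG_USR, "packed %d bytes\n", copied);
	}
}
#endif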