netwerk/sctp/src/user_mbuf.c

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Thu, 15 Jan 2015 21:03:48 +0100
branch       TOR_BUG_9701
changeset    11:deefc01c0e14
permissions  -rwxr-xr-x

Integrate friendly tips from Tor colleagues to make (or not) 4.5 alpha 3;
this includes removal of overloaded (but unused) methods, and addition of
an overlooked call to DataStruct::SetData(nsISupports, uint32_t, bool).

/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 *	All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * __Userspace__ version of /usr/src/sys/kern/kern_mbuf.c
 * We are initializing two zones for Mbufs and Clusters.
 *
 */

#include <stdio.h>
#include <stdlib.h>	/* for malloc()/free() used by the m_tag allocator below */
#include <string.h>
/* #include <sys/param.h> This defines MSIZE 256 */
#if !defined(SCTP_SIMPLE_ALLOCATOR)
#include "umem.h"
#endif
#include "user_mbuf.h"
#include "user_environment.h"
#include "user_atomic.h"
#include "netinet/sctp_pcb.h"

struct mbstat mbstat;
#define KIPC_MAX_LINKHDR 4	/* int: max length of link header (see sys/sysctl.h) */
#define KIPC_MAX_PROTOHDR 5	/* int: max length of network header (see sys/sysctl.h) */
int max_linkhdr = KIPC_MAX_LINKHDR;
int max_protohdr = KIPC_MAX_PROTOHDR; /* Size of largest protocol layer header. */
/*
 * Zones from which we allocate.
 */
sctp_zone_t zone_mbuf;
sctp_zone_t zone_clust;
sctp_zone_t zone_ext_refcnt;

/* __Userspace__ clust_mb_args will be passed as callback data to mb_ctor_clust
 * and mb_dtor_clust.
 * Note: struct clust_args is used as an encapsulation for an mbuf pointer;
 * a bare struct mbuf *clust_mb_args does not work here.
 */
struct clust_args clust_mb_args;


/* __Userspace__
 * Local prototypes.
 */
static int mb_ctor_mbuf(void *, void *, int);
static int mb_ctor_clust(void *, void *, int);
static void mb_dtor_mbuf(void *, void *);
static void mb_dtor_clust(void *, void *);


/***************** Functions taken from user_mbuf.h *************/

static int mbuf_constructor_dup(struct mbuf *m, int pkthdr, short type)
{
	int flags = pkthdr;

	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	} else
		m->m_data = m->m_dat;

	return (0);
}

/* __Userspace__ */
struct mbuf *
m_get(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following setter code is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested. See the comment there.
	 */
	mbuf_mb_args.flags = 0;
	mbuf_mb_args.type = type;
#endif
	/* The mbuf master zone, zone_mbuf, has already been
	 * created in mbuf_init(). */
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/

	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is
	 * not applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before returning it.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (mret->m_type != type) {
			mbuf_constructor_dup(mret, 0, type);
		}
#else
		mbuf_constructor_dup(mret, 0, type);
#endif
	}
	return mret;
}


/* __Userspace__ */
struct mbuf *
m_gethdr(int how, short type)
{
	struct mbuf *mret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct mb_args mbuf_mb_args;

	/* The following setter code is not yet enclosed within
	 * #if USING_MBUF_CONSTRUCTOR - #endif, until mb_dtor_mbuf has been
	 * thoroughly tested. See the comment there.
	 */
	mbuf_mb_args.flags = M_PKTHDR;
	mbuf_mb_args.type = type;
#endif
	mret = SCTP_ZONE_GET(zone_mbuf, struct mbuf);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_mbuf(mret, &mbuf_mb_args, 0);
#endif
	/*mret = ((struct mbuf *)umem_cache_alloc(zone_mbuf, UMEM_DEFAULT));*/
	/* There are cases when an object is available in the current CPU's
	 * loaded magazine, and in those cases the object's constructor is
	 * not applied. If that is the case, we duplicate the constructor
	 * initialization here, so that the mbuf is properly constructed
	 * before returning it.
	 */
	if (mret) {
#if USING_MBUF_CONSTRUCTOR
		if (!((mret->m_flags & M_PKTHDR) && (mret->m_type == type))) {
			mbuf_constructor_dup(mret, M_PKTHDR, type);
		}
#else
		mbuf_constructor_dup(mret, M_PKTHDR, type);
#endif
	}
	return mret;
}

/* __Userspace__ */
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else if ((m->m_flags & M_NOFREE) == 0) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
		mb_dtor_mbuf(m, NULL);
#endif
		SCTP_ZONE_FREE(zone_mbuf, m);
	}
	/*umem_cache_free(zone_mbuf, m);*/
	return (n);
}
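
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): a minimal allocate/use/free round trip with the getters above.
 * Assumes mbuf_init() has already created zone_mbuf.
 */
#if 0
static void
example_mbuf_roundtrip(void)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);	/* plain data mbuf, no pkthdr */
	if (m == NULL)
		return;
	m->m_len = 4;
	memcpy(m->m_data, "abcd", 4);	/* small payloads fit in m_dat[] */
	m_free(m);			/* returns m->m_next, NULL here */
}
#endif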


static int clust_constructor_dup(caddr_t m_clust, struct mbuf *m)
{
	u_int *refcnt;
	int type, size;

	/* Assigning a cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	size = MCLBYTES;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	if (refcnt == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
		umem_reap();
#endif
		refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
		/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	}
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)m_clust;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}



/* __Userspace__ */
void
m_clget(struct mbuf *m, int how)
{
	caddr_t mclust_ret;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	struct clust_args clust_mb_args;
#endif
	if (m->m_flags & M_EXT) {
		SCTPDBG(SCTP_DEBUG_USR, "%s: %p mbuf already has cluster\n", __func__, (void *)m);
	}
	m->m_ext.ext_buf = (char *)NULL;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	clust_mb_args.parent_mbuf = m;
#endif
	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
#endif
	/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
	/*
	 * On a cluster allocation failure, call umem_reap() and retry.
	 */
	if (mclust_ret == NULL) {
#if !defined(SCTP_SIMPLE_ALLOCATOR)
	/*	mclust_ret = SCTP_ZONE_GET(zone_clust, char);
		mb_ctor_clust(mclust_ret, &clust_mb_args, 0);
	#else*/
		umem_reap();
		mclust_ret = SCTP_ZONE_GET(zone_clust, char);
#endif
		/*mclust_ret = umem_cache_alloc(zone_clust, UMEM_DEFAULT);*/
		if (NULL == mclust_ret) {
			SCTPDBG(SCTP_DEBUG_USR, "Memory allocation failure in %s\n", __func__);
		}
	}

#if USING_MBUF_CONSTRUCTOR
	if (m->m_ext.ext_buf == NULL) {
		clust_constructor_dup(mclust_ret, m);
	}
#else
	clust_constructor_dup(mclust_ret, m);
#endif
}
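
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): attaching a cluster for payloads larger than MLEN. In this port,
 * checking m_ext.ext_buf is the reliable failure test, since the M_EXT
 * flag can be set even when no cluster was obtained.
 */
#if 0
static void
example_cluster_attach(void)
{
	struct mbuf *m;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);
	if (m->m_ext.ext_buf == NULL) {
		m->m_flags &= ~M_EXT;	/* conservative recovery for this sketch */
		m_free(m);
		return;
	}
	m->m_len = MCLBYTES;		/* up to MCLBYTES are now contiguous */
	memset(m->m_data, 0, MCLBYTES);
	m_free(m);			/* also drops the cluster reference */
}
#endif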

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{

	(*t->m_tag_free)(t);
}

/*
 * Set up the contents of a tag. Note that this does not fill in the free
 * method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
{

	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}

/************ End functions from user_mbuf.h ******************/



/************ End functions to substitute umem_cache_alloc and umem_cache_free **************/

/* __Userspace__
 * TODO: mbuf_init must be called in the initialization routines
 * of the userspace stack.
 */
void
mbuf_init(void *dummy)
{

	/*
	 * __Userspace__ Configure UMA zones for Mbufs and Clusters.
	 * (TODO: m_getcl() - using packet secondary zone).
	 * There is no provision for trash_init and trash_fini in umem.
	 *
	 */
	/* zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
				mb_ctor_mbuf, mb_dtor_mbuf, NULL,
				&mbuf_mb_args,
				NULL, 0);
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_mbuf, MBUF_MEM_NAME, MSIZE, 0);
#else
	zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
	                              mb_ctor_mbuf, mb_dtor_mbuf, NULL,
	                              NULL,
	                              NULL, 0);
#endif
	/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
				NULL, NULL, NULL,
				NULL,
				NULL, 0);*/
	SCTP_ZONE_INIT(zone_ext_refcnt, MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0);

	/*zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
				mb_ctor_clust, mb_dtor_clust, NULL,
				&clust_mb_args,
				NULL, 0);
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0, NULL, NULL, NULL, NULL, NULL, 0);*/
#if defined(SCTP_SIMPLE_ALLOCATOR)
	SCTP_ZONE_INIT(zone_clust, MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0);
#else
	zone_clust = umem_cache_create(MBUF_CLUSTER_MEM_NAME, MCLBYTES, 0,
	                               mb_ctor_clust, mb_dtor_clust, NULL,
	                               &clust_mb_args,
	                               NULL, 0);
#endif

	/* uma_prealloc() goes here... */

	/* __Userspace__ Add umem_reap here for low memory situations?
	 *
	 */


	/*
	 * [Re]set counters and local statistics knobs.
	 *
	 */

	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
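
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): mbuf_init() must run before the first m_get()/m_clget(), since
 * those pull from zone_mbuf/zone_clust. The surrounding function name is
 * hypothetical.
 */
#if 0
static void
example_stack_bringup(void)
{
	mbuf_init(NULL);	/* the argument is unused ("dummy") */
	/* ... remaining userspace SCTP initialization ... */
}
#endif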



/*
 * __Userspace__
 *
 * Constructor for the Mbuf master zone. We have a different constructor
 * for allocating the cluster.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API. See user_mbuf.h.
 *
 * The flgs parameter below can be UMEM_DEFAULT or UMEM_NOFAIL depending on
 * what was passed when umem_cache_alloc was called.
 * TODO: Use UMEM_NOFAIL in umem_cache_alloc, and also define a failure
 * handler and call umem_nofail_callback(my_failure_handler) in the stack
 * initialization routines. The advantage of using UMEM_NOFAIL is that we
 * don't have to check whether umem_cache_alloc was successful or not; the
 * failure handler takes care of it, if we use the UMEM_NOFAIL flag.
 *
 * NOTE Ref: http://docs.sun.com/app/docs/doc/819-2243/6n4i099p2?l=en&a=view&q=umem_zalloc
 * The umem_nofail_callback() function sets the **process-wide** UMEM_NOFAIL
 * callback. It also mentions that umem_nofail_callback is Evolving.
 *
 */
static int
mb_ctor_mbuf(void *mem, void *arg, int flgs)
{
#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct mb_args *args;

	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.
	 *
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
	} else
		m->m_data = m->m_dat;
#endif
	return (0);
}


/*
 * __Userspace__
 * The Mbuf master zone destructor.
 * This would be called in response to umem_cache_destroy().
 * TODO: Recheck if this is what we want to do in this destructor.
 * (Note: the number of times mb_dtor_mbuf is called is equal to the
 * number of individual mbufs allocated from zone_mbuf.)
 */
static void
mb_dtor_mbuf(void *mem, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0) {
		m_tag_delete_chain(m, NULL);
	}
}


/* __Userspace__
 * The Cluster zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for. If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it. See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, void *arg, int flgs)
{

#if USING_MBUF_CONSTRUCTOR
	struct mbuf *m;
	struct clust_args *cla;
	u_int *refcnt;
	int type, size;
	sctp_zone_t zone;

	/* Assigning a cluster of MCLBYTES. TODO: Add jumbo frame functionality */
	type = EXT_CLUSTER;
	zone = zone_clust;
	size = MCLBYTES;

	cla = (struct clust_args *)arg;
	m = cla->parent_mbuf;

	refcnt = SCTP_ZONE_GET(zone_ext_refcnt, u_int);
	/*refcnt = (u_int *)umem_cache_alloc(zone_ext_refcnt, UMEM_DEFAULT);*/
	*refcnt = 1;

	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_args = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}
#endif
	return (0);
}

/* __Userspace__ */
static void
mb_dtor_clust(void *mem, void *arg)
{

	/* mem is of type caddr_t. In sys/types.h we have typedef char *caddr_t; */
	/* mb_dtor_clust is called at the time of umem_cache_destroy() (the number
	 * of times mb_dtor_clust is called is equal to the number of individual
	 * mbufs allocated from zone_clust; similarly for mb_dtor_mbuf).
	 * At this point, code such as:
	 *   struct mbuf *m;
	 *   m = (struct mbuf *)arg;
	 *   assert(*(m->m_ext.ref_cnt) == 0);
	 * is not meaningful, since m->m_ext.ref_cnt = NULL has already been
	 * done in mb_free_ext().
	 */
}




/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
	KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", (void *)m, (void *)t));
	m_tag_unlink(m, t);
	m_tag_free(t);
}


/* Unlink and free a packet tag chain, starting from a given tag. */
void
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
{
	struct m_tag *p, *q;

	KASSERT(m, ("m_tag_delete_chain: null mbuf"));
	if (t != NULL)
		p = t;
	else
		p = SLIST_FIRST(&m->m_pkthdr.tags);
	if (p == NULL)
		return;
	while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
		m_tag_delete(m, q);
	m_tag_delete(m, p);
}

#if 0
static void
sctp_print_mbuf_chain(struct mbuf *m)
{
	SCTPDBG(SCTP_DEBUG_USR, "Printing mbuf chain %p.\n", (void *)m);
	for (; m; m = m->m_next) {
		SCTPDBG(SCTP_DEBUG_USR, "%p: m_len = %ld, m_type = %x, m_next = %p.\n", (void *)m, m->m_len, m->m_type, (void *)m->m_next);
		if (m->m_flags & M_EXT)
			SCTPDBG(SCTP_DEBUG_USR, "%p: extend_size = %d, extend_buffer = %p, ref_cnt = %d.\n", (void *)m, m->m_ext.ext_size, (void *)m->m_ext.ext_buf, *(m->m_ext.ref_cnt));
	}
}
#endif

/*
 * Free an entire chain of mbufs and associated external buffers, if
 * applicable.
 */
void
m_freem(struct mbuf *mb)
{
	while (mb != NULL)
		mb = m_free(mb);
}

/*
 * __Userspace__
 * Clean mbufs with M_EXT storage attached to them
 * if the reference count hits 1.
 */
void
mb_free_ext(struct mbuf *m)
{
	int skipmbuf;

	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));

	/*
	 * Check if the header is embedded in the cluster.
	 */
	skipmbuf = (m->m_flags & M_NOFREE);

	/* Free the external attached storage if this
	 * mbuf is the only reference to it.
	 *__Userspace__ TODO: jumbo frames
	 *
	 */
	/* NOTE: We previously had the same code that
	 * SCTP_DECREMENT_AND_CHECK_REFCOUNT reduces to here, but the IPHONE
	 * malloc commit changed this to compare to 0 instead of 1 (see next
	 * line). Why? ... this caused a huge memory leak in Linux.
	 */
#ifdef IPHONE
	if (atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 0)
#else
	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(m->m_ext.ref_cnt))
#endif
	{
		if (m->m_ext.ext_type == EXT_CLUSTER) {
#if defined(SCTP_SIMPLE_ALLOCATOR)
			mb_dtor_clust(m->m_ext.ext_buf, &clust_mb_args);
#endif
			SCTP_ZONE_FREE(zone_clust, m->m_ext.ext_buf);
			SCTP_ZONE_FREE(zone_ext_refcnt, (u_int *)m->m_ext.ref_cnt);
			m->m_ext.ref_cnt = NULL;
		}
	}

	if (skipmbuf)
		return;


	/* __Userspace__ Also freeing the storage for ref_cnt:
	 * free this mbuf back to the mbuf zone with all m_ext
	 * information purged.
	 */
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_free = NULL;
	m->m_ext.ext_args = NULL;
	m->m_ext.ref_cnt = NULL;
	m->m_ext.ext_size = 0;
	m->m_ext.ext_type = 0;
	m->m_flags &= ~M_EXT;
#if defined(SCTP_SIMPLE_ALLOCATOR)
	mb_dtor_mbuf(m, NULL);
#endif
	SCTP_ZONE_FREE(zone_mbuf, m);

	/*umem_cache_free(zone_mbuf, m);*/
}
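
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): cluster reference counting as seen by callers. Two mbufs produced
 * by m_copym() (defined below, declared in user_mbuf.h) share one cluster;
 * the cluster returns to zone_clust only when the last reference is freed.
 */
#if 0
static void
example_shared_cluster(void)
{
	struct mbuf *m, *copy;

	m = m_get(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	m_clget(m, M_NOWAIT);
	if (m->m_ext.ext_buf == NULL) {
		m->m_flags &= ~M_EXT;	/* conservative recovery for this sketch */
		m_free(m);
		return;
	}
	m->m_len = 100;
	copy = m_copym(m, 0, M_COPYALL, M_NOWAIT);	/* ref_cnt becomes 2 */
	m_freem(m);		/* cluster survives; copy still references it */
	if (copy != NULL)
		m_freem(copy);	/* last reference; cluster freed here */
}
#endif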

/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{

	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;		/* especially tags */
	SLIST_INIT(&from->m_pkthdr.tags);	/* purge tags from src */
	from->m_flags &= ~M_PKTHDR;
}


/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len). Returns the resulting
 * mbuf chain on success, frees it and returns NULL on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If the first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pull up into it;
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MHLEN)
			goto bad;
		MGET(m, M_NOWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
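
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): the typical m_pullup() pattern before casting m_data to a header
 * structure. The struct is hypothetical; the point is that mtod() is only
 * safe once the first sizeof(header) bytes are contiguous.
 */
#if 0
struct example_hdr {		/* hypothetical wire header */
	uint16_t src_port;
	uint16_t dst_port;
};

static struct example_hdr *
example_parse_header(struct mbuf **mp)
{
	if ((*mp)->m_len < (int)sizeof(struct example_hdr)) {
		*mp = m_pullup(*mp, sizeof(struct example_hdr));
		if (*mp == NULL)	/* the chain was freed on failure */
			return NULL;
	}
	return mtod(*mp, struct example_hdr *);
}
#endif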


static struct mbuf *
m_dup1(struct mbuf *m, int off, int len, int wait)
{
	struct mbuf *n = NULL;
	int copyhdr;

	if (len > MCLBYTES)
		return NULL;
	if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
		copyhdr = 1;
	else
		copyhdr = 0;
	/* Allocate the mbuf first; the previous code passed the still-NULL
	 * pointer n to m_clget() in the len >= MINCLSIZE case, which would
	 * dereference NULL. A cluster is attached afterwards when needed,
	 * and the header is copied by the m_dup_pkthdr() call below.
	 */
	if (copyhdr == 1)
		n = m_gethdr(wait, m->m_type);
	else
		n = m_get(wait, m->m_type);
	if (!n)
		return NULL; /* ENOBUFS */
	if (len >= MINCLSIZE) {
		m_clget(n, wait);
		if (n->m_ext.ext_buf == NULL) {
			m_free(n);
			return NULL;
		}
	}

	if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
		m_free(n);
		return NULL;
	}
	m_copydata(m, off, len, mtod(n, caddr_t));
	n->m_len = len;
	return n;
}


/* Taken from sys/kern/uipc_mbuf2.c */
struct mbuf *
m_pulldown(struct mbuf *m, int off, int len, int *offp)
{
	struct mbuf *n, *o;
	int hlen, tlen, olen;
	int writable;

	/* Check invalid arguments. */
	KASSERT(m, ("m == NULL in m_pulldown()"));
	if (len > MCLBYTES) {
		m_freem(m);
		return NULL;	/* impossible */
	}

#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;
		SCTPDBG(SCTP_DEBUG_USR, "before:");
		for (t = m; t; t = t->m_next)
			SCTPDBG(SCTP_DEBUG_USR, " %d", t->m_len);
		SCTPDBG(SCTP_DEBUG_USR, "\n");
	}
#endif
	n = m;
	while (n != NULL && off > 0) {
		if (n->m_len > off)
			break;
		off -= n->m_len;
		n = n->m_next;
	}
	/* Be sure to point at a non-empty mbuf. */
	while (n != NULL && n->m_len == 0)
		n = n->m_next;
	if (!n) {
		m_freem(m);
		return NULL;	/* mbuf chain too short */
	}

	writable = 0;
	if ((n->m_flags & M_EXT) == 0 ||
	    (n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
		writable = 1;

	/*
	 * The target data is on <n, off>.
	 * If we got enough data in the mbuf "n", we're done.
	 */
	if ((off == 0 || offp) && len <= n->m_len - off && writable)
		goto ok;

	/*
	 * When len <= n->m_len - off and off != 0, it is a special case.
	 * len bytes from <n, off> sit in a single mbuf, but the caller does
	 * not like the starting position (off).
	 * Chop the current mbuf into two pieces, and set off to 0.
	 */
	if (len <= n->m_len - off) {
		o = m_dup1(n, off, n->m_len - off, M_NOWAIT);
		if (o == NULL) {
			m_freem(m);
			return NULL;	/* ENOBUFS */
		}
		n->m_len = off;
		o->m_next = n->m_next;
		n->m_next = o;
		n = n->m_next;
		off = 0;
		goto ok;
	}
	/*
	 * We need to take hlen from <n, off> and tlen from <n->m_next, 0>,
	 * and construct a contiguous mbuf with m_len == len.
	 * Note that hlen + tlen == len, and tlen > 0.
	 */
	hlen = n->m_len - off;
	tlen = len - hlen;

	/*
	 * Ensure that we have enough trailing data on the mbuf chain.
	 * If not, we can do nothing about the chain.
	 */
	olen = 0;
	for (o = n->m_next; o != NULL; o = o->m_next)
		olen += o->m_len;
	if (hlen + olen < len) {
		m_freem(m);
		return NULL;	/* mbuf chain too short */
	}

	/*
	 * Easy cases first.
	 * We need to use m_copydata() to get data from <n->m_next, 0>.
	 */
	if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
	    && writable) {
		m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
		n->m_len += tlen;
		m_adj(n->m_next, tlen);
		goto ok;
	}

	if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
	    && writable) {
		n->m_next->m_data -= hlen;
		n->m_next->m_len += hlen;
		bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
		n->m_len -= hlen;
		n = n->m_next;
		off = 0;
		goto ok;
	}

	/*
	 * Now we need to do it the hard way. Don't m_copy as there's no
	 * room on either end.
	 * Note: the previous code called m_clget(o, M_NOWAIT) with o still
	 * uninitialized when len > MLEN; the mbuf is now allocated first
	 * and the cluster attached to it.
	 */
	o = m_get(M_NOWAIT, m->m_type);
	if (o != NULL && len > MLEN) {
		/* o = m_getcl(M_NOWAIT, m->m_type, 0);*/
		m_clget(o, M_NOWAIT);
		if (o->m_ext.ext_buf == NULL) {
			m_free(o);
			o = NULL;
		}
	}
	if (!o) {
		m_freem(m);
		return NULL;	/* ENOBUFS */
	}
	/* get hlen from <n, off> into <o, 0> */
	o->m_len = hlen;
	bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
	n->m_len -= hlen;
	/* get tlen from <n->m_next, 0> into <o, hlen> */
	m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
	o->m_len += tlen;
	m_adj(n->m_next, tlen);
	o->m_next = n->m_next;
	n->m_next = o;
	n = o;
	off = 0;
ok:
#ifdef PULLDOWN_DEBUG
	{
		struct mbuf *t;
		SCTPDBG(SCTP_DEBUG_USR, "after:");
		for (t = m; t; t = t->m_next)
			SCTPDBG(SCTP_DEBUG_USR, "%c%d", t == n ? '*' : ' ', t->m_len);
		SCTPDBG(SCTP_DEBUG_USR, " (off=%d)\n", off);
	}
#endif
	if (offp)
		*offp = off;
	return n;
}
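
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): unlike m_pullup(), m_pulldown() can expose len contiguous bytes
 * at an arbitrary offset without reshuffling the head of the chain.
 */
#if 0
static void
example_pulldown(struct mbuf *m, int chunk_offset)
{
	struct mbuf *n;
	int off;

	/* Make 4 contiguous bytes available at chunk_offset. */
	n = m_pulldown(m, chunk_offset, 4, &off);
	if (n == NULL)
		return;		/* m has already been freed */
	/* The 4 bytes start at mtod(n, caddr_t) + off. */
	(void)(mtod(n, caddr_t) + off);
}
#endif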

/*
 * Attach the cluster from *m to *n, set up m_ext in *n
 * and bump the refcount of the cluster.
 */
static void
mb_dupcl(struct mbuf *n, struct mbuf *m)
{
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
	KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));

	if (*(m->m_ext.ref_cnt) == 1)
		*(m->m_ext.ref_cnt) += 1;
	else
		atomic_add_int(m->m_ext.ref_cnt, 1);
	n->m_ext.ext_buf = m->m_ext.ext_buf;
	n->m_ext.ext_free = m->m_ext.ext_free;
	n->m_ext.ext_args = m->m_ext.ext_args;
	n->m_ext.ext_size = m->m_ext.ext_size;
	n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	n->m_ext.ext_type = m->m_ext.ext_type;
	n->m_flags |= M_EXT;
}


/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes. If len is M_COPYALL, copy to the end of the
 * mbuf chain. The wait parameter is a choice of M_TRYWAIT/M_NOWAIT from
 * the caller. Note that the copy is read-only, because clusters are not
 * copied, only their reference counts are incremented.
 */

struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));

	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = NULL;
	while (len > 0) {
		if (m == NULL) {
			KASSERT(len == M_COPYALL, ("m_copym, length > size of mbuf chain"));
			break;
		}
		if (copyhdr)
			MGETHDR(n, wait, m->m_type);
		else
			MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			if (!m_dup_pkthdr(n, m, wait))
				goto nospace;
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data + off;
			mb_dupcl(n, m);
		} else
			bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
			    (u_int)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
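
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): because m_copym() shares clusters instead of copying them, treat
 * the result as read-only; check M_WRITABLE() before modifying in place.
 */
#if 0
static struct mbuf *
example_readonly_copy(struct mbuf *m)
{
	/* Copy the whole chain; headers are duplicated, clusters shared. */
	return m_copym(m, 0, M_COPYALL, M_NOWAIT);
}
#endif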


int
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
{
	struct m_tag *p, *t, *tprev = NULL;

	KASSERT(to && from, ("m_tag_copy_chain: null argument, to %p from %p", (void *)to, (void *)from));
	m_tag_delete_chain(to, NULL);
	SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
		t = m_tag_copy(p, how);
		if (t == NULL) {
			m_tag_delete_chain(to, NULL);
			return 0;
		}
		if (tprev == NULL)
			SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
		else
			SLIST_INSERT_AFTER(tprev, t, m_tag_link);
		tprev = t;
	}
	return 1;
}

/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
{

	to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
	if ((to->m_flags & M_EXT) == 0)
		to->m_data = to->m_pktdat;
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, MBTOM(how)));
}

/* Copy a single tag. */
struct m_tag *
m_tag_copy(struct m_tag *t, int how)
{
	struct m_tag *p;

	KASSERT(t, ("m_tag_copy: null tag"));
	p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
	if (p == NULL)
		return (NULL);
	bcopy(t + 1, p + 1, t->m_tag_len);	/* Copy the data */
	return p;
}

/* Get a packet tag structure along with specified data following. */
struct m_tag *
m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
{
	struct m_tag *t;

	if (len < 0)
		return NULL;
	t = malloc(len + sizeof(struct m_tag));
	if (t == NULL)
		return NULL;
	m_tag_setup(t, cookie, type, len);
	t->m_tag_free = m_tag_free_default;
	return t;
}

/* Free a packet tag. */
void
m_tag_free_default(struct m_tag *t)
{
	free(t);
}
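
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): allocating a tag and linking it onto a packet header's tag list.
 * The cookie and type values are hypothetical; the tag payload lives
 * directly after the struct m_tag, as m_tag_copy() above assumes.
 */
#if 0
#define EXAMPLE_COOKIE 0x12345678	/* hypothetical module cookie */

static int
example_attach_tag(struct mbuf *m, const void *data, int len)
{
	struct m_tag *t;

	if ((m->m_flags & M_PKTHDR) == 0)
		return -1;			/* tags hang off the pkthdr */
	t = m_tag_alloc(EXAMPLE_COOKIE, 1, len, M_NOWAIT);
	if (t == NULL)
		return -1;
	memcpy(t + 1, data, len);		/* payload follows the tag */
	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
	return 0;
}
#endif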

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				goto out;
			bzero(mtod(n, caddr_t), MLEN);
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			n = m_get(M_NOWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:
	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}


/*
 * Lesser-used path for M_PREPEND:
 * allocate a new mbuf to prepend to the chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	if (m->m_flags & M_PKTHDR)
		MGETHDR(mn, how, m->m_type);
	else
		MGET(mn, how, m->m_type);
	if (mn == NULL) {
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
	mn->m_next = m;
	m = mn;
	if (m->m_flags & M_PKTHDR) {
		if (len < MHLEN)
			MH_ALIGN(m, len);
	} else {
		if (len < MLEN)
			M_ALIGN(m, len);
	}
	m->m_len = len;
	return (m);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	u_int count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}


/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}


void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail. Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return. Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == NULL)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				if (m->m_next != NULL) {
					m_freem(m->m_next);
					m->m_next = NULL;
				}
				break;
			}
			count -= m->m_len;
		}
	}
}
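
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): m_adj() trims from the head with a positive count and from the
 * tail with a negative count, updating m_pkthdr.len when present.
 */
#if 0
static void
example_strip_header_and_trailer(struct mbuf *m, int hdrlen, int trlen)
{
	m_adj(m, hdrlen);	/* drop hdrlen bytes from the front */
	m_adj(m, -trlen);	/* drop trlen bytes from the end */
}
#endif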


/* m_split is used within sctp_handle_cookie_echo. */

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes. In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf. Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	u_int len = len0, remain;

	/* MBUF_CHECKSLEEP(wait); */
	for (m = m0; m && (int)len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else {
				n->m_len = 0;
				return (n);
			}
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + len;
		mb_dupcl(n, m);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
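
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): splitting a chain after len0 bytes, as sctp_handle_cookie_echo()
 * does. On success the original chain keeps the first len0 bytes and the
 * returned tail holds the remainder; both are then owned independently.
 */
#if 0
static void
example_split(struct mbuf *m, int len0)
{
	struct mbuf *tail;

	tail = m_split(m, len0, M_NOWAIT);
	if (tail == NULL)
		return;		/* m was left (approximately) intact */
	m_freem(tail);		/* the caller owns both pieces */
	/* m still holds the first len0 bytes here. */
}
#endif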




int
pack_send_buffer(caddr_t buffer, struct mbuf *mb)
{
	int count_to_copy;
	int total_count_copied = 0;
	int offset = 0;

	do {
		count_to_copy = mb->m_len;
		bcopy(mtod(mb, caddr_t), buffer + offset, count_to_copy);
		offset += count_to_copy;
		total_count_copied += count_to_copy;
		mb = mb->m_next;
	} while (mb);

	return (total_count_copied);
}
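
/* __Userspace__
 * Usage sketch (illustrative only, compiled out; not part of the original
 * file): pack_send_buffer() does no bounds checking, so callers must size
 * the flat buffer from the chain first, e.g. by summing m_len (or using
 * m_pkthdr.len when M_PKTHDR is set).
 */
#if 0
static caddr_t
example_flatten(struct mbuf *mb)
{
	struct mbuf *n;
	int total = 0;
	caddr_t buf;

	for (n = mb; n != NULL; n = n->m_next)
		total += n->m_len;
	buf = malloc(total);
	if (buf != NULL)
		(void)pack_send_buffer(buf, mb);
	return buf;	/* the caller frees this */
}
#endif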
