Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
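As a rough sketch of what such pref-gated behavior could look like (this is not the actual patch: StorageMode and ChooseStorageMode are hypothetical names introduced only for illustration, and only mozilla::Preferences::GetBool is an existing Gecko API), a storage backend would check privacy.thirdparty.isolate and fall back to a memory-only mode so nothing is written to disk:

    // Illustrative sketch only, not the actual change: if the pref is set,
    // choose a memory-only storage mode so content never touches the disk.
    #include "mozilla/Preferences.h"

    enum class StorageMode { Disk, MemoryOnly };  // hypothetical helper type

    static StorageMode
    ChooseStorageMode()
    {
      // Preferences::GetBool is a real Gecko API; forcing memory storage when
      // privacy.thirdparty.isolate is set is what the message above describes.
      if (mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false)) {
        return StorageMode::MemoryOnly;  // disk avoidance (Tor bug #9701)
      }
      return StorageMode::Disk;
    }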
commit dfec2c249915560cedd2b49326c6629ad8a0b0f2
Author: Jeff Muizelaar <jmuizelaar@mozilla.com>
Date:   Tue Mar 2 16:01:41 2010 -0500

    add a stash of cairo_t's

diff --git a/src/cairo.c b/src/cairo.c
index 3c9d892..4b27b83 100644
--- a/src/cairo.c
+++ b/src/cairo.c
@@ -119,7 +119,63 @@ _cairo_set_error (cairo_t *cr, cairo_status_t status)
     _cairo_status_set_error (&cr->status, _cairo_error (status));
 }
 
-#if HAS_ATOMIC_OPS
+#if defined(_MSC_VER)
+#pragma intrinsic(_BitScanForward)
+static __forceinline int
+ffs(int x)
+{
+    unsigned long i;
+
+    if (_BitScanForward(&i, x) != 0)
+        return i + 1;
+
+    return 0;
+}
+#endif
+
+
+#if CAIRO_NO_MUTEX
+/* We keep a small stash of contexts to reduce malloc pressure */
+#define CAIRO_STASH_SIZE 4
+static struct {
+    cairo_t pool[CAIRO_STASH_SIZE];
+    int occupied;
+} _context_stash;
+
+static cairo_t *
+_context_get (void)
+{
+    int avail, old, new;
+
+    old = _context_stash.occupied;
+    avail = ffs (~old) - 1;
+    if (avail >= CAIRO_STASH_SIZE)
+        return malloc (sizeof (cairo_t));
+
+    new = old | (1 << avail);
+    _context_stash.occupied = new;
+
+    return &_context_stash.pool[avail];
+}
+
+static void
+_context_put (cairo_t *cr)
+{
+    int old, new, avail;
+
+    if (cr < &_context_stash.pool[0] ||
+        cr >= &_context_stash.pool[CAIRO_STASH_SIZE])
+    {
+        free (cr);
+        return;
+    }
+
+    avail = ~(1 << (cr - &_context_stash.pool[0]));
+    old = _context_stash.occupied;
+    new = old & avail;
+    _context_stash.occupied = new;
+}
+#elif HAS_ATOMIC_OPS
 /* We keep a small stash of contexts to reduce malloc pressure */
 #define CAIRO_STASH_SIZE 4
 static struct {