Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with the disk avoidance requirements documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Operations for zeroing POD types, arrays, and so on.
 *
 * These operations are preferable to memset, memcmp, and the like because they
 * don't require remembering to multiply by sizeof(T), array lengths, and so on
 * everywhere.
 */

#ifndef mozilla_PodOperations_h
#define mozilla_PodOperations_h

#include "mozilla/Array.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/Attributes.h"

#include <stdint.h>
#include <string.h>

namespace mozilla {

/** Set the contents of |t| to 0. */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodZero(T* t)
{
  memset(t, 0, sizeof(T));
}

/** Set the contents of |nelem| elements starting at |t| to 0. */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodZero(T* t, size_t nelem)
{
  /*
   * This function is often called with 'nelem' small; we use an inline loop
   * instead of calling 'memset' with a non-constant length. The compiler
   * should inline the memset call with constant size, though.
   */
  for (T* end = t + nelem; t < end; t++)
    memset(t, 0, sizeof(T));
}
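
/*
 * Usage sketch (illustrative only; |Point| and |points| are hypothetical
 * names): PodZero spares callers from multiplying by sizeof(T) by hand.
 *
 *   struct Point { int32_t x, y; };
 *
 *   Point p;
 *   PodZero(&p);          // same effect as memset(&p, 0, sizeof(Point))
 *
 *   Point* points = new Point[8];
 *   PodZero(points, 8);   // zeroes all 8 elements, no sizeof arithmetic
 *   delete[] points;
 */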

/*
 * Arrays implicitly convert to pointers to their first element, which is
 * dangerous when combined with the above PodZero definitions. Adding an
 * overload for arrays is ambiguous, so we need another identifier. The
 * ambiguous overload is left to catch mistaken uses of PodZero; if you get a
 * compile error involving PodZero and array types, use PodArrayZero instead.
 */
template<typename T, size_t N>
static void PodZero(T (&t)[N]) MOZ_DELETE;
template<typename T, size_t N>
static void PodZero(T (&t)[N], size_t nelem) MOZ_DELETE;

/** Set the contents of the array |t| to zero. */
template <class T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayZero(T (&t)[N])
{
  memset(t, 0, N * sizeof(T));
}

template <typename T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayZero(Array<T, N>& arr)
{
  memset(&arr[0], 0, N * sizeof(T));
}
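
/*
 * Usage sketch (illustrative only; |buf| is a hypothetical name): for a
 * fixed-size array, the deleted PodZero overloads above turn the easy
 * mistake into a compile error, and PodArrayZero deduces the length from
 * the array type.
 *
 *   uint32_t buf[64];
 *   // PodZero(buf);      // compile error, as described above
 *   PodArrayZero(buf);    // zeroes all 64 elements
 */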

/**
 * Assign |*src| to |*dst|. The locations must not be the same and must not
 * overlap.
 */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodAssign(T* dst, const T* src)
{
  MOZ_ASSERT(dst != src);
  MOZ_ASSERT_IF(src < dst,
                PointerRangeSize(src, static_cast<const T*>(dst)) >= 1);
  MOZ_ASSERT_IF(dst < src,
                PointerRangeSize(static_cast<const T*>(dst), src) >= 1);
  memcpy(reinterpret_cast<char*>(dst), reinterpret_cast<const char*>(src),
         sizeof(T));
}

/**
 * Copy |nelem| T elements from |src| to |dst|. The two memory ranges must not
 * overlap!
 */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodCopy(T* dst, const T* src, size_t nelem)
{
  MOZ_ASSERT(dst != src);
  MOZ_ASSERT_IF(src < dst,
                PointerRangeSize(src, static_cast<const T*>(dst)) >= nelem);
  MOZ_ASSERT_IF(dst < src,
                PointerRangeSize(static_cast<const T*>(dst), src) >= nelem);

  if (nelem < 128) {
    /*
     * Avoid using operator= in this loop, as it may have been
     * intentionally deleted by the POD type.
     */
    for (const T* srcend = src + nelem; src < srcend; src++, dst++)
      PodAssign(dst, src);
  } else {
    memcpy(dst, src, nelem * sizeof(T));
  }
}
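
/*
 * Usage sketch (illustrative only; |src|, |dst|, and |kLen| are hypothetical
 * names): PodCopy is a typed stand-in for memcpy(dst, src, n * sizeof(T)) on
 * non-overlapping ranges.
 *
 *   const size_t kLen = 256;
 *   double src[kLen];
 *   double dst[kLen];
 *   PodArrayZero(src);
 *   PodCopy(dst, src, kLen);   // >= 128 elements, so this takes the memcpy path
 */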

template<typename T>
static MOZ_ALWAYS_INLINE void
PodCopy(volatile T* dst, const volatile T* src, size_t nelem)
{
  MOZ_ASSERT(dst != src);
  MOZ_ASSERT_IF(src < dst,
                PointerRangeSize(src, static_cast<const volatile T*>(dst)) >= nelem);
  MOZ_ASSERT_IF(dst < src,
                PointerRangeSize(static_cast<const volatile T*>(dst), src) >= nelem);

  /*
   * Volatile |dst| requires extra work, because it's undefined behavior to
   * modify volatile objects using the mem* functions. Just write out the
   * loops manually, using operator= rather than memcpy for the same reason,
   * and let the compiler optimize to the extent it can.
   */
  for (const volatile T* srcend = src + nelem; src < srcend; src++, dst++)
    *dst = *src;
}
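
/*
 * Usage sketch (illustrative only; |flags| and |shadow| are hypothetical
 * names): the volatile overload performs element-by-element stores, so every
 * write to the volatile destination actually happens.
 *
 *   volatile uint32_t flags[16];
 *   uint32_t shadow[16];
 *   PodArrayZero(shadow);
 *   PodCopy(flags, shadow, 16);   // element-wise assignment, never memcpy
 */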

/**
 * Copy the contents of the array |src| into the array |dst|, both of size N.
 * The arrays must not overlap!
 */
template <class T, size_t N>
static MOZ_ALWAYS_INLINE void
PodArrayCopy(T (&dst)[N], const T (&src)[N])
{
  PodCopy(dst, src, N);
}

/**
 * Copy the memory for |nelem| T elements from |src| to |dst|. If the two
 * memory ranges overlap, then the effect is as if the |nelem| elements are
 * first copied from |src| to a temporary array, and then from the temporary
 * array to |dst|.
 */
template<typename T>
static MOZ_ALWAYS_INLINE void
PodMove(T* dst, const T* src, size_t nelem)
{
  MOZ_ASSERT(nelem <= SIZE_MAX / sizeof(T),
             "trying to move an impossible number of elements");
  memmove(dst, src, nelem * sizeof(T));
}
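
/*
 * Usage sketch (illustrative only; |buf| is a hypothetical name): unlike
 * PodCopy, PodMove tolerates overlapping ranges, e.g. when shifting elements
 * within a single buffer.
 *
 *   int buf[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
 *   PodMove(buf, buf + 2, 8);   // buf is now { 2, 3, 4, 5, 6, 7, 8, 9, 8, 9 }
 */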

/**
 * Determine whether the |len| elements at |one| are memory-identical to the
 * |len| elements at |two|.
 */
template<typename T>
static MOZ_ALWAYS_INLINE bool
PodEqual(const T* one, const T* two, size_t len)
{
  if (len < 128) {
    const T* p1end = one + len;
    const T* p1 = one;
    const T* p2 = two;
    for (; p1 < p1end; p1++, p2++) {
      if (*p1 != *p2)
        return false;
    }
    return true;
  }

  return !memcmp(one, two, len * sizeof(T));
}
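
/*
 * Usage sketch (illustrative only; |a| and |b| are hypothetical names):
 * PodEqual reports whether two ranges of POD elements hold identical
 * contents.
 *
 *   uint8_t a[4] = { 1, 2, 3, 4 };
 *   uint8_t b[4] = { 1, 2, 3, 4 };
 *   bool same = PodEqual(a, b, 4);   // true
 */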

} // namespace mozilla

#endif /* mozilla_PodOperations_h */