/*
 * Copyright © 2007,2008,2009,2010  Red Hat, Inc.
 * Copyright © 2012  Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Red Hat Author(s): Behdad Esfahbod
 * Google Author(s): Behdad Esfahbod
 */

#ifndef HB_OPEN_TYPE_PRIVATE_HH
#define HB_OPEN_TYPE_PRIVATE_HH

#include "hb-private.hh"


namespace OT {



/*
 * Casts
 */

/* Cast to struct T, reference to reference */
template<typename Type, typename TObject>
inline const Type& CastR(const TObject &X)
{ return reinterpret_cast<const Type&> (X); }
template<typename Type, typename TObject>
inline Type& CastR(TObject &X)
{ return reinterpret_cast<Type&> (X); }

/* Cast to struct T, pointer to pointer */
template<typename Type, typename TObject>
inline const Type* CastP(const TObject *X)
{ return reinterpret_cast<const Type*> (X); }
template<typename Type, typename TObject>
inline Type* CastP(TObject *X)
{ return reinterpret_cast<Type*> (X); }

/* StructAtOffset<T>(P,Ofs) returns the struct T& that is placed at memory
 * location pointed to by P plus Ofs bytes. */
template<typename Type>
inline const Type& StructAtOffset(const void *P, unsigned int offset)
{ return * reinterpret_cast<const Type*> ((const char *) P + offset); }
template<typename Type>
inline Type& StructAtOffset(void *P, unsigned int offset)
{ return * reinterpret_cast<Type*> ((char *) P + offset); }

/* StructAfter<T>(X) returns the struct T& that is placed after X.
 * Works with X of variable size also.  X must implement get_size() */
template<typename Type, typename TObject>
inline const Type& StructAfter(const TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
template<typename Type, typename TObject>
inline Type& StructAfter(TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
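
/* For instance (hypothetical): if a variable-sized `header` object is
 * immediately followed in memory by a Tag, the Tag may be fetched as
 *
 *   const Tag &tag = StructAfter<Tag> (header);
 *
 * provided `header` implements get_size (). */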



/*
 * Size checking
 */

/* Check _assertion in a method environment */
#define _DEFINE_INSTANCE_ASSERTION1(_line, _assertion) \
  inline void _instance_assertion_on_line_##_line (void) const \
  { \
    ASSERT_STATIC (_assertion); \
    ASSERT_INSTANCE_POD (*this); /* Make sure it's POD. */ \
  }
# define _DEFINE_INSTANCE_ASSERTION0(_line, _assertion) _DEFINE_INSTANCE_ASSERTION1 (_line, _assertion)
# define DEFINE_INSTANCE_ASSERTION(_assertion) _DEFINE_INSTANCE_ASSERTION0 (__LINE__, _assertion)

/* Check that _code compiles in a method environment */
#define _DEFINE_COMPILES_ASSERTION1(_line, _code) \
  inline void _compiles_assertion_on_line_##_line (void) const \
  { _code; }
# define _DEFINE_COMPILES_ASSERTION0(_line, _code) _DEFINE_COMPILES_ASSERTION1 (_line, _code)
# define DEFINE_COMPILES_ASSERTION(_code) _DEFINE_COMPILES_ASSERTION0 (__LINE__, _code)


#define DEFINE_SIZE_STATIC(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size)); \
  static const unsigned int static_size = (size); \
  static const unsigned int min_size = (size)

/* Size signifying variable-sized array */
#define VAR 1

#define DEFINE_SIZE_UNION(size, _member) \
  DEFINE_INSTANCE_ASSERTION (this->u._member.static_size == (size)); \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_MIN(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)); \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_ARRAY(size, array) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (array[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array[0].static_size) \
  static const unsigned int min_size = (size)

#define DEFINE_SIZE_ARRAY2(size, array1, array2) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + sizeof (this->array1[0]) + sizeof (this->array2[0])); \
  DEFINE_COMPILES_ASSERTION ((void) array1[0].static_size; (void) array2[0].static_size) \
  static const unsigned int min_size = (size)
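
/* Example (hypothetical struct): a table with a 16-bit count followed by a
 * variable-length array would pin down its layout like so:
 *
 *   struct NumberList
 *   {
 *     USHORT numbers_len;
 *     USHORT numbers[VAR];
 *     DEFINE_SIZE_ARRAY (2, numbers);
 *   };
 *
 * min_size is then 2, and the instance assertion checks that the struct is
 * exactly the fixed header plus one array element. */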



/*
 * Null objects
 */

/* Global nul-content Null pool.  Enlarge as necessary. */
/* TODO This really should be an extern HB_INTERNAL and defined somewhere... */
static const void *_NullPool[64 / sizeof (void *)];

/* Generic nul-content Null objects. */
template <typename Type>
static inline const Type& Null (void) {
  ASSERT_STATIC (sizeof (Type) <= sizeof (_NullPool));
  return *CastP<Type> (_NullPool);
}

/* Specialization for arbitrary-content arbitrary-sized Null objects. */
#define DEFINE_NULL_DATA(Type, data) \
static const char _Null##Type[sizeof (Type) + 1] = data; /* +1 is for nul-termination in data */ \
template <> \
inline const Type& Null<Type> (void) { \
  return *CastP<Type> (_Null##Type); \
} /* The following line really exists such that we end in a place needing semicolon */ \
ASSERT_STATIC (Type::min_size + 1 <= sizeof (_Null##Type))

/* Accessor macro. */
#define Null(Type) Null<Type>()
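
/* Example: code in this file never returns NULL pointers for out-of-range
 * accesses; it returns a reference into the Null pool instead, e.g.:
 *
 *   if (unlikely (i >= len)) return Null(Type);
 */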



/*
 * Sanitize
 */

#ifndef HB_DEBUG_SANITIZE
#define HB_DEBUG_SANITIZE (HB_DEBUG+0)
#endif


#define TRACE_SANITIZE(this) \
  hb_auto_trace_t<HB_DEBUG_SANITIZE, bool> trace \
  (&c->debug_depth, c->get_name (), this, HB_FUNC, \
   "");

/* This limits sanitizing time on really broken fonts. */
#ifndef HB_SANITIZE_MAX_EDITS
#define HB_SANITIZE_MAX_EDITS 100
#endif

struct hb_sanitize_context_t
{
  inline const char *get_name (void) { return "SANITIZE"; }
  static const unsigned int max_debug_depth = HB_DEBUG_SANITIZE;
  typedef bool return_t;
  template <typename T>
  inline return_t dispatch (const T &obj) { return obj.sanitize (this); }
  static return_t default_return_value (void) { return true; }
  bool stop_sublookup_iteration (const return_t r HB_UNUSED) const { return false; }

  inline void init (hb_blob_t *b)
  {
    this->blob = hb_blob_reference (b);
    this->writable = false;
  }

  inline void start_processing (void)
  {
    this->start = hb_blob_get_data (this->blob, NULL);
    this->end = this->start + hb_blob_get_length (this->blob);
    this->edit_count = 0;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SANITIZE, this->blob, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));
  }

  inline void end_processing (void)
  {
    DEBUG_MSG_LEVEL (SANITIZE, this->blob, 0, -1,
		     "end [%p..%p] %u edit requests",
		     this->start, this->end, this->edit_count);

    hb_blob_destroy (this->blob);
    this->blob = NULL;
    this->start = this->end = NULL;
  }

  inline bool check_range (const void *base, unsigned int len) const
  {
    const char *p = (const char *) base;

    hb_auto_trace_t<HB_DEBUG_SANITIZE, bool> trace
      (&this->debug_depth, "SANITIZE", this->blob, NULL,
       "check_range [%p..%p] (%d bytes) in [%p..%p]",
       p, p + len, len,
       this->start, this->end);

    return TRACE_RETURN (likely (this->start <= p && p <= this->end && (unsigned int) (this->end - p) >= len));
  }

  inline bool check_array (const void *base, unsigned int record_size, unsigned int len) const
  {
    const char *p = (const char *) base;
    bool overflows = _hb_unsigned_int_mul_overflows (len, record_size);

    hb_auto_trace_t<HB_DEBUG_SANITIZE, bool> trace
      (&this->debug_depth, "SANITIZE", this->blob, NULL,
       "check_array [%p..%p] (%d*%d=%ld bytes) in [%p..%p]",
       p, p + (record_size * len), record_size, len, (unsigned long) record_size * len,
       this->start, this->end);

    return TRACE_RETURN (likely (!overflows && this->check_range (base, record_size * len)));
  }

  template <typename Type>
  inline bool check_struct (const Type *obj) const
  {
    return likely (this->check_range (obj, obj->min_size));
  }

  inline bool may_edit (const void *base HB_UNUSED, unsigned int len HB_UNUSED)
  {
    if (this->edit_count >= HB_SANITIZE_MAX_EDITS)
      return false;

    const char *p = (const char *) base;
    this->edit_count++;

    hb_auto_trace_t<HB_DEBUG_SANITIZE, bool> trace
      (&this->debug_depth, "SANITIZE", this->blob, NULL,
       "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
       this->edit_count,
       p, p + len, len,
       this->start, this->end,
       this->writable ? "GRANTED" : "DENIED");

    return TRACE_RETURN (this->writable);
  }

  mutable unsigned int debug_depth;
  const char *start, *end;
  bool writable;
  unsigned int edit_count;
  hb_blob_t *blob;
};
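
/* Example: a typical sanitize() method on a fixed-size struct in this file
 * just bound-checks its own extent (see IntType::sanitize below):
 *
 *   inline bool sanitize (hb_sanitize_context_t *c) {
 *     TRACE_SANITIZE (this);
 *     return TRACE_RETURN (likely (c->check_struct (this)));
 *   }
 */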



/* Template to sanitize an object. */
template <typename Type>
struct Sanitizer
{
  static hb_blob_t *sanitize (hb_blob_t *blob) {
    hb_sanitize_context_t c[1] = {{0}};
    bool sane;

    /* TODO is_sane() stuff */

    c->init (blob);

  retry:
    DEBUG_MSG_FUNC (SANITIZE, blob, "start");

    c->start_processing ();

    if (unlikely (!c->start)) {
      c->end_processing ();
      return blob;
    }

    Type *t = CastP<Type> (const_cast<char *> (c->start));

    sane = t->sanitize (c);
    if (sane) {
      if (c->edit_count) {
	DEBUG_MSG_FUNC (SANITIZE, blob, "passed first round with %d edits; going for second round", c->edit_count);

	/* sanitize again to ensure no toe-stepping */
	c->edit_count = 0;
	sane = t->sanitize (c);
	if (c->edit_count) {
	  DEBUG_MSG_FUNC (SANITIZE, blob, "requested %d edits in second round; FAILING", c->edit_count);
	  sane = false;
	}
      }
    } else {
      unsigned int edit_count = c->edit_count;
      if (edit_count && !c->writable) {
	c->start = hb_blob_get_data_writable (blob, NULL);
	c->end = c->start + hb_blob_get_length (blob);

	if (c->start) {
	  c->writable = true;
	  /* ok, we made it writable by relocating.  try again */
	  DEBUG_MSG_FUNC (SANITIZE, blob, "retry");
	  goto retry;
	}
      }
    }

    c->end_processing ();

    DEBUG_MSG_FUNC (SANITIZE, blob, sane ? "PASSED" : "FAILED");
    if (sane)
      return blob;
    else {
      hb_blob_destroy (blob);
      return hb_blob_get_empty ();
    }
  }

  static const Type* lock_instance (hb_blob_t *blob) {
    hb_blob_make_immutable (blob);
    const char *base = hb_blob_get_data (blob, NULL);
    return unlikely (!base) ? &Null(Type) : CastP<Type> (base);
  }
};
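
/* Example usage (hypothetical table type MyTable): the sanitizer consumes
 * the passed-in blob reference and returns either the same blob, possibly
 * edited in place, or the empty blob on failure:
 *
 *   hb_blob_t *checked = Sanitizer<MyTable>::sanitize (raw_blob);
 *   const MyTable *table = Sanitizer<MyTable>::lock_instance (checked);
 */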



/*
 * Serialize
 */

#ifndef HB_DEBUG_SERIALIZE
#define HB_DEBUG_SERIALIZE (HB_DEBUG+0)
#endif


#define TRACE_SERIALIZE(this) \
  hb_auto_trace_t<HB_DEBUG_SERIALIZE, bool> trace \
  (&c->debug_depth, "SERIALIZE", c, HB_FUNC, \
   "");


struct hb_serialize_context_t
{
  inline hb_serialize_context_t (void *start, unsigned int size)
  {
    this->start = (char *) start;
    this->end = this->start + size;

    this->ran_out_of_room = false;
    this->head = this->start;
    this->debug_depth = 0;
  }

  template <typename Type>
  inline Type *start_serialize (void)
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));

    return start_embed<Type> ();
  }

  inline void end_serialize (void)
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
		     "end [%p..%p] serialized %d bytes; %s",
		     this->start, this->end,
		     (int) (this->head - this->start),
		     this->ran_out_of_room ? "RAN OUT OF ROOM" : "did not run out of room");
  }

  template <typename Type>
  inline Type *copy (void)
  {
    assert (!this->ran_out_of_room);
    unsigned int len = this->head - this->start;
    void *p = malloc (len);
    if (p)
      memcpy (p, this->start, len);
    return reinterpret_cast<Type *> (p);
  }

  template <typename Type>
  inline Type *allocate_size (unsigned int size)
  {
    if (unlikely (this->ran_out_of_room || this->end - this->head < ptrdiff_t (size))) {
      this->ran_out_of_room = true;
      return NULL;
    }
    memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  template <typename Type>
  inline Type *allocate_min (void)
  {
    return this->allocate_size<Type> (Type::min_size);
  }

  template <typename Type>
  inline Type *start_embed (void)
  {
    Type *ret = reinterpret_cast<Type *> (this->head);
    return ret;
  }

  template <typename Type>
  inline Type *embed (const Type &obj)
  {
    unsigned int size = obj.get_size ();
    Type *ret = this->allocate_size<Type> (size);
    if (unlikely (!ret)) return NULL;
    memcpy (ret, &obj, size);
    return ret;
  }

  template <typename Type>
  inline Type *extend_min (Type &obj)
  {
    unsigned int size = obj.min_size;
    assert (this->start <= (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
    if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return NULL;
    return reinterpret_cast<Type *> (&obj);
  }

  template <typename Type>
  inline Type *extend (Type &obj)
  {
    unsigned int size = obj.get_size ();
    assert (this->start < (char *) &obj && (char *) &obj <= this->head && (char *) &obj + size >= this->head);
    if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return NULL;
    return reinterpret_cast<Type *> (&obj);
  }

  inline void truncate (void *head)
  {
    assert (this->start < head && head <= this->head);
    this->head = (char *) head;
  }

  unsigned int debug_depth;
  char *start, *end, *head;
  bool ran_out_of_room;
};
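
/* Example (hypothetical): serializing into a caller-provided buffer:
 *
 *   char buf[4096];
 *   hb_serialize_context_t c (buf, sizeof (buf));
 *   MyTable *t = c.start_serialize<MyTable> ();
 *   // ... fill in *t using extend/embed/allocate_size ...
 *   c.end_serialize ();
 *   if (!c.ran_out_of_room)
 *     MyTable *result = c.copy<MyTable> ();  // malloc'ed compact copy
 */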

template <typename Type>
struct Supplier
{
  inline Supplier (const Type *array, unsigned int len_)
  {
    head = array;
    len = len_;
  }
  inline const Type operator [] (unsigned int i) const
  {
    if (unlikely (i >= len)) return Type ();
    return head[i];
  }

  inline void advance (unsigned int count)
  {
    if (unlikely (count > len))
      count = len;
    len -= count;
    head += count;
  }

  private:
  inline Supplier (const Supplier<Type> &); /* Disallow copy */
  inline Supplier<Type>& operator= (const Supplier<Type> &); /* Disallow copy */

  unsigned int len;
  const Type *head;
};
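
/* Example: the array serializers below consume their input through a
 * Supplier, which bound-checks reads and tracks consumption:
 *
 *   Supplier<GlyphID> supplier (glyphs, count);  // `glyphs`: caller's array
 *   array.serialize (c, supplier, count);        // copies items, advances supplier
 */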




/*
 *
 * The OpenType Font File: Data Types
 */


/* "The following data types are used in the OpenType font file.
 *  All OpenType fonts use Motorola-style byte ordering (Big Endian):" */

/*
 * Int types
 */


template <typename Type, int Bytes> struct BEInt;

template <typename Type>
struct BEInt<Type, 2>
{
  public:
  inline void set (Type i) { hb_be_uint16_put (v,i); }
  inline operator Type (void) const { return hb_be_uint16_get (v); }
  inline bool operator == (const BEInt<Type, 2>& o) const { return hb_be_uint16_eq (v, o.v); }
  inline bool operator != (const BEInt<Type, 2>& o) const { return !(*this == o); }
  private: uint8_t v[2];
};
template <typename Type>
struct BEInt<Type, 4>
{
  public:
  inline void set (Type i) { hb_be_uint32_put (v,i); }
  inline operator Type (void) const { return hb_be_uint32_get (v); }
  inline bool operator == (const BEInt<Type, 4>& o) const { return hb_be_uint32_eq (v, o.v); }
  inline bool operator != (const BEInt<Type, 4>& o) const { return !(*this == o); }
  private: uint8_t v[4];
};
template <typename Type>
struct BEInt<Type, 3>
{
  public:
  inline void set (Type i) { hb_be_uint24_put (v,i); }
  inline operator Type (void) const { return hb_be_uint24_get (v); }
  inline bool operator == (const BEInt<Type, 3>& o) const { return hb_be_uint24_eq (v, o.v); }
  inline bool operator != (const BEInt<Type, 3>& o) const { return !(*this == o); }
  private: uint8_t v[3];
};

/* Integer types in big-endian order and no alignment requirement */
template <typename Type, unsigned int Size>
struct IntType
{
  inline void set (Type i) { v.set (i); }
  inline operator Type(void) const { return v; }
  inline bool operator == (const IntType<Type,Size> &o) const { return v == o.v; }
  inline bool operator != (const IntType<Type,Size> &o) const { return v != o.v; }
  static inline int cmp (const IntType<Type,Size> *a, const IntType<Type,Size> *b) { return b->cmp (*a); }
  inline int cmp (IntType<Type,Size> va) const { Type a = va; Type b = v; return a < b ? -1 : a == b ? 0 : +1; }
  inline int cmp (Type a) const { Type b = v; return a < b ? -1 : a == b ? 0 : +1; }
  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (likely (c->check_struct (this)));
  }
  protected:
  BEInt<Type, Size> v;
  public:
  DEFINE_SIZE_STATIC (Size);
};

typedef IntType<uint16_t, 2> USHORT;  /* 16-bit unsigned integer. */
typedef IntType<int16_t,  2> SHORT;   /* 16-bit signed integer. */
typedef IntType<uint32_t, 4> ULONG;   /* 32-bit unsigned integer. */
typedef IntType<int32_t,  4> LONG;    /* 32-bit signed integer. */
typedef IntType<uint32_t, 3> UINT24;  /* 24-bit unsigned integer. */
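
/* Example: these wrappers store big-endian bytes regardless of host order;
 * a USHORT set to 0x1234 occupies the bytes {0x12, 0x34} in memory:
 *
 *   USHORT u;
 *   u.set (0x1234);   // writes big-endian
 *   uint16_t n = u;   // implicit conversion reads it back
 */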

/* 16-bit signed integer (SHORT) that describes a quantity in FUnits. */
typedef SHORT FWORD;

/* 16-bit unsigned integer (USHORT) that describes a quantity in FUnits. */
typedef USHORT UFWORD;

/* Date represented in number of seconds since 12:00 midnight, January 1,
 * 1904.  The value is represented as a signed 64-bit integer. */
struct LONGDATETIME
{
  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (likely (c->check_struct (this)));
  }
  protected:
  LONG major;
  ULONG minor;
  public:
  DEFINE_SIZE_STATIC (8);
};

/* Array of four uint8s (length = 32 bits) used to identify a script, language
 * system, feature, or baseline */
struct Tag : ULONG
{
  /* What the char* converters return is NOT nul-terminated.  Print using "%.4s" */
  inline operator const char* (void) const { return reinterpret_cast<const char *> (&this->v); }
  inline operator char* (void) { return reinterpret_cast<char *> (&this->v); }
  public:
  DEFINE_SIZE_STATIC (4);
};
DEFINE_NULL_DATA (Tag, "    ");

/* Glyph index number, same as uint16 (length = 16 bits) */
typedef USHORT GlyphID;

/* Script/language-system/feature index */
struct Index : USHORT {
  static const unsigned int NOT_FOUND_INDEX = 0xFFFF;
};
DEFINE_NULL_DATA (Index, "\xff\xff");

/* Offset to a table, same as uint16 (length = 16 bits), Null offset = 0x0000 */
struct Offset : USHORT
{
  inline bool is_null (void) const { return 0 == *this; }
  public:
  DEFINE_SIZE_STATIC (2);
};

/* LongOffset to a table, same as uint32 (length = 32 bits), Null offset = 0x00000000 */
struct LongOffset : ULONG
{
  inline bool is_null (void) const { return 0 == *this; }
  public:
  DEFINE_SIZE_STATIC (4);
};


/* CheckSum */
struct CheckSum : ULONG
{
  /* This is the reference implementation from the spec. */
  static inline uint32_t CalcTableChecksum (const ULONG *Table, uint32_t Length)
  {
    uint32_t Sum = 0L;
    const ULONG *EndPtr = Table + ((Length + 3) & ~3) / ULONG::static_size;

    while (Table < EndPtr)
      Sum += *Table++;
    return Sum;
  }

  /* Note: data should be 4-byte aligned and have 4-byte padding at the end. */
  inline void set_for_data (const void *data, unsigned int length)
  { set (CalcTableChecksum ((const ULONG *) data, length)); }

  public:
  DEFINE_SIZE_STATIC (4);
};
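
/* Example: checksumming a table's data (the buffer must be 4-byte aligned
 * and zero-padded to a multiple of four bytes):
 *
 *   CheckSum cs;
 *   cs.set_for_data (table_data, table_length);
 */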


/*
 * Version Numbers
 */

struct FixedVersion
{
  inline uint32_t to_int (void) const { return (major << 16) + minor; }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (c->check_struct (this));
  }

  USHORT major;
  USHORT minor;
  public:
  DEFINE_SIZE_STATIC (4);
};



/*
 * Template subclasses of Offset and LongOffset that do the dereferencing.
 * Use: (base+offset)
 */

template <typename OffsetType, typename Type>
struct GenericOffsetTo : OffsetType
{
  inline const Type& operator () (const void *base) const
  {
    unsigned int offset = *this;
    if (unlikely (!offset)) return Null(Type);
    return StructAtOffset<Type> (base, offset);
  }

  inline Type& serialize (hb_serialize_context_t *c, void *base)
  {
    Type *t = c->start_embed<Type> ();
    this->set ((char *) t - (char *) base); /* TODO(serialize) Overflow? */
    return *t;
  }

  inline bool sanitize (hb_sanitize_context_t *c, void *base) {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return TRACE_RETURN (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return TRACE_RETURN (true);
    Type &obj = StructAtOffset<Type> (base, offset);
    return TRACE_RETURN (likely (obj.sanitize (c)) || neuter (c));
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, void *base, T user_data) {
    TRACE_SANITIZE (this);
    if (unlikely (!c->check_struct (this))) return TRACE_RETURN (false);
    unsigned int offset = *this;
    if (unlikely (!offset)) return TRACE_RETURN (true);
    Type &obj = StructAtOffset<Type> (base, offset);
    return TRACE_RETURN (likely (obj.sanitize (c, user_data)) || neuter (c));
  }

  inline bool try_set (hb_sanitize_context_t *c, const OffsetType &v) {
    if (c->may_edit (this, this->static_size)) {
      this->set (v);
      return true;
    }
    return false;
  }
  /* Set the offset to Null */
  inline bool neuter (hb_sanitize_context_t *c) {
    if (c->may_edit (this, this->static_size)) {
      this->set (0); /* 0 is Null offset */
      return true;
    }
    return false;
  }
};
template <typename Base, typename OffsetType, typename Type>
inline const Type& operator + (const Base &base, const GenericOffsetTo<OffsetType, Type> &offset) { return offset (base); }
template <typename Base, typename OffsetType, typename Type>
inline Type& operator + (Base &base, GenericOffsetTo<OffsetType, Type> &offset) { return offset (base); }

template <typename Type>
struct OffsetTo : GenericOffsetTo<Offset, Type> {};

template <typename Type>
struct LongOffsetTo : GenericOffsetTo<LongOffset, Type> {};
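
/* Example: a struct member of type OffsetTo<Type> is dereferenced relative
 * to a base, usually the enclosing struct itself (hypothetical member
 * `coverage`):
 *
 *   const MyCoverage &cov = this+coverage;  // Null(MyCoverage) if the offset is 0
 */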


/*
 * Array Types
 */

template <typename LenType, typename Type>
struct GenericArrayOf
{
  const Type *sub_array (unsigned int start_offset, unsigned int *pcount /* IN/OUT */) const
  {
    unsigned int count = len;
    if (unlikely (start_offset > count))
      count = 0;
    else
      count -= start_offset;
    count = MIN (count, *pcount);
    *pcount = count;
    return array + start_offset;
  }

  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= len)) return Null(Type);
    return array[i];
  }
  inline Type& operator [] (unsigned int i)
  {
    return array[i];
  }
  inline unsigned int get_size (void) const
  { return len.static_size + len * Type::static_size; }

  inline bool serialize (hb_serialize_context_t *c,
			 unsigned int items_len)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (!c->extend_min (*this))) return TRACE_RETURN (false);
    len.set (items_len); /* TODO(serialize) Overflow? */
    if (unlikely (!c->extend (*this))) return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }

  inline bool serialize (hb_serialize_context_t *c,
			 Supplier<Type> &items,
			 unsigned int items_len)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (!serialize (c, items_len))) return TRACE_RETURN (false);
    for (unsigned int i = 0; i < items_len; i++)
      array[i] = items[i];
    items.advance (items_len);
    return TRACE_RETURN (true);
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), ie. they do not
     * reference other structs via offsets.
     */
    (void) (false && array[0].sanitize (c));

    return TRACE_RETURN (true);
  }
  inline bool sanitize (hb_sanitize_context_t *c, void *base) {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base)))
	return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, void *base, T user_data) {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);
    unsigned int count = len;
    for (unsigned int i = 0; i < count; i++)
      if (unlikely (!array[i].sanitize (c, base, user_data)))
	return TRACE_RETURN (false);
    return TRACE_RETURN (true);
  }

  private:
  inline bool sanitize_shallow (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (c->check_struct (this) && c->check_array (this, Type::static_size, len));
  }

  public:
  LenType len;
  Type array[VAR];
  public:
  DEFINE_SIZE_ARRAY (sizeof (LenType), array);
};

/* An array with a USHORT number of elements. */
template <typename Type>
struct ArrayOf : GenericArrayOf<USHORT, Type> {};
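
/* Example: fetching a clamped slice of an ArrayOf without risking an
 * out-of-bounds read:
 *
 *   unsigned int count = requested;
 *   const Type *slice = arr.sub_array (start, &count);  // count is clamped on return
 */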

/* An array with a ULONG number of elements. */
template <typename Type>
struct LongArrayOf : GenericArrayOf<ULONG, Type> {};

/* Array of Offset's */
template <typename Type>
struct OffsetArrayOf : ArrayOf<OffsetTo<Type> > {};

/* Array of LongOffset's */
template <typename Type>
struct LongOffsetArrayOf : ArrayOf<LongOffsetTo<Type> > {};

/* LongArray of LongOffset's */
template <typename Type>
struct LongOffsetLongArrayOf : LongArrayOf<LongOffsetTo<Type> > {};

/* Array of offsets relative to the beginning of the array itself. */
template <typename Type>
struct OffsetListOf : OffsetArrayOf<Type>
{
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= this->len)) return Null(Type);
    return this+this->array[i];
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (OffsetArrayOf<Type>::sanitize (c, this));
  }
  template <typename T>
  inline bool sanitize (hb_sanitize_context_t *c, T user_data) {
    TRACE_SANITIZE (this);
    return TRACE_RETURN (OffsetArrayOf<Type>::sanitize (c, this, user_data));
  }
};


/* An array with a USHORT number of elements,
 * starting at second element. */
template <typename Type>
struct HeadlessArrayOf
{
  inline const Type& operator [] (unsigned int i) const
  {
    if (unlikely (i >= len || !i)) return Null(Type);
    return array[i-1];
  }
  inline unsigned int get_size (void) const
  { return len.static_size + (len ? len - 1 : 0) * Type::static_size; }

  inline bool serialize (hb_serialize_context_t *c,
			 Supplier<Type> &items,
			 unsigned int items_len)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (!c->extend_min (*this))) return TRACE_RETURN (false);
    len.set (items_len); /* TODO(serialize) Overflow? */
    if (unlikely (!items_len)) return TRACE_RETURN (true);
    if (unlikely (!c->extend (*this))) return TRACE_RETURN (false);
    for (unsigned int i = 0; i < items_len - 1; i++)
      array[i] = items[i];
    items.advance (items_len - 1);
    return TRACE_RETURN (true);
  }

  inline bool sanitize_shallow (hb_sanitize_context_t *c) {
    return c->check_struct (this)
	&& c->check_array (this, Type::static_size, len);
  }

  inline bool sanitize (hb_sanitize_context_t *c) {
    TRACE_SANITIZE (this);
    if (unlikely (!sanitize_shallow (c))) return TRACE_RETURN (false);

    /* Note: for structs that do not reference other structs,
     * we do not need to call their sanitize() as we already did
     * a bound check on the aggregate array size.  We just include
     * a small unreachable expression to make sure the structs
     * pointed to do have a simple sanitize(), ie. they do not
     * reference other structs via offsets.
     */
    (void) (false && array[0].sanitize (c));

    return TRACE_RETURN (true);
  }

  USHORT len;
  Type array[VAR];
  public:
  DEFINE_SIZE_ARRAY (sizeof (USHORT), array);
};


/* An array with sorted elements.  Supports binary searching. */
template <typename Type>
struct SortedArrayOf : ArrayOf<Type> {

  template <typename SearchType>
  inline int search (const SearchType &x) const
  {
    /* Hand-coded bsearch here since this is in the hot inner loop. */
    int min = 0, max = (int) this->len - 1;
    while (min <= max)
    {
      int mid = (min + max) / 2;
      int c = this->array[mid].cmp (x);
      if (c < 0)
	max = mid - 1;
      else if (c > 0)
	min = mid + 1;
      else
	return mid;
    }
    return -1;
  }
};
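
/* Example: looking up a value in a sorted array; returns the index, or -1
 * if absent (elements must provide a matching cmp ()):
 *
 *   int i = arr.search (42u);  // `arr` is a SortedArrayOf<USHORT>
 */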


} /* namespace OT */


#endif /* HB_OPEN_TYPE_PRIVATE_HH */