gfx/harfbuzz/src/hb-ot-map-private.hh

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Fri, 16 Jan 2015 18:13:44 +0100
branch       TOR_BUG_9701
changeset    14:925c144e1f1f
permissions  -rw-r--r--

Integrate suggestion from review to improve consistency with existing code.

/*
 * Copyright © 2009,2010 Red Hat, Inc.
 * Copyright © 2010,2011,2012,2013 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Red Hat Author(s): Behdad Esfahbod
 * Google Author(s): Behdad Esfahbod
 */

#ifndef HB_OT_MAP_PRIVATE_HH
#define HB_OT_MAP_PRIVATE_HH

#include "hb-buffer-private.hh"


struct hb_ot_shape_plan_t;

static const hb_tag_t table_tags[2] = {HB_OT_TAG_GSUB, HB_OT_TAG_GPOS};

struct hb_ot_map_t
{
  friend struct hb_ot_map_builder_t;

  public:

  struct feature_map_t {
    hb_tag_t tag; /* should be first for our bsearch to work */
    unsigned int index[2]; /* GSUB/GPOS */
    unsigned int stage[2]; /* GSUB/GPOS */
    unsigned int shift;
    hb_mask_t mask;
    hb_mask_t _1_mask; /* mask for value=1, for quick access */
    unsigned int needs_fallback : 1;
    unsigned int auto_zwj : 1;

    static int cmp (const feature_map_t *a, const feature_map_t *b)
    { return a->tag < b->tag ? -1 : a->tag > b->tag ? 1 : 0; }
  };

  struct lookup_map_t {
    unsigned short index;
    unsigned short auto_zwj : 1;
    hb_mask_t mask;

    static int cmp (const lookup_map_t *a, const lookup_map_t *b)
    { return a->index < b->index ? -1 : a->index > b->index ? 1 : 0; }
  };

  typedef void (*pause_func_t) (const struct hb_ot_shape_plan_t *plan, hb_font_t *font, hb_buffer_t *buffer);

  struct stage_map_t {
    unsigned int last_lookup; /* Cumulative */
    pause_func_t pause_func;
  };


  hb_ot_map_t (void) { memset (this, 0, sizeof (*this)); }

  inline hb_mask_t get_global_mask (void) const { return global_mask; }

  inline hb_mask_t get_mask (hb_tag_t feature_tag, unsigned int *shift = NULL) const {
    const feature_map_t *map = features.bsearch (&feature_tag);
    if (shift) *shift = map ? map->shift : 0;
    return map ? map->mask : 0;
  }

  inline bool needs_fallback (hb_tag_t feature_tag) const {
    const feature_map_t *map = features.bsearch (&feature_tag);
    return map ? map->needs_fallback : false;
  }

  inline hb_mask_t get_1_mask (hb_tag_t feature_tag) const {
    const feature_map_t *map = features.bsearch (&feature_tag);
    return map ? map->_1_mask : 0;
  }
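
  /* Usage sketch (illustrative only): a shaper typically fetches a feature's
   * mask once and then tests it against each glyph's mask bits.  Assuming a
   * compiled map `map` and a buffer `buffer`:
   *
   *   hb_mask_t liga_mask = map.get_1_mask (HB_TAG ('l','i','g','a'));
   *   for (unsigned int i = 0; i < buffer->len; i++)
   *     if (buffer->info[i].mask & liga_mask)
   *       ;  // glyph i participates in 'liga'
   */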

  inline unsigned int get_feature_index (unsigned int table_index, hb_tag_t feature_tag) const {
    const feature_map_t *map = features.bsearch (&feature_tag);
    return map ? map->index[table_index] : HB_OT_LAYOUT_NO_FEATURE_INDEX;
  }

  inline unsigned int get_feature_stage (unsigned int table_index, hb_tag_t feature_tag) const {
    const feature_map_t *map = features.bsearch (&feature_tag);
    return map ? map->stage[table_index] : (unsigned int) -1;
  }

  inline void get_stage_lookups (unsigned int table_index, unsigned int stage,
                                 const struct lookup_map_t **plookups, unsigned int *lookup_count) const {
    if (unlikely (stage == (unsigned int) -1)) {
      *plookups = NULL;
      *lookup_count = 0;
      return;
    }
    assert (stage <= stages[table_index].len);
    unsigned int start = stage ? stages[table_index][stage - 1].last_lookup : 0;
    unsigned int end = stage < stages[table_index].len ? stages[table_index][stage].last_lookup : lookups[table_index].len;
    *plookups = &lookups[table_index][start];
    *lookup_count = end - start;
  }
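
  /* Rough sketch of how a stage-driven pass works (illustrative; the real
   * driver is the apply() template declared below and defined elsewhere):
   * each stage's lookups are applied in order, then its pause callback, if
   * any, runs before the next stage begins.
   *
   *   for (unsigned int stage = 0; stage < stages[table_index].len; stage++) {
   *     const lookup_map_t *stage_lookups;
   *     unsigned int count;
   *     get_stage_lookups (table_index, stage, &stage_lookups, &count);
   *     // ... apply stage_lookups[0..count) to the buffer ...
   *     if (stages[table_index][stage].pause_func)
   *       stages[table_index][stage].pause_func (plan, font, buffer);
   *   }
   */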

  HB_INTERNAL void collect_lookups (unsigned int table_index, hb_set_t *lookups) const;
  template <typename Proxy>
  HB_INTERNAL inline void apply (const Proxy &proxy,
                                 const struct hb_ot_shape_plan_t *plan, hb_font_t *font, hb_buffer_t *buffer) const;
  HB_INTERNAL void substitute (const struct hb_ot_shape_plan_t *plan, hb_font_t *font, hb_buffer_t *buffer) const;
  HB_INTERNAL void position (const struct hb_ot_shape_plan_t *plan, hb_font_t *font, hb_buffer_t *buffer) const;

  inline void finish (void) {
    features.finish ();
    for (unsigned int table_index = 0; table_index < 2; table_index++)
    {
      lookups[table_index].finish ();
      stages[table_index].finish ();
    }
  }

  public:
  hb_tag_t chosen_script[2];
  bool found_script[2];

  private:

  HB_INTERNAL void add_lookups (hb_face_t *face,
                                unsigned int table_index,
                                unsigned int feature_index,
                                hb_mask_t mask,
                                bool auto_zwj);

  hb_mask_t global_mask;

  hb_prealloced_array_t<feature_map_t, 8> features;
  hb_prealloced_array_t<lookup_map_t, 32> lookups[2]; /* GSUB/GPOS */
  hb_prealloced_array_t<stage_map_t, 4> stages[2]; /* GSUB/GPOS */
};

enum hb_ot_map_feature_flags_t {
  F_NONE         = 0x0000,
  F_GLOBAL       = 0x0001,
  F_HAS_FALLBACK = 0x0002,
  F_MANUAL_ZWJ   = 0x0004
};
/* Macro version for where const is desired. */
#define F_COMBINE(l,r) (hb_ot_map_feature_flags_t ((unsigned int) (l) | (unsigned int) (r)))
inline hb_ot_map_feature_flags_t
operator | (hb_ot_map_feature_flags_t l, hb_ot_map_feature_flags_t r)
{ return hb_ot_map_feature_flags_t ((unsigned int) l | (unsigned int) r); }
inline hb_ot_map_feature_flags_t
operator & (hb_ot_map_feature_flags_t l, hb_ot_map_feature_flags_t r)
{ return hb_ot_map_feature_flags_t ((unsigned int) l & (unsigned int) r); }
inline hb_ot_map_feature_flags_t
operator ~ (hb_ot_map_feature_flags_t r)
{ return hb_ot_map_feature_flags_t (~(unsigned int) r); }
inline hb_ot_map_feature_flags_t&
operator |= (hb_ot_map_feature_flags_t &l, hb_ot_map_feature_flags_t r)
{ l = l | r; return l; }
inline hb_ot_map_feature_flags_t&
operator &= (hb_ot_map_feature_flags_t &l, hb_ot_map_feature_flags_t r)
{ l = l & r; return l; }
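
/* The overloads above make the flags combinable with the usual bitwise
 * operators, while F_COMBINE covers contexts that want a constant expression.
 * For example (illustrative only):
 *
 *   hb_ot_map_feature_flags_t f = F_GLOBAL | F_MANUAL_ZWJ;
 *   static const hb_ot_map_feature_flags_t g = F_COMBINE (F_GLOBAL, F_HAS_FALLBACK);
 */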


struct hb_ot_map_builder_t
{
  public:

  HB_INTERNAL hb_ot_map_builder_t (hb_face_t *face_,
                                   const hb_segment_properties_t *props_);

  HB_INTERNAL void add_feature (hb_tag_t tag, unsigned int value,
                                hb_ot_map_feature_flags_t flags);

  inline void add_global_bool_feature (hb_tag_t tag)
  { add_feature (tag, 1, F_GLOBAL); }

  inline void add_gsub_pause (hb_ot_map_t::pause_func_t pause_func)
  { add_pause (0, pause_func); }
  inline void add_gpos_pause (hb_ot_map_t::pause_func_t pause_func)
  { add_pause (1, pause_func); }

  HB_INTERNAL void compile (struct hb_ot_map_t &m);
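
  /* Typical builder flow, as an illustrative sketch (the concrete call sites
   * live in the shapers, not in this header): construct with a face and
   * segment properties, register features and pauses, then compile into an
   * hb_ot_map_t.
   *
   *   hb_ot_map_builder_t builder (face, &props);
   *   builder.add_global_bool_feature (HB_TAG ('c','c','m','p'));
   *   builder.add_feature (HB_TAG ('l','i','g','a'), 1, F_GLOBAL);
   *   builder.add_gsub_pause (my_pause_callback);  // hypothetical callback
   *   builder.compile (map);                       // `map` is an hb_ot_map_t
   */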

  inline void finish (void) {
    feature_infos.finish ();
    for (unsigned int table_index = 0; table_index < 2; table_index++)
    {
      stages[table_index].finish ();
    }
  }

  private:

  struct feature_info_t {
    hb_tag_t tag;
    unsigned int seq; /* sequence#, used for stable sorting only */
    unsigned int max_value;
    hb_ot_map_feature_flags_t flags;
    unsigned int default_value; /* for non-global features, what should the unset glyphs take */
    unsigned int stage[2]; /* GSUB/GPOS */

    static int cmp (const feature_info_t *a, const feature_info_t *b)
    { return (a->tag != b->tag) ? (a->tag < b->tag ? -1 : 1) : (a->seq < b->seq ? -1 : 1); }
  };

  struct stage_info_t {
    unsigned int index;
    hb_ot_map_t::pause_func_t pause_func;
  };

  HB_INTERNAL void add_pause (unsigned int table_index, hb_ot_map_t::pause_func_t pause_func);

  public:

  hb_face_t *face;
  hb_segment_properties_t props;

  hb_tag_t chosen_script[2];
  bool found_script[2];
  unsigned int script_index[2], language_index[2];

  private:

  unsigned int current_stage[2]; /* GSUB/GPOS */
  hb_prealloced_array_t<feature_info_t, 32> feature_infos;
  hb_prealloced_array_t<stage_info_t, 8> stages[2]; /* GSUB/GPOS */
};



#endif /* HB_OT_MAP_PRIVATE_HH */
