1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/testing/xpcshell/node-http2/node_modules/http2-protocol/lib/compressor.js Wed Dec 31 06:09:35 2014 +0100 1.3 @@ -0,0 +1,1452 @@ 1.4 +// The implementation of the [HTTP/2 Header Compression][http2-compression] spec is separated from 1.5 +// the 'integration' part which handles HEADERS and PUSH_PROMISE frames. The compression itself is 1.6 +// implemented in the first part of the file, and consists of three classes: `HeaderTable`, 1.7 +// `HeaderSetDecompressor` and `HeaderSetCompressor`. The two latter classes are 1.8 +// [Transform Stream][node-transform] subclasses that operate in [object mode][node-objectmode]. 1.9 +// These transform chunks of binary data into `[name, value]` pairs and vice versa, and store their 1.10 +// state in `HeaderTable` instances. 1.11 +// 1.12 +// The 'integration' part is also implemented by two [Transform Stream][node-transform] subclasses 1.13 +// that operate in [object mode][node-objectmode]: the `Compressor` and the `Decompressor`. These 1.14 +// provide a layer between the [framer](framer.html) and the 1.15 +// [connection handling component](connection.html). 
// [node-transform]: http://nodejs.org/api/stream.html#stream_class_stream_transform
// [node-objectmode]: http://nodejs.org/api/stream.html#stream_new_stream_readable_options
// [http2-compression]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05

// Public interface of this module: the compression primitives (HeaderTable,
// HuffmanTable, HeaderSetCompressor, HeaderSetDecompressor) and the
// frame-level integration streams (Compressor, Decompressor), all defined
// below in this file (function declarations are hoisted, so assigning them
// to `exports` up here is safe).
exports.HeaderTable = HeaderTable;
exports.HuffmanTable = HuffmanTable;
exports.HeaderSetCompressor = HeaderSetCompressor;
exports.HeaderSetDecompressor = HeaderSetDecompressor;
exports.Compressor = Compressor;
exports.Decompressor = Decompressor;

var TransformStream = require('stream').Transform;
var assert = require('assert');
var util = require('util');

// Header compression
// ==================

// The HeaderTable class
// ---------------------

// The [Header Table] is a component used to associate headers to index values. It is basically an
// ordered list of `[name, value]` pairs, so it's implemented as a subclass of `Array`.
// In this implementation, the Header Table and the [Static Table] are handled as a single table.
// [Header Table]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-3.1.2
// [Static Table]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#appendix-B

// Factory for a header table. Arrays can't be subclassed in ES5, so the
// "instance" is a plain Array seeded with the static entries and then
// decorated with the extra bookkeeping state and the methods it needs.
function HeaderTable(log, limit) {
  var table = HeaderTable.staticTable.map(entryFromPair);

  table._log = log;
  table._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
  table._staticLength = table.length; // everything present at creation is static
  table._size = 0;                    // static entries don't count towards the size

  // Borrow the "methods" from the prototype, since `table` is not a real
  // HeaderTable instance.
  table._enforceLimit = HeaderTable.prototype._enforceLimit;
  table.add = HeaderTable.prototype.add;
  table.setSizeLimit = HeaderTable.prototype.setSizeLimit;

  return table;
}

// There are few more sets that are needed for the compression/decompression process that are all
// subsets of the Header Table, and are implemented as flags on header table entries:
//
// * [Reference Set][referenceset]: contains a group of headers used as a reference for the
//   differential encoding of a new set of headers. (`reference` flag)
// * Emitted headers: the headers that are already emitted as part of the current decompression
//   process (not part of the spec, `emitted` flag)
// * Headers to be kept: headers that should not be removed as the last step of the encoding
//   process (not part of the spec, `keep` flag)
//
// [referenceset]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-3.1.3
//
// Relations of the sets:
//
//     ,----------------------------------.
//     |           Header Table           |
//     |                                  |
//     |  ,----------------------------.  |
//     |  |       Reference Set        |  |
//     |  |                            |  |
//     |  |  ,---------.  ,---------.
//     |  |  |  Keep   |  | Emitted |  |  |
//     |  |  |         |  |         |  |  |
//     |  |  `---------'  `---------'  |  |
//     |  `----------------------------'  |
//     `----------------------------------'

// Creates a header-table entry from a `[name, value]` pair: a copy of the
// pair extended with the three set-membership flags (all initially false)
// and its cached size.
function entryFromPair(pair) {
  var entry = pair.slice();
  entry.reference = false;
  entry.emitted = false;
  entry.keep = false;
  entry._size = size(entry);
  return entry;
}

// The encoder decides how to update the header table and as such can control how much memory is
// used by the header table. To limit the memory requirements on the decoder side, the header table
// size is bounded.
//
// * The default header table size limit is 4096 bytes.
// * The size of an entry is defined as follows: the size of an entry is the sum of its name's
//   length in bytes, of its value's length in bytes and of 32 bytes.
// * The size of a header table is the sum of the size of its entries.
var DEFAULT_HEADER_TABLE_LIMIT = 4096;

// Size of an entry per the spec: UTF-8 byte length of name + value, plus 32
// bytes of overhead. `Buffer.byteLength` measures the encoded length directly
// instead of allocating a temporary Buffer just to read its `.length`.
function size(entry) {
  return Buffer.byteLength(entry[0] + entry[1], 'utf8') + 32;
}

// The `add(index, entry)` can be used to [manage the header table][tablemgmt]:
// [tablemgmt]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-3.3
//
// * it pushes the new `entry` at the beggining of the table
// * before doing such a modification, it has to be ensured that the header table size will stay
//   lower than or equal to the header table size limit. To achieve this, entries are evicted from
//   the end of the header table until the size of the header table is less than or equal to
//   `(this._limit - entry.size)`, or until the table is empty.
//       <---------- Index Address Space ---------->
//        <-- Header Table -->  <-- Static Table -->
//       +---+-----------+---+  +---+-----------+---+
//       | 0 |    ...    | k |  |k+1|    ...    | n |
//       +---+-----------+---+  +---+-----------+---+
//             ^                        |
//             |                        V
//      Insertion Point            Drop Point

// Evicts entries from the back of the dynamic part of the table until its
// size drops to `limit` (or the dynamic part is empty). Returns the evicted
// entries as a sparse array keyed by the position each entry occupied, so
// callers can still refer to them by their old index.
HeaderTable.prototype._enforceLimit = function _enforceLimit(limit) {
  var evicted = [];
  var cursor = this.length - this._staticLength;

  while (cursor > 0 && this._size > limit) {
    cursor -= 1;
    var victim = this.splice(cursor, 1)[0];
    this._size -= victim._size;
    evicted[cursor] = victim;
  }

  return evicted;
};

// Inserts `entry` at the front of the table after making room for it. An
// entry larger than the whole remaining budget is simply not added. Returns
// whatever `_enforceLimit` evicted while making room.
HeaderTable.prototype.add = function(entry) {
  var budget = this._limit - entry._size;
  var evicted = this._enforceLimit(budget);

  if (this._size <= budget) {
    this.unshift(entry);
    this._size += entry._size;
  }

  return evicted;
};

// The table size limit can be changed externally. In this case, the same
// eviction algorithm brings the table back under the new limit.
HeaderTable.prototype.setSizeLimit = function setSizeLimit(limit) {
  this._limit = limit;
  this._enforceLimit(this._limit);
};

// [The Static Table](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#appendix-B)
// ------------------
// [statictable]:http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#appendix-B

// The table is generated with feeding the table from the spec to the following sed command:
//
//     sed -re "s/\s*\| [0-9]+\s*\| ([^ ]*)/  [ '\1'/g" -e "s/\|\s([^ ]*)/, '\1'/g" -e 's/ \|/],/g'

HeaderTable.staticTable = [
  [ ':authority'                  , ''            ],
  [ ':method'                     , 'GET'         ],
  [ ':method'                     , 'POST'        ],
  [ ':path'                       , '/'           ],
  [ ':path'                       , '/index.html' ],
  [ ':scheme'                     , 'http'        ],
  [ ':scheme'                     , 'https'       ],
  [ ':status'                     , '200'         ],
  [ ':status'                     , '500'         ],
  [ ':status'                     , '404'         ],
  [ ':status'                     , '403'         ],
  [ ':status'                     , '400'         ],
  [ ':status'                     , '401'         ],
  [ 'accept-charset'              , ''            ],
  [ 'accept-encoding'             , ''            ],
  [ 'accept-language'             , ''            ],
  [ 'accept-ranges'               , ''            ],
  [ 'accept'                      , ''            ],
  [ 'access-control-allow-origin' , ''            ],
  [ 'age'                         , ''            ],
  [ 'allow'                       , ''            ],
  [ 'authorization'               , ''            ],
  [ 'cache-control'               , ''            ],
  [ 'content-disposition'         , ''            ],
  [ 'content-encoding'            , ''            ],
  [ 'content-language'            , ''            ],
  [ 'content-length'              , ''            ],
  [ 'content-location'            , ''            ],
  [ 'content-range'               , ''            ],
  [ 'content-type'                , ''            ],
  [ 'cookie'                      , ''            ],
  [ 'date'                        , ''            ],
  [ 'etag'                        , ''            ],
  [ 'expect'                      , ''            ],
  [ 'expires'                     , ''            ],
  [ 'from'                        , ''            ],
  [ 'host'                        , ''            ],
  [ 'if-match'                    , ''            ],
  [ 'if-modified-since'           , ''            ],
  [ 'if-none-match'               , ''            ],
  [ 'if-range'                    , ''            ],
  [ 'if-unmodified-since'         , ''            ],
  [ 'last-modified'               , ''            ],
  [ 'link'                        , ''            ],
  [ 'location'                    , ''            ],
  [ 'max-forwards'                , ''            ],
  [ 'proxy-authenticate'          , ''            ],
  [ 'proxy-authorization'         , ''            ],
  [ 'range'                       , ''            ],
  [ 'referer'                     , ''            ],
  [ 'refresh'                     , ''            ],
  [ 'retry-after'                 , ''            ],
  [ 'server'                      , ''            ],
  [ 'set-cookie'                  , ''            ],
  [ 'strict-transport-security'   , ''            ],
  [ 'transfer-encoding'           , ''            ],
  [ 'user-agent'                  , ''            ],
  [ 'vary'                        , ''            ],
  [ 'via'                         , ''            ],
  [ 'www-authenticate'            , ''            ]
];
// The HeaderSetDecompressor class
// -------------------------------

// A `HeaderSetDecompressor` instance is a transform stream that decompresses
// *a single header set*: its input is a stream of binary data chunks and its
// output is a stream of `[name, value]` pairs.
//
// It is not a proper streaming decompressor yet: the input is buffered until
// the end of the stream and the whole header block is processed at once in
// `_flush`.

util.inherits(HeaderSetDecompressor, TransformStream);
function HeaderSetDecompressor(log, table) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });
  this._table = table;
  this._chunks = [];
}

// Implementation of the TransformStream virtual method [`_transform`][_transform]:
// it only collects the incoming binary chunks; the real work is deferred to `_flush`.
// [_transform]: http://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetDecompressor.prototype._transform = function _transform(data, encoding, done) {
  this._chunks.push(data);
  done();
};

// `_execute(rep)` executes the given [header representation][representation].
// [representation]: http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-3.1.4

// The *JavaScript object representation* of a header representation:
//
//     {
//       name: String || Integer,  // string literal or index
//       value: String || Integer, // string literal or index
//       index: Boolean            // with or without indexing
//     }
//
// *Important:* to ease the indexing of the header table, indexes start at 0 instead of 1.
//
// Examples:
//
//     Indexed:
//       { name: 2  , value: 2  , index: false }
//       { name: -1 , value: -1 , index: false } // reference set emptying
//     Literal:
//       { name: 2  , value: 'X', index: false } // without indexing
//       { name: 2  , value: 'Y', index: true  } // with indexing
//       { name: 'A', value: 'Z', index: true  } // with indexing, literal name
HeaderSetDecompressor.prototype._execute = function _execute(rep) {
  this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
                  'Executing header representation');

  var entry, pair;

  // * An _indexed representation_ with an index value of 0 (in our representation, it means -1)
  //   entails the following actions:
  //   * If the following byte starts with a set bit, the reference set is emptied.
  //   * Else, reduce the size of the header table to the value encoded with a 7-bit prefix
  // * An _indexed representation_ corresponding to an entry _present_ in the reference set
  //   entails the following actions:
  //   * The entry is removed from the reference set.
  // * An _indexed representation_ corresponding to an entry _not present_ in the reference set
  //   entails the following actions:
  //   * If referencing an element of the static table:
  //     * The header field corresponding to the referenced entry is emitted
  //     * The referenced static entry is added to the header table
  //     * A reference to this new header table entry is added to the reference set (except if
  //       this new entry didn't fit in the header table)
  //   * If referencing an element of the header table:
  //     * The header field corresponding to the referenced entry is emitted
  //     * The referenced header table entry is added to the reference set
  if (typeof rep.value === 'number') {
    var index = rep.value;
    entry = this._table[index];

    if (index == -1) {
      if (rep.index) {
        // Empty the reference set: clear the flag on every table entry.
        for (var i = 0; i < this._table.length; i++) {
          this._table[i].reference = false;
        }
      } else {
        // Set a new maximum size
        // NOTE(review): `setTableSizeLimit` is not defined on
        // HeaderSetDecompressor anywhere in this part of the file — the
        // header table's own method is `setSizeLimit`. Presumably it is
        // wired up elsewhere in the file; confirm, otherwise this path
        // throws a TypeError.
        this.setTableSizeLimit(rep.name);
      }
    }

    else if (entry.reference) {
      entry.reference = false;
    }

    else {
      // Emit a copy of the referenced pair (without the flag properties).
      pair = entry.slice();
      this.push(pair);

      // References into the static region (indexes past the dynamic part of
      // the table) are first copied into the header table.
      if (index >= this._table.length - this._table._staticLength) {
        entry = entryFromPair(pair);
        this._table.add(entry);
      }

      entry.reference = true;
      entry.emitted = true;
    }
  }

  // * A _literal representation_ that is _not added_ to the header table entails the following
  //   action:
  //   * The header is emitted.
  // * A _literal representation_ that is _added_ to the header table entails the following further
  //   actions:
  //   * The header is added to the header table.
  //   * The new entry is added to the reference set.
  else {
    if (typeof rep.name === 'number') {
      // Literal with indexed name: resolve the name through the table.
      pair = [this._table[rep.name][0], rep.value];
    } else {
      pair = [rep.name, rep.value];
    }

    if (rep.index) {
      entry = entryFromPair(pair);
      entry.reference = true;
      entry.emitted = true;
      this._table.add(entry);
    }

    this.push(pair);
  }
};

// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. The whole decompressing process is done in `_flush`. It gets called when
// the input stream is over.
// [_flush]: http://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetDecompressor.prototype._flush = function _flush(callback) {
  // `concat` and `HeaderSetDecompressor.header` are defined later in this
  // file (outside this excerpt).
  var buffer = concat(this._chunks);

  // * processes the header representations
  buffer.cursor = 0;
  while (buffer.cursor < buffer.length) {
    this._execute(HeaderSetDecompressor.header(buffer));
  }

  // * [emits the reference set](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-3.2.2):
  //   every referenced entry not already emitted during this header set is
  //   pushed now, and the `emitted` flags are reset for the next header set.
  for (var index = 0; index < this._table.length; index++) {
    var entry = this._table[index];
    if (entry.reference && !entry.emitted) {
      this.push(entry.slice());
    }
    entry.emitted = false;
  }

  callback();
};
//
// The compression algorithm is (intentionally) not specified by the spec. Therefore, the current
// compression algorithm can probably be improved in the future.

util.inherits(HeaderSetCompressor, TransformStream);
function HeaderSetCompressor(log, table) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });
  this._table = table;

  // Bind `push` so it can be handed around as a standalone callback.
  this.push = TransformStream.prototype.push.bind(this);
}

// Serializes one header representation and pushes the resulting binary
// chunks to the output. Pre-serialized chunks on `rep.chunks` are reused;
// otherwise `HeaderSetCompressor.header` (defined later in this file)
// produces them.
HeaderSetCompressor.prototype.send = function send(rep) {
  this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
                  'Emitting header representation');

  if (!rep.chunks) {
    rep.chunks = HeaderSetCompressor.header(rep);
  }
  for (var i = 0; i < rep.chunks.length; i++) {
    this.push(rep.chunks[i]);
  }
};

// `_transform` is the implementation of the [corresponding virtual function][_transform] of the
// TransformStream class.
// It processes the input headers one by one:
// [_transform]: http://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetCompressor.prototype._transform = function _transform(pair, encoding, callback) {
  var name = pair[0].toLowerCase();
  var value = pair[1];
  var entry, rep;

  // * tries to find full (name, value) or name match in the header table
  var nameMatch = -1, fullMatch = -1;
  for (var droppedIndex = 0; droppedIndex < this._table.length; droppedIndex++) {
    entry = this._table[droppedIndex];
    if (entry[0] === name) {
      if (entry[1] === value) {
        fullMatch = droppedIndex;
        break;
      } else if (nameMatch === -1) {
        nameMatch = droppedIndex;
      }
    }
  }

  // * if there's full match, it will be an indexed representation (or more than one) depending
  //   on its presence in the reference, the emitted and the keep set:
  //
  //   * If the entry is outside the reference set, then a single indexed representation puts the
  //     entry into it and emits the header. Note that if the matched entry is in the static table,
  //     then it has to be added to the header table.
  //
  //   * If it's already in the keep set, then 4 indexed representations are needed:
  //
  //     1. removes it from the reference set
  //     2. puts it back in the reference set and emits the header once
  //     3. removes it again
  //     4. puts it back and emits it again for the second time
  //
  //     It won't be emitted at the end of the decoding process since it's now in the emitted set.
  //
  //   * If it's in the emitted set, then 2 indexed representations are needed:
  //
  //     1. removes it from the reference set
  //     2. puts it back in the reference set and emits the header once
  //
  //   * If it's in the reference set, but outside the keep set and the emitted set, then this
  //     header is common with the previous header set, and is still untouched. We mark it to keep
  //     in the reference set (that means don't remove at the end of the encoding process).
  if (fullMatch !== -1) {
    rep = { name: fullMatch, value: fullMatch, index: false };

    if (!entry.reference) {
      if (fullMatch >= this._table.length - this._table._staticLength) {
        // NOTE(review): the entry added to the table is built from the
        // original `pair`, while matching used the lowercased `name` —
        // presumably header names are already lowercase by this point;
        // confirm with the callers.
        entry = entryFromPair(pair);
        this._table.add(entry);
      }
      this.send(rep);
      entry.reference = true;
      entry.emitted = true;
    }

    else if (entry.keep) {
      this.send(rep);
      this.send(rep);
      this.send(rep);
      this.send(rep);
      entry.keep = false;
      entry.emitted = true;
    }

    else if (entry.emitted) {
      this.send(rep);
      this.send(rep);
    }

    else {
      entry.keep = true;
    }
  }

  // * otherwise, it will be a literal representation (with a name index if there's a name match)
  else {
    entry = entryFromPair(pair);
    entry.emitted = true;

    // Only index entries that take up less than half of the table budget.
    var indexing = (entry._size < this._table._limit / 2);

    if (indexing) {
      entry.reference = true;
      var droppedEntries = this._table.add(entry);
      // `droppedEntries` is sparse (keyed by each evicted entry's former
      // position), so `for...in` deliberately visits only the present keys.
      for (droppedIndex in droppedEntries) {
        droppedIndex = Number(droppedIndex) // no semicolon in the original; ASI applies
        var dropped = droppedEntries[droppedIndex];
        if (dropped.keep) {
          // Two identical indexed representations are emitted for evicted
          // entries that were still marked "keep". NOTE(review): presumably
          // this makes the decoder emit the header before the entry is
          // evicted on its side too — confirm against the draft.
          rep = { name: droppedIndex, value: droppedIndex, index: false };
          this.send(rep);
          this.send(rep);
        }
      }
    }

    this.send({ name: (nameMatch !== -1) ? nameMatch : name, value: value, index: indexing });
  }

  callback();
};

// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
// TransformStream class. It gets called when there's no more header to compress. The final step:
// [_flush]: http://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetCompressor.prototype._flush = function _flush(callback) {
  // * removing entries from the reference set that are not marked to be kept or emitted (a
  //   single indexed representation removes a referenced entry), and resetting the per-header-set
  //   `keep`/`emitted` flags on every entry
  for (var index = 0; index < this._table.length; index++) {
    var entry = this._table[index];
    if (entry.reference && !entry.keep && !entry.emitted) {
      this.send({ name: index, value: index, index: false });
      entry.reference = false;
    }
    entry.keep = false;
    entry.emitted = false;
  }

  callback();
};

// [Detailed Format](http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-05#section-4)
// -----------------

// ### Integer representation ###
//
// The algorithm to represent an integer I is as follows:
//
// 1. If I < 2^N - 1, encode I on N bits
// 2. Else, encode 2^N - 1 on N bits and do the following steps:
//    1. Set I to (I - (2^N - 1)) and Q to 1
//    2. While Q > 0
//       1. Compute Q and R, quotient and remainder of I divided by 2^7
//       2. If Q is strictly greater than 0, write one 1 bit; otherwise, write one 0 bit
//       3. Encode R on the next 7 bits
//       4. I = Q
// Encodes the integer `I` with an N-bit prefix, returning the encoding
// wrapped in a single-element array of Buffers (the shape the rest of the
// serialization pipeline expects).
HeaderSetCompressor.integer = function writeInteger(I, N) {
  var max = Math.pow(2, N) - 1;

  // The value fits into the prefix: a single byte is enough.
  if (I < max) {
    return [new Buffer([I])];
  }

  // Otherwise the prefix is filled with 1s (unless N is 0, when there is no
  // prefix byte) and the remainder follows in 7-bit groups, least significant
  // first, with the continuation bit (0x80) set on all but the last group.
  var octets = [];
  if (N !== 0) {
    octets.push(max);
  }

  var remainder = I - max;
  do {
    var next = Math.floor(remainder / 128);
    var septet = remainder % 128;
    octets.push(next > 0 ? septet + 128 : septet);
    remainder = next;
  } while (remainder > 0);

  return [new Buffer(octets)];
};

// The inverse algorithm:
//
// 1. Set I to the number coded on the lower N bits of the first byte
// 2. If I is smaller than 2^N - 1 then return I
// 2. Else the number is encoded on more than one byte, so do the following steps:
//    1. Set M to 0
//    2. While returning with I
//       1. Let B be the next byte (the first byte if N is 0)
//       2. Read out the lower 7 bits of B and multiply it with 2^M
//       3. Increase I with this number
//       4. Increase M by 7
//       5. Return I if the most significant bit of B is 0
// Reads an N-bit-prefix coded integer from `buffer`, starting at
// `buffer.cursor`, and advances the cursor past the consumed bytes. When N
// is 0 there is no prefix byte, so the continuation loop starts on the
// current byte.
HeaderSetDecompressor.integer = function readInteger(buffer, N) {
  var limit = Math.pow(2,N) - 1;

  var I = buffer[buffer.cursor] & limit;
  if (N !== 0) {
    buffer.cursor += 1;
  }

  // A prefix filled with 1s means the value continues in the following
  // bytes: 7 value bits per byte, least significant group first, until a
  // byte with a cleared most significant bit is reached.
  if (I === limit) {
    var M = 0;
    do {
      I += (buffer[buffer.cursor] & 127) << M;
      M += 7;
      buffer.cursor += 1;
    } while (buffer[buffer.cursor - 1] & 128);
  }

  return I;
};

// ### Huffman Encoding ###

// Builds the lookup structures for one Huffman code table. `table` is an
// array of '0'/'1' strings indexed by byte value; entry 256 is the EOS
// symbol used for padding in `encode`. Precomputes:
//
// * `tree`: a binary tree for decoding — a leaf is a one-element array
//   holding the decoded symbol index
// * `codes`: each symbol's code as an integer
// * `lengths`: each code's length in bits
function HuffmanTable(table) {
  function createTree(codes, position) {
    if (codes.length === 1) {
      return [table.indexOf(codes[0])];
    }

    else {
      position = position || 0;
      var zero = [];
      var one = [];
      for (var i = 0; i < codes.length; i++) {
        var string = codes[i];
        if (string[position] === '0') {
          zero.push(string);
        } else {
          one.push(string);
        }
      }
      return [createTree(zero, position + 1), createTree(one, position + 1)];
    }
  }

  this.tree = createTree(table);

  this.codes = table.map(function(bits) {
    return parseInt(bits, 2);
  });
  this.lengths = table.map(function(bits) {
    return bits.length;
  });
}

// Huffman-encodes `buffer` byte by byte, packing the variable-length codes
// into octets MSB first. A final partial octet is padded with the most
// significant bits of the EOS code (entry 256).
HuffmanTable.prototype.encode = function encode(buffer) {
  var result = [];  // the encoded octets
  var space = 8;    // free bits remaining in the last octet of `result`

  // ORs `data` into the current octet, or starts a new one when it is full.
  function add(data) {
    if (space === 8) {
      result.push(data);
    } else {
      result[result.length - 1] |= data;
    }
  }

  for (var i = 0; i < buffer.length; i++) {
    var byte = buffer[i];
    var code = this.codes[byte];
    var length = this.lengths[byte];

    while (length !== 0) {
      if (space >= length) {
        // The whole remaining code fits into the current octet.
        add(code << (space - length));
        code = 0;
        space -= length;
        length = 0;
      } else {
        // Only the top `space` bits fit; emit them and carry on with the
        // rest of the code in the next octet.
        var shift = length - space;
        var msb = code >> shift;
        add(msb);
        code -= msb << shift;
        length -= space;
        space = 0;
      }

      if (space === 0) {
        space = 8;
      }
    }
  }

  if (space !== 8) {
    add(this.codes[256] >> (this.lengths[256] - space));
  }

  return new Buffer(result);
}

// Huffman-decodes `buffer` bit by bit by walking `this.tree`; every time a
// leaf is reached the decoded symbol is emitted and the walk restarts at the
// root. NOTE(review): trailing EOS padding bits are silently discarded and
// never validated — presumably acceptable for this draft; confirm.
HuffmanTable.prototype.decode = function decode(buffer) {
  var result = [];
  var subtree = this.tree;

  for (var i = 0; i < buffer.length; i++) {
    var byte = buffer[i];

    for (var j = 0; j < 8; j++) {
      var bit = (byte & 128) ? 1 : 0;
      byte = byte << 1;

      subtree = subtree[bit];
      if (subtree.length === 1) {
        result.push(subtree[0]);
        subtree = this.tree;
      }
    }
  }

  return new Buffer(result);
}

// The initializer arrays for the Huffman tables are generated with feeding the tables from the
// spec to this sed command:
//
//     sed -e "s/^.* [|]//g" -e "s/|//g" -e "s/ .*//g" -e "s/^/  '/g" -e "s/$/',/g"

// Code table indexed by byte value (0-255), with the EOS symbol at index 256.
HuffmanTable.huffmanTable = new HuffmanTable([
  // 0-31: control characters
  '111111111111111111110111010', '111111111111111111110111011', '111111111111111111110111100', '111111111111111111110111101',
  '111111111111111111110111110', '111111111111111111110111111', '111111111111111111111000000', '111111111111111111111000001',
  '111111111111111111111000010', '111111111111111111111000011', '111111111111111111111000100', '111111111111111111111000101',
  '111111111111111111111000110', '111111111111111111111000111', '111111111111111111111001000', '111111111111111111111001001',
  '111111111111111111111001010', '111111111111111111111001011', '111111111111111111111001100', '111111111111111111111001101',
  '111111111111111111111001110', '111111111111111111111001111', '111111111111111111111010000', '111111111111111111111010001',
  '111111111111111111111010010', '111111111111111111111010011', '111111111111111111111010100', '111111111111111111111010101',
  '111111111111111111111010110', '111111111111111111111010111', '111111111111111111111011000', '111111111111111111111011001',
  // 32-63: ' ' through '?'
  '11101000', '111111111100', '11111111111010', '111111111111100',
  '111111111111101', '100100', '1101110', '111111111111110',
  '11111111010', '11111111011', '1111111010', '11111111100',
  '11101001', '100101', '00100', '0000',
  '00101', '00110', '00111', '100110',
  '100111', '101000', '101001', '101010',
  '101011', '101100', '111101100', '11101010',
  '111111111111111110', '101101', '11111111111111100', '111101101',
  // 64-95: '@' through '_'
  '11111111111011', '1101111', '11101011', '11101100',
  '11101101', '11101110', '1110000', '111101110',
  '111101111', '111110000', '111110001', '1111111011',
  '111110010', '11101111', '111110011', '111110100',
  '111110101', '111110110', '111110111', '11110000',
  '11110001', '111111000', '111111001', '111111010',
  '111111011', '111111100', '1111111100', '11111111111100',
  '111111111111111111111011010', '1111111111100', '11111111111101', '101110',
  // 96-126: '`' through '~'
  '1111111111111111110', '01000', '101111', '01001',
  '110000', '0001', '110001', '110010',
  '110011', '01010', '1110001', '1110010',
  '01011', '110100', '01100', '01101',
  '01110', '11110010', '01111', '10000',
  '10001', '110101', '1110011', '110110',
  '11110011', '11110100', '11110101', '11111111111111101',
  '11111111101', '11111111111111110', '111111111101',
  // 127-163
  '111111111111111111111011011', '111111111111111111111011100', '111111111111111111111011101', '111111111111111111111011110',
  '111111111111111111111011111', '111111111111111111111100000', '111111111111111111111100001', '111111111111111111111100010',
  '111111111111111111111100011', '111111111111111111111100100', '111111111111111111111100101', '111111111111111111111100110',
  '111111111111111111111100111', '111111111111111111111101000', '111111111111111111111101001', '111111111111111111111101010',
  '111111111111111111111101011', '111111111111111111111101100', '111111111111111111111101101', '111111111111111111111101110',
  '111111111111111111111101111', '111111111111111111111110000', '111111111111111111111110001', '111111111111111111111110010',
  '111111111111111111111110011', '111111111111111111111110100', '111111111111111111111110101', '111111111111111111111110110',
  '111111111111111111111110111', '111111111111111111111111000', '111111111111111111111111001', '111111111111111111111111010',
  '111111111111111111111111011', '111111111111111111111111100', '111111111111111111111111101', '111111111111111111111111110',
  '111111111111111111111111111',
  // 164-255 plus the EOS symbol (index 256)
  '11111111111111111110000000', '11111111111111111110000001', '11111111111111111110000010', '11111111111111111110000011',
  '11111111111111111110000100', '11111111111111111110000101', '11111111111111111110000110', '11111111111111111110000111',
  '11111111111111111110001000', '11111111111111111110001001', '11111111111111111110001010', '11111111111111111110001011',
  '11111111111111111110001100', '11111111111111111110001101', '11111111111111111110001110', '11111111111111111110001111',
  '11111111111111111110010000', '11111111111111111110010001', '11111111111111111110010010', '11111111111111111110010011',
  '11111111111111111110010100', '11111111111111111110010101', '11111111111111111110010110', '11111111111111111110010111',
  '11111111111111111110011000', '11111111111111111110011001', '11111111111111111110011010', '11111111111111111110011011',
  '11111111111111111110011100', '11111111111111111110011101', '11111111111111111110011110', '11111111111111111110011111',
  '11111111111111111110100000', '11111111111111111110100001', '11111111111111111110100010', '11111111111111111110100011',
  '11111111111111111110100100', '11111111111111111110100101', '11111111111111111110100110', '11111111111111111110100111',
  '11111111111111111110101000', '11111111111111111110101001', '11111111111111111110101010', '11111111111111111110101011',
  '11111111111111111110101100', '11111111111111111110101101', '11111111111111111110101110', '11111111111111111110101111',
  '11111111111111111110110000', '11111111111111111110110001', '11111111111111111110110010', '11111111111111111110110011',
  '11111111111111111110110100', '11111111111111111110110101', '11111111111111111110110110', '11111111111111111110110111',
  '11111111111111111110111000', '11111111111111111110111001', '11111111111111111110111010', '11111111111111111110111011',
  '11111111111111111110111100', '11111111111111111110111101', '11111111111111111110111110', '11111111111111111110111111',
  '11111111111111111111000000', '11111111111111111111000001', '11111111111111111111000010', '11111111111111111111000011',
  '11111111111111111111000100', '11111111111111111111000101', '11111111111111111111000110', '11111111111111111111000111',
  '11111111111111111111001000', '11111111111111111111001001', '11111111111111111111001010', '11111111111111111111001011',
  '11111111111111111111001100', '11111111111111111111001101', '11111111111111111111001110', '11111111111111111111001111',
  '11111111111111111111010000', '11111111111111111111010001', '11111111111111111111010010', '11111111111111111111010011',
  '11111111111111111111010100', '11111111111111111111010101', '11111111111111111111010110', '11111111111111111111010111',
  '11111111111111111111011000', '11111111111111111111011001', '11111111111111111111011010', '11111111111111111111011011',
  '11111111111111111111011100'
]);

// ### String literal representation ###
//
// Literal **strings** can represent header names or header values.
There's two variant of the 1.978 +// string encoding: 1.979 +// 1.980 +// String literal with Huffman encoding: 1.981 +// 1.982 +// 0 1 2 3 4 5 6 7 1.983 +// +---+---+---+---+---+---+---+---+ 1.984 +// | 1 | Value Length Prefix (7) | 1.985 +// +---+---+---+---+---+---+---+---+ 1.986 +// | Value Length (0-N bytes) | 1.987 +// +---+---+---+---+---+---+---+---+ 1.988 +// ... 1.989 +// +---+---+---+---+---+---+---+---+ 1.990 +// | Huffman Encoded Data |Padding| 1.991 +// +---+---+---+---+---+---+---+---+ 1.992 +// 1.993 +// String literal without Huffman encoding: 1.994 +// 1.995 +// 0 1 2 3 4 5 6 7 1.996 +// +---+---+---+---+---+---+---+---+ 1.997 +// | 0 | Value Length Prefix (7) | 1.998 +// +---+---+---+---+---+---+---+---+ 1.999 +// | Value Length (0-N bytes) | 1.1000 +// +---+---+---+---+---+---+---+---+ 1.1001 +// ... 1.1002 +// +---+---+---+---+---+---+---+---+ 1.1003 +// | Field Bytes Without Encoding | 1.1004 +// +---+---+---+---+---+---+---+---+ 1.1005 + 1.1006 +HeaderSetCompressor.string = function writeString(str) { 1.1007 + str = new Buffer(str, 'utf8'); 1.1008 + 1.1009 + var huffman = HuffmanTable.huffmanTable.encode(str); 1.1010 + if (huffman.length < str.length) { 1.1011 + var length = HeaderSetCompressor.integer(huffman.length, 7) 1.1012 + length[0][0] |= 128; 1.1013 + return length.concat(huffman); 1.1014 + } 1.1015 + 1.1016 + else { 1.1017 + length = HeaderSetCompressor.integer(str.length, 7) 1.1018 + return length.concat(str); 1.1019 + } 1.1020 +}; 1.1021 + 1.1022 +HeaderSetDecompressor.string = function readString(buffer) { 1.1023 + var huffman = buffer[buffer.cursor] & 128; 1.1024 + var length = HeaderSetDecompressor.integer(buffer, 7); 1.1025 + var encoded = buffer.slice(buffer.cursor, buffer.cursor + length); 1.1026 + buffer.cursor += length; 1.1027 + return (huffman ? 
HuffmanTable.huffmanTable.decode(encoded) : encoded).toString('utf8'); 1.1028 +}; 1.1029 + 1.1030 +// ### Header represenations ### 1.1031 + 1.1032 +// The JavaScript object representation is described near the 1.1033 +// `HeaderSetDecompressor.prototype._execute()` method definition. 1.1034 +// 1.1035 +// **All binary header representations** start with a prefix signaling the representation type and 1.1036 +// an index represented using prefix coded integers: 1.1037 +// 1.1038 +// 0 1 2 3 4 5 6 7 1.1039 +// +---+---+---+---+---+---+---+---+ 1.1040 +// | 1 | Index (7+) | Indexed Representation 1.1041 +// +---+---------------------------+ 1.1042 +// 1.1043 +// 0 1 2 3 4 5 6 7 1.1044 +// +---+---+---+---+---+---+---+---+ 1.1045 +// | 0 | 1 | Index (6+) | 1.1046 +// +---+---+---+-------------------+ Literal w/o Indexing 1.1047 +// | Value Length (8+) | 1.1048 +// +-------------------------------+ w/ Indexed Name 1.1049 +// | Value String (Length octets) | 1.1050 +// +-------------------------------+ 1.1051 +// 1.1052 +// 0 1 2 3 4 5 6 7 1.1053 +// +---+---+---+---+---+---+---+---+ 1.1054 +// | 0 | 1 | 0 | 1.1055 +// +---+---+---+-------------------+ 1.1056 +// | Name Length (8+) | 1.1057 +// +-------------------------------+ Literal w/o Indexing 1.1058 +// | Name String (Length octets) | 1.1059 +// +-------------------------------+ w/ New Name 1.1060 +// | Value Length (8+) | 1.1061 +// +-------------------------------+ 1.1062 +// | Value String (Length octets) | 1.1063 +// +-------------------------------+ 1.1064 +// 1.1065 +// 0 1 2 3 4 5 6 7 1.1066 +// +---+---+---+---+---+---+---+---+ 1.1067 +// | 0 | 0 | Index (6+) | 1.1068 +// +---+---+---+-------------------+ Literal w/ Incremental Indexing 1.1069 +// | Value Length (8+) | 1.1070 +// +-------------------------------+ w/ Indexed Name 1.1071 +// | Value String (Length octets) | 1.1072 +// +-------------------------------+ 1.1073 +// 1.1074 +// 0 1 2 3 4 5 6 7 1.1075 +// +---+---+---+---+---+---+---+---+ 1.1076 
+// | 0 | 0 | 0 | 1.1077 +// +---+---+---+-------------------+ 1.1078 +// | Name Length (8+) | 1.1079 +// +-------------------------------+ Literal w/ Incremental Indexing 1.1080 +// | Name String (Length octets) | 1.1081 +// +-------------------------------+ w/ New Name 1.1082 +// | Value Length (8+) | 1.1083 +// +-------------------------------+ 1.1084 +// | Value String (Length octets) | 1.1085 +// +-------------------------------+ 1.1086 +// 1.1087 +// The **Indexed Representation** consists of the 1-bit prefix and the Index that is represented as 1.1088 +// a 7-bit prefix coded integer and nothing else. 1.1089 +// 1.1090 +// After the first bits, **all literal representations** specify the header name, either as a 1.1091 +// pointer to the Header Table (Index) or a string literal. When the string literal representation 1.1092 +// is used, the Index is set to 0 and the string literal starts at the second byte. 1.1093 +// 1.1094 +// For **all literal representations**, the specification of the header value comes next. It is 1.1095 +// always represented as a string. 

// The three wire-level representation types. `prefix` is the bit width of the
// prefix coded integer that follows the pattern bits; `pattern` is OR-ed onto
// the first byte of the serialized representation.
var representations = {
  indexed            : { prefix: 7, pattern: 0x80 },
  literal            : { prefix: 6, pattern: 0x40 },
  literalIncremental : { prefix: 6, pattern: 0x00 }
};

// Serializes a single `header` object ({ name, value, index }) into its binary
// representation. A numeric `value` means an indexed representation (the number
// is the table index); otherwise a literal representation is chosen, with
// incremental indexing when `header.index` is set.
HeaderSetCompressor.header = function writeHeader(header) {
  var representation, buffers = [];

  if (typeof header.value === 'number') {
    representation = representations.indexed;
  } else if (header.index) {
    representation = representations.literalIncremental;
  } else {
    representation = representations.literal;
  }

  if (representation === representations.indexed) {
    buffers.push(HeaderSetCompressor.integer(header.value + 1, representation.prefix));
    // Index -1 is encoded as 0 above and signals a special form: either a
    // one-byte 0x80 marker (when `index` is set), or a 7-bit prefix coded
    // numeric name. This mirrors the -1 branch of readHeader below.
    if (header.value === -1) {
      if (header.index) {
        buffers.push(HeaderSetCompressor.integer(0x80, 8));
      } else {
        buffers.push(HeaderSetCompressor.integer(header.name, 7));
      }
    }
  }

  else {
    // Literal representations: the name is either a table index (+1, with 0
    // meaning "new name follows as a string literal") or an inline string.
    if (typeof header.name === 'number') {
      buffers.push(HeaderSetCompressor.integer(header.name + 1, representation.prefix));
    } else {
      buffers.push(HeaderSetCompressor.integer(0, representation.prefix));
      buffers.push(HeaderSetCompressor.string(header.name));
    }
    buffers.push(HeaderSetCompressor.string(header.value));
  }

  // Stamp the representation's pattern bits onto the very first output byte.
  buffers[0][0][0] |= representation.pattern;

  return Array.prototype.concat.apply([], buffers); // array of arrays of buffers -> array of buffers
};

// Parses one binary header representation starting at `buffer.cursor` and
// advances the cursor past it. Returns a { name, value, index } object that is
// the exact inverse of what writeHeader produces.
HeaderSetDecompressor.header = function readHeader(buffer) {
  var representation, header = {};

  var firstByte = buffer[buffer.cursor];
  if (firstByte & 0x80) {
    representation = representations.indexed;
  } else if (firstByte & 0x40) {
    representation = representations.literal;
  } else {
    representation = representations.literalIncremental;
  }

  if (representation === representations.indexed) {
    header.value = header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
    header.index = false;
    if (header.value === -1) {
      if (buffer[buffer.cursor] & 0x80) {
        header.index = true;
        buffer.cursor += 1;
      } else {
        header.name = HeaderSetDecompressor.integer(buffer, 7);
      }
    }
  }

  else {
    // Name index 0 on the wire (-1 after the decrement) means the name is an
    // inline string literal instead of a table reference.
    header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
    if (header.name === -1) {
      header.name = HeaderSetDecompressor.string(buffer);
    }
    header.value = HeaderSetDecompressor.string(buffer);
    header.index = (representation === representations.literalIncremental);
  }

  return header;
};

// Integration with HTTP/2
// =======================

// This section describes the interaction between the compressor/decompressor and the rest of the
// HTTP/2 implementation. The `Compressor` and the `Decompressor` makes up a layer between the
// [framer](framer.html) and the [connection handling component](connection.html). They let most
// frames pass through, except HEADERS and PUSH_PROMISE frames. They convert the frames between
// these two representations:
//
//     {                                   {
//      type: 'HEADERS',                    type: 'HEADERS',
//      flags: {},                          flags: {},
//      stream: 1,               <===>      stream: 1,
//      headers: {                          data: Buffer
//       N1: 'V1',                         }
//       N2: ['V1', 'V2', ...],
//       // ...
//      }
//     }
//
// There are possibly several binary frames that belong to a single non-binary frame.

// Maximum payload carried by a single binary frame; larger header blocks are
// cut into this many octets per frame (HEADERS/PUSH_PROMISE + CONTINUATIONs).
var MAX_HTTP_PAYLOAD_SIZE = 16383;

// The Compressor class
// --------------------

// The Compressor transform stream is basically stateless.
util.inherits(Compressor, TransformStream);
function Compressor(log, type) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });

  // `type` only selects which endpoint this is; both sides share the same
  // compression state machine, so only its validity is checked here.
  assert((type === 'REQUEST') || (type === 'RESPONSE'));
  this._table = new HeaderTable(this._log);
}

// Changing the header table size
Compressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
  this._table.setSizeLimit(size);
};

// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor` stream
// instance. This means that from now on, the advantages of streaming header encoding are lost,
// but the API becomes simpler. Returns the complete header block as one Buffer.
Compressor.prototype.compress = function compress(headers) {
  var compressor = new HeaderSetCompressor(this._log, this._table);
  for (var name in headers) {
    var value = headers[name];
    name = String(name).toLowerCase();

    // * To allow for better compression efficiency, the Cookie header field MAY be split into
    //   separate header fields, each with one or more cookie-pairs.
    if (name === 'cookie') {
      if (!(value instanceof Array)) {
        value = [value];
      }
      value = Array.prototype.concat.apply([], value.map(function(cookie) {
        return String(cookie).split(';').map(trim);
      }));
    }

    // * To preserve the order of a comma-separated list, the ordered values for a single header
    //   field name appearing in different header fields are concatenated into a single value.
    //   A zero-valued octet (0x0) is used to delimit multiple values.
    // * Header fields containing multiple values MUST be concatenated into a single value unless
    //   the ordering of that header field is known to be not significant.
    // * Currently, only the Cookie header is considered to be order-insensitive.
    if ((value instanceof Array) && (name !== 'cookie')) {
      value = value.join('\0');
    }

    // At this point only cookie values can still be an array: each cookie-pair
    // is emitted as its own header field.
    if (value instanceof Array) {
      for (var i = 0; i < value.length; i++) {
        compressor.write([name, String(value[i])]);
      }
    } else {
      compressor.write([name, String(value)]);
    }
  }
  compressor.end();

  // Drain the synchronous transform stream and join its output chunks.
  var chunk, chunks = [];
  while (chunk = compressor.read()) {
    chunks.push(chunk);
  }
  return concat(chunks);
};

// When a `frame` arrives
Compressor.prototype._transform = function _transform(frame, encoding, done) {
  // * and it is a HEADERS or PUSH_PROMISE frame
  //   * it generates a header block using the compress method
  //   * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE`
  //   * for each chunk, it pushes out a chunk frame that is identical to the original, except
  //     the `data` property which holds the given chunk, the type of the frame which is always
  //     CONTINUATION except for the first frame, and the END_HEADERS/END_PUSH_PROMISE flag that
  //     marks the last frame and the END_STREAM flag which is always false before the end
  if (frame.type === 'HEADERS' || frame.type === 'PUSH_PROMISE') {
    var buffer = this.compress(frame.headers);

    var chunks = cut(buffer, MAX_HTTP_PAYLOAD_SIZE);

    for (var i = 0; i < chunks.length; i++) {
      var chunkFrame;
      var first = (i === 0);
      var last = (i === chunks.length - 1);

      if (first) {
        // Shallow-copy the frame (and its flags) so the original stays intact;
        // 'END_HEADERS' or 'END_PUSH_PROMISE' is derived from the frame type.
        chunkFrame = util._extend({}, frame);
        chunkFrame.flags = util._extend({}, frame.flags);
        chunkFrame.flags['END_' + frame.type] = last;
      } else {
        chunkFrame = {
          type: 'CONTINUATION',
          flags: { END_HEADERS: last },
          stream: frame.stream
        };
      }
      chunkFrame.data = chunks[i];

      this.push(chunkFrame);
    }
  }

  // * otherwise, the frame is forwarded without taking any action
  else {
    this.push(frame);
  }

  done();
};

// The Decompressor class
// ----------------------

// The Decompressor is a stateful transform stream, since it has to collect multiple frames first,
// and the decoding comes after unifying the payload of those frames.
//
// If there's a frame in progress, `this._inProgress` is `true`. The frames are collected in
// `this._frames`, and the type of the frame and the stream identifier is stored in `this._type`
// and `this._stream` respectively.
util.inherits(Decompressor, TransformStream);
function Decompressor(log, type) {
  TransformStream.call(this, { objectMode: true });

  // NOTE(review): the log child is tagged 'compressor' here as well — this
  // matches the Compressor above; confirm whether 'decompressor' was intended.
  this._log = log.child({ component: 'compressor' });

  assert((type === 'REQUEST') || (type === 'RESPONSE'));
  this._table = new HeaderTable(this._log);

  this._inProgress = false;
  this._base = undefined;
}

// Changing the header table size
Decompressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
  this._table.setSizeLimit(size);
};

// `decompress` takes a full header block, and decompresses it using a new `HeaderSetDecompressor`
// stream instance. This means that from now on, the advantages of streaming header decoding are
// lost, but the API becomes simpler.
Decompressor.prototype.decompress = function decompress(block) {
  var decompressor = new HeaderSetDecompressor(this._log, this._table);
  decompressor.end(block);

  // Drain the synchronous transform stream, collecting [name, value] pairs
  // into a plain object; repeated names accumulate into arrays.
  var headers = {};
  var pair;
  while (pair = decompressor.read()) {
    var name = pair[0];
    // * After decompression, header fields that have values containing zero octets (0x0) MUST be
    //   split into multiple header fields before being processed.
    var values = pair[1].split('\0');
    for (var i = 0; i < values.length; i++) {
      var value = values[i];
      if (name in headers) {
        if (headers[name] instanceof Array) {
          headers[name].push(value);
        } else {
          headers[name] = [headers[name], value];
        }
      } else {
        headers[name] = value;
      }
    }
  }

  // * If there are multiple Cookie header fields after decompression, these MUST be concatenated
  //   into a single octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII
  //   string "; ").
  if (('cookie' in headers) && (headers['cookie'] instanceof Array)) {
    headers['cookie'] = headers['cookie'].join('; ');
  }

  return headers;
};

// When a `frame` arrives
Decompressor.prototype._transform = function _transform(frame, encoding, done) {
  // * and the collection process is already `_inProgress`, the frame is simply stored, except if
  //   it's an illegal frame
  if (this._inProgress) {
    if ((frame.type !== 'CONTINUATION') || (frame.stream !== this._base.stream)) {
      this._log.error('A series of HEADER frames were not continuous');
      this.emit('error', 'PROTOCOL_ERROR');
      // NOTE(review): `done()` is intentionally not called after emitting the
      // error — the stream is considered dead at this point; confirm upstream.
      return;
    }
    this._frames.push(frame);
  }

  // * and the collection process is not `_inProgress`, but the new frame's type is HEADERS or
  //   PUSH_PROMISE, a new collection process begins
  else if ((frame.type === 'HEADERS') || (frame.type === 'PUSH_PROMISE')) {
    this._inProgress = true;
    this._base = util._extend({}, frame);
    this._frames = [frame];
  }

  // * otherwise, the frame is forwarded without taking any action
  else {
    this.push(frame);
  }

  // * When the frame signals that it's the last in the series, the header block chunks are
  //   concatenated, the headers are decompressed, and a new frame gets pushed out with the
  //   decompressed headers.
  if (this._inProgress && (frame.flags.END_HEADERS || frame.flags.END_PUSH_PROMISE)) {
    var buffer = concat(this._frames.map(function(frame) {
      return frame.data;
    }));
    try {
      var headers = this.decompress(buffer);
    } catch(error) {
      this._log.error({ err: error }, 'Header decompression error');
      this.emit('error', 'COMPRESSION_ERROR');
      return;
    }
    this.push(util._extend(this._base, { headers: headers }));
    this._inProgress = false;
  }

  done();
};

// Helper functions
// ================

// Concatenate an array of buffers into a new buffer
function concat(buffers) {
  var size = 0;
  for (var i = 0; i < buffers.length; i++) {
    size += buffers[i].length;
  }

  var concatenated = new Buffer(size);
  for (var cursor = 0, j = 0; j < buffers.length; cursor += buffers[j].length, j++) {
    buffers[j].copy(concatenated, cursor);
  }

  return concatenated;
}

// Cut `buffer` into chunks not larger than `size`. Always yields at least one
// chunk (an empty buffer produces a single empty chunk).
function cut(buffer, size) {
  var chunks = [];
  var cursor = 0;
  do {
    var chunkSize = Math.min(size, buffer.length - cursor);
    chunks.push(buffer.slice(cursor, cursor + chunkSize));
    cursor += chunkSize;
  } while(cursor < buffer.length);
  return chunks;
}

// Strip leading/trailing whitespace; used when splitting cookie-pairs.
function trim(string) {
  return string.trim();
}