"use strict";

var utils = require("./utils");
var support = require("./support");
var nodejsUtils = require("./nodejsUtils");
var GenericWorker = require("./stream/GenericWorker");

/**
 * The following functions come from pako, from pako/lib/utils/strings
 * released under the MIT license, see pako https://github.com/nodeca/pako/
 */

// Table with utf8 lengths (calculated by first byte of sequence)
// Note that 5 & 6-byte values and some 4-byte values cannot be represented in JS,
// because the max possible codepoint is 0x10ffff
var _utf8len = new Array(256);
for (var i=0; i<256; i++) {
    _utf8len[i] = (i >= 252 ? 6 : i >= 248 ? 5 : i >= 240 ? 4 : i >= 224 ? 3 : i >= 192 ? 2 : 1);
}
_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
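
// A few concrete values from the table above, shown as an illustration
// (they follow directly from the thresholds in the loop):
//   _utf8len[0x41] === 1  // "A": single-byte ASCII lead
//   _utf8len[0xC3] === 2  // lead byte of a 2-byte sequence, e.g. "é" (0xC3 0xA9)
//   _utf8len[0xE2] === 3  // lead byte of a 3-byte sequence, e.g. "€" (0xE2 0x82 0xAC)
//   _utf8len[0xF0] === 4  // lead byte of a 4-byte sequence, e.g. "😀" (0xF0 0x9F 0x98 0x80)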

// convert string to array (typed, when possible)
var string2buf = function (str) {
    var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;

    // count binary size
    for (m_pos = 0; m_pos < str_len; m_pos++) {
        c = str.charCodeAt(m_pos);
        if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) {
            c2 = str.charCodeAt(m_pos+1);
            if ((c2 & 0xfc00) === 0xdc00) {
                c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
                m_pos++;
            }
        }
        buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
    }

    // allocate buffer
    if (support.uint8array) {
        buf = new Uint8Array(buf_len);
    } else {
        buf = new Array(buf_len);
    }

    // convert
    for (i=0, m_pos = 0; i < buf_len; m_pos++) {
        c = str.charCodeAt(m_pos);
        if ((c & 0xfc00) === 0xd800 && (m_pos+1 < str_len)) {
            c2 = str.charCodeAt(m_pos+1);
            if ((c2 & 0xfc00) === 0xdc00) {
                c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
                m_pos++;
            }
        }
        if (c < 0x80) {
            /* one byte */
            buf[i++] = c;
        } else if (c < 0x800) {
            /* two bytes */
            buf[i++] = 0xC0 | (c >>> 6);
            buf[i++] = 0x80 | (c & 0x3f);
        } else if (c < 0x10000) {
            /* three bytes */
            buf[i++] = 0xE0 | (c >>> 12);
            buf[i++] = 0x80 | (c >>> 6 & 0x3f);
            buf[i++] = 0x80 | (c & 0x3f);
        } else {
            /* four bytes */
            buf[i++] = 0xf0 | (c >>> 18);
            buf[i++] = 0x80 | (c >>> 12 & 0x3f);
            buf[i++] = 0x80 | (c >>> 6 & 0x3f);
            buf[i++] = 0x80 | (c & 0x3f);
        }
    }

    return buf;
};
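
// Minimal usage sketch for the internal helper above (purely illustrative):
//   string2buf("aé€");
//   // -> bytes [0x61, 0xC3, 0xA9, 0xE2, 0x82, 0xAC], as a Uint8Array when supported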

// Calculate the maximum position in the utf8 buffer that does not split
// a multi-byte sequence. If that is not possible (the limit is very small),
// return max as is.
//
// buf[] - utf8 bytes array
// max   - length limit (mandatory);
var utf8border = function(buf, max) {
    var pos;

    max = max || buf.length;
    if (max > buf.length) { max = buf.length; }

    // go back from the last position until the start of a sequence is found
    pos = max-1;
    while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }

    // Very small or broken sequence:
    // return max, because we should return something anyway.
    if (pos < 0) { return max; }

    // If we reached the start of the buffer, the buffer is too small;
    // return max too.
    if (pos === 0) { return max; }

    return (pos + _utf8len[buf[pos]] > max) ? pos : max;
};
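
// Illustration (not part of the public API): for "aé" truncated inside the
// 2-byte "é" sequence, the border falls before the incomplete lead byte:
//   utf8border([0x61, 0xC3, 0xA9], 2); // -> 1, because 0xC3 starts a 2-byte
//                                      //    sequence that does not fit in max = 2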

// convert array to string
var buf2string = function (buf) {
    var i, out, c, c_len;
    var len = buf.length;

    // Reserve max possible length (2 words per char)
    // NB: for unknown reasons, Array is significantly faster for
    //     String.fromCharCode.apply than Uint16Array.
    var utf16buf = new Array(len*2);

    for (out=0, i=0; i<len;) {
        c = buf[i++];
        // quick process ascii
        if (c < 0x80) { utf16buf[out++] = c; continue; }

        c_len = _utf8len[c];
        // skip 5 & 6 byte codes
        if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len-1; continue; }

        // apply mask on first byte
        c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
        // join the rest
        while (c_len > 1 && i < len) {
            c = (c << 6) | (buf[i++] & 0x3f);
            c_len--;
        }

        // terminated by end of string?
        if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }

        if (c < 0x10000) {
            utf16buf[out++] = c;
        } else {
            c -= 0x10000;
            utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
            utf16buf[out++] = 0xdc00 | (c & 0x3ff);
        }
    }

    // shrinkBuf(utf16buf, out)
    if (utf16buf.length !== out) {
        if(utf16buf.subarray) {
            utf16buf = utf16buf.subarray(0, out);
        } else {
            utf16buf.length = out;
        }
    }

    // return String.fromCharCode.apply(null, utf16buf);
    return utils.applyFromCharCode(utf16buf);
};
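
// Illustration of the internal decoder above (example calls only):
//   buf2string([0x61, 0xC3, 0xA9]);       // -> "aé"
//   buf2string([0xF0, 0x9F, 0x98, 0x80]); // -> "😀", emitted as a surrogate pair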

// That's all for the pako functions.

/**
 * Transform a javascript string into an array (typed if possible) of bytes,
 * UTF-8 encoded.
 * @param {String} str the string to encode
 * @return {Array|Uint8Array|Buffer} the UTF-8 encoded string.
 */
exports.utf8encode = function utf8encode(str) {
    if (support.nodebuffer) {
        return nodejsUtils.newBufferFrom(str, "utf-8");
    }

    return string2buf(str);
};
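
// Usage sketch (assuming the module is loaded as require("./utf8"), which is
// how the rest of the library consumes it):
//   var utf8 = require("./utf8");
//   utf8.utf8encode("héllo"); // -> Buffer, Uint8Array or Array of UTF-8 bytes,
//                             //    depending on platform support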

/**
 * Transform a bytes array (or a representation) representing an UTF-8 encoded
 * string into a javascript string.
 * @param {Array|Uint8Array|Buffer} buf the data to decode
 * @return {String} the decoded string.
 */
exports.utf8decode = function utf8decode(buf) {
    if (support.nodebuffer) {
        return utils.transformTo("nodebuffer", buf).toString("utf-8");
    }

    buf = utils.transformTo(support.uint8array ? "uint8array" : "array", buf);

    return buf2string(buf);
};
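
// Usage sketch (illustrative only):
//   utf8.utf8decode(new Uint8Array([0x68, 0xC3, 0xA9])); // -> "hé"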

/**
 * A worker to decode utf8 encoded binary chunks into string chunks.
 * @constructor
 */
function Utf8DecodeWorker() {
    GenericWorker.call(this, "utf-8 decode");
    // the last bytes if a chunk didn't end with a complete codepoint.
    this.leftOver = null;
}
utils.inherits(Utf8DecodeWorker, GenericWorker);

/**
 * @see GenericWorker.processChunk
 */
Utf8DecodeWorker.prototype.processChunk = function (chunk) {

    var data = utils.transformTo(support.uint8array ? "uint8array" : "array", chunk.data);

    // 1st step, re-use what's left of the previous chunk
    if (this.leftOver && this.leftOver.length) {
        if(support.uint8array) {
            var previousData = data;
            data = new Uint8Array(previousData.length + this.leftOver.length);
            data.set(this.leftOver, 0);
            data.set(previousData, this.leftOver.length);
        } else {
            data = this.leftOver.concat(data);
        }
        this.leftOver = null;
    }

    var nextBoundary = utf8border(data);
    var usableData = data;
    if (nextBoundary !== data.length) {
        if (support.uint8array) {
            usableData = data.subarray(0, nextBoundary);
            this.leftOver = data.subarray(nextBoundary, data.length);
        } else {
            usableData = data.slice(0, nextBoundary);
            this.leftOver = data.slice(nextBoundary, data.length);
        }
    }

    this.push({
        data : exports.utf8decode(usableData),
        meta : chunk.meta
    });
};

/**
 * @see GenericWorker.flush
 */
Utf8DecodeWorker.prototype.flush = function () {
    if(this.leftOver && this.leftOver.length) {
        this.push({
            data : exports.utf8decode(this.leftOver),
            meta : {}
        });
        this.leftOver = null;
    }
};
exports.Utf8DecodeWorker = Utf8DecodeWorker;
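
// Rough sketch of how this worker behaves (the real pipeline wiring lives in
// GenericWorker and the stream helpers; the direct calls below are for illustration):
//   var w = new Utf8DecodeWorker();
//   w.processChunk({data: new Uint8Array([0x68, 0xC3]), meta: {}}); // pushes "h", keeps 0xC3 as leftOver
//   w.processChunk({data: new Uint8Array([0xA9]), meta: {}});       // pushes "é"
//   w.flush();                                                      // nothing left over here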

/**
 * A worker to encode string chunks into utf8 encoded binary chunks.
 * @constructor
 */
function Utf8EncodeWorker() {
    GenericWorker.call(this, "utf-8 encode");
}
utils.inherits(Utf8EncodeWorker, GenericWorker);

/**
 * @see GenericWorker.processChunk
 */
Utf8EncodeWorker.prototype.processChunk = function (chunk) {
    this.push({
        data : exports.utf8encode(chunk.data),
        meta : chunk.meta
    });
};
exports.Utf8EncodeWorker = Utf8EncodeWorker;
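
// Sketch (illustrative): the encode worker simply maps each string chunk to bytes.
//   var enc = new Utf8EncodeWorker();
//   enc.processChunk({data: "é", meta: {}}); // pushes the bytes [0xC3, 0xA9]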