forked from mirror/brotli
Faster bit writing.
Replace the functions in write_bits.go with a bitWriter type based on the compress/flate package.
This commit is contained in:
parent ef7a42160d
commit c3da72aa01
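The bitWriter type itself is defined in write_bits.go and does not appear in the hunks below; the diff only shows its call sites. Based on the methods those call sites use (writeBits, writeSingleBit, writeBytes, jumpToByteBoundary) and on the compress/flate-style design named in the commit message, a minimal sketch of the assumed interface looks like this — the field names and method bodies are illustrative assumptions, not the committed implementation:

// bitWriter accumulates bits LSB-first in a 64-bit buffer and flushes whole
// bytes to an output slice, in the style of compress/flate's bit writer.
type bitWriter struct {
	dst   []byte // output, grown as bytes are flushed
	bits  uint64 // bit accumulator; the lowest bits are written first
	nbits uint   // number of valid bits currently in the accumulator
}

// writeBits appends the low nb bits of b to the stream.
func (w *bitWriter) writeBits(nb uint, b uint64) {
	w.bits |= b << w.nbits
	w.nbits += nb
	for w.nbits >= 8 {
		w.dst = append(w.dst, byte(w.bits))
		w.bits >>= 8
		w.nbits -= 8
	}
}

// writeSingleBit appends one bit.
func (w *bitWriter) writeSingleBit(bit bool) {
	if bit {
		w.writeBits(1, 1)
	} else {
		w.writeBits(1, 0)
	}
}

// jumpToByteBoundary pads with zero bits up to the next byte boundary.
func (w *bitWriter) jumpToByteBoundary() {
	if w.nbits&7 != 0 {
		w.writeBits(8-w.nbits&7, 0)
	}
}

// writeBytes appends whole bytes; callers align to a byte boundary first.
func (w *bitWriter) writeBytes(b []byte) {
	w.dst = append(w.dst, b...)
}

With an interface of roughly that shape, the change below is almost entirely mechanical: every writeBits(n, b, storage_ix, storage) becomes bw.writeBits(n, b), the explicit byte-boundary and storage bookkeeping (jumpToByteBoundary, writeBitsPrepareStorage, raw copies into storage) collapses into bw.jumpToByteBoundary() and bw.writeBytes(), and the storing functions take a *bitWriter instead of the storage_ix/storage pair.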
@@ -121,7 +121,7 @@ func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) {
 	*bits = uint64(length) - 1
 }
 
-func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
+func storeCommandExtra(cmd *command, bw *bitWriter) {
 	var copylen_code uint32 = commandCopyLenCode(cmd)
 	var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_))
 	var copycode uint16 = getCopyLengthCode(uint(copylen_code))
@@ -129,7 +129,7 @@ func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
 	var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode))
 	var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode))
 	var bits uint64 = copyextraval<<insnumextra | insextraval
-	writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage)
+	bw.writeBits(uint(insnumextra+getCopyExtra(copycode)), bits)
 }
 
 /* Data structure that stores almost everything that is needed to encode each
@@ -143,21 +143,21 @@ type blockSplitCode struct {
 }
 
 /* Stores a number between 0 and 255. */
-func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) {
+func storeVarLenUint8(n uint, bw *bitWriter) {
 	if n == 0 {
-		writeBits(1, 0, storage_ix, storage)
+		bw.writeBits(1, 0)
 	} else {
 		var nbits uint = uint(log2FloorNonZero(n))
-		writeBits(1, 1, storage_ix, storage)
-		writeBits(3, uint64(nbits), storage_ix, storage)
-		writeBits(nbits, uint64(n)-(uint64(uint(1))<<nbits), storage_ix, storage)
+		bw.writeBits(1, 1)
+		bw.writeBits(3, uint64(nbits))
+		bw.writeBits(nbits, uint64(n)-(uint64(uint(1))<<nbits))
	}
 }
 
 /* Stores the compressed meta-block header.
    REQUIRES: length > 0
    REQUIRES: length <= (1 << 24) */
-func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) {
+func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWriter) {
 	var lenbits uint64
 	var nlenbits uint
 	var nibblesbits uint64
@@ -169,41 +169,41 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix
 	}
 
 	/* Write ISLAST bit. */
-	writeBits(1, is_final, storage_ix, storage)
+	bw.writeBits(1, is_final)
 
 	/* Write ISEMPTY bit. */
 	if is_final_block {
-		writeBits(1, 0, storage_ix, storage)
+		bw.writeBits(1, 0)
 	}
 
 	encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
-	writeBits(2, nibblesbits, storage_ix, storage)
-	writeBits(nlenbits, lenbits, storage_ix, storage)
+	bw.writeBits(2, nibblesbits)
+	bw.writeBits(nlenbits, lenbits)
 
 	if !is_final_block {
 		/* Write ISUNCOMPRESSED bit. */
-		writeBits(1, 0, storage_ix, storage)
+		bw.writeBits(1, 0)
 	}
 }
 
 /* Stores the uncompressed meta-block header.
    REQUIRES: length > 0
    REQUIRES: length <= (1 << 24) */
-func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) {
+func storeUncompressedMetaBlockHeader(length uint, bw *bitWriter) {
 	var lenbits uint64
 	var nlenbits uint
 	var nibblesbits uint64
 
 	/* Write ISLAST bit.
 	   Uncompressed block cannot be the last one, so set to 0. */
-	writeBits(1, 0, storage_ix, storage)
+	bw.writeBits(1, 0)
 
 	encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
-	writeBits(2, nibblesbits, storage_ix, storage)
-	writeBits(nlenbits, lenbits, storage_ix, storage)
+	bw.writeBits(2, nibblesbits)
+	bw.writeBits(nlenbits, lenbits)
 
 	/* Write ISUNCOMPRESSED bit. */
-	writeBits(1, 1, storage_ix, storage)
+	bw.writeBits(1, 1)
 }
 
 var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
@@ -211,7 +211,7 @@ var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte
 var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15}
 var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4}
 
-func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) {
+func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, bw *bitWriter) {
 	var skip_some uint = 0
 	var codes_to_store uint = codeLengthCodes
 	/* The bit lengths of the Huffman code over the code length alphabet
@@ -241,38 +241,38 @@ func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth
 		}
 	}
 
-	writeBits(2, uint64(skip_some), storage_ix, storage)
+	bw.writeBits(2, uint64(skip_some))
 	{
 		var i uint
 		for i = skip_some; i < codes_to_store; i++ {
 			var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]])
-			writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage)
+			bw.writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]))
 		}
 	}
 }
 
-func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) {
+func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, bw *bitWriter) {
 	var i uint
 	for i = 0; i < huffman_tree_size; i++ {
 		var ix uint = uint(huffman_tree[i])
-		writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage)
+		bw.writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]))
 
 		/* Extra bits */
 		switch ix {
 		case repeatPreviousCodeLength:
-			writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
+			bw.writeBits(2, uint64(huffman_tree_extra_bits[i]))
 
 		case repeatZeroCodeLength:
-			writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
+			bw.writeBits(3, uint64(huffman_tree_extra_bits[i]))
 		}
 	}
 }
 
-func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) {
+func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, bw *bitWriter) {
 	/* value of 1 indicates a simple Huffman code */
-	writeBits(2, 1, storage_ix, storage)
+	bw.writeBits(2, 1)
 
-	writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */
+	bw.writeBits(2, uint64(num_symbols)-1) /* NSYM - 1 */
 	{
 		/* Sort */
 		var i uint
@@ -289,17 +289,17 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
 	}
 
 	if num_symbols == 2 {
-		writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
+		bw.writeBits(max_bits, uint64(symbols[0]))
+		bw.writeBits(max_bits, uint64(symbols[1]))
 	} else if num_symbols == 3 {
-		writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
+		bw.writeBits(max_bits, uint64(symbols[0]))
+		bw.writeBits(max_bits, uint64(symbols[1]))
+		bw.writeBits(max_bits, uint64(symbols[2]))
 	} else {
-		writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
+		bw.writeBits(max_bits, uint64(symbols[0]))
+		bw.writeBits(max_bits, uint64(symbols[1]))
+		bw.writeBits(max_bits, uint64(symbols[2]))
+		bw.writeBits(max_bits, uint64(symbols[3]))
 
 		/* tree-select */
 		var tmp int
@@ -308,13 +308,13 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
 		} else {
 			tmp = 0
 		}
-		writeBits(1, uint64(tmp), storage_ix, storage)
+		bw.writeBits(1, uint64(tmp))
 	}
 }
 
 /* num = alphabet size
    depths = symbol depths */
-func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter) {
 	var huffman_tree [numCommandSymbols]byte
 	var huffman_tree_extra_bits [numCommandSymbols]byte
 	var huffman_tree_size uint = 0
@@ -357,19 +357,19 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *u
 	convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:])
 
 	/* Now, we have all the data, let's start storing it */
-	storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage)
+	storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], bw)
 
 	if num_codes == 1 {
 		code_length_bitdepth[code] = 0
 	}
 
 	/* Store the real Huffman tree now. */
-	storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage)
+	storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], bw)
 }
 
 /* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
    bits[0:length] and stores the encoded tree to the bit stream. */
-func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, bw *bitWriter) {
 	var count uint = 0
 	var s4 = [4]uint{0}
 	var i uint
@@ -394,8 +394,8 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
 	}
 
 	if count <= 1 {
-		writeBits(4, 1, storage_ix, storage)
-		writeBits(max_bits, uint64(s4[0]), storage_ix, storage)
+		bw.writeBits(4, 1)
+		bw.writeBits(max_bits, uint64(s4[0]))
 		depth[s4[0]] = 0
 		bits[s4[0]] = 0
 		return
@@ -408,9 +408,9 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
 	convertBitDepthsToSymbols(depth, histogram_length, bits)
 
 	if count <= 4 {
-		storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage)
+		storeSimpleHuffmanTree(depth, s4[:], count, max_bits, bw)
 	} else {
-		storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage)
+		storeHuffmanTree(depth, histogram_length, tree, bw)
 	}
 }
 
@@ -420,7 +420,7 @@ func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool {
 
 var huffmanTreePool sync.Pool
 
-func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
+func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) {
 	var count uint = 0
 	var symbols = [4]uint{0}
 	var length uint = 0
@@ -439,8 +439,8 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
 	}
 
 	if count <= 1 {
-		writeBits(4, 1, storage_ix, storage)
-		writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
+		bw.writeBits(4, 1)
+		bw.writeBits(max_bits, uint64(symbols[0]))
 		depth[symbols[0]] = 0
 		bits[symbols[0]] = 0
 		return
@@ -544,9 +544,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
 		var i uint
 
 		/* value of 1 indicates a simple Huffman code */
-		writeBits(2, 1, storage_ix, storage)
+		bw.writeBits(2, 1)
 
-		writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */
+		bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */
 
 		/* Sort */
 		for i = 0; i < count; i++ {
@@ -561,33 +561,27 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
 		}
 
 		if count == 2 {
-			writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
+			bw.writeBits(max_bits, uint64(symbols[0]))
+			bw.writeBits(max_bits, uint64(symbols[1]))
 		} else if count == 3 {
-			writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
+			bw.writeBits(max_bits, uint64(symbols[0]))
+			bw.writeBits(max_bits, uint64(symbols[1]))
+			bw.writeBits(max_bits, uint64(symbols[2]))
 		} else {
-			writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
-			writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
+			bw.writeBits(max_bits, uint64(symbols[0]))
+			bw.writeBits(max_bits, uint64(symbols[1]))
+			bw.writeBits(max_bits, uint64(symbols[2]))
+			bw.writeBits(max_bits, uint64(symbols[3]))
 
 			/* tree-select */
-			var tmp int
-			if depth[symbols[0]] == 1 {
-				tmp = 1
-			} else {
-				tmp = 0
-			}
-			writeBits(1, uint64(tmp), storage_ix, storage)
+			bw.writeSingleBit(depth[symbols[0]] == 1)
 		}
 	} else {
 		var previous_value byte = 8
 		var i uint
 
 		/* Complex Huffman Tree */
-		storeStaticCodeLengthCode(storage_ix, storage)
+		storeStaticCodeLengthCode(bw)
 
 		/* Actual RLE coding. */
 		for i = 0; i < length; {
@@ -600,21 +594,21 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
 
 			i += reps
 			if value == 0 {
-				writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage)
+				bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps])
 			} else {
 				if previous_value != value {
-					writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
+					bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
 					reps--
 				}
 
 				if reps < 3 {
 					for reps != 0 {
 						reps--
-						writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
+						bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
 					}
 				} else {
 					reps -= 3
-					writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage)
+					bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps])
 				}
 
 				previous_value = value
@@ -739,7 +733,7 @@ const symbolBits = 9
 
 var encodeContextMap_kSymbolMask uint32 = (1 << symbolBits) - 1
 
-func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, bw *bitWriter) {
 	var i uint
 	var rle_symbols []uint32
 	var max_run_length_prefix uint32 = 6
@@ -748,7 +742,7 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
 	var depths [maxContextMapSymbols]byte
 	var bits [maxContextMapSymbols]uint16
 
-	storeVarLenUint8(num_clusters-1, storage_ix, storage)
+	storeVarLenUint8(num_clusters-1, bw)
 
 	if num_clusters == 1 {
 		return
@@ -763,45 +757,45 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
 	}
 	{
 		var use_rle bool = (max_run_length_prefix > 0)
-		writeSingleBit(use_rle, storage_ix, storage)
+		bw.writeSingleBit(use_rle)
 		if use_rle {
-			writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage)
+			bw.writeBits(4, uint64(max_run_length_prefix)-1)
 		}
 	}
 
-	buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage)
+	buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], bw)
 	for i = 0; i < num_rle_symbols; i++ {
 		var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask
 		var extra_bits_val uint32 = rle_symbols[i] >> symbolBits
-		writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage)
+		bw.writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]))
 		if rle_symbol > 0 && rle_symbol <= max_run_length_prefix {
-			writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage)
+			bw.writeBits(uint(rle_symbol), uint64(extra_bits_val))
 		}
 	}
 
-	writeBits(1, 1, storage_ix, storage) /* use move-to-front */
+	bw.writeBits(1, 1) /* use move-to-front */
 	rle_symbols = nil
 }
 
 /* Stores the block switch command with index block_ix to the bit stream. */
-func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) {
+func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, bw *bitWriter) {
 	var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type)
 	var lencode uint
 	var len_nextra uint32
 	var len_extra uint32
 	if !is_first_block {
-		writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage)
+		bw.writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]))
 	}
 
 	getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra)
 
-	writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage)
-	writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage)
+	bw.writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]))
+	bw.writeBits(uint(len_nextra), uint64(len_extra))
 }
 
 /* Builds a BlockSplitCode data structure from the block split given by the
    vector of block types and block lengths and stores it to the bit stream. */
-func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) {
+func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, bw *bitWriter) {
 	var type_histo [maxBlockTypeSymbols]uint32
 	var length_histo [numBlockLenSymbols]uint32
 	var i uint
@@ -819,17 +813,17 @@ func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint
 		length_histo[blockLengthPrefixCode(lengths[i])]++
 	}
 
-	storeVarLenUint8(num_types-1, storage_ix, storage)
+	storeVarLenUint8(num_types-1, bw)
 	if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? */
-		buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage)
-		buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage)
-		storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage)
+		buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], bw)
+		buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], bw)
+		storeBlockSwitch(code, lengths[0], types[0], true, bw)
 	}
 }
 
 /* Stores a context map where the histogram type is always the block type. */
-func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
-	storeVarLenUint8(num_types-1, storage_ix, storage)
+func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, bw *bitWriter) {
+	storeVarLenUint8(num_types-1, bw)
 	if num_types > 1 {
 		var repeat_code uint = context_bits - 1
 		var repeat_bits uint = (1 << repeat_code) - 1
@@ -843,16 +837,16 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
 		}
 
 		/* Write RLEMAX. */
-		writeBits(1, 1, storage_ix, storage)
+		bw.writeBits(1, 1)
 
-		writeBits(4, uint64(repeat_code)-1, storage_ix, storage)
+		bw.writeBits(4, uint64(repeat_code)-1)
 		histogram[repeat_code] = uint32(num_types)
 		histogram[0] = 1
 		for i = context_bits; i < alphabet_size; i++ {
 			histogram[i] = 1
 		}
 
-		buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage)
+		buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], bw)
 		for i = 0; i < num_types; i++ {
 			var tmp uint
 			if i == 0 {
@@ -861,13 +855,13 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
 				tmp = i + context_bits - 1
 			}
 			var code uint = tmp
-			writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage)
-			writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage)
-			writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage)
+			bw.writeBits(uint(depths[code]), uint64(bits[code]))
+			bw.writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]))
+			bw.writeBits(repeat_code, uint64(repeat_bits))
 		}
 
 		/* Write IMTF (inverse-move-to-front) bit. */
-		writeBits(1, 1, storage_ix, storage)
+		bw.writeBits(1, 1)
 	}
 }
 
@@ -921,13 +915,13 @@ func cleanupBlockEncoder(self *blockEncoder) {
 
 /* Creates entropy codes of block lengths and block types and stores them
    to the bit stream. */
-func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) {
-	buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage)
+func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, bw *bitWriter) {
+	buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, bw)
 }
 
 /* Stores the next symbol with the entropy code of the current block type.
    Updates the block type and block length at block boundaries. */
-func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) {
+func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
 	if self.block_len_ == 0 {
 		self.block_ix_++
 		var block_ix uint = self.block_ix_
@@ -935,20 +929,20 @@ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []by
 		var block_type byte = self.block_types_[block_ix]
 		self.block_len_ = uint(block_len)
 		self.entropy_ix_ = uint(block_type) * self.histogram_length_
-		storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
+		storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
 	}
 
 	self.block_len_--
 	{
 		var ix uint = self.entropy_ix_ + symbol
-		writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
+		bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
 	}
 }
 
 /* Stores the next symbol with the entropy code of the current block type and
    context value.
    Updates the block type and block length at block boundaries. */
-func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) {
+func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, bw *bitWriter, context_bits uint) {
 	if self.block_len_ == 0 {
 		self.block_ix_++
 		var block_ix uint = self.block_ix_
@@ -956,18 +950,18 @@ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, conte
 		var block_type byte = self.block_types_[block_ix]
 		self.block_len_ = uint(block_len)
 		self.entropy_ix_ = uint(block_type) << context_bits
-		storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
+		storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
 	}
 
 	self.block_len_--
 	{
 		var histo_ix uint = uint(context_map[self.entropy_ix_+context])
 		var ix uint = histo_ix*self.histogram_length_ + symbol
-		writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
+		bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
 	}
 }
 
-func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
 	var table_size uint = histograms_size * self.histogram_length_
 	if cap(self.depths_) < int(table_size) {
 		self.depths_ = make([]byte, table_size)
@@ -983,12 +977,12 @@ func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogram
 		var i uint
 		for i = 0; i < histograms_size; i++ {
 			var ix uint = i * self.histogram_length_
-			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
 		}
 	}
 }
 
-func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
 	var table_size uint = histograms_size * self.histogram_length_
 	if cap(self.depths_) < int(table_size) {
 		self.depths_ = make([]byte, table_size)
@@ -1004,12 +998,12 @@ func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogram
 		var i uint
 		for i = 0; i < histograms_size; i++ {
 			var ix uint = i * self.histogram_length_
-			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
 		}
 	}
 }
 
-func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
+func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
 	var table_size uint = histograms_size * self.histogram_length_
 	if cap(self.depths_) < int(table_size) {
 		self.depths_ = make([]byte, table_size)
@@ -1025,17 +1019,12 @@ func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogra
 		var i uint
 		for i = 0; i < histograms_size; i++ {
 			var ix uint = i * self.histogram_length_
-			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
+			buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
 		}
 	}
 }
 
-func jumpToByteBoundary(storage_ix *uint, storage []byte) {
-	*storage_ix = (*storage_ix + 7) &^ 7
-	storage[*storage_ix>>3] = 0
-}
-
-func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) {
+func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, bw *bitWriter) {
 	var pos uint = start_pos
 	var i uint
 	var num_distance_symbols uint32 = params.dist.alphabet_size
@@ -1047,48 +1036,48 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
 		num_effective_distance_symbols = numHistogramDistanceSymbols
 	}
 
-	storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+	storeCompressedMetaBlockHeader(is_last, length, bw)
 
 	tree = make([]huffmanTree, maxHuffmanTreeSize)
 	literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
 	command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
 	distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
 
-	buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage)
-	buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage)
-	buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage)
+	buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, bw)
+	buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, bw)
+	buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, bw)
 
-	writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage)
-	writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage)
+	bw.writeBits(2, uint64(dist.distance_postfix_bits))
+	bw.writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits)
 	for i = 0; i < mb.literal_split.num_types; i++ {
-		writeBits(2, uint64(literal_context_mode), storage_ix, storage)
+		bw.writeBits(2, uint64(literal_context_mode))
 	}
 
 	if mb.literal_context_map_size == 0 {
-		storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage)
+		storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, bw)
 	} else {
-		encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage)
+		encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, bw)
 	}
 
 	if mb.distance_context_map_size == 0 {
-		storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage)
+		storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, bw)
 	} else {
-		encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage)
+		encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, bw)
 	}
 
-	buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
-	buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
-	buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
+	buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, bw)
+	buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, bw)
+	buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, bw)
 	tree = nil
 
 	for _, cmd := range commands {
 		var cmd_code uint = uint(cmd.cmd_prefix_)
-		storeSymbol(command_enc, cmd_code, storage_ix, storage)
-		storeCommandExtra(&cmd, storage_ix, storage)
+		storeSymbol(command_enc, cmd_code, bw)
+		storeCommandExtra(&cmd, bw)
 		if mb.literal_context_map_size == 0 {
 			var j uint
 			for j = uint(cmd.insert_len_); j != 0; j-- {
-				storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage)
+				storeSymbol(literal_enc, uint(input[pos&mask]), bw)
 				pos++
 			}
 		} else {
@@ -1096,7 +1085,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
 			for j = uint(cmd.insert_len_); j != 0; j-- {
 				var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
 				var literal byte = input[pos&mask]
-				storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
+				storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, bw, literalContextBits)
 				prev_byte2 = prev_byte
 				prev_byte = literal
 				pos++
@@ -1112,13 +1101,13 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
 				var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
 				var distextra uint64 = uint64(cmd.dist_extra_)
 				if mb.distance_context_map_size == 0 {
-					storeSymbol(distance_enc, dist_code, storage_ix, storage)
+					storeSymbol(distance_enc, dist_code, bw)
 				} else {
 					var context uint = uint(commandDistanceContext(&cmd))
-					storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
+					storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, bw, distanceContextBits)
 				}
 
-				writeBits(uint(distnumextra), distextra, storage_ix, storage)
+				bw.writeBits(uint(distnumextra), distextra)
 			}
 		}
 	}
@@ -1127,7 +1116,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
 	cleanupBlockEncoder(command_enc)
 	cleanupBlockEncoder(literal_enc)
 	if is_last {
-		jumpToByteBoundary(storage_ix, storage)
+		bw.jumpToByteBoundary()
 	}
 }
 
@@ -1148,16 +1137,16 @@ func buildHistograms(input []byte, start_pos uint, mask uint, commands []command
 	}
 }
 
-func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) {
+func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, bw *bitWriter) {
 	var pos uint = start_pos
 	for _, cmd := range commands {
 		var cmd_code uint = uint(cmd.cmd_prefix_)
 		var j uint
-		writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage)
-		storeCommandExtra(&cmd, storage_ix, storage)
+		bw.writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]))
+		storeCommandExtra(&cmd, bw)
 		for j = uint(cmd.insert_len_); j != 0; j-- {
 			var literal byte = input[pos&mask]
-			writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage)
+			bw.writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]))
 			pos++
 		}
 
@@ -1166,13 +1155,13 @@ func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands
 			var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF
 			var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
 			var distextra uint32 = cmd.dist_extra_
-			writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage)
-			writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage)
+			bw.writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]))
+			bw.writeBits(uint(distnumextra), uint64(distextra))
 		}
 	}
 }
 
-func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) {
+func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
 	var lit_histo histogramLiteral
 	var cmd_histo histogramCommand
 	var dist_histo histogramDistance
@@ -1185,7 +1174,7 @@ func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint,
 	var tree []huffmanTree
 	var num_distance_symbols uint32 = params.dist.alphabet_size
 
-	storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+	storeCompressedMetaBlockHeader(is_last, length, bw)
 
 	histogramClearLiteral(&lit_histo)
 	histogramClearCommand(&cmd_histo)
@@ -1193,26 +1182,26 @@ func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint,
 
 	buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
 
-	writeBits(13, 0, storage_ix, storage)
+	bw.writeBits(13, 0)
 
 	tree = make([]huffmanTree, maxHuffmanTreeSize)
-	buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage)
-	buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage)
-	buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage)
+	buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], bw)
+	buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], bw)
+	buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], bw)
 	tree = nil
-	storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
+	storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
 	if is_last {
-		jumpToByteBoundary(storage_ix, storage)
+		bw.jumpToByteBoundary()
 	}
 }
 
-func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) {
+func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
 	var num_distance_symbols uint32 = params.dist.alphabet_size
 	var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1
 
-	storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
+	storeCompressedMetaBlockHeader(is_last, length, bw)
 
-	writeBits(13, 0, storage_ix, storage)
+	bw.writeBits(13, 0)
 
 	if len(commands) <= 128 {
 		var histogram = [numLiteralSymbols]uint32{0}
@@ -1232,11 +1221,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
 		}
 
 		buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */
-			8, lit_depth[:], lit_bits[:], storage_ix, storage)
+			8, lit_depth[:], lit_bits[:], bw)
 
-		storeStaticCommandHuffmanTree(storage_ix, storage)
-		storeStaticDistanceHuffmanTree(storage_ix, storage)
-		storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage)
+		storeStaticCommandHuffmanTree(bw)
+		storeStaticDistanceHuffmanTree(bw)
+		storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], bw)
 	} else {
 		var lit_histo histogramLiteral
 		var cmd_histo histogramCommand
@ -1252,49 +1241,43 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
|
||||||
histogramClearDistance(&dist_histo)
|
histogramClearDistance(&dist_histo)
|
||||||
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
||||||
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */
|
||||||
8, lit_depth[:], lit_bits[:], storage_ix, storage)
|
8, lit_depth[:], lit_bits[:], bw)
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */
|
||||||
10, cmd_depth[:], cmd_bits[:], storage_ix, storage)
|
10, cmd_depth[:], cmd_bits[:], bw)
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */
|
||||||
uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage)
|
uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], bw)
|
||||||
|
|
||||||
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
|
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
jumpToByteBoundary(storage_ix, storage)
|
bw.jumpToByteBoundary()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This is for storing uncompressed blocks (simple raw storage of
|
/* This is for storing uncompressed blocks (simple raw storage of
|
||||||
bytes-as-bytes). */
|
bytes-as-bytes). */
|
||||||
func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) {
|
func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, bw *bitWriter) {
|
||||||
var masked_pos uint = position & mask
|
var masked_pos uint = position & mask
|
||||||
storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage)
|
storeUncompressedMetaBlockHeader(uint(len), bw)
|
||||||
jumpToByteBoundary(storage_ix, storage)
|
bw.jumpToByteBoundary()
|
||||||
|
|
||||||
if masked_pos+len > mask+1 {
|
if masked_pos+len > mask+1 {
|
||||||
var len1 uint = mask + 1 - masked_pos
|
var len1 uint = mask + 1 - masked_pos
|
||||||
copy(storage[*storage_ix>>3:], input[masked_pos:][:len1])
|
bw.writeBytes(input[masked_pos:][:len1])
|
||||||
*storage_ix += len1 << 3
|
|
||||||
len -= len1
|
len -= len1
|
||||||
masked_pos = 0
|
masked_pos = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
copy(storage[*storage_ix>>3:], input[masked_pos:][:len])
|
bw.writeBytes(input[masked_pos:][:len])
|
||||||
*storage_ix += uint(len << 3)
|
|
||||||
|
|
||||||
/* We need to clear the next 4 bytes to continue to be
|
|
||||||
compatible with BrotliWriteBits. */
|
|
||||||
writeBitsPrepareStorage(*storage_ix, storage)
|
|
||||||
|
|
||||||
/* Since the uncompressed block itself may not be the final block, add an
|
/* Since the uncompressed block itself may not be the final block, add an
|
||||||
empty one after this. */
|
empty one after this. */
|
||||||
if is_final_block {
|
if is_final_block {
|
||||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
bw.writeBits(1, 1) /* islast */
|
||||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
bw.writeBits(1, 1) /* isempty */
|
||||||
jumpToByteBoundary(storage_ix, storage)
|
bw.jumpToByteBoundary()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
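The uncompressed path above leans on bitWriter methods that no hunk in this diff defines. A minimal, self-contained sketch of a writer with that interface (field names match the ones used later in encode.go; the bodies are illustrative only, not the package's actual implementation):

type bitWriter struct {
    dst   []byte // completed output bytes
    bits  uint64 // pending bits, least-significant bit first
    nbits uint   // number of valid pending bits (kept below 8 here)
}

// writeBits appends the low n bits of b to the stream, LSB first.
func (w *bitWriter) writeBits(n uint, b uint64) {
    w.bits |= b << w.nbits
    w.nbits += n
    for w.nbits >= 8 {
        w.dst = append(w.dst, byte(w.bits))
        w.bits >>= 8
        w.nbits -= 8
    }
}

// jumpToByteBoundary zero-pads up to the next byte boundary so that raw
// bytes (or the next meta-block after the last one) start byte-aligned.
func (w *bitWriter) jumpToByteBoundary() {
    if w.nbits > 0 {
        w.dst = append(w.dst, byte(w.bits))
        w.bits = 0
        w.nbits = 0
    }
}

// writeBytes copies raw bytes through; the caller is responsible for being
// byte-aligned, which is why storeUncompressedMetaBlock aligns first.
func (w *bitWriter) writeBytes(b []byte) {
    w.dst = append(w.dst, b...)
}

// getPos reports the current bit position, as used by the fragment
// compressors further down in this diff.
func (w *bitWriter) getPos() uint {
    return uint(len(w.dst))*8 + w.nbits
}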
|
|
|
@ -45,7 +45,7 @@ func isMatch5(p1 []byte, p2 []byte) bool {
|
||||||
and thus have to assign a non-zero depth for each literal.
|
and thus have to assign a non-zero depth for each literal.
|
||||||
Returns estimated compression ratio millibytes/char for encoding given input
|
Returns estimated compression ratio millibytes/char for encoding given input
|
||||||
with generated code. */
|
with generated code. */
|
||||||
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
|
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
|
||||||
var histogram = [256]uint32{0}
|
var histogram = [256]uint32{0}
|
||||||
var histogram_total uint
|
var histogram_total uint
|
||||||
var i uint
|
var i uint
|
||||||
|
@ -82,7 +82,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
|
||||||
8, depths, bits, storage_ix, storage)
|
8, depths, bits, bw)
|
||||||
{
|
{
|
||||||
var literal_ratio uint = 0
|
var literal_ratio uint = 0
|
||||||
for i = 0; i < 256; i++ {
|
for i = 0; i < 256; i++ {
|
||||||
|
@ -98,7 +98,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||||
|
|
||||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||||
"bits" based on "histogram" and stores it into the bit stream. */
|
"bits" based on "histogram" and stores it into the bit stream. */
|
||||||
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
||||||
var tree [129]huffmanTree
|
var tree [129]huffmanTree
|
||||||
var cmd_depth = [numCommandSymbols]byte{0}
|
var cmd_depth = [numCommandSymbols]byte{0}
|
||||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||||
|
@ -145,141 +145,141 @@ func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []ui
|
||||||
cmd_depth[448+8*i] = depth[56+i]
|
cmd_depth[448+8*i] = depth[56+i]
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: insertlen < 6210 */
|
/* REQUIRES: insertlen < 6210 */
|
||||||
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||||
if insertlen < 6 {
|
if insertlen < 6 {
|
||||||
var code uint = insertlen + 40
|
var code uint = insertlen + 40
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if insertlen < 130 {
|
} else if insertlen < 130 {
|
||||||
var tail uint = insertlen - 2
|
var tail uint = insertlen - 2
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
|
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
|
||||||
writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
|
bw.writeBits(uint(depth[inscode]), uint64(bits[inscode]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||||
histo[inscode]++
|
histo[inscode]++
|
||||||
} else if insertlen < 2114 {
|
} else if insertlen < 2114 {
|
||||||
var tail uint = insertlen - 66
|
var tail uint = insertlen - 66
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 50)
|
var code uint = uint(nbits + 50)
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else {
|
} else {
|
||||||
writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
|
bw.writeBits(uint(depth[61]), uint64(bits[61]))
|
||||||
writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
|
bw.writeBits(12, uint64(insertlen)-2114)
|
||||||
histo[61]++
|
histo[61]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||||
if insertlen < 22594 {
|
if insertlen < 22594 {
|
||||||
writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
|
bw.writeBits(uint(depth[62]), uint64(bits[62]))
|
||||||
writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
|
bw.writeBits(14, uint64(insertlen)-6210)
|
||||||
histo[62]++
|
histo[62]++
|
||||||
} else {
|
} else {
|
||||||
writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
|
bw.writeBits(uint(depth[63]), uint64(bits[63]))
|
||||||
writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
|
bw.writeBits(24, uint64(insertlen)-22594)
|
||||||
histo[63]++
|
histo[63]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||||
if copylen < 10 {
|
if copylen < 10 {
|
||||||
writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]), storage_ix, storage)
|
bw.writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]))
|
||||||
histo[copylen+14]++
|
histo[copylen+14]++
|
||||||
} else if copylen < 134 {
|
} else if copylen < 134 {
|
||||||
var tail uint = copylen - 6
|
var tail uint = copylen - 6
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
|
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if copylen < 2118 {
|
} else if copylen < 2118 {
|
||||||
var tail uint = copylen - 70
|
var tail uint = copylen - 70
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 28)
|
var code uint = uint(nbits + 28)
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else {
|
} else {
|
||||||
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
||||||
writeBits(24, uint64(copylen)-2118, storage_ix, storage)
|
bw.writeBits(24, uint64(copylen)-2118)
|
||||||
histo[39]++
|
histo[39]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||||
if copylen < 12 {
|
if copylen < 12 {
|
||||||
writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
|
bw.writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]))
|
||||||
histo[copylen-4]++
|
histo[copylen-4]++
|
||||||
} else if copylen < 72 {
|
} else if copylen < 72 {
|
||||||
var tail uint = copylen - 8
|
var tail uint = copylen - 8
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
|
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if copylen < 136 {
|
} else if copylen < 136 {
|
||||||
var tail uint = copylen - 8
|
var tail uint = copylen - 8
|
||||||
var code uint = (tail >> 5) + 30
|
var code uint = (tail >> 5) + 30
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(5, uint64(tail)&31, storage_ix, storage)
|
bw.writeBits(5, uint64(tail)&31)
|
||||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
} else if copylen < 2120 {
|
} else if copylen < 2120 {
|
||||||
var tail uint = copylen - 72
|
var tail uint = copylen - 72
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 28)
|
var code uint = uint(nbits + 28)
|
||||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||||
histo[code]++
|
histo[code]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
} else {
|
} else {
|
||||||
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
||||||
writeBits(24, uint64(copylen)-2120, storage_ix, storage)
|
bw.writeBits(24, uint64(copylen)-2120)
|
||||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||||
histo[39]++
|
histo[39]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||||
var d uint = distance + 3
|
var d uint = distance + 3
|
||||||
var nbits uint32 = log2FloorNonZero(d) - 1
|
var nbits uint32 = log2FloorNonZero(d) - 1
|
||||||
var prefix uint = (d >> nbits) & 1
|
var prefix uint = (d >> nbits) & 1
|
||||||
var offset uint = (2 + prefix) << nbits
|
var offset uint = (2 + prefix) << nbits
|
||||||
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
|
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
|
||||||
writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
|
bw.writeBits(uint(depth[distcode]), uint64(bits[distcode]))
|
||||||
writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
|
bw.writeBits(uint(nbits), uint64(d)-uint64(offset))
|
||||||
histo[distcode]++
|
histo[distcode]++
|
||||||
}
|
}
|
||||||
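A hand-worked instance of the arithmetic in emitDistance1 (the values are computed here for illustration, not taken from the source):

// distance = 100  =>  d = 103 (binary 1100111)
// nbits    = log2FloorNonZero(103) - 1 = 6 - 1 = 5
// prefix   = (103 >> 5) & 1 = 1
// offset   = (2 + 1) << 5 = 96
// distcode = 2*(5-1) + 1 + 80 = 89
// The prefix symbol for code 89 is written, followed by the extra value
// 103 - 96 = 7 in 5 bits.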
|
|
||||||
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, bw *bitWriter) {
|
||||||
var j uint
|
var j uint
|
||||||
for j = 0; j < len; j++ {
|
for j = 0; j < len; j++ {
|
||||||
var lit byte = input[j]
|
var lit byte = input[j]
|
||||||
writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
|
bw.writeBits(uint(depth[lit]), uint64(bits[lit]))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: len <= 1 << 24. */
|
/* REQUIRES: len <= 1 << 24. */
|
||||||
func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
func storeMetaBlockHeader1(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||||
var nibbles uint = 6
|
var nibbles uint = 6
|
||||||
|
|
||||||
/* ISLAST */
|
/* ISLAST */
|
||||||
writeBits(1, 0, storage_ix, storage)
|
bw.writeBits(1, 0)
|
||||||
|
|
||||||
if len <= 1<<16 {
|
if len <= 1<<16 {
|
||||||
nibbles = 4
|
nibbles = 4
|
||||||
|
@ -287,34 +287,11 @@ func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, sto
|
||||||
nibbles = 5
|
nibbles = 5
|
||||||
}
|
}
|
||||||
|
|
||||||
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
bw.writeBits(2, uint64(nibbles)-4)
|
||||||
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
bw.writeBits(nibbles*4, uint64(len)-1)
|
||||||
|
|
||||||
/* ISUNCOMPRESSED */
|
/* ISUNCOMPRESSED */
|
||||||
writeSingleBit(is_uncompressed, storage_ix, storage)
|
bw.writeSingleBit(is_uncompressed)
|
||||||
}
|
|
||||||
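To make the MNIBBLES/MLEN encoding above concrete, here is a hand-worked header for a 1<<18 byte block. The middle branch is hidden by this hunk and is assumed to check len <= 1<<20, which gives five nibbles:

// bw.writeBits(1, 0)          // ISLAST = 0
// bw.writeBits(2, 5-4)        // MNIBBLES - 4 = 1
// bw.writeBits(5*4, 1<<18-1)  // MLEN - 1 = 262143, in 20 bits
// bw.writeSingleBit(false)    // ISUNCOMPRESSED = 0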
|
|
||||||
func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
|
|
||||||
for n_bits > 0 {
|
|
||||||
var byte_pos uint = pos >> 3
|
|
||||||
var n_unchanged_bits uint = pos & 7
|
|
||||||
var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
|
|
||||||
var total_bits uint = n_unchanged_bits + n_changed_bits
|
|
||||||
var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
|
|
||||||
var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
|
|
||||||
var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
|
|
||||||
array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
|
|
||||||
n_bits -= n_changed_bits
|
|
||||||
bits >>= n_changed_bits
|
|
||||||
pos += n_changed_bits
|
|
||||||
}
|
|
||||||
}
|
|
||||||
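The free function above is deleted, but the one-pass compressor below still patches the MLEN field in place through bw.updateBits. A sketch of that method, under the assumption that the patched bits have already been flushed into w.dst (illustrative; the real method may also need to handle bits still pending in the writer):

func (w *bitWriter) updateBits(nBits uint, bits uint32, pos uint) {
    for nBits > 0 {
        bytePos := pos >> 3
        unchanged := pos & 7
        changed := nBits
        if 8-unchanged < changed {
            changed = 8 - unchanged
        }
        // Keep the bits outside [unchanged, unchanged+changed) untouched.
        mask := ^uint32(0)<<(unchanged+changed) | (uint32(1)<<unchanged - 1)
        keep := uint32(w.dst[bytePos]) & mask
        newBits := bits & (uint32(1)<<changed - 1)
        w.dst[bytePos] = byte(newBits<<unchanged | keep)
        nBits -= changed
        bits >>= changed
        pos += changed
    }
}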
|
|
||||||
func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
|
||||||
var bitpos uint = new_storage_ix & 7
|
|
||||||
var mask uint = (1 << bitpos) - 1
|
|
||||||
storage[new_storage_ix>>3] &= byte(mask)
|
|
||||||
*storage_ix = new_storage_ix
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var shouldMergeBlock_kSampleRate uint = 43
|
var shouldMergeBlock_kSampleRate uint = 43
|
||||||
|
@ -345,151 +322,26 @@ func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertl
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
|
func emitUncompressedMetaBlock1(data []byte, storage_ix_start uint, bw *bitWriter) {
|
||||||
var len uint = uint(-cap(end) + cap(begin))
|
bw.rewind(storage_ix_start)
|
||||||
rewindBitPosition1(storage_ix_start, storage_ix, storage)
|
storeMetaBlockHeader1(uint(len(data)), true, bw)
|
||||||
storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
|
bw.jumpToByteBoundary()
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
bw.writeBytes(data)
|
||||||
copy(storage[*storage_ix>>3:], begin[:len])
|
|
||||||
*storage_ix += uint(len << 3)
|
|
||||||
storage[*storage_ix>>3] = 0
|
|
||||||
}
|
}
|
||||||
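emitUncompressedMetaBlock1 relies on bw.rewind to throw away everything emitted since the saved header position and start over. A plausible sketch, assuming the writer flushes completed bytes into dst and keeps at most seven pending bits (this is an assumption, not the package's actual code):

// rewind discards all bits written at or after bit position pos.
func (w *bitWriter) rewind(pos uint) {
    w.nbits = pos & 7
    w.bits = 0
    if w.nbits > 0 {
        // Reload the partial byte that pos points into, keeping only the
        // bits that precede pos.
        w.bits = uint64(w.dst[pos>>3]) & (1<<w.nbits - 1)
    }
    w.dst = w.dst[:pos>>3]
}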
|
|
||||||
var kCmdHistoSeed = [128]uint32{
	0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 0, 0, 0, 0,
}
|
||||||
|
|
||||||
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
|
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
|
||||||
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
|
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
|
||||||
|
|
||||||
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
||||||
var cmd_histo [128]uint32
|
var cmd_histo [128]uint32
|
||||||
var ip_end int
|
var ip_end int
|
||||||
var next_emit int = 0
|
var next_emit int = 0
|
||||||
|
@ -500,7 +352,7 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||||
var metablock_start int = input
|
var metablock_start int = input
|
||||||
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
|
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
|
||||||
var total_block_size uint = block_size
|
var total_block_size uint = block_size
|
||||||
var mlen_storage_ix uint = *storage_ix + 3
|
var mlen_storage_ix uint = bw.getPos() + 3
|
||||||
var lit_depth [256]byte
|
var lit_depth [256]byte
|
||||||
var lit_bits [256]uint16
|
var lit_bits [256]uint16
|
||||||
var literal_ratio uint
|
var literal_ratio uint
|
||||||
|
@ -517,21 +369,21 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||||
|
|
||||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||||
we can update it later if we decide to extend this meta-block. */
|
we can update it later if we decide to extend this meta-block. */
|
||||||
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
storeMetaBlockHeader1(block_size, false, bw)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
writeBits(13, 0, storage_ix, storage)
|
bw.writeBits(13, 0)
|
||||||
|
|
||||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
||||||
{
|
{
|
||||||
/* Store the pre-compressed command and distance prefix codes. */
|
/* Store the pre-compressed command and distance prefix codes. */
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
|
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
|
||||||
writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
|
bw.writeBits(8, uint64(cmd_code[i>>3]))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)
|
bw.writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]))
|
||||||
|
|
||||||
/* Initialize the command and distance histograms. We will gather
|
/* Initialize the command and distance histograms. We will gather
|
||||||
statistics of command and distance codes during the processing
|
statistics of command and distance codes during the processing
|
||||||
|
@ -630,27 +482,27 @@ emit_commands:
|
||||||
var insert uint = uint(base - next_emit)
|
var insert uint = uint(base - next_emit)
|
||||||
ip += int(matched)
|
ip += int(matched)
|
||||||
if insert < 6210 {
|
if insert < 6210 {
|
||||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||||
emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
|
emitUncompressedMetaBlock1(in[metablock_start:base], mlen_storage_ix-3, bw)
|
||||||
input_size -= uint(base - input)
|
input_size -= uint(base - input)
|
||||||
input = base
|
input = base
|
||||||
next_emit = input
|
next_emit = input
|
||||||
goto next_block
|
goto next_block
|
||||||
} else {
|
} else {
|
||||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||||
if distance == last_distance {
|
if distance == last_distance {
|
||||||
writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
|
bw.writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]))
|
||||||
cmd_histo[64]++
|
cmd_histo[64]++
|
||||||
} else {
|
} else {
|
||||||
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
last_distance = distance
|
last_distance = distance
|
||||||
}
|
}
|
||||||
|
|
||||||
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
|
|
||||||
next_emit = ip
|
next_emit = ip
|
||||||
if ip >= ip_limit {
|
if ip >= ip_limit {
|
||||||
|
@ -686,8 +538,8 @@ emit_commands:
|
||||||
}
|
}
|
||||||
ip += int(matched)
|
ip += int(matched)
|
||||||
last_distance = int(base - candidate) /* > 0 */
|
last_distance = int(base - candidate) /* > 0 */
|
||||||
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
|
|
||||||
next_emit = ip
|
next_emit = ip
|
||||||
if ip >= ip_limit {
|
if ip >= ip_limit {
|
||||||
|
@ -733,7 +585,7 @@ emit_remainder:
|
||||||
nibbles. */
|
nibbles. */
|
||||||
total_block_size += block_size
|
total_block_size += block_size
|
||||||
|
|
||||||
updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
|
bw.updateBits(20, uint32(total_block_size-1), mlen_storage_ix)
|
||||||
goto emit_commands
|
goto emit_commands
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -741,13 +593,13 @@ emit_remainder:
|
||||||
if next_emit < ip_end {
|
if next_emit < ip_end {
|
||||||
var insert uint = uint(ip_end - next_emit)
|
var insert uint = uint(ip_end - next_emit)
|
||||||
if insert < 6210 {
|
if insert < 6210 {
|
||||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||||
emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
|
emitUncompressedMetaBlock1(in[metablock_start:ip_end], mlen_storage_ix-3, bw)
|
||||||
} else {
|
} else {
|
||||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -763,30 +615,29 @@ next_block:
|
||||||
|
|
||||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||||
we can update it later if we decide to extend this meta-block. */
|
we can update it later if we decide to extend this meta-block. */
|
||||||
mlen_storage_ix = *storage_ix + 3
|
mlen_storage_ix = bw.getPos() + 3
|
||||||
|
|
||||||
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
storeMetaBlockHeader1(block_size, false, bw)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
writeBits(13, 0, storage_ix, storage)
|
bw.writeBits(13, 0)
|
||||||
|
|
||||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
||||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
|
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, bw)
|
||||||
goto emit_commands
|
goto emit_commands
|
||||||
}
|
}
|
||||||
|
|
||||||
if !is_last {
|
if !is_last {
|
||||||
/* If this is not the last block, update the command and distance prefix
|
/* If this is not the last block, update the command and distance prefix
|
||||||
codes for the next block and store the compressed forms. */
|
codes for the next block and store the compressed forms. */
|
||||||
cmd_code[0] = 0
|
var bw bitWriter
|
||||||
|
bw.dst = cmd_code
|
||||||
*cmd_code_numbits = 0
|
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, &bw)
|
||||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
|
*cmd_code_numbits = bw.getPos()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
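Both meta-block headers written in this function follow MLEN with writeBits(13, 0), commented only as "no block splits, no contexts". A plausible breakdown of those thirteen zero bits, inferred from the Brotli meta-block header layout rather than spelled out in this diff:

// 1 bit   NBLTYPESL = 1  (a single literal block type)
// 1 bit   NBLTYPESI = 1  (a single insert-and-copy block type)
// 1 bit   NBLTYPESD = 1  (a single distance block type)
// 2 bits  NPOSTFIX  = 0
// 4 bits  NDIRECT   = 0
// 2 bits  literal context mode for the one block type
// 1 bit   NTREESL   = 1  (one literal prefix tree)
// 1 bit   NTREESD   = 1  (one distance prefix tree)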
|
|
||||||
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
||||||
meta-blocks, and updates the "*storage_ix" bit position.
|
|
||||||
|
|
||||||
If "is_last" is 1, emits an additional empty last meta-block.
|
If "is_last" is 1, emits an additional empty last meta-block.
|
||||||
|
|
||||||
|
@ -807,28 +658,28 @@ next_block:
|
||||||
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
|
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
|
||||||
OUTPUT: maximal copy distance <= |input_size|
|
OUTPUT: maximal copy distance <= |input_size|
|
||||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||||
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
||||||
var initial_storage_ix uint = *storage_ix
|
var initial_storage_ix uint = bw.getPos()
|
||||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||||
|
|
||||||
if input_size == 0 {
|
if input_size == 0 {
|
||||||
assert(is_last)
|
assert(is_last)
|
||||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
bw.writeBits(1, 1) /* islast */
|
||||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
bw.writeBits(1, 1) /* isempty */
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
bw.jumpToByteBoundary()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)
|
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, bw)
|
||||||
|
|
||||||
/* If output is larger than single uncompressed block, rewrite it. */
|
/* If output is larger than single uncompressed block, rewrite it. */
|
||||||
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
||||||
emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
|
emitUncompressedMetaBlock1(input[:input_size], initial_storage_ix, bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
bw.writeBits(1, 1) /* islast */
|
||||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
bw.writeBits(1, 1) /* isempty */
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
bw.jumpToByteBoundary()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,7 +41,7 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool {
|
||||||
|
|
||||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||||
"bits" based on "histogram" and stores it into the bit stream. */
|
"bits" based on "histogram" and stores it into the bit stream. */
|
||||||
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
||||||
var tree [129]huffmanTree
|
var tree [129]huffmanTree
|
||||||
var cmd_depth = [numCommandSymbols]byte{0}
|
var cmd_depth = [numCommandSymbols]byte{0}
|
||||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||||
|
@ -87,10 +87,10 @@ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uin
|
||||||
cmd_depth[448+8*i] = depth[16+i]
|
cmd_depth[448+8*i] = depth[16+i]
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitInsertLen(insertlen uint32, commands *[]uint32) {
|
func emitInsertLen(insertlen uint32, commands *[]uint32) {
|
||||||
|
@ -197,11 +197,11 @@ func emitDistance(distance uint32, commands *[]uint32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: len <= 1 << 24. */
|
/* REQUIRES: len <= 1 << 24. */
|
||||||
func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
func storeMetaBlockHeader(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||||
var nibbles uint = 6
|
var nibbles uint = 6
|
||||||
|
|
||||||
/* ISLAST */
|
/* ISLAST */
|
||||||
writeBits(1, 0, storage_ix, storage)
|
bw.writeBits(1, 0)
|
||||||
|
|
||||||
if len <= 1<<16 {
|
if len <= 1<<16 {
|
||||||
nibbles = 4
|
nibbles = 4
|
||||||
|
@ -209,11 +209,11 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, stor
|
||||||
nibbles = 5
|
nibbles = 5
|
||||||
}
|
}
|
||||||
|
|
||||||
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
bw.writeBits(2, uint64(nibbles)-4)
|
||||||
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
bw.writeBits(nibbles*4, uint64(len)-1)
|
||||||
|
|
||||||
/* ISUNCOMPRESSED */
|
/* ISUNCOMPRESSED */
|
||||||
writeSingleBit(is_uncompressed, storage_ix, storage)
|
bw.writeSingleBit(is_uncompressed)
|
||||||
}
|
}
|
||||||
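writeSingleBit is used by both storeMetaBlockHeader variants but is not shown in any hunk of this diff; a one-line sketch in terms of writeBits (assumed):

func (w *bitWriter) writeSingleBit(bit bool) {
    if bit {
        w.writeBits(1, 1)
    } else {
        w.writeBits(1, 0)
    }
}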
|
|
||||||
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
|
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
|
||||||
|
@ -440,163 +440,20 @@ emit_remainder:
|
||||||
}
|
}
|
||||||
|
|
||||||
var storeCommands_kNumExtraBits = [128]uint32{
	0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
	17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
}
|
||||||
var storeCommands_kInsertOffset = [24]uint32{
	0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
	1090, 2114, 6210, 22594,
}
|
||||||
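A hand-worked example of how storeCommands below unpacks one entry of the command buffer using these tables (the packed value is made up for illustration):

// cmd   = uint32(12) | 9<<8                  // packed: code 12, extra 9
// code  = cmd & 0xFF                         // 12 (an insert code, < 24)
// extra = cmd >> 8                           // 9
// storeCommands_kNumExtraBits[12] == 4       // so the value 9 fits in the extra bits
// insert length = storeCommands_kInsertOffset[12] + 9 = 34 + 9 = 43 literals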
|
|
||||||
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) {
|
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, bw *bitWriter) {
|
||||||
var lit_depths [256]byte
|
var lit_depths [256]byte
|
||||||
var lit_bits [256]uint16
|
var lit_bits [256]uint16
|
||||||
var lit_histo = [256]uint32{0}
|
var lit_histo = [256]uint32{0}
|
||||||
|
@ -609,7 +466,7 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
|
||||||
8, lit_depths[:], lit_bits[:], storage_ix, storage)
|
8, lit_depths[:], lit_bits[:], bw)
|
||||||
|
|
||||||
for i = 0; i < num_commands; i++ {
|
for i = 0; i < num_commands; i++ {
|
||||||
var code uint32 = commands[i] & 0xFF
|
var code uint32 = commands[i] & 0xFF
|
||||||
|
@ -621,21 +478,21 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||||
cmd_histo[2] += 1
|
cmd_histo[2] += 1
|
||||||
cmd_histo[64] += 1
|
cmd_histo[64] += 1
|
||||||
cmd_histo[84] += 1
|
cmd_histo[84] += 1
|
||||||
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage)
|
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], bw)
|
||||||
|
|
||||||
for i = 0; i < num_commands; i++ {
|
for i = 0; i < num_commands; i++ {
|
||||||
var cmd uint32 = commands[i]
|
var cmd uint32 = commands[i]
|
||||||
var code uint32 = cmd & 0xFF
|
var code uint32 = cmd & 0xFF
|
||||||
var extra uint32 = cmd >> 8
|
var extra uint32 = cmd >> 8
|
||||||
assert(code < 128)
|
assert(code < 128)
|
||||||
writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage)
|
bw.writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]))
|
||||||
writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage)
|
bw.writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra))
|
||||||
if code < 24 {
|
if code < 24 {
|
||||||
var insert uint32 = storeCommands_kInsertOffset[code] + extra
|
var insert uint32 = storeCommands_kInsertOffset[code] + extra
|
||||||
var j uint32
|
var j uint32
|
||||||
for j = 0; j < insert; j++ {
|
for j = 0; j < insert; j++ {
|
||||||
var lit byte = literals[0]
|
var lit byte = literals[0]
|
||||||
writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage)
|
bw.writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]))
|
||||||
literals = literals[1:]
|
literals = literals[1:]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -663,22 +520,13 @@ func shouldCompress(input []byte, input_size uint, num_literals uint) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
func emitUncompressedMetaBlock(input []byte, input_size uint, bw *bitWriter) {
|
||||||
var bitpos uint = new_storage_ix & 7
|
storeMetaBlockHeader(input_size, true, bw)
|
||||||
var mask uint = (1 << bitpos) - 1
|
bw.jumpToByteBoundary()
|
||||||
storage[new_storage_ix>>3] &= byte(mask)
|
bw.writeBytes(input[:input_size])
|
||||||
*storage_ix = new_storage_ix
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) {
|
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, bw *bitWriter) {
|
||||||
storeMetaBlockHeader(input_size, true, storage_ix, storage)
|
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
|
||||||
copy(storage[*storage_ix>>3:], input[:input_size])
|
|
||||||
*storage_ix += input_size << 3
|
|
||||||
storage[*storage_ix>>3] = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) {
|
|
||||||
/* Save the start of the first block for position and distance computations.
|
/* Save the start of the first block for position and distance computations.
|
||||||
*/
|
*/
|
||||||
var base_ip []byte = input
|
var base_ip []byte = input
|
||||||
|
@ -692,17 +540,17 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
num_literals = uint(-cap(literals) + cap(literal_buf))
|
num_literals = uint(-cap(literals) + cap(literal_buf))
|
||||||
if shouldCompress(input, block_size, num_literals) {
|
if shouldCompress(input, block_size, num_literals) {
|
||||||
var num_commands uint = uint(-cap(commands) + cap(command_buf))
|
var num_commands uint = uint(-cap(commands) + cap(command_buf))
|
||||||
storeMetaBlockHeader(block_size, false, storage_ix, storage)
|
storeMetaBlockHeader(block_size, false, bw)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
writeBits(13, 0, storage_ix, storage)
|
bw.writeBits(13, 0)
|
||||||
|
|
||||||
storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage)
|
storeCommands(literal_buf, num_literals, command_buf, num_commands, bw)
|
||||||
} else {
|
} else {
|
||||||
/* Since we did not find many backward references and the entropy of
|
/* Since we did not find many backward references and the entropy of
|
||||||
the data is close to 8 bits, we can simply emit an uncompressed block.
|
the data is close to 8 bits, we can simply emit an uncompressed block.
|
||||||
This makes compression speed of uncompressible data about 3x faster. */
|
This makes compression speed of uncompressible data about 3x faster. */
|
||||||
emitUncompressedMetaBlock(input, block_size, storage_ix, storage)
|
emitUncompressedMetaBlock(input, block_size, bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
input = input[block_size:]
|
input = input[block_size:]
|
||||||
|
@ -710,8 +558,7 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
||||||
meta-blocks, and updates the "*storage_ix" bit position.
|
|
||||||
|
|
||||||
If "is_last" is 1, emits an additional empty last meta-block.
|
If "is_last" is 1, emits an additional empty last meta-block.
|
||||||
|
|
||||||
|
@ -723,8 +570,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
REQUIRES: "table_size" is a power of two
|
REQUIRES: "table_size" is a power of two
|
||||||
OUTPUT: maximal copy distance <= |input_size|
|
OUTPUT: maximal copy distance <= |input_size|
|
||||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||||
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {
|
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
|
||||||
var initial_storage_ix uint = *storage_ix
|
var initial_storage_ix uint = bw.getPos()
|
||||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||||
var min_match uint
|
var min_match uint
|
||||||
if table_bits <= 15 {
|
if table_bits <= 15 {
|
||||||
|
@ -732,17 +579,17 @@ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, comman
|
||||||
} else {
|
} else {
|
||||||
min_match = 6
|
min_match = 6
|
||||||
}
|
}
|
||||||
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)
|
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, bw)
|
||||||
|
|
||||||
/* If output is larger than single uncompressed block, rewrite it. */
|
/* If output is larger than single uncompressed block, rewrite it. */
|
||||||
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
||||||
rewindBitPosition(initial_storage_ix, storage_ix, storage)
|
bw.rewind(initial_storage_ix)
|
||||||
emitUncompressedMetaBlock(input, input_size, storage_ix, storage)
|
emitUncompressedMetaBlock(input, input_size, bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
bw.writeBits(1, 1) /* islast */
|
||||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
bw.writeBits(1, 1) /* isempty */
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
bw.jumpToByteBoundary()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
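A hypothetical caller of the bitWriter-based two-pass entry point, to show how the new signature fits together. The buffer and table sizes are assumptions (the encoder sizes command_buf and literal_buf to its two-pass block size, 1<<17 in the reference implementation); they are not taken from this diff.

func compressOneShotTwoPass(input []byte) []byte {
    const blockSize = 1 << 17 // assumed two-pass block size
    var bw bitWriter
    commandBuf := make([]uint32, blockSize)
    literalBuf := make([]byte, blockSize)
    table := make([]int, 1<<14) // any power-of-two table size
    compressFragmentTwoPass(input, uint(len(input)), true,
        commandBuf, literalBuf, table, uint(len(table)), &bw)
    // bw.dst now holds the emitted meta-blocks; the stream's window-bits
    // header is written separately by the encoder (encodeWindowBits).
    return bw.dst
}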
|
|
171
encode.go
|
@ -87,11 +87,9 @@ type Writer struct {
|
||||||
last_processed_pos_ uint64
|
last_processed_pos_ uint64
|
||||||
dist_cache_ [numDistanceShortCodes]int
|
dist_cache_ [numDistanceShortCodes]int
|
||||||
saved_dist_cache_ [4]int
|
saved_dist_cache_ [4]int
|
||||||
last_bytes_ uint16
|
|
||||||
last_bytes_bits_ byte
|
|
||||||
prev_byte_ byte
|
prev_byte_ byte
|
||||||
prev_byte2_ byte
|
prev_byte2_ byte
|
||||||
storage []byte
|
bw bitWriter
|
||||||
small_table_ [1 << 10]int
|
small_table_ [1 << 10]int
|
||||||
large_table_ []int
|
large_table_ []int
|
||||||
large_table_size_ uint
|
large_table_size_ uint
|
||||||
|
@ -141,14 +139,6 @@ func wrapPosition(position uint64) uint32 {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Writer) getStorage(size int) []byte {
|
|
||||||
if len(s.storage) < size {
|
|
||||||
s.storage = make([]byte, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.storage
|
|
||||||
}
|
|
||||||
|
|
||||||
func hashTableSize(max_table_size uint, input_size uint) uint {
|
func hashTableSize(max_table_size uint, input_size uint) uint {
|
||||||
var htsize uint = 256
|
var htsize uint = 256
|
||||||
for htsize < max_table_size && htsize < input_size {
|
for htsize < max_table_size && htsize < input_size {
|
||||||
|
@ -194,23 +184,18 @@ func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []i
|
||||||
return table
|
return table
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) {
|
func encodeWindowBits(lgwin int, large_window bool, bw *bitWriter) {
|
||||||
if large_window {
|
if large_window {
|
||||||
*last_bytes = uint16((lgwin&0x3F)<<8 | 0x11)
|
bw.writeBits(14, uint64((lgwin&0x3F)<<8|0x11))
|
||||||
*last_bytes_bits = 14
|
|
||||||
} else {
|
} else {
|
||||||
if lgwin == 16 {
|
if lgwin == 16 {
|
||||||
*last_bytes = 0
|
bw.writeBits(1, 0)
|
||||||
*last_bytes_bits = 1
|
|
||||||
} else if lgwin == 17 {
|
} else if lgwin == 17 {
|
||||||
*last_bytes = 1
|
bw.writeBits(7, 1)
|
||||||
*last_bytes_bits = 7
|
|
||||||
} else if lgwin > 17 {
|
} else if lgwin > 17 {
|
||||||
*last_bytes = uint16((lgwin-17)<<1 | 0x01)
|
bw.writeBits(4, uint64((lgwin-17)<<1|0x01))
|
||||||
*last_bytes_bits = 4
|
|
||||||
} else {
|
} else {
|
||||||
*last_bytes = uint16((lgwin-8)<<4 | 0x01)
|
bw.writeBits(7, uint64((lgwin-8)<<4|0x01))
|
||||||
*last_bytes_bits = 7
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -432,18 +417,15 @@ func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint,
|
||||||
return contextUTF8
|
return contextUTF8
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) {
|
func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, bw *bitWriter) {
|
||||||
var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos)
|
var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos)
|
||||||
var last_bytes uint16
|
|
||||||
var last_bytes_bits byte
|
|
||||||
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
|
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
|
||||||
var block_params encoderParams = *params
|
var block_params encoderParams = *params
|
||||||
|
|
||||||
if bytes == 0 {
|
if bytes == 0 {
|
||||||
/* Write the ISLAST and ISEMPTY bits. */
|
/* Write the ISLAST and ISEMPTY bits. */
|
||||||
writeBits(2, 3, storage_ix, storage)
|
bw.writeBits(2, 3)
|
||||||
|
bw.jumpToByteBoundary()
|
||||||
*storage_ix = (*storage_ix + 7) &^ 7
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -452,17 +434,15 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
|
||||||
CreateBackwardReferences is now unused. */
|
CreateBackwardReferences is now unused. */
|
||||||
copy(dist_cache, saved_dist_cache[:4])
|
copy(dist_cache, saved_dist_cache[:4])
|
||||||
|
|
||||||
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
|
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
assert(*storage_ix <= 14)
|
savedPos := bw.getPos()
|
||||||
last_bytes = uint16(storage[1])<<8 | uint16(storage[0])
|
|
||||||
last_bytes_bits = byte(*storage_ix)
|
|
||||||
if params.quality <= maxQualityForStaticEntropyCodes {
|
if params.quality <= maxQualityForStaticEntropyCodes {
|
||||||
storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
|
storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
|
||||||
} else if params.quality < minQualityForBlockSplit {
|
} else if params.quality < minQualityForBlockSplit {
|
||||||
storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
|
storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
|
||||||
} else {
|
} else {
|
||||||
mb := getMetaBlockSplit()
|
mb := getMetaBlockSplit()
|
||||||
if params.quality < minQualityForHqBlockSplitting {
|
if params.quality < minQualityForHqBlockSplitting {
|
||||||
|
@ -489,18 +469,15 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
|
||||||
optimizeHistograms(num_effective_dist_codes, mb)
|
optimizeHistograms(num_effective_dist_codes, mb)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage)
|
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, bw)
|
||||||
freeMetaBlockSplit(mb)
|
freeMetaBlockSplit(mb)
|
||||||
}
|
}
|
||||||
|
|
||||||
if bytes+4 < *storage_ix>>3 {
|
if bytes+4 < bw.getPos()>>3 {
|
||||||
/* Restore the distance cache and last byte. */
|
/* Restore the distance cache and last byte. */
|
||||||
copy(dist_cache, saved_dist_cache[:4])
|
copy(dist_cache, saved_dist_cache[:4])
|
||||||
|
bw.rewind(savedPos)
|
||||||
storage[0] = byte(last_bytes)
|
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
|
||||||
storage[1] = byte(last_bytes >> 8)
|
|
||||||
*storage_ix = uint(last_bytes_bits)
|
|
||||||
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -533,8 +510,10 @@ func ensureInitialized(s *Writer) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
s.last_bytes_bits_ = 0
|
s.bw.bits = 0
|
||||||
s.last_bytes_ = 0
|
s.bw.nbits = 0
|
||||||
|
s.bw.dst = s.bw.dst[:0]
|
||||||
|
|
||||||
s.remaining_metadata_bytes_ = math.MaxUint32
|
s.remaining_metadata_bytes_ = math.MaxUint32
|
||||||
|
|
||||||
sanitizeParams(&s.params)
|
sanitizeParams(&s.params)
|
||||||
|
@ -550,7 +529,7 @@ func ensureInitialized(s *Writer) bool {
|
||||||
lgwin = brotli_max_int(lgwin, 18)
|
lgwin = brotli_max_int(lgwin, 18)
|
||||||
}
|
}
|
||||||
|
|
||||||
encodeWindowBits(lgwin, s.params.large_window, &s.last_bytes_, &s.last_bytes_bits_)
|
encodeWindowBits(lgwin, s.params.large_window, &s.bw)
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.params.quality == fastOnePassCompressionQuality {
|
if s.params.quality == fastOnePassCompressionQuality {
|
||||||
|
@@ -782,8 +761,6 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
 	}

 	if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality {
-		var storage []byte
-		var storage_ix uint = uint(s.last_bytes_bits_)
 		var table_size uint
 		var table []int


@@ -793,20 +770,16 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
 			return true
 		}

-		storage = s.getStorage(int(2*bytes + 503))
-		storage[0] = byte(s.last_bytes_)
-		storage[1] = byte(s.last_bytes_ >> 8)
 		table = getHashTable(s, s.params.quality, uint(bytes), &table_size)
 		if s.params.quality == fastOnePassCompressionQuality {
-			compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
+			compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
 		} else {
-			compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage)
+			compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &s.bw)
 		}

-		s.last_bytes_ = uint16(storage[storage_ix>>3])
-		s.last_bytes_bits_ = byte(storage_ix & 7)
 		updateLastProcessedPos(s)
-		s.writeOutput(storage[:storage_ix>>3])
+		s.writeOutput(s.bw.dst)
+		s.bw.dst = s.bw.dst[:0]
 		return true
 	}
 	{

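The fast path now drains the writer with s.writeOutput(s.bw.dst) and then truncates dst, so the next meta-block reuses the same backing array instead of allocating fresh storage. The same two-line pattern recurs after every meta-block below; a hypothetical helper would look like this:

// Hypothetical helper; the commit writes these two lines out each time.
func flushOutput(s *Writer) {
	s.writeOutput(s.bw.dst) // hand the accumulated bytes to the output sink
	s.bw.dst = s.bw.dst[:0] // keep capacity, drop contents
}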
@@ -883,13 +856,7 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
 	assert(s.input_pos_-s.last_flush_pos_ <= 1<<24)
 	{
 		var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_)
-		var storage []byte = s.getStorage(int(2*metablock_size + 503))
-		var storage_ix uint = uint(s.last_bytes_bits_)
-		storage[0] = byte(s.last_bytes_)
-		storage[1] = byte(s.last_bytes_ >> 8)
-		writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage)
-		s.last_bytes_ = uint16(storage[storage_ix>>3])
-		s.last_bytes_bits_ = byte(storage_ix & 7)
+		writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &s.bw)
 		s.last_flush_pos_ = s.input_pos_
 		if updateLastProcessedPos(s) {
 			hasherReset(s.hasher_)

@@ -910,28 +877,22 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
 		   emitting an uncompressed block. */
 		copy(s.saved_dist_cache_[:], s.dist_cache_[:])

-		s.writeOutput(storage[:storage_ix>>3])
+		s.writeOutput(s.bw.dst)
+		s.bw.dst = s.bw.dst[:0]
 		return true
 	}
 }

-/* Dumps remaining output bits and metadata header to |header|.
-   Returns number of produced bytes.
-   REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long.
+/* Dumps remaining output bits and metadata header to s.bw.
    REQUIRED: |block_size| <= (1 << 24). */
-func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint {
-	var storage_ix uint
-	storage_ix = uint(s.last_bytes_bits_)
-	header[0] = byte(s.last_bytes_)
-	header[1] = byte(s.last_bytes_ >> 8)
-	s.last_bytes_ = 0
-	s.last_bytes_bits_ = 0
+func writeMetadataHeader(s *Writer, block_size uint) {
+	bw := &s.bw

-	writeBits(1, 0, &storage_ix, header)
-	writeBits(2, 3, &storage_ix, header)
-	writeBits(1, 0, &storage_ix, header)
+	bw.writeBits(1, 0)
+	bw.writeBits(2, 3)
+	bw.writeBits(1, 0)
 	if block_size == 0 {
-		writeBits(2, 0, &storage_ix, header)
+		bw.writeBits(2, 0)
 	} else {
 		var nbits uint32
 		if block_size == 1 {

@@ -940,34 +901,19 @@ func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint {
 			nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1
 		}
 		var nbytes uint32 = (nbits + 7) / 8
-		writeBits(2, uint64(nbytes), &storage_ix, header)
-		writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header)
+		bw.writeBits(2, uint64(nbytes))
+		bw.writeBits(uint(8*nbytes), uint64(block_size)-1)
 	}

-	return (storage_ix + 7) >> 3
+	bw.jumpToByteBoundary()
 }

 func injectBytePaddingBlock(s *Writer) {
-	var seal uint32 = uint32(s.last_bytes_)
-	var seal_bits uint = uint(s.last_bytes_bits_)
-	s.last_bytes_ = 0
-	s.last_bytes_bits_ = 0
-
 	/* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */
-	seal |= 0x6 << seal_bits
-
-	seal_bits += 6
-
-	destination := s.tiny_buf_.u8[:]
-
-	destination[0] = byte(seal)
-	if seal_bits > 8 {
-		destination[1] = byte(seal >> 8)
-	}
-	if seal_bits > 16 {
-		destination[2] = byte(seal >> 16)
-	}
-	s.writeOutput(destination[:(seal_bits+7)>>3])
+	s.bw.writeBits(6, 0x6)
+	s.bw.jumpToByteBoundary()
+	s.writeOutput(s.bw.dst)
+	s.bw.dst = s.bw.dst[:0]
 }

 func checkFlushComplete(s *Writer) {

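Worked example of the padding block above: the six bits 0x6 described by the comment (is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00) followed by byte alignment produce the single byte 0x06, assuming the writer starts empty.

func examplePaddingBlock() []byte {
	var bw bitWriter
	bw.writeBits(6, 0x6)    // 0b000110, least-significant bit first
	bw.jumpToByteBoundary() // pad the remaining two bits with zeros
	return bw.dst           // []byte{0x06}
}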
@@ -999,7 +945,7 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
 	}

 	for {
-		if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+		if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
 			injectBytePaddingBlock(s)
 			continue
 		}

@@ -1011,9 +957,6 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
 			var block_size uint = brotli_min_size_t(block_size_limit, *available_in)
 			var is_last bool = (*available_in == block_size) && (op == int(operationFinish))
 			var force_flush bool = (*available_in == block_size) && (op == int(operationFlush))
-			var max_out_size uint = 2*block_size + 503
-			var storage []byte = nil
-			var storage_ix uint = uint(s.last_bytes_bits_)
 			var table_size uint
 			var table []int


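The flush check changes from s.last_bytes_bits_ != 0 to s.bw.nbits&7 != 0: dst always holds whole bytes, so the stream is byte-aligned exactly when the pending bit count is a multiple of eight. A one-line restatement of the condition, sketched for reference only:

// Sketch only: true when injectBytePaddingBlock is needed before a flush.
func needsBytePadding(w *bitWriter) bool {
	return w.nbits&7 != 0
}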
@@ -1022,25 +965,18 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
 				continue
 			}

-			storage = s.getStorage(int(max_out_size))
-
-			storage[0] = byte(s.last_bytes_)
-			storage[1] = byte(s.last_bytes_ >> 8)
 			table = getHashTable(s, s.params.quality, block_size, &table_size)

 			if s.params.quality == fastOnePassCompressionQuality {
-				compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
+				compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
 			} else {
-				compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage)
+				compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &s.bw)
 			}

 			*next_in = (*next_in)[block_size:]
 			*available_in -= block_size
-			var out_bytes uint = storage_ix >> 3
-			s.writeOutput(storage[:out_bytes])
-
-			s.last_bytes_ = uint16(storage[storage_ix>>3])
-			s.last_bytes_bits_ = byte(storage_ix & 7)
+			s.writeOutput(s.bw.dst)
+			s.bw.dst = s.bw.dst[:0]

 			if force_flush {
 				s.stream_state_ = streamFlushRequested

@@ -1074,7 +1010,7 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
 	}

 	for {
-		if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+		if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
 			injectBytePaddingBlock(s)
 			continue
 		}

@@ -1088,8 +1024,9 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
 		}

 		if s.stream_state_ == streamMetadataHead {
-			n := writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.tiny_buf_.u8[:])
-			s.writeOutput(s.tiny_buf_.u8[:n])
+			writeMetadataHeader(s, uint(s.remaining_metadata_bytes_))
+			s.writeOutput(s.bw.dst)
+			s.bw.dst = s.bw.dst[:0]
 			s.stream_state_ = streamMetadataBody
 			continue
 		} else {

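For a concrete picture of what writeMetadataHeader now emits, here is the header for a 12-byte metadata block, assuming an empty writer; the intermediate values (nbits = 4, nbytes = 1) and the resulting bytes were worked out by hand from the code above, so treat them as illustrative.

func exampleMetadataHeader() []byte {
	var bw bitWriter
	block_size := uint(12)
	bw.writeBits(1, 0) // ISLAST = 0
	bw.writeBits(2, 3)
	bw.writeBits(1, 0)
	nbits := log2FloorNonZero(uint(uint32(block_size)-1)) + 1 // 4
	nbytes := (nbits + 7) / 8                                 // 1
	bw.writeBits(2, uint64(nbytes))
	bw.writeBits(uint(8*nbytes), uint64(block_size)-1)
	bw.jumpToByteBoundary()
	return bw.dst // []byte{0xD6, 0x02}
}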
@@ -1175,7 +1112,7 @@ func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byt
 			continue
 		}

-		if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
+		if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
 			injectBytePaddingBlock(s)
 			continue
 		}

@@ -778,8 +778,9 @@ var kStaticDistanceCodeDepth = [64]byte{

 var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7}

-func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) {
-	writeBits(40, 0x0000FF55555554, storage_ix, storage)
+func storeStaticCodeLengthCode(bw *bitWriter) {
+	bw.writeBits(32, 0x55555554)
+	bw.writeBits(8, 0xFF)
 }

 var kZeroRepsBits = [numCommandSymbols]uint64{

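The 40-bit constant can no longer be written in one call: writeBits shifts its argument left by up to 31 pending bits, so keeping each call to 32 bits or fewer avoids overflowing the 64-bit accumulator, which is presumably why the constant is split, low half first. A quick in-package check that the two calls produce the same bytes as the old single write:

func checkStaticCodeLengthSplit() bool {
	var bw bitWriter
	bw.writeBits(32, 0x55555554)
	bw.writeBits(8, 0xFF)
	bw.jumpToByteBoundary()
	// The old writeBits(40, 0x0000FF55555554, ...) emitted, LSB-first,
	// the byte sequence 54 55 55 55 FF.
	want := []byte{0x54, 0x55, 0x55, 0x55, 0xFF}
	if len(bw.dst) != len(want) {
		return false
	}
	for i := range want {
		if bw.dst[i] != want[i] {
			return false
		}
	}
	return true
}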
@@ -4317,9 +4318,10 @@ var kStaticCommandCodeBits = [numCommandSymbols]uint16{
 	2047,
 }

-func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) {
-	writeBits(56, 0x92624416307003, storage_ix, storage)
-	writeBits(3, 0x00000000, storage_ix, storage)
+func storeStaticCommandHuffmanTree(bw *bitWriter) {
+	bw.writeBits(32, 0x16307003)
+	bw.writeBits(24, 0x926244)
+	bw.writeBits(3, 0x00000000)
 }

 var kStaticDistanceCodeBits = [64]uint16{

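Same idea for the static command tree: the 56-bit constant is split into a 32-bit and a 24-bit write, low half first, since 0x92624416307003 = 0x926244<<32 | 0x16307003. A one-line assertion of that arithmetic:

func checkStaticCommandSplit() bool {
	const full = uint64(0x92624416307003)
	return full&0xFFFFFFFF == 0x16307003 && full>>32 == 0x926244
}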
@@ -4389,6 +4391,6 @@ var kStaticDistanceCodeBits = [64]uint16{
 	63,
 }

-func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) {
-	writeBits(28, 0x0369DC03, storage_ix, storage)
+func storeStaticDistanceHuffmanTree(bw *bitWriter) {
+	bw.writeBits(28, 0x0369DC03)
 }

write_bits.go (108 changed lines)
@@ -1,7 +1,5 @@
 package brotli

-import "encoding/binary"
-
 /* Copyright 2010 Google Inc. All Rights Reserved.

    Distributed under MIT license.

@@ -10,43 +8,87 @@ import "encoding/binary"

 /* Write bits into a byte array. */

-/* This function writes bits into bytes in increasing addresses, and within
-   a byte least-significant-bit first.
-
-   The function can write up to 56 bits in one go with WriteBits
-   Example: let's assume that 3 bits (Rs below) have been written already:
-
-   BYTE-0 BYTE+1 BYTE+2
-
-   0000 0RRR 0000 0000 0000 0000
-
-   Now, we could write 5 or less bits in MSB by just sifting by 3
-   and OR'ing to BYTE-0.
-
-   For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
-   and locate the rest in BYTE+1, BYTE+2, etc. */
-func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
-	/* This branch of the code can write up to 56 bits at a time,
-	   7 bits are lost by being perhaps already in *p and at least
-	   1 bit is needed to initialize the bit-stream ahead (i.e. if 7
-	   bits are in *p and we write 57 bits, then the next write will
-	   access a byte that was never initialized). */
-	p := array[*pos>>3:]
-	v := uint64(p[0])
-	v |= bits << (*pos & 7)
-	binary.LittleEndian.PutUint64(p, v)
-	*pos += n_bits
+type bitWriter struct {
+	dst []byte
+
+	// Data waiting to be written is the low nbits of bits.
+	bits  uint64
+	nbits uint
 }

-func writeSingleBit(bit bool, pos *uint, array []byte) {
+func (w *bitWriter) writeBits(nb uint, b uint64) {
+	w.bits |= b << w.nbits
+	w.nbits += nb
+	if w.nbits >= 32 {
+		bits := w.bits
+		w.bits >>= 32
+		w.nbits -= 32
+		w.dst = append(w.dst,
+			byte(bits),
+			byte(bits>>8),
+			byte(bits>>16),
+			byte(bits>>24),
+		)
+	}
+}
+
+func (w *bitWriter) writeSingleBit(bit bool) {
 	if bit {
-		writeBits(1, 1, pos, array)
+		w.writeBits(1, 1)
 	} else {
-		writeBits(1, 0, pos, array)
+		w.writeBits(1, 0)
 	}
 }

-func writeBitsPrepareStorage(pos uint, array []byte) {
-	assert(pos&7 == 0)
-	array[pos>>3] = 0
+func (w *bitWriter) jumpToByteBoundary() {
+	dst := w.dst
+	for w.nbits != 0 {
+		dst = append(dst, byte(w.bits))
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
+	}
+	w.bits = 0
+	w.dst = dst
+}
+
+func (w *bitWriter) writeBytes(b []byte) {
+	if w.nbits&7 != 0 {
+		panic("writeBytes with unfinished bits")
+	}
+	for w.nbits != 0 {
+		w.dst = append(w.dst, byte(w.bits))
+		w.bits >>= 8
+		w.nbits -= 8
+	}
+	w.dst = append(w.dst, b...)
+}
+
+func (w *bitWriter) getPos() uint {
+	return uint(len(w.dst)<<3) + w.nbits
+}
+
+func (w *bitWriter) rewind(p uint) {
+	w.bits = uint64(w.dst[p>>3] & byte((1<<(p&7))-1))
+	w.nbits = p & 7
+	w.dst = w.dst[:p>>3]
+}
+
+func (w *bitWriter) updateBits(n_bits uint, bits uint32, pos uint) {
+	for n_bits > 0 {
+		var byte_pos uint = pos >> 3
+		var n_unchanged_bits uint = pos & 7
+		var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
+		var total_bits uint = n_unchanged_bits + n_changed_bits
+		var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
+		var unchanged_bits uint32 = uint32(w.dst[byte_pos]) & mask
+		var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
+		w.dst[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
+		n_bits -= n_changed_bits
+		bits >>= n_changed_bits
+		pos += n_changed_bits
+	}
 }
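To summarise the new type's behaviour in one place, here is a small in-package sketch with illustrative values only: writeBits buffers into the 64-bit accumulator and flushes four bytes whenever 32 or more bits are pending; jumpToByteBoundary zero-pads to the next byte and empties the accumulator; writeBytes then copies raw bytes through; and updateBits patches bits that have already reached dst, which is how a length field written earlier can be corrected once the final size is known.

func exampleBitWriter(payload []byte) []byte {
	var bw bitWriter

	bw.writeBits(8, 0x00)     // placeholder byte, patched below
	lenPos := bw.getPos() - 8 // bit position of the placeholder
	bw.writeBits(3, 0x5)      // a few more header bits
	bw.jumpToByteBoundary()   // flush: dst now holds 2 bytes, nbits == 0
	bw.writeBytes(payload)    // raw bytes, only legal on a byte boundary
	// Patch the placeholder; works for payloads shorter than 256 bytes.
	bw.updateBits(8, uint32(len(payload)), lenPos)
	return bw.dst
}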