Revert "Faster bit writing."
This reverts commit c3da72aa01.
With the sample data from issue 22, one byte in the output file is zero
instead of the correct value. For now at least, we'll go back to the old
way of writing bits.
Fixes #22
parent 47c0dbab12
commit 94609f9606
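
For orientation before the diff: the revert replaces the bitWriter method calls (bw.writeBits, bw.writeSingleBit, bw.jumpToByteBoundary, bw.writeBytes) with the older free functions that thread an explicit bit position (storage_ix, counted in bits) and output buffer (storage) through every call, e.g. writeBits(n_bits, bits, storage_ix, storage). The sketch below is illustrative only — writeBitsSketch and bitWriterSketch are simplified stand-ins for comparison, not the actual implementations in this repository — but it shows the two calling conventions and that both append the same LSB-first bit stream.

// Illustrative sketch only: simplified stand-ins for the two bit-writing
// styles, not the real implementations from this repository.
package main

import "fmt"

// Style restored by this revert: a free function that advances an explicit
// bit position (*pos, in bits) into a caller-provided byte slice.
func writeBitsSketch(nBits uint, bits uint64, pos *uint, storage []byte) {
	for i := uint(0); i < nBits; i++ {
		if bits&(1<<i) != 0 {
			storage[(*pos+i)>>3] |= 1 << ((*pos + i) & 7)
		}
	}
	*pos += nBits
}

// Style being reverted ("Faster bit writing"): the same operation expressed
// as a method on a writer that owns its output buffer and bit position.
type bitWriterSketch struct {
	dst  []byte
	nBit uint
}

func (w *bitWriterSketch) writeBits(nBits uint, bits uint64) {
	for uint(len(w.dst))*8 < w.nBit+nBits {
		w.dst = append(w.dst, 0)
	}
	for i := uint(0); i < nBits; i++ {
		if bits&(1<<i) != 0 {
			w.dst[(w.nBit+i)>>3] |= 1 << ((w.nBit + i) & 7)
		}
	}
	w.nBit += nBits
}

func main() {
	// Both styles append the same LSB-first bit stream; the change is only
	// in how the position/buffer state is carried, not in the bit layout.
	storage := make([]byte, 2)
	var pos uint
	writeBitsSketch(3, 5, &pos, storage)
	writeBitsSketch(2, 1, &pos, storage)

	var w bitWriterSketch
	w.writeBits(3, 5)
	w.writeBits(2, 1)

	fmt.Printf("%08b %08b\n", storage[0], w.dst[0]) // 00001101 00001101
}

The encoded bit stream should be identical either way, which is consistent with the commit message attributing the single wrong byte seen with the issue 22 sample data to the newer writer rather than to the format itself.
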
@ -121,7 +121,7 @@ func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) {
|
||||||
*bits = uint64(length) - 1
|
*bits = uint64(length) - 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeCommandExtra(cmd *command, bw *bitWriter) {
|
func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) {
|
||||||
var copylen_code uint32 = commandCopyLenCode(cmd)
|
var copylen_code uint32 = commandCopyLenCode(cmd)
|
||||||
var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_))
|
var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_))
|
||||||
var copycode uint16 = getCopyLengthCode(uint(copylen_code))
|
var copycode uint16 = getCopyLengthCode(uint(copylen_code))
|
||||||
|
@ -129,7 +129,7 @@ func storeCommandExtra(cmd *command, bw *bitWriter) {
|
||||||
var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode))
|
var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode))
|
||||||
var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode))
|
var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode))
|
||||||
var bits uint64 = copyextraval<<insnumextra | insextraval
|
var bits uint64 = copyextraval<<insnumextra | insextraval
|
||||||
bw.writeBits(uint(insnumextra+getCopyExtra(copycode)), bits)
|
writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Data structure that stores almost everything that is needed to encode each
|
/* Data structure that stores almost everything that is needed to encode each
|
||||||
|
@ -143,21 +143,21 @@ type blockSplitCode struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores a number between 0 and 255. */
|
/* Stores a number between 0 and 255. */
|
||||||
func storeVarLenUint8(n uint, bw *bitWriter) {
|
func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) {
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
var nbits uint = uint(log2FloorNonZero(n))
|
var nbits uint = uint(log2FloorNonZero(n))
|
||||||
bw.writeBits(1, 1)
|
writeBits(1, 1, storage_ix, storage)
|
||||||
bw.writeBits(3, uint64(nbits))
|
writeBits(3, uint64(nbits), storage_ix, storage)
|
||||||
bw.writeBits(nbits, uint64(n)-(uint64(uint(1))<<nbits))
|
writeBits(nbits, uint64(n)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores the compressed meta-block header.
|
/* Stores the compressed meta-block header.
|
||||||
REQUIRES: length > 0
|
REQUIRES: length > 0
|
||||||
REQUIRES: length <= (1 << 24) */
|
REQUIRES: length <= (1 << 24) */
|
||||||
func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWriter) {
|
func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) {
|
||||||
var lenbits uint64
|
var lenbits uint64
|
||||||
var nlenbits uint
|
var nlenbits uint
|
||||||
var nibblesbits uint64
|
var nibblesbits uint64
|
||||||
|
@ -169,41 +169,41 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWri
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write ISLAST bit. */
|
/* Write ISLAST bit. */
|
||||||
bw.writeBits(1, is_final)
|
writeBits(1, is_final, storage_ix, storage)
|
||||||
|
|
||||||
/* Write ISEMPTY bit. */
|
/* Write ISEMPTY bit. */
|
||||||
if is_final_block {
|
if is_final_block {
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
|
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
|
||||||
bw.writeBits(2, nibblesbits)
|
writeBits(2, nibblesbits, storage_ix, storage)
|
||||||
bw.writeBits(nlenbits, lenbits)
|
writeBits(nlenbits, lenbits, storage_ix, storage)
|
||||||
|
|
||||||
if !is_final_block {
|
if !is_final_block {
|
||||||
/* Write ISUNCOMPRESSED bit. */
|
/* Write ISUNCOMPRESSED bit. */
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores the uncompressed meta-block header.
|
/* Stores the uncompressed meta-block header.
|
||||||
REQUIRES: length > 0
|
REQUIRES: length > 0
|
||||||
REQUIRES: length <= (1 << 24) */
|
REQUIRES: length <= (1 << 24) */
|
||||||
func storeUncompressedMetaBlockHeader(length uint, bw *bitWriter) {
|
func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) {
|
||||||
var lenbits uint64
|
var lenbits uint64
|
||||||
var nlenbits uint
|
var nlenbits uint
|
||||||
var nibblesbits uint64
|
var nibblesbits uint64
|
||||||
|
|
||||||
/* Write ISLAST bit.
|
/* Write ISLAST bit.
|
||||||
Uncompressed block cannot be the last one, so set to 0. */
|
Uncompressed block cannot be the last one, so set to 0. */
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
|
|
||||||
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
|
encodeMlen(length, &lenbits, &nlenbits, &nibblesbits)
|
||||||
bw.writeBits(2, nibblesbits)
|
writeBits(2, nibblesbits, storage_ix, storage)
|
||||||
bw.writeBits(nlenbits, lenbits)
|
writeBits(nlenbits, lenbits, storage_ix, storage)
|
||||||
|
|
||||||
/* Write ISUNCOMPRESSED bit. */
|
/* Write ISUNCOMPRESSED bit. */
|
||||||
bw.writeBits(1, 1)
|
writeBits(1, 1, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
|
var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
|
||||||
|
@ -211,7 +211,7 @@ var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte
|
||||||
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15}
|
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15}
|
||||||
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4}
|
var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4}
|
||||||
|
|
||||||
func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, bw *bitWriter) {
|
func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) {
|
||||||
var skip_some uint = 0
|
var skip_some uint = 0
|
||||||
var codes_to_store uint = codeLengthCodes
|
var codes_to_store uint = codeLengthCodes
|
||||||
/* The bit lengths of the Huffman code over the code length alphabet
|
/* The bit lengths of the Huffman code over the code length alphabet
|
||||||
|
@ -241,38 +241,38 @@ func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(2, uint64(skip_some))
|
writeBits(2, uint64(skip_some), storage_ix, storage)
|
||||||
{
|
{
|
||||||
var i uint
|
var i uint
|
||||||
for i = skip_some; i < codes_to_store; i++ {
|
for i = skip_some; i < codes_to_store; i++ {
|
||||||
var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]])
|
var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]])
|
||||||
bw.writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]))
|
writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, bw *bitWriter) {
|
func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) {
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i < huffman_tree_size; i++ {
|
for i = 0; i < huffman_tree_size; i++ {
|
||||||
var ix uint = uint(huffman_tree[i])
|
var ix uint = uint(huffman_tree[i])
|
||||||
bw.writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]))
|
writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage)
|
||||||
|
|
||||||
/* Extra bits */
|
/* Extra bits */
|
||||||
switch ix {
|
switch ix {
|
||||||
case repeatPreviousCodeLength:
|
case repeatPreviousCodeLength:
|
||||||
bw.writeBits(2, uint64(huffman_tree_extra_bits[i]))
|
writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
|
||||||
|
|
||||||
case repeatZeroCodeLength:
|
case repeatZeroCodeLength:
|
||||||
bw.writeBits(3, uint64(huffman_tree_extra_bits[i]))
|
writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, bw *bitWriter) {
|
func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) {
|
||||||
/* value of 1 indicates a simple Huffman code */
|
/* value of 1 indicates a simple Huffman code */
|
||||||
bw.writeBits(2, 1)
|
writeBits(2, 1, storage_ix, storage)
|
||||||
|
|
||||||
bw.writeBits(2, uint64(num_symbols)-1) /* NSYM - 1 */
|
writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */
|
||||||
{
|
{
|
||||||
/* Sort */
|
/* Sort */
|
||||||
var i uint
|
var i uint
|
||||||
|
@ -289,17 +289,17 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
|
||||||
}
|
}
|
||||||
|
|
||||||
if num_symbols == 2 {
|
if num_symbols == 2 {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
} else if num_symbols == 3 {
|
} else if num_symbols == 3 {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[2]))
|
writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[2]))
|
writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[3]))
|
writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
|
||||||
|
|
||||||
/* tree-select */
|
/* tree-select */
|
||||||
var tmp int
|
var tmp int
|
||||||
|
@ -308,13 +308,13 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
|
||||||
} else {
|
} else {
|
||||||
tmp = 0
|
tmp = 0
|
||||||
}
|
}
|
||||||
bw.writeBits(1, uint64(tmp))
|
writeBits(1, uint64(tmp), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* num = alphabet size
|
/* num = alphabet size
|
||||||
depths = symbol depths */
|
depths = symbol depths */
|
||||||
func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter) {
|
func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
var huffman_tree [numCommandSymbols]byte
|
var huffman_tree [numCommandSymbols]byte
|
||||||
var huffman_tree_extra_bits [numCommandSymbols]byte
|
var huffman_tree_extra_bits [numCommandSymbols]byte
|
||||||
var huffman_tree_size uint = 0
|
var huffman_tree_size uint = 0
|
||||||
|
@ -357,19 +357,19 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter
|
||||||
convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:])
|
convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:])
|
||||||
|
|
||||||
/* Now, we have all the data, let's start storing it */
|
/* Now, we have all the data, let's start storing it */
|
||||||
storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], bw)
|
storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage)
|
||||||
|
|
||||||
if num_codes == 1 {
|
if num_codes == 1 {
|
||||||
code_length_bitdepth[code] = 0
|
code_length_bitdepth[code] = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Store the real Huffman tree now. */
|
/* Store the real Huffman tree now. */
|
||||||
storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], bw)
|
storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
|
/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
|
||||||
bits[0:length] and stores the encoded tree to the bit stream. */
|
bits[0:length] and stores the encoded tree to the bit stream. */
|
||||||
func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, bw *bitWriter) {
|
func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var count uint = 0
|
var count uint = 0
|
||||||
var s4 = [4]uint{0}
|
var s4 = [4]uint{0}
|
||||||
var i uint
|
var i uint
|
||||||
|
@ -394,8 +394,8 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
|
||||||
}
|
}
|
||||||
|
|
||||||
if count <= 1 {
|
if count <= 1 {
|
||||||
bw.writeBits(4, 1)
|
writeBits(4, 1, storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(s4[0]))
|
writeBits(max_bits, uint64(s4[0]), storage_ix, storage)
|
||||||
depth[s4[0]] = 0
|
depth[s4[0]] = 0
|
||||||
bits[s4[0]] = 0
|
bits[s4[0]] = 0
|
||||||
return
|
return
|
||||||
|
@ -408,9 +408,9 @@ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabe
|
||||||
convertBitDepthsToSymbols(depth, histogram_length, bits)
|
convertBitDepthsToSymbols(depth, histogram_length, bits)
|
||||||
|
|
||||||
if count <= 4 {
|
if count <= 4 {
|
||||||
storeSimpleHuffmanTree(depth, s4[:], count, max_bits, bw)
|
storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
storeHuffmanTree(depth, histogram_length, tree, bw)
|
storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -420,7 +420,7 @@ func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool {
|
||||||
|
|
||||||
var huffmanTreePool sync.Pool
|
var huffmanTreePool sync.Pool
|
||||||
|
|
||||||
func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) {
|
func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var count uint = 0
|
var count uint = 0
|
||||||
var symbols = [4]uint{0}
|
var symbols = [4]uint{0}
|
||||||
var length uint = 0
|
var length uint = 0
|
||||||
|
@ -439,8 +439,8 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
|
||||||
}
|
}
|
||||||
|
|
||||||
if count <= 1 {
|
if count <= 1 {
|
||||||
bw.writeBits(4, 1)
|
writeBits(4, 1, storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
depth[symbols[0]] = 0
|
depth[symbols[0]] = 0
|
||||||
bits[symbols[0]] = 0
|
bits[symbols[0]] = 0
|
||||||
return
|
return
|
||||||
|
@ -544,9 +544,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
|
||||||
var i uint
|
var i uint
|
||||||
|
|
||||||
/* value of 1 indicates a simple Huffman code */
|
/* value of 1 indicates a simple Huffman code */
|
||||||
bw.writeBits(2, 1)
|
writeBits(2, 1, storage_ix, storage)
|
||||||
|
|
||||||
bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */
|
writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */
|
||||||
|
|
||||||
/* Sort */
|
/* Sort */
|
||||||
for i = 0; i < count; i++ {
|
for i = 0; i < count; i++ {
|
||||||
|
@ -561,27 +561,33 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
|
||||||
}
|
}
|
||||||
|
|
||||||
if count == 2 {
|
if count == 2 {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
} else if count == 3 {
|
} else if count == 3 {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[2]))
|
writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(max_bits, uint64(symbols[0]))
|
writeBits(max_bits, uint64(symbols[0]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[1]))
|
writeBits(max_bits, uint64(symbols[1]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[2]))
|
writeBits(max_bits, uint64(symbols[2]), storage_ix, storage)
|
||||||
bw.writeBits(max_bits, uint64(symbols[3]))
|
writeBits(max_bits, uint64(symbols[3]), storage_ix, storage)
|
||||||
|
|
||||||
/* tree-select */
|
/* tree-select */
|
||||||
bw.writeSingleBit(depth[symbols[0]] == 1)
|
var tmp int
|
||||||
|
if depth[symbols[0]] == 1 {
|
||||||
|
tmp = 1
|
||||||
|
} else {
|
||||||
|
tmp = 0
|
||||||
|
}
|
||||||
|
writeBits(1, uint64(tmp), storage_ix, storage)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var previous_value byte = 8
|
var previous_value byte = 8
|
||||||
var i uint
|
var i uint
|
||||||
|
|
||||||
/* Complex Huffman Tree */
|
/* Complex Huffman Tree */
|
||||||
storeStaticCodeLengthCode(bw)
|
storeStaticCodeLengthCode(storage_ix, storage)
|
||||||
|
|
||||||
/* Actual RLE coding. */
|
/* Actual RLE coding. */
|
||||||
for i = 0; i < length; {
|
for i = 0; i < length; {
|
||||||
|
@ -594,21 +600,21 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
|
||||||
|
|
||||||
i += reps
|
i += reps
|
||||||
if value == 0 {
|
if value == 0 {
|
||||||
bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps])
|
writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
if previous_value != value {
|
if previous_value != value {
|
||||||
bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
|
writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
|
||||||
reps--
|
reps--
|
||||||
}
|
}
|
||||||
|
|
||||||
if reps < 3 {
|
if reps < 3 {
|
||||||
for reps != 0 {
|
for reps != 0 {
|
||||||
reps--
|
reps--
|
||||||
bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]))
|
writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
reps -= 3
|
reps -= 3
|
||||||
bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps])
|
writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
previous_value = value
|
previous_value = value
|
||||||
|
@ -733,7 +739,7 @@ const symbolBits = 9
|
||||||
|
|
||||||
var encodeContextMap_kSymbolMask uint32 = (1 << symbolBits) - 1
|
var encodeContextMap_kSymbolMask uint32 = (1 << symbolBits) - 1
|
||||||
|
|
||||||
func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, bw *bitWriter) {
|
func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
var i uint
|
var i uint
|
||||||
var rle_symbols []uint32
|
var rle_symbols []uint32
|
||||||
var max_run_length_prefix uint32 = 6
|
var max_run_length_prefix uint32 = 6
|
||||||
|
@ -742,7 +748,7 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
|
||||||
var depths [maxContextMapSymbols]byte
|
var depths [maxContextMapSymbols]byte
|
||||||
var bits [maxContextMapSymbols]uint16
|
var bits [maxContextMapSymbols]uint16
|
||||||
|
|
||||||
storeVarLenUint8(num_clusters-1, bw)
|
storeVarLenUint8(num_clusters-1, storage_ix, storage)
|
||||||
|
|
||||||
if num_clusters == 1 {
|
if num_clusters == 1 {
|
||||||
return
|
return
|
||||||
|
@ -757,45 +763,45 @@ func encodeContextMap(context_map []uint32, context_map_size uint, num_clusters
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
var use_rle bool = (max_run_length_prefix > 0)
|
var use_rle bool = (max_run_length_prefix > 0)
|
||||||
bw.writeSingleBit(use_rle)
|
writeSingleBit(use_rle, storage_ix, storage)
|
||||||
if use_rle {
|
if use_rle {
|
||||||
bw.writeBits(4, uint64(max_run_length_prefix)-1)
|
writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], bw)
|
buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage)
|
||||||
for i = 0; i < num_rle_symbols; i++ {
|
for i = 0; i < num_rle_symbols; i++ {
|
||||||
var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask
|
var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask
|
||||||
var extra_bits_val uint32 = rle_symbols[i] >> symbolBits
|
var extra_bits_val uint32 = rle_symbols[i] >> symbolBits
|
||||||
bw.writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]))
|
writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage)
|
||||||
if rle_symbol > 0 && rle_symbol <= max_run_length_prefix {
|
if rle_symbol > 0 && rle_symbol <= max_run_length_prefix {
|
||||||
bw.writeBits(uint(rle_symbol), uint64(extra_bits_val))
|
writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(1, 1) /* use move-to-front */
|
writeBits(1, 1, storage_ix, storage) /* use move-to-front */
|
||||||
rle_symbols = nil
|
rle_symbols = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores the block switch command with index block_ix to the bit stream. */
|
/* Stores the block switch command with index block_ix to the bit stream. */
|
||||||
func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, bw *bitWriter) {
|
func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) {
|
||||||
var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type)
|
var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type)
|
||||||
var lencode uint
|
var lencode uint
|
||||||
var len_nextra uint32
|
var len_nextra uint32
|
||||||
var len_extra uint32
|
var len_extra uint32
|
||||||
if !is_first_block {
|
if !is_first_block {
|
||||||
bw.writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]))
|
writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra)
|
getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra)
|
||||||
|
|
||||||
bw.writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]))
|
writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage)
|
||||||
bw.writeBits(uint(len_nextra), uint64(len_extra))
|
writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Builds a BlockSplitCode data structure from the block split given by the
|
/* Builds a BlockSplitCode data structure from the block split given by the
|
||||||
vector of block types and block lengths and stores it to the bit stream. */
|
vector of block types and block lengths and stores it to the bit stream. */
|
||||||
func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, bw *bitWriter) {
|
func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) {
|
||||||
var type_histo [maxBlockTypeSymbols]uint32
|
var type_histo [maxBlockTypeSymbols]uint32
|
||||||
var length_histo [numBlockLenSymbols]uint32
|
var length_histo [numBlockLenSymbols]uint32
|
||||||
var i uint
|
var i uint
|
||||||
|
@ -813,17 +819,17 @@ func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint
|
||||||
length_histo[blockLengthPrefixCode(lengths[i])]++
|
length_histo[blockLengthPrefixCode(lengths[i])]++
|
||||||
}
|
}
|
||||||
|
|
||||||
storeVarLenUint8(num_types-1, bw)
|
storeVarLenUint8(num_types-1, storage_ix, storage)
|
||||||
if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? */
|
if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? */
|
||||||
buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], bw)
|
buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage)
|
||||||
buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], bw)
|
buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage)
|
||||||
storeBlockSwitch(code, lengths[0], types[0], true, bw)
|
storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores a context map where the histogram type is always the block type. */
|
/* Stores a context map where the histogram type is always the block type. */
|
||||||
func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, bw *bitWriter) {
|
func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
storeVarLenUint8(num_types-1, bw)
|
storeVarLenUint8(num_types-1, storage_ix, storage)
|
||||||
if num_types > 1 {
|
if num_types > 1 {
|
||||||
var repeat_code uint = context_bits - 1
|
var repeat_code uint = context_bits - 1
|
||||||
var repeat_bits uint = (1 << repeat_code) - 1
|
var repeat_bits uint = (1 << repeat_code) - 1
|
||||||
|
@ -837,16 +843,16 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write RLEMAX. */
|
/* Write RLEMAX. */
|
||||||
bw.writeBits(1, 1)
|
writeBits(1, 1, storage_ix, storage)
|
||||||
|
|
||||||
bw.writeBits(4, uint64(repeat_code)-1)
|
writeBits(4, uint64(repeat_code)-1, storage_ix, storage)
|
||||||
histogram[repeat_code] = uint32(num_types)
|
histogram[repeat_code] = uint32(num_types)
|
||||||
histogram[0] = 1
|
histogram[0] = 1
|
||||||
for i = context_bits; i < alphabet_size; i++ {
|
for i = context_bits; i < alphabet_size; i++ {
|
||||||
histogram[i] = 1
|
histogram[i] = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], bw)
|
buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage)
|
||||||
for i = 0; i < num_types; i++ {
|
for i = 0; i < num_types; i++ {
|
||||||
var tmp uint
|
var tmp uint
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
|
@ -855,13 +861,13 @@ func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTre
|
||||||
tmp = i + context_bits - 1
|
tmp = i + context_bits - 1
|
||||||
}
|
}
|
||||||
var code uint = tmp
|
var code uint = tmp
|
||||||
bw.writeBits(uint(depths[code]), uint64(bits[code]))
|
writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]))
|
writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage)
|
||||||
bw.writeBits(repeat_code, uint64(repeat_bits))
|
writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Write IMTF (inverse-move-to-front) bit. */
|
/* Write IMTF (inverse-move-to-front) bit. */
|
||||||
bw.writeBits(1, 1)
|
writeBits(1, 1, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -915,13 +921,13 @@ func cleanupBlockEncoder(self *blockEncoder) {
|
||||||
|
|
||||||
/* Creates entropy codes of block lengths and block types and stores them
|
/* Creates entropy codes of block lengths and block types and stores them
|
||||||
to the bit stream. */
|
to the bit stream. */
|
||||||
func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, bw *bitWriter) {
|
func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, bw)
|
buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores the next symbol with the entropy code of the current block type.
|
/* Stores the next symbol with the entropy code of the current block type.
|
||||||
Updates the block type and block length at block boundaries. */
|
Updates the block type and block length at block boundaries. */
|
||||||
func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
|
func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) {
|
||||||
if self.block_len_ == 0 {
|
if self.block_len_ == 0 {
|
||||||
self.block_ix_++
|
self.block_ix_++
|
||||||
var block_ix uint = self.block_ix_
|
var block_ix uint = self.block_ix_
|
||||||
|
@ -929,20 +935,20 @@ func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
|
||||||
var block_type byte = self.block_types_[block_ix]
|
var block_type byte = self.block_types_[block_ix]
|
||||||
self.block_len_ = uint(block_len)
|
self.block_len_ = uint(block_len)
|
||||||
self.entropy_ix_ = uint(block_type) * self.histogram_length_
|
self.entropy_ix_ = uint(block_type) * self.histogram_length_
|
||||||
storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
|
storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.block_len_--
|
self.block_len_--
|
||||||
{
|
{
|
||||||
var ix uint = self.entropy_ix_ + symbol
|
var ix uint = self.entropy_ix_ + symbol
|
||||||
bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
|
writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Stores the next symbol with the entropy code of the current block type and
|
/* Stores the next symbol with the entropy code of the current block type and
|
||||||
context value.
|
context value.
|
||||||
Updates the block type and block length at block boundaries. */
|
Updates the block type and block length at block boundaries. */
|
||||||
func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, bw *bitWriter, context_bits uint) {
|
func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) {
|
||||||
if self.block_len_ == 0 {
|
if self.block_len_ == 0 {
|
||||||
self.block_ix_++
|
self.block_ix_++
|
||||||
var block_ix uint = self.block_ix_
|
var block_ix uint = self.block_ix_
|
||||||
|
@ -950,18 +956,18 @@ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, conte
|
||||||
var block_type byte = self.block_types_[block_ix]
|
var block_type byte = self.block_types_[block_ix]
|
||||||
self.block_len_ = uint(block_len)
|
self.block_len_ = uint(block_len)
|
||||||
self.entropy_ix_ = uint(block_type) << context_bits
|
self.entropy_ix_ = uint(block_type) << context_bits
|
||||||
storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, bw)
|
storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.block_len_--
|
self.block_len_--
|
||||||
{
|
{
|
||||||
var histo_ix uint = uint(context_map[self.entropy_ix_+context])
|
var histo_ix uint = uint(context_map[self.entropy_ix_+context])
|
||||||
var ix uint = histo_ix*self.histogram_length_ + symbol
|
var ix uint = histo_ix*self.histogram_length_ + symbol
|
||||||
bw.writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]))
|
writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
|
func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
var table_size uint = histograms_size * self.histogram_length_
|
var table_size uint = histograms_size * self.histogram_length_
|
||||||
if cap(self.depths_) < int(table_size) {
|
if cap(self.depths_) < int(table_size) {
|
||||||
self.depths_ = make([]byte, table_size)
|
self.depths_ = make([]byte, table_size)
|
||||||
|
@ -977,12 +983,12 @@ func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogram
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i < histograms_size; i++ {
|
for i = 0; i < histograms_size; i++ {
|
||||||
var ix uint = i * self.histogram_length_
|
var ix uint = i * self.histogram_length_
|
||||||
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
|
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
|
func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
var table_size uint = histograms_size * self.histogram_length_
|
var table_size uint = histograms_size * self.histogram_length_
|
||||||
if cap(self.depths_) < int(table_size) {
|
if cap(self.depths_) < int(table_size) {
|
||||||
self.depths_ = make([]byte, table_size)
|
self.depths_ = make([]byte, table_size)
|
||||||
|
@ -998,12 +1004,12 @@ func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogram
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i < histograms_size; i++ {
|
for i = 0; i < histograms_size; i++ {
|
||||||
var ix uint = i * self.histogram_length_
|
var ix uint = i * self.histogram_length_
|
||||||
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
|
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, bw *bitWriter) {
|
func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
|
||||||
var table_size uint = histograms_size * self.histogram_length_
|
var table_size uint = histograms_size * self.histogram_length_
|
||||||
if cap(self.depths_) < int(table_size) {
|
if cap(self.depths_) < int(table_size) {
|
||||||
self.depths_ = make([]byte, table_size)
|
self.depths_ = make([]byte, table_size)
|
||||||
|
@ -1019,12 +1025,17 @@ func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogra
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i < histograms_size; i++ {
|
for i = 0; i < histograms_size; i++ {
|
||||||
var ix uint = i * self.histogram_length_
|
var ix uint = i * self.histogram_length_
|
||||||
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], bw)
|
buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, bw *bitWriter) {
|
func jumpToByteBoundary(storage_ix *uint, storage []byte) {
|
||||||
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
|
storage[*storage_ix>>3] = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) {
|
||||||
var pos uint = start_pos
|
var pos uint = start_pos
|
||||||
var i uint
|
var i uint
|
||||||
var num_distance_symbols uint32 = params.dist.alphabet_size
|
var num_distance_symbols uint32 = params.dist.alphabet_size
|
||||||
|
@ -1036,48 +1047,48 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
|
||||||
num_effective_distance_symbols = numHistogramDistanceSymbols
|
num_effective_distance_symbols = numHistogramDistanceSymbols
|
||||||
}
|
}
|
||||||
|
|
||||||
storeCompressedMetaBlockHeader(is_last, length, bw)
|
storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
|
||||||
|
|
||||||
tree = make([]huffmanTree, maxHuffmanTreeSize)
|
tree = make([]huffmanTree, maxHuffmanTreeSize)
|
||||||
literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
|
literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
|
||||||
command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
|
command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
|
||||||
distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
|
distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
|
||||||
|
|
||||||
buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, bw)
|
buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage)
|
||||||
buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, bw)
|
buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage)
|
||||||
buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, bw)
|
buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage)
|
||||||
|
|
||||||
bw.writeBits(2, uint64(dist.distance_postfix_bits))
|
writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage)
|
||||||
bw.writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits)
|
writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage)
|
||||||
for i = 0; i < mb.literal_split.num_types; i++ {
|
for i = 0; i < mb.literal_split.num_types; i++ {
|
||||||
bw.writeBits(2, uint64(literal_context_mode))
|
writeBits(2, uint64(literal_context_mode), storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
if mb.literal_context_map_size == 0 {
|
if mb.literal_context_map_size == 0 {
|
||||||
storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, bw)
|
storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, bw)
|
encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
if mb.distance_context_map_size == 0 {
|
if mb.distance_context_map_size == 0 {
|
||||||
storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, bw)
|
storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, bw)
|
encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, bw)
|
buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
|
||||||
buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, bw)
|
buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
|
||||||
buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, bw)
|
buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
|
||||||
tree = nil
|
tree = nil
|
||||||
|
|
||||||
for _, cmd := range commands {
|
for _, cmd := range commands {
|
||||||
var cmd_code uint = uint(cmd.cmd_prefix_)
|
var cmd_code uint = uint(cmd.cmd_prefix_)
|
||||||
storeSymbol(command_enc, cmd_code, bw)
|
storeSymbol(command_enc, cmd_code, storage_ix, storage)
|
||||||
storeCommandExtra(&cmd, bw)
|
storeCommandExtra(&cmd, storage_ix, storage)
|
||||||
if mb.literal_context_map_size == 0 {
|
if mb.literal_context_map_size == 0 {
|
||||||
var j uint
|
var j uint
|
||||||
for j = uint(cmd.insert_len_); j != 0; j-- {
|
for j = uint(cmd.insert_len_); j != 0; j-- {
|
||||||
storeSymbol(literal_enc, uint(input[pos&mask]), bw)
|
storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage)
|
||||||
pos++
|
pos++
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
@ -1085,7 +1096,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
|
||||||
for j = uint(cmd.insert_len_); j != 0; j-- {
|
for j = uint(cmd.insert_len_); j != 0; j-- {
|
||||||
var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
|
var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
|
||||||
var literal byte = input[pos&mask]
|
var literal byte = input[pos&mask]
|
||||||
storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, bw, literalContextBits)
|
storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
|
||||||
prev_byte2 = prev_byte
|
prev_byte2 = prev_byte
|
||||||
prev_byte = literal
|
prev_byte = literal
|
||||||
pos++
|
pos++
|
||||||
|
@ -1101,13 +1112,13 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
|
||||||
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
|
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
|
||||||
var distextra uint64 = uint64(cmd.dist_extra_)
|
var distextra uint64 = uint64(cmd.dist_extra_)
|
||||||
if mb.distance_context_map_size == 0 {
|
if mb.distance_context_map_size == 0 {
|
||||||
storeSymbol(distance_enc, dist_code, bw)
|
storeSymbol(distance_enc, dist_code, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
var context uint = uint(commandDistanceContext(&cmd))
|
var context uint = uint(commandDistanceContext(&cmd))
|
||||||
storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, bw, distanceContextBits)
|
storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(uint(distnumextra), distextra)
|
writeBits(uint(distnumextra), distextra, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1116,7 +1127,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
|
||||||
cleanupBlockEncoder(command_enc)
|
cleanupBlockEncoder(command_enc)
|
||||||
cleanupBlockEncoder(literal_enc)
|
cleanupBlockEncoder(literal_enc)
|
||||||
if is_last {
|
if is_last {
|
||||||
bw.jumpToByteBoundary()
|
jumpToByteBoundary(storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1137,16 +1148,16 @@ func buildHistograms(input []byte, start_pos uint, mask uint, commands []command
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, bw *bitWriter) {
|
func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var pos uint = start_pos
|
var pos uint = start_pos
|
||||||
for _, cmd := range commands {
|
for _, cmd := range commands {
|
||||||
var cmd_code uint = uint(cmd.cmd_prefix_)
|
var cmd_code uint = uint(cmd.cmd_prefix_)
|
||||||
var j uint
|
var j uint
|
||||||
bw.writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]))
|
writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage)
|
||||||
storeCommandExtra(&cmd, bw)
|
storeCommandExtra(&cmd, storage_ix, storage)
|
||||||
for j = uint(cmd.insert_len_); j != 0; j-- {
|
for j = uint(cmd.insert_len_); j != 0; j-- {
|
||||||
var literal byte = input[pos&mask]
|
var literal byte = input[pos&mask]
|
||||||
bw.writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]))
|
writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage)
|
||||||
pos++
|
pos++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1155,13 +1166,13 @@ func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands
|
||||||
var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF
|
var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF
|
||||||
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
|
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
|
||||||
var distextra uint32 = cmd.dist_extra_
|
var distextra uint32 = cmd.dist_extra_
|
||||||
bw.writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]))
|
writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(distnumextra), uint64(distextra))
|
writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
|
func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) {
|
||||||
var lit_histo histogramLiteral
|
var lit_histo histogramLiteral
|
||||||
var cmd_histo histogramCommand
|
var cmd_histo histogramCommand
|
||||||
var dist_histo histogramDistance
|
var dist_histo histogramDistance
|
||||||
|
@ -1174,7 +1185,7 @@ func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint,
|
||||||
var tree []huffmanTree
|
var tree []huffmanTree
|
||||||
var num_distance_symbols uint32 = params.dist.alphabet_size
|
var num_distance_symbols uint32 = params.dist.alphabet_size
|
||||||
|
|
||||||
storeCompressedMetaBlockHeader(is_last, length, bw)
|
storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
|
||||||
|
|
||||||
histogramClearLiteral(&lit_histo)
|
histogramClearLiteral(&lit_histo)
|
||||||
histogramClearCommand(&cmd_histo)
|
histogramClearCommand(&cmd_histo)
|
||||||
|
@ -1182,26 +1193,26 @@ func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint,
|
||||||
|
|
||||||
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
||||||
|
|
||||||
bw.writeBits(13, 0)
|
writeBits(13, 0, storage_ix, storage)
|
||||||
|
|
||||||
tree = make([]huffmanTree, maxHuffmanTreeSize)
|
tree = make([]huffmanTree, maxHuffmanTreeSize)
|
||||||
buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], bw)
|
buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], bw)
|
buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage)
|
||||||
buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], bw)
|
buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage)
|
||||||
tree = nil
|
tree = nil
|
||||||
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
|
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
|
||||||
if is_last {
|
if is_last {
|
||||||
bw.jumpToByteBoundary()
|
jumpToByteBoundary(storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, bw *bitWriter) {
|
func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) {
|
||||||
var num_distance_symbols uint32 = params.dist.alphabet_size
|
var num_distance_symbols uint32 = params.dist.alphabet_size
|
||||||
var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1
|
var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1
|
||||||
|
|
||||||
storeCompressedMetaBlockHeader(is_last, length, bw)
|
storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
|
||||||
|
|
||||||
bw.writeBits(13, 0)
|
writeBits(13, 0, storage_ix, storage)
|
||||||
|
|
||||||
if len(commands) <= 128 {
|
if len(commands) <= 128 {
|
||||||
var histogram = [numLiteralSymbols]uint32{0}
|
var histogram = [numLiteralSymbols]uint32{0}
|
||||||
|
@ -1221,11 +1232,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */
|
||||||
8, lit_depth[:], lit_bits[:], bw)
|
8, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
storeStaticCommandHuffmanTree(bw)
|
storeStaticCommandHuffmanTree(storage_ix, storage)
|
||||||
storeStaticDistanceHuffmanTree(bw)
|
storeStaticDistanceHuffmanTree(storage_ix, storage)
|
||||||
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], bw)
|
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
var lit_histo histogramLiteral
|
var lit_histo histogramLiteral
|
||||||
var cmd_histo histogramCommand
|
var cmd_histo histogramCommand
|
||||||
|
@ -1241,43 +1252,49 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
|
||||||
histogramClearDistance(&dist_histo)
|
histogramClearDistance(&dist_histo)
|
||||||
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo)
|
||||||
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */
|
||||||
8, lit_depth[:], lit_bits[:], bw)
|
8, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], cmd_histo.total_count_, /* max_bits = */
|
||||||
10, cmd_depth[:], cmd_bits[:], bw)
|
10, cmd_depth[:], cmd_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */
|
||||||
uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], bw)
|
uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], bw)
|
storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
bw.jumpToByteBoundary()
|
jumpToByteBoundary(storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* This is for storing uncompressed blocks (simple raw storage of
|
/* This is for storing uncompressed blocks (simple raw storage of
|
||||||
bytes-as-bytes). */
|
bytes-as-bytes). */
|
||||||
func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, bw *bitWriter) {
|
func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) {
|
||||||
var masked_pos uint = position & mask
|
var masked_pos uint = position & mask
|
||||||
storeUncompressedMetaBlockHeader(uint(len), bw)
|
storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage)
|
||||||
bw.jumpToByteBoundary()
|
jumpToByteBoundary(storage_ix, storage)
|
||||||
|
|
||||||
if masked_pos+len > mask+1 {
|
if masked_pos+len > mask+1 {
|
||||||
var len1 uint = mask + 1 - masked_pos
|
var len1 uint = mask + 1 - masked_pos
|
||||||
bw.writeBytes(input[masked_pos:][:len1])
|
copy(storage[*storage_ix>>3:], input[masked_pos:][:len1])
|
||||||
|
*storage_ix += len1 << 3
|
||||||
len -= len1
|
len -= len1
|
||||||
masked_pos = 0
|
masked_pos = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBytes(input[masked_pos:][:len])
|
copy(storage[*storage_ix>>3:], input[masked_pos:][:len])
|
||||||
|
*storage_ix += uint(len << 3)
|
||||||
|
|
||||||
|
/* We need to clear the next 4 bytes to continue to be
|
||||||
|
compatible with BrotliWriteBits. */
|
||||||
|
writeBitsPrepareStorage(*storage_ix, storage)
|
||||||
|
|
||||||
/* Since the uncompressed block itself may not be the final block, add an
|
/* Since the uncompressed block itself may not be the final block, add an
|
||||||
empty one after this. */
|
empty one after this. */
|
||||||
if is_final_block {
|
if is_final_block {
|
||||||
bw.writeBits(1, 1) /* islast */
|
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||||
bw.writeBits(1, 1) /* isempty */
|
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||||
bw.jumpToByteBoundary()
|
jumpToByteBoundary(storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
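The storage_ix/storage scheme restored here packs bits least-significant-bit first into a plain byte slice and assumes the bytes at and past the current bit position are still zero, which is why the uncompressed path above clears the following bytes via writeBitsPrepareStorage before normal bit writing resumes. A minimal sketch of that style of bit writer, simplified and not the repository's exact implementation:

	// writeBitsSketch ORs nbits bits of value into buf starting at bit position
	// *pos, LSB-first within each byte. The target bits must already be zero.
	func writeBitsSketch(nbits uint, value uint64, pos *uint, buf []byte) {
		for i := uint(0); i < nbits; i++ {
			if value&(1<<i) != 0 {
				buf[(*pos+i)>>3] |= 1 << ((*pos + i) & 7)
			}
		}
		*pos += nbits
	}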
|
|
|
@ -45,7 +45,7 @@ func isMatch5(p1 []byte, p2 []byte) bool {
|
||||||
and thus have to assign a non-zero depth for each literal.
|
and thus have to assign a non-zero depth for each literal.
|
||||||
Returns estimated compression ratio millibytes/char for encoding given input
|
Returns estimated compression ratio millibytes/char for encoding given input
|
||||||
with generated code. */
|
with generated code. */
|
||||||
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
|
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
|
||||||
var histogram = [256]uint32{0}
|
var histogram = [256]uint32{0}
|
||||||
var histogram_total uint
|
var histogram_total uint
|
||||||
var i uint
|
var i uint
|
||||||
|
@ -82,7 +82,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
|
||||||
8, depths, bits, bw)
|
8, depths, bits, storage_ix, storage)
|
||||||
{
|
{
|
||||||
var literal_ratio uint = 0
|
var literal_ratio uint = 0
|
||||||
for i = 0; i < 256; i++ {
|
for i = 0; i < 256; i++ {
|
||||||
|
@ -98,7 +98,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||||
|
|
||||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||||
"bits" based on "histogram" and stores it into the bit stream. */
|
"bits" based on "histogram" and stores it into the bit stream. */
|
||||||
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var tree [129]huffmanTree
|
var tree [129]huffmanTree
|
||||||
var cmd_depth = [numCommandSymbols]byte{0}
|
var cmd_depth = [numCommandSymbols]byte{0}
|
||||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||||
|
@ -145,141 +145,141 @@ func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []ui
|
||||||
cmd_depth[448+8*i] = depth[56+i]
|
cmd_depth[448+8*i] = depth[56+i]
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: insertlen < 6210 */
|
/* REQUIRES: insertlen < 6210 */
|
||||||
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||||
if insertlen < 6 {
|
if insertlen < 6 {
|
||||||
var code uint = insertlen + 40
|
var code uint = insertlen + 40
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if insertlen < 130 {
|
} else if insertlen < 130 {
|
||||||
var tail uint = insertlen - 2
|
var tail uint = insertlen - 2
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
|
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
|
||||||
bw.writeBits(uint(depth[inscode]), uint64(bits[inscode]))
|
writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||||
histo[inscode]++
|
histo[inscode]++
|
||||||
} else if insertlen < 2114 {
|
} else if insertlen < 2114 {
|
||||||
var tail uint = insertlen - 66
|
var tail uint = insertlen - 66
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 50)
|
var code uint = uint(nbits + 50)
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(uint(depth[61]), uint64(bits[61]))
|
writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
|
||||||
bw.writeBits(12, uint64(insertlen)-2114)
|
writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
|
||||||
histo[61]++
|
histo[61]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
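The branches above follow the insert-length buckets used by this fast path: lengths below 6 map directly to symbols 40..45, and progressively larger ranges use a prefix plus extra bits. A worked example, derived from the code above with insertlen = 100 (illustrative values only):

	// insertlen = 100:
	//   tail    = 100 - 2 = 98
	//   nbits   = log2FloorNonZero(98) - 1 = 6 - 1 = 5
	//   prefix  = 98 >> 5 = 3
	//   inscode = (5 << 1) + 3 + 42 = 55
	// Symbol 55 is written with its Huffman code, followed by the 5 extra bits
	// 98 - (3 << 5) = 2, and histo[55] is incremented.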
|
|
||||||
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||||
if insertlen < 22594 {
|
if insertlen < 22594 {
|
||||||
bw.writeBits(uint(depth[62]), uint64(bits[62]))
|
writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
|
||||||
bw.writeBits(14, uint64(insertlen)-6210)
|
writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
|
||||||
histo[62]++
|
histo[62]++
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(uint(depth[63]), uint64(bits[63]))
|
writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
|
||||||
bw.writeBits(24, uint64(insertlen)-22594)
|
writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
|
||||||
histo[63]++
|
histo[63]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||||
if copylen < 10 {
|
if copylen < 10 {
|
||||||
bw.writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]))
|
writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]), storage_ix, storage)
|
||||||
histo[copylen+14]++
|
histo[copylen+14]++
|
||||||
} else if copylen < 134 {
|
} else if copylen < 134 {
|
||||||
var tail uint = copylen - 6
|
var tail uint = copylen - 6
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
|
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if copylen < 2118 {
|
} else if copylen < 2118 {
|
||||||
var tail uint = copylen - 70
|
var tail uint = copylen - 70
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 28)
|
var code uint = uint(nbits + 28)
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
||||||
bw.writeBits(24, uint64(copylen)-2118)
|
writeBits(24, uint64(copylen)-2118, storage_ix, storage)
|
||||||
histo[39]++
|
histo[39]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||||
if copylen < 12 {
|
if copylen < 12 {
|
||||||
bw.writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]))
|
writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
|
||||||
histo[copylen-4]++
|
histo[copylen-4]++
|
||||||
} else if copylen < 72 {
|
} else if copylen < 72 {
|
||||||
var tail uint = copylen - 8
|
var tail uint = copylen - 8
|
||||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||||
var prefix uint = tail >> nbits
|
var prefix uint = tail >> nbits
|
||||||
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
|
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
} else if copylen < 136 {
|
} else if copylen < 136 {
|
||||||
var tail uint = copylen - 8
|
var tail uint = copylen - 8
|
||||||
var code uint = (tail >> 5) + 30
|
var code uint = (tail >> 5) + 30
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(5, uint64(tail)&31)
|
writeBits(5, uint64(tail)&31, storage_ix, storage)
|
||||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
} else if copylen < 2120 {
|
} else if copylen < 2120 {
|
||||||
var tail uint = copylen - 72
|
var tail uint = copylen - 72
|
||||||
var nbits uint32 = log2FloorNonZero(tail)
|
var nbits uint32 = log2FloorNonZero(tail)
|
||||||
var code uint = uint(nbits + 28)
|
var code uint = uint(nbits + 28)
|
||||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||||
histo[code]++
|
histo[code]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
||||||
bw.writeBits(24, uint64(copylen)-2120)
|
writeBits(24, uint64(copylen)-2120, storage_ix, storage)
|
||||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||||
histo[39]++
|
histo[39]++
|
||||||
histo[64]++
|
histo[64]++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||||
var d uint = distance + 3
|
var d uint = distance + 3
|
||||||
var nbits uint32 = log2FloorNonZero(d) - 1
|
var nbits uint32 = log2FloorNonZero(d) - 1
|
||||||
var prefix uint = (d >> nbits) & 1
|
var prefix uint = (d >> nbits) & 1
|
||||||
var offset uint = (2 + prefix) << nbits
|
var offset uint = (2 + prefix) << nbits
|
||||||
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
|
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
|
||||||
bw.writeBits(uint(depth[distcode]), uint64(bits[distcode]))
|
writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
|
||||||
bw.writeBits(uint(nbits), uint64(d)-uint64(offset))
|
writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
|
||||||
histo[distcode]++
|
histo[distcode]++
|
||||||
}
|
}
|
||||||
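emitDistance1 converts a backward distance into a distance symbol (offset by 80 in this command alphabet) plus extra bits. A worked example, derived from the code above with distance = 100 (illustrative values only):

	// distance = 100:
	//   d        = 100 + 3 = 103
	//   nbits    = log2FloorNonZero(103) - 1 = 6 - 1 = 5
	//   prefix   = (103 >> 5) & 1 = 1
	//   offset   = (2 + 1) << 5 = 96
	//   distcode = 2*(5-1) + 1 + 80 = 89
	// Symbol 89 is written with its Huffman code, followed by the 5 extra bits
	// 103 - 96 = 7, and histo[89] is incremented.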
|
|
||||||
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, bw *bitWriter) {
|
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var j uint
|
var j uint
|
||||||
for j = 0; j < len; j++ {
|
for j = 0; j < len; j++ {
|
||||||
var lit byte = input[j]
|
var lit byte = input[j]
|
||||||
bw.writeBits(uint(depth[lit]), uint64(bits[lit]))
|
writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: len <= 1 << 24. */
|
/* REQUIRES: len <= 1 << 24. */
|
||||||
func storeMetaBlockHeader1(len uint, is_uncompressed bool, bw *bitWriter) {
|
func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
||||||
var nibbles uint = 6
|
var nibbles uint = 6
|
||||||
|
|
||||||
/* ISLAST */
|
/* ISLAST */
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
|
|
||||||
if len <= 1<<16 {
|
if len <= 1<<16 {
|
||||||
nibbles = 4
|
nibbles = 4
|
||||||
|
@ -287,11 +287,34 @@ func storeMetaBlockHeader1(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||||
nibbles = 5
|
nibbles = 5
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(2, uint64(nibbles)-4)
|
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
||||||
bw.writeBits(nibbles*4, uint64(len)-1)
|
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
||||||
|
|
||||||
/* ISUNCOMPRESSED */
|
/* ISUNCOMPRESSED */
|
||||||
bw.writeSingleBit(is_uncompressed)
|
writeSingleBit(is_uncompressed, storage_ix, storage)
|
||||||
|
}
|
||||||
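storeMetaBlockHeader1 emits the ISLAST bit, the nibble count, the length, and the ISUNCOMPRESSED bit back to back. A worked example with len = 50000 and is_uncompressed = false (illustrative values only):

	// len = 50000:
	//   ISLAST         = 0                 (1 bit)
	//   MNIBBLES       = 4 - 4 = 0         (2 bits; 50000 <= 1<<16, so nibbles = 4)
	//   MLEN           = 50000 - 1 = 49999 (4*4 = 16 bits)
	//   ISUNCOMPRESSED = 0                 (1 bit)
	// for 20 header bits in total.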
|
|
||||||
|
func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
|
||||||
|
for n_bits > 0 {
|
||||||
|
var byte_pos uint = pos >> 3
|
||||||
|
var n_unchanged_bits uint = pos & 7
|
||||||
|
var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
|
||||||
|
var total_bits uint = n_unchanged_bits + n_changed_bits
|
||||||
|
var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
|
||||||
|
var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
|
||||||
|
var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
|
||||||
|
array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
|
||||||
|
n_bits -= n_changed_bits
|
||||||
|
bits >>= n_changed_bits
|
||||||
|
pos += n_changed_bits
|
||||||
|
}
|
||||||
|
}
|
||||||
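updateBits lets compressFragmentFastImpl patch the MLEN field of a meta-block header it has already written when it later decides to merge the next block into the same meta-block (see the mlen_storage_ix bookkeeping further down). A hedged usage sketch with illustrative sizes:

	func patchMlenExample() {
		var storage [64]byte // zeroed, as writeBits requires
		var storageIx uint
		mlenStorageIx := storageIx + 3 // MLEN starts after ISLAST (1 bit) and MNIBBLES (2 bits)
		writeBits(1, 0, &storageIx, storage[:])        // ISLAST = 0
		writeBits(2, 5-4, &storageIx, storage[:])      // MNIBBLES = 5, so MLEN is 20 bits
		writeBits(20, 98304-1, &storageIx, storage[:]) // provisional MLEN for a 3<<15 byte block
		// Later, after extending the meta-block by another 1<<16 bytes:
		updateBits(20, uint32(98304+65536-1), mlenStorageIx, storage[:])
	}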
|
|
||||||
|
func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
||||||
|
var bitpos uint = new_storage_ix & 7
|
||||||
|
var mask uint = (1 << bitpos) - 1
|
||||||
|
storage[new_storage_ix>>3] &= byte(mask)
|
||||||
|
*storage_ix = new_storage_ix
|
||||||
}
|
}
|
||||||
|
|
||||||
var shouldMergeBlock_kSampleRate uint = 43
|
var shouldMergeBlock_kSampleRate uint = 43
|
||||||
|
@ -322,26 +345,151 @@ func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertl
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitUncompressedMetaBlock1(data []byte, storage_ix_start uint, bw *bitWriter) {
|
func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
|
||||||
bw.rewind(storage_ix_start)
|
var len uint = uint(-cap(end) + cap(begin))
|
||||||
storeMetaBlockHeader1(uint(len(data)), true, bw)
|
rewindBitPosition1(storage_ix_start, storage_ix, storage)
|
||||||
bw.jumpToByteBoundary()
|
storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
|
||||||
bw.writeBytes(data)
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
|
copy(storage[*storage_ix>>3:], begin[:len])
|
||||||
|
*storage_ix += uint(len << 3)
|
||||||
|
storage[*storage_ix>>3] = 0
|
||||||
}
|
}
|
||||||
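The new emitUncompressedMetaBlock1 takes two slices into the same backing array and recovers the byte count between them from the difference of their capacities, then copies those bytes in on a byte boundary and zeroes the byte after the end so that later writeBits calls still find cleared storage. A small sketch of the capacity trick (hypothetical buffer, not from the repository):

	buf := make([]byte, 1000)
	begin, end := buf[100:], buf[350:]
	n := uint(-cap(end) + cap(begin)) // cap(begin) = 900, cap(end) = 650, so n = 250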
|
|
||||||
var kCmdHistoSeed = [128]uint32{
|
var kCmdHistoSeed = [128]uint32{
|
||||||
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0,
|
(the same 128 values, restored to one value per line)
|
||||||
}
|
}
|
||||||
|
|
||||||
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
|
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
|
||||||
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
|
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
|
||||||
|
|
||||||
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
||||||
var cmd_histo [128]uint32
|
var cmd_histo [128]uint32
|
||||||
var ip_end int
|
var ip_end int
|
||||||
var next_emit int = 0
|
var next_emit int = 0
|
||||||
|
@ -352,7 +500,7 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||||
var metablock_start int = input
|
var metablock_start int = input
|
||||||
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
|
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
|
||||||
var total_block_size uint = block_size
|
var total_block_size uint = block_size
|
||||||
var mlen_storage_ix uint = bw.getPos() + 3
|
var mlen_storage_ix uint = *storage_ix + 3
|
||||||
var lit_depth [256]byte
|
var lit_depth [256]byte
|
||||||
var lit_bits [256]uint16
|
var lit_bits [256]uint16
|
||||||
var literal_ratio uint
|
var literal_ratio uint
|
||||||
|
@ -369,21 +517,21 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||||
|
|
||||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||||
we can update it later if we decide to extend this meta-block. */
|
we can update it later if we decide to extend this meta-block. */
|
||||||
storeMetaBlockHeader1(block_size, false, bw)
|
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
bw.writeBits(13, 0)
|
writeBits(13, 0, storage_ix, storage)
|
||||||
|
|
||||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
{
|
{
|
||||||
/* Store the pre-compressed command and distance prefix codes. */
|
/* Store the pre-compressed command and distance prefix codes. */
|
||||||
var i uint
|
var i uint
|
||||||
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
|
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
|
||||||
bw.writeBits(8, uint64(cmd_code[i>>3]))
|
writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]))
|
writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)
|
||||||
|
|
||||||
/* Initialize the command and distance histograms. We will gather
|
/* Initialize the command and distance histograms. We will gather
|
||||||
statistics of command and distance codes during the processing
|
statistics of command and distance codes during the processing
|
||||||
|
@ -482,27 +630,27 @@ emit_commands:
|
||||||
var insert uint = uint(base - next_emit)
|
var insert uint = uint(base - next_emit)
|
||||||
ip += int(matched)
|
ip += int(matched)
|
||||||
if insert < 6210 {
|
if insert < 6210 {
|
||||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||||
emitUncompressedMetaBlock1(in[metablock_start:base], mlen_storage_ix-3, bw)
|
emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
|
||||||
input_size -= uint(base - input)
|
input_size -= uint(base - input)
|
||||||
input = base
|
input = base
|
||||||
next_emit = input
|
next_emit = input
|
||||||
goto next_block
|
goto next_block
|
||||||
} else {
|
} else {
|
||||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
if distance == last_distance {
|
if distance == last_distance {
|
||||||
bw.writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]))
|
writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
|
||||||
cmd_histo[64]++
|
cmd_histo[64]++
|
||||||
} else {
|
} else {
|
||||||
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
last_distance = distance
|
last_distance = distance
|
||||||
}
|
}
|
||||||
|
|
||||||
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
|
|
||||||
next_emit = ip
|
next_emit = ip
|
||||||
if ip >= ip_limit {
|
if ip >= ip_limit {
|
||||||
|
@ -538,8 +686,8 @@ emit_commands:
|
||||||
}
|
}
|
||||||
ip += int(matched)
|
ip += int(matched)
|
||||||
last_distance = int(base - candidate) /* > 0 */
|
last_distance = int(base - candidate) /* > 0 */
|
||||||
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
|
|
||||||
next_emit = ip
|
next_emit = ip
|
||||||
if ip >= ip_limit {
|
if ip >= ip_limit {
|
||||||
|
@ -585,7 +733,7 @@ emit_remainder:
|
||||||
nibbles. */
|
nibbles. */
|
||||||
total_block_size += block_size
|
total_block_size += block_size
|
||||||
|
|
||||||
bw.updateBits(20, uint32(total_block_size-1), mlen_storage_ix)
|
updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
|
||||||
goto emit_commands
|
goto emit_commands
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -593,13 +741,13 @@ emit_remainder:
|
||||||
if next_emit < ip_end {
|
if next_emit < ip_end {
|
||||||
var insert uint = uint(ip_end - next_emit)
|
var insert uint = uint(ip_end - next_emit)
|
||||||
if insert < 6210 {
|
if insert < 6210 {
|
||||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||||
emitUncompressedMetaBlock1(in[metablock_start:ip_end], mlen_storage_ix-3, bw)
|
emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -615,29 +763,30 @@ next_block:
|
||||||
|
|
||||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||||
we can update it later if we decide to extend this meta-block. */
|
we can update it later if we decide to extend this meta-block. */
|
||||||
mlen_storage_ix = bw.getPos() + 3
|
mlen_storage_ix = *storage_ix + 3
|
||||||
|
|
||||||
storeMetaBlockHeader1(block_size, false, bw)
|
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
bw.writeBits(13, 0)
|
writeBits(13, 0, storage_ix, storage)
|
||||||
|
|
||||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, bw)
|
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
|
||||||
goto emit_commands
|
goto emit_commands
|
||||||
}
|
}
|
||||||
|
|
||||||
if !is_last {
|
if !is_last {
|
||||||
/* If this is not the last block, update the command and distance prefix
|
/* If this is not the last block, update the command and distance prefix
|
||||||
codes for the next block and store the compressed forms. */
|
codes for the next block and store the compressed forms. */
|
||||||
var bw bitWriter
|
cmd_code[0] = 0
|
||||||
bw.dst = cmd_code
|
|
||||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, &bw)
|
*cmd_code_numbits = 0
|
||||||
*cmd_code_numbits = bw.getPos()
|
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
||||||
|
meta-blocks, and updates the "*storage_ix" bit position.
|
||||||
|
|
||||||
If "is_last" is 1, emits an additional empty last meta-block.
|
If "is_last" is 1, emits an additional empty last meta-block.
|
||||||
|
|
||||||
|
@ -658,28 +807,28 @@ next_block:
|
||||||
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
|
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
|
||||||
OUTPUT: maximal copy distance <= |input_size|
|
OUTPUT: maximal copy distance <= |input_size|
|
||||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||||
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
||||||
var initial_storage_ix uint = bw.getPos()
|
var initial_storage_ix uint = *storage_ix
|
||||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||||
|
|
||||||
if input_size == 0 {
|
if input_size == 0 {
|
||||||
assert(is_last)
|
assert(is_last)
|
||||||
bw.writeBits(1, 1) /* islast */
|
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||||
bw.writeBits(1, 1) /* isempty */
|
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||||
bw.jumpToByteBoundary()
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, bw)
|
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)
|
||||||
|
|
||||||
/* If output is larger than single uncompressed block, rewrite it. */
|
/* If output is larger than single uncompressed block, rewrite it. */
|
||||||
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
||||||
emitUncompressedMetaBlock1(input[:input_size], initial_storage_ix, bw)
|
emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
bw.writeBits(1, 1) /* islast */
|
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||||
bw.writeBits(1, 1) /* isempty */
|
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||||
bw.jumpToByteBoundary()
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -41,7 +41,7 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool {
|
||||||
|
|
||||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||||
"bits" based on "histogram" and stores it into the bit stream. */
|
"bits" based on "histogram" and stores it into the bit stream. */
|
||||||
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||||
var tree [129]huffmanTree
|
var tree [129]huffmanTree
|
||||||
var cmd_depth = [numCommandSymbols]byte{0}
|
var cmd_depth = [numCommandSymbols]byte{0}
|
||||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||||
|
@ -87,10 +87,10 @@ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uin
|
||||||
cmd_depth[448+8*i] = depth[16+i]
|
cmd_depth[448+8*i] = depth[16+i]
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitInsertLen(insertlen uint32, commands *[]uint32) {
|
func emitInsertLen(insertlen uint32, commands *[]uint32) {
|
||||||
|
@ -197,11 +197,11 @@ func emitDistance(distance uint32, commands *[]uint32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/* REQUIRES: len <= 1 << 24. */
|
/* REQUIRES: len <= 1 << 24. */
|
||||||
func storeMetaBlockHeader(len uint, is_uncompressed bool, bw *bitWriter) {
|
func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
||||||
var nibbles uint = 6
|
var nibbles uint = 6
|
||||||
|
|
||||||
/* ISLAST */
|
/* ISLAST */
|
||||||
bw.writeBits(1, 0)
|
writeBits(1, 0, storage_ix, storage)
|
||||||
|
|
||||||
if len <= 1<<16 {
|
if len <= 1<<16 {
|
||||||
nibbles = 4
|
nibbles = 4
|
||||||
|
@ -209,11 +209,11 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||||
nibbles = 5
|
nibbles = 5
|
||||||
}
|
}
|
||||||
|
|
||||||
bw.writeBits(2, uint64(nibbles)-4)
|
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
||||||
bw.writeBits(nibbles*4, uint64(len)-1)
|
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
||||||
|
|
||||||
/* ISUNCOMPRESSED */
|
/* ISUNCOMPRESSED */
|
||||||
bw.writeSingleBit(is_uncompressed)
|
writeSingleBit(is_uncompressed, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
|
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
|
||||||
|
@ -440,20 +440,163 @@ emit_remainder:
|
||||||
}
|
}
|
||||||
|
|
||||||
var storeCommands_kNumExtraBits = [128]uint32{
|
var storeCommands_kNumExtraBits = [128]uint32{
|
||||||
0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
|
(the same 128 values, restored to one value per line)
|
||||||
}
|
}
|
||||||
var storeCommands_kInsertOffset = [24]uint32{
|
var storeCommands_kInsertOffset = [24]uint32{
|
||||||
0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
1090, 2114, 6210, 22594,
|
(the same 24 values, restored to one value per line)
|
||||||
}
|
}
|
||||||
|
|
||||||
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, bw *bitWriter) {
|
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) {
|
||||||
var lit_depths [256]byte
|
var lit_depths [256]byte
|
||||||
var lit_bits [256]uint16
|
var lit_bits [256]uint16
|
||||||
var lit_histo = [256]uint32{0}
|
var lit_histo = [256]uint32{0}
|
||||||
|
@ -466,7 +609,7 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||||
}
|
}
|
||||||
|
|
||||||
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
|
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
|
||||||
8, lit_depths[:], lit_bits[:], bw)
|
8, lit_depths[:], lit_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
for i = 0; i < num_commands; i++ {
|
for i = 0; i < num_commands; i++ {
|
||||||
var code uint32 = commands[i] & 0xFF
|
var code uint32 = commands[i] & 0xFF
|
||||||
|
@ -478,21 +621,21 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||||
cmd_histo[2] += 1
|
cmd_histo[2] += 1
|
||||||
cmd_histo[64] += 1
|
cmd_histo[64] += 1
|
||||||
cmd_histo[84] += 1
|
cmd_histo[84] += 1
|
||||||
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], bw)
|
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage)
|
||||||
|
|
||||||
for i = 0; i < num_commands; i++ {
|
for i = 0; i < num_commands; i++ {
|
||||||
var cmd uint32 = commands[i]
|
var cmd uint32 = commands[i]
|
||||||
var code uint32 = cmd & 0xFF
|
var code uint32 = cmd & 0xFF
|
||||||
var extra uint32 = cmd >> 8
|
var extra uint32 = cmd >> 8
|
||||||
assert(code < 128)
|
assert(code < 128)
|
||||||
bw.writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]))
|
writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage)
|
||||||
bw.writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra))
|
writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage)
|
||||||
if code < 24 {
|
if code < 24 {
|
||||||
var insert uint32 = storeCommands_kInsertOffset[code] + extra
|
var insert uint32 = storeCommands_kInsertOffset[code] + extra
|
||||||
var j uint32
|
var j uint32
|
||||||
for j = 0; j < insert; j++ {
|
for j = 0; j < insert; j++ {
|
||||||
var lit byte = literals[0]
|
var lit byte = literals[0]
|
||||||
bw.writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]))
|
writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage)
|
||||||
literals = literals[1:]
|
literals = literals[1:]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
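In the two-pass path, each command is packed into a single uint32 with the command symbol in the low 8 bits and its extra-bit payload in the bits above, which is what the cmd & 0xFF and cmd >> 8 unpacking in storeCommands relies on. A minimal sketch of that packing, assuming this layout:

	func packCommand(code uint8, extra uint32) uint32 { return uint32(code) | extra<<8 }

	func unpackCommand(cmd uint32) (code, extra uint32) { return cmd & 0xFF, cmd >> 8 }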
|
@ -520,13 +663,22 @@ func shouldCompress(input []byte, input_size uint, num_literals uint) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func emitUncompressedMetaBlock(input []byte, input_size uint, bw *bitWriter) {
|
func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
||||||
storeMetaBlockHeader(input_size, true, bw)
|
var bitpos uint = new_storage_ix & 7
|
||||||
bw.jumpToByteBoundary()
|
var mask uint = (1 << bitpos) - 1
|
||||||
bw.writeBytes(input[:input_size])
|
storage[new_storage_ix>>3] &= byte(mask)
|
||||||
|
*storage_ix = new_storage_ix
|
||||||
}
|
}
|
||||||
|
|
||||||
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, bw *bitWriter) {
|
func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) {
|
||||||
|
storeMetaBlockHeader(input_size, true, storage_ix, storage)
|
||||||
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
|
copy(storage[*storage_ix>>3:], input[:input_size])
|
||||||
|
*storage_ix += input_size << 3
|
||||||
|
storage[*storage_ix>>3] = 0
|
||||||
|
}
|
||||||
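Several call sites in this revert replace bw.jumpToByteBoundary() with the arithmetic form *storage_ix = (*storage_ix + 7) &^ 7, which rounds the bit position up to the next multiple of 8 (the padding bits are already zero because storage is cleared ahead of the write position). A worked example:

	// (ix + 7) &^ 7 rounds a bit position up to the next byte boundary:
	//   ix = 21: (21 + 7) &^ 7 = 28 &^ 7 = 24
	//   ix = 24: (24 + 7) &^ 7 = 31 &^ 7 = 24 (already aligned, unchanged)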
|
|
||||||
|
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) {
|
||||||
/* Save the start of the first block for position and distance computations.
|
/* Save the start of the first block for position and distance computations.
|
||||||
*/
|
*/
|
||||||
var base_ip []byte = input
|
var base_ip []byte = input
|
||||||
|
@ -540,17 +692,17 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
num_literals = uint(-cap(literals) + cap(literal_buf))
|
num_literals = uint(-cap(literals) + cap(literal_buf))
|
||||||
if shouldCompress(input, block_size, num_literals) {
|
if shouldCompress(input, block_size, num_literals) {
|
||||||
var num_commands uint = uint(-cap(commands) + cap(command_buf))
|
var num_commands uint = uint(-cap(commands) + cap(command_buf))
|
||||||
storeMetaBlockHeader(block_size, false, bw)
|
storeMetaBlockHeader(block_size, false, storage_ix, storage)
|
||||||
|
|
||||||
/* No block splits, no contexts. */
|
/* No block splits, no contexts. */
|
||||||
bw.writeBits(13, 0)
|
writeBits(13, 0, storage_ix, storage)
|
||||||
|
|
||||||
storeCommands(literal_buf, num_literals, command_buf, num_commands, bw)
|
storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
/* Since we did not find many backward references and the entropy of
|
/* Since we did not find many backward references and the entropy of
|
||||||
the data is close to 8 bits, we can simply emit an uncompressed block.
|
the data is close to 8 bits, we can simply emit an uncompressed block.
|
||||||
This makes compression speed of uncompressible data about 3x faster. */
|
This makes compression speed of uncompressible data about 3x faster. */
|
||||||
emitUncompressedMetaBlock(input, block_size, bw)
|
emitUncompressedMetaBlock(input, block_size, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
input = input[block_size:]
|
input = input[block_size:]
|
||||||
|
@ -558,7 +710,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
||||||
|
meta-blocks, and updates the "*storage_ix" bit position.
|
||||||
|
|
||||||
If "is_last" is 1, emits an additional empty last meta-block.
|
If "is_last" is 1, emits an additional empty last meta-block.
|
||||||
|
|
||||||
|
@ -570,8 +723,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||||
REQUIRES: "table_size" is a power of two
|
REQUIRES: "table_size" is a power of two
|
||||||
OUTPUT: maximal copy distance <= |input_size|
|
OUTPUT: maximal copy distance <= |input_size|
|
||||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||||
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
|
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {
|
||||||
var initial_storage_ix uint = bw.getPos()
|
var initial_storage_ix uint = *storage_ix
|
||||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||||
var min_match uint
|
var min_match uint
|
||||||
if table_bits <= 15 {
|
if table_bits <= 15 {
|
||||||
|
@ -579,17 +732,17 @@ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, comman
|
||||||
} else {
|
} else {
|
||||||
min_match = 6
|
min_match = 6
|
||||||
}
|
}
|
||||||
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, bw)
|
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)
|
||||||
|
|
||||||
/* If output is larger than single uncompressed block, rewrite it. */
|
/* If output is larger than single uncompressed block, rewrite it. */
|
||||||
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
||||||
bw.rewind(initial_storage_ix)
|
rewindBitPosition(initial_storage_ix, storage_ix, storage)
|
||||||
emitUncompressedMetaBlock(input, input_size, bw)
|
emitUncompressedMetaBlock(input, input_size, storage_ix, storage)
|
||||||
}
|
}
|
||||||
|
|
||||||
if is_last {
|
if is_last {
|
||||||
bw.writeBits(1, 1) /* islast */
|
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||||
bw.writeBits(1, 1) /* isempty */
|
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||||
bw.jumpToByteBoundary()
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
171
encode.go
|
@ -87,9 +87,11 @@ type Writer struct {
|
||||||
last_processed_pos_ uint64
|
last_processed_pos_ uint64
|
||||||
dist_cache_ [numDistanceShortCodes]int
|
dist_cache_ [numDistanceShortCodes]int
|
||||||
saved_dist_cache_ [4]int
|
saved_dist_cache_ [4]int
|
||||||
|
last_bytes_ uint16
|
||||||
|
last_bytes_bits_ byte
|
||||||
prev_byte_ byte
|
prev_byte_ byte
|
||||||
prev_byte2_ byte
|
prev_byte2_ byte
|
||||||
bw bitWriter
|
storage []byte
|
||||||
small_table_ [1 << 10]int
|
small_table_ [1 << 10]int
|
||||||
large_table_ []int
|
large_table_ []int
|
||||||
large_table_size_ uint
|
large_table_size_ uint
|
||||||
|
@ -139,6 +141,14 @@ func wrapPosition(position uint64) uint32 {
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Writer) getStorage(size int) []byte {
|
||||||
|
if len(s.storage) < size {
|
||||||
|
s.storage = make([]byte, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.storage
|
||||||
|
}
|
||||||
|
|
||||||
func hashTableSize(max_table_size uint, input_size uint) uint {
|
func hashTableSize(max_table_size uint, input_size uint) uint {
|
||||||
var htsize uint = 256
|
var htsize uint = 256
|
||||||
for htsize < max_table_size && htsize < input_size {
|
for htsize < max_table_size && htsize < input_size {
|
||||||
|
@ -184,18 +194,23 @@ func getHashTable(s *Writer, quality int, input_size uint, table_size *uint) []i
|
||||||
return table
|
return table
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeWindowBits(lgwin int, large_window bool, bw *bitWriter) {
|
func encodeWindowBits(lgwin int, large_window bool, last_bytes *uint16, last_bytes_bits *byte) {
|
||||||
if large_window {
|
if large_window {
|
||||||
bw.writeBits(14, uint64((lgwin&0x3F)<<8|0x11))
|
*last_bytes = uint16((lgwin&0x3F)<<8 | 0x11)
|
||||||
|
*last_bytes_bits = 14
|
||||||
} else {
|
} else {
|
||||||
if lgwin == 16 {
|
if lgwin == 16 {
|
||||||
bw.writeBits(1, 0)
|
*last_bytes = 0
|
||||||
|
*last_bytes_bits = 1
|
||||||
} else if lgwin == 17 {
|
} else if lgwin == 17 {
|
||||||
bw.writeBits(7, 1)
|
*last_bytes = 1
|
||||||
|
*last_bytes_bits = 7
|
||||||
} else if lgwin > 17 {
|
} else if lgwin > 17 {
|
||||||
bw.writeBits(4, uint64((lgwin-17)<<1|0x01))
|
*last_bytes = uint16((lgwin-17)<<1 | 0x01)
|
||||||
|
*last_bytes_bits = 4
|
||||||
} else {
|
} else {
|
||||||
bw.writeBits(7, uint64((lgwin-8)<<4|0x01))
|
*last_bytes = uint16((lgwin-8)<<4 | 0x01)
|
||||||
|
*last_bytes_bits = 7
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
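With the bitWriter removed, the encoder keeps the stream's trailing partial byte in last_bytes_ and last_bytes_bits_, seeds the scratch storage with it before each meta-block, and reads the partial byte back out afterwards. A hedged sketch of that round trip, mirroring the encodeData changes further down (metablockSize is an illustrative name):

	// Before writing a meta-block: seed storage with the pending bits and
	// start the bit cursor at the pending bit count.
	storage := s.getStorage(int(2*metablockSize + 503))
	storageIx := uint(s.last_bytes_bits_)
	storage[0] = byte(s.last_bytes_)
	storage[1] = byte(s.last_bytes_ >> 8)

	// ... emit the meta-block into storage via writeBits and friends ...

	// Afterwards: flush the complete bytes and carry the partial byte over.
	s.writeOutput(storage[:storageIx>>3])
	s.last_bytes_ = uint16(storage[storageIx>>3])
	s.last_bytes_bits_ = byte(storageIx & 7)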
|
@ -417,15 +432,18 @@ func chooseContextMode(params *encoderParams, data []byte, pos uint, mask uint,
|
||||||
return contextUTF8
|
return contextUTF8
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, bw *bitWriter) {
|
func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes uint, is_last bool, literal_context_mode int, params *encoderParams, prev_byte byte, prev_byte2 byte, num_literals uint, commands []command, saved_dist_cache []int, dist_cache []int, storage_ix *uint, storage []byte) {
|
||||||
var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos)
|
var wrapped_last_flush_pos uint32 = wrapPosition(last_flush_pos)
|
||||||
|
var last_bytes uint16
|
||||||
|
var last_bytes_bits byte
|
||||||
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
|
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
|
||||||
var block_params encoderParams = *params
|
var block_params encoderParams = *params
|
||||||
|
|
||||||
if bytes == 0 {
|
if bytes == 0 {
|
||||||
/* Write the ISLAST and ISEMPTY bits. */
|
/* Write the ISLAST and ISEMPTY bits. */
|
||||||
bw.writeBits(2, 3)
|
writeBits(2, 3, storage_ix, storage)
|
||||||
bw.jumpToByteBoundary()
|
|
||||||
|
*storage_ix = (*storage_ix + 7) &^ 7
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -434,15 +452,17 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
|
||||||
CreateBackwardReferences is now unused. */
|
CreateBackwardReferences is now unused. */
|
||||||
copy(dist_cache, saved_dist_cache[:4])
|
copy(dist_cache, saved_dist_cache[:4])
|
||||||
|
|
||||||
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
|
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
savedPos := bw.getPos()
|
assert(*storage_ix <= 14)
|
||||||
|
last_bytes = uint16(storage[1])<<8 | uint16(storage[0])
|
||||||
|
last_bytes_bits = byte(*storage_ix)
|
||||||
if params.quality <= maxQualityForStaticEntropyCodes {
|
if params.quality <= maxQualityForStaticEntropyCodes {
|
||||||
storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
|
storeMetaBlockFast(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
|
||||||
} else if params.quality < minQualityForBlockSplit {
|
} else if params.quality < minQualityForBlockSplit {
|
||||||
storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, bw)
|
storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
|
||||||
} else {
|
} else {
|
||||||
mb := getMetaBlockSplit()
|
mb := getMetaBlockSplit()
|
||||||
if params.quality < minQualityForHqBlockSplitting {
|
if params.quality < minQualityForHqBlockSplitting {
|
||||||
|
@ -469,15 +489,18 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
|
||||||
optimizeHistograms(num_effective_dist_codes, mb)
|
optimizeHistograms(num_effective_dist_codes, mb)
|
||||||
}
|
}
|
||||||
|
|
||||||
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, bw)
|
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage)
|
||||||
freeMetaBlockSplit(mb)
|
freeMetaBlockSplit(mb)
|
||||||
}
|
}
|
||||||
|
|
||||||
if bytes+4 < bw.getPos()>>3 {
|
if bytes+4 < *storage_ix>>3 {
|
||||||
/* Restore the distance cache and last byte. */
|
/* Restore the distance cache and last byte. */
|
||||||
copy(dist_cache, saved_dist_cache[:4])
|
copy(dist_cache, saved_dist_cache[:4])
|
||||||
bw.rewind(savedPos)
|
|
||||||
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, bw)
|
storage[0] = byte(last_bytes)
|
||||||
|
storage[1] = byte(last_bytes >> 8)
|
||||||
|
*storage_ix = uint(last_bytes_bits)
|
||||||
|
storeUncompressedMetaBlock(is_last, data, uint(wrapped_last_flush_pos), mask, bytes, storage_ix, storage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -510,10 +533,8 @@ func ensureInitialized(s *Writer) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
s.bw.bits = 0
|
s.last_bytes_bits_ = 0
|
||||||
s.bw.nbits = 0
|
s.last_bytes_ = 0
|
||||||
s.bw.dst = s.bw.dst[:0]
|
|
||||||
|
|
||||||
s.remaining_metadata_bytes_ = math.MaxUint32
|
s.remaining_metadata_bytes_ = math.MaxUint32
|
||||||
|
|
||||||
sanitizeParams(&s.params)
|
sanitizeParams(&s.params)
|
||||||
|
@@ -529,7 +550,7 @@ func ensureInitialized(s *Writer) bool {
lgwin = brotli_max_int(lgwin, 18)
}

-encodeWindowBits(lgwin, s.params.large_window, &s.bw)
+encodeWindowBits(lgwin, s.params.large_window, &s.last_bytes_, &s.last_bytes_bits_)
}

if s.params.quality == fastOnePassCompressionQuality {
@@ -761,6 +782,8 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
}

if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality {
+var storage []byte
+var storage_ix uint = uint(s.last_bytes_bits_)
var table_size uint
var table []int

@@ -770,16 +793,20 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
return true
}

+storage = s.getStorage(int(2*bytes + 503))
+storage[0] = byte(s.last_bytes_)
+storage[1] = byte(s.last_bytes_ >> 8)
table = getHashTable(s, s.params.quality, uint(bytes), &table_size)
if s.params.quality == fastOnePassCompressionQuality {
-compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
+compressFragmentFast(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
} else {
-compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &s.bw)
+compressFragmentTwoPass(data[wrapped_last_processed_pos&mask:], uint(bytes), is_last, s.command_buf_, s.literal_buf_, table, table_size, &storage_ix, storage)
}

+s.last_bytes_ = uint16(storage[storage_ix>>3])
+s.last_bytes_bits_ = byte(storage_ix & 7)
updateLastProcessedPos(s)
-s.writeOutput(s.bw.dst)
-s.bw.dst = s.bw.dst[:0]
+s.writeOutput(storage[:storage_ix>>3])
return true
}
{
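The fast path above carries a partial byte between flushes: only whole bytes of storage are handed to writeOutput, while the trailing bits are kept in s.last_bytes_ / s.last_bytes_bits_ and copied into storage[0] and storage[1] of the next buffer. A minimal sketch of that hand-off, using a toy LSB-first bit writer rather than the package's writeBits:

package main

import "fmt"

// putBits is a toy LSB-first bit writer used only for this illustration;
// it sets bits in buf at bit offset *pos, one bit at a time. It is not
// the package's writeBits.
func putBits(nbits uint, bits uint64, pos *uint, buf []byte) {
	for i := uint(0); i < nbits; i++ {
		if bits>>i&1 == 1 {
			buf[(*pos+i)>>3] |= 1 << ((*pos + i) & 7)
		}
	}
	*pos += nbits
}

func main() {
	// First chunk: write 11 bits, so one full byte can be flushed and the
	// remaining 3 bits are carried over, as encodeData does above.
	buf := make([]byte, 16)
	var pos uint
	putBits(11, 0x5A5, &pos, buf)
	flushed := buf[:pos>>3]          // whole bytes handed to writeOutput
	lastBytes := uint16(buf[pos>>3]) // trailing partial byte
	lastBytesBits := byte(pos & 7)   // number of valid bits in it

	// Next chunk: seed a fresh buffer with the carried byte and continue
	// writing from the same bit offset.
	buf2 := make([]byte, 16)
	buf2[0] = byte(lastBytes)
	pos2 := uint(lastBytesBits)
	putBits(5, 0x1F, &pos2, buf2)

	fmt.Printf("flushed %d byte(s); carried %d bit(s): %08b\n", len(flushed), lastBytesBits, lastBytes)
	fmt.Printf("next buffer now starts with %08b\n", buf2[0])
}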
@@ -856,7 +883,13 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
assert(s.input_pos_-s.last_flush_pos_ <= 1<<24)
{
var metablock_size uint32 = uint32(s.input_pos_ - s.last_flush_pos_)
-writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &s.bw)
+var storage []byte = s.getStorage(int(2*metablock_size + 503))
+var storage_ix uint = uint(s.last_bytes_bits_)
+storage[0] = byte(s.last_bytes_)
+storage[1] = byte(s.last_bytes_ >> 8)
+writeMetaBlockInternal(data, uint(mask), s.last_flush_pos_, uint(metablock_size), is_last, literal_context_mode, &s.params, s.prev_byte_, s.prev_byte2_, s.num_literals_, s.commands, s.saved_dist_cache_[:], s.dist_cache_[:], &storage_ix, storage)
+s.last_bytes_ = uint16(storage[storage_ix>>3])
+s.last_bytes_bits_ = byte(storage_ix & 7)
s.last_flush_pos_ = s.input_pos_
if updateLastProcessedPos(s) {
hasherReset(s.hasher_)
@@ -877,22 +910,28 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
emitting an uncompressed block. */
copy(s.saved_dist_cache_[:], s.dist_cache_[:])

-s.writeOutput(s.bw.dst)
-s.bw.dst = s.bw.dst[:0]
+s.writeOutput(storage[:storage_ix>>3])
return true
}
}

-/* Dumps remaining output bits and metadata header to s.bw.
+/* Dumps remaining output bits and metadata header to |header|.
+   Returns number of produced bytes.
+   REQUIRED: |header| should be 8-byte aligned and at least 16 bytes long.
   REQUIRED: |block_size| <= (1 << 24). */
-func writeMetadataHeader(s *Writer, block_size uint) {
-bw := &s.bw
+func writeMetadataHeader(s *Writer, block_size uint, header []byte) uint {
+var storage_ix uint
+storage_ix = uint(s.last_bytes_bits_)
+header[0] = byte(s.last_bytes_)
+header[1] = byte(s.last_bytes_ >> 8)
+s.last_bytes_ = 0
+s.last_bytes_bits_ = 0

-bw.writeBits(1, 0)
-bw.writeBits(2, 3)
-bw.writeBits(1, 0)
+writeBits(1, 0, &storage_ix, header)
+writeBits(2, 3, &storage_ix, header)
+writeBits(1, 0, &storage_ix, header)
if block_size == 0 {
-bw.writeBits(2, 0)
+writeBits(2, 0, &storage_ix, header)
} else {
var nbits uint32
if block_size == 1 {
@@ -901,19 +940,34 @@ func writeMetadataHeader(s *Writer, block_size uint) {
nbits = log2FloorNonZero(uint(uint32(block_size)-1)) + 1
}
var nbytes uint32 = (nbits + 7) / 8
-bw.writeBits(2, uint64(nbytes))
-bw.writeBits(uint(8*nbytes), uint64(block_size)-1)
+writeBits(2, uint64(nbytes), &storage_ix, header)
+writeBits(uint(8*nbytes), uint64(block_size)-1, &storage_ix, header)
}

-bw.jumpToByteBoundary()
+return (storage_ix + 7) >> 3
}

func injectBytePaddingBlock(s *Writer) {
+var seal uint32 = uint32(s.last_bytes_)
+var seal_bits uint = uint(s.last_bytes_bits_)
+s.last_bytes_ = 0
+s.last_bytes_bits_ = 0
+
/* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */
-s.bw.writeBits(6, 0x6)
-s.bw.jumpToByteBoundary()
-s.writeOutput(s.bw.dst)
-s.bw.dst = s.bw.dst[:0]
+seal |= 0x6 << seal_bits
+
+seal_bits += 6
+
+destination := s.tiny_buf_.u8[:]
+
+destination[0] = byte(seal)
+if seal_bits > 8 {
+destination[1] = byte(seal >> 8)
+}
+if seal_bits > 16 {
+destination[2] = byte(seal >> 16)
+}
+s.writeOutput(destination[:(seal_bits+7)>>3])
}

func checkFlushComplete(s *Writer) {
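The restored injectBytePaddingBlock seals the pending partial byte with the 6-bit pattern 0x6 from the comment above and emits only as many whole bytes as the combined bit count needs. A standalone sketch of that arithmetic (the function name below is illustrative, not the package's API):

package main

import "fmt"

// paddingBytes mirrors the arithmetic in injectBytePaddingBlock above: the
// pending partial byte (lastBytes/lastBytesBits) is ORed with the 6-bit seal
// 0x6 and the result is rounded up to whole bytes.
func paddingBytes(lastBytes uint32, lastBytesBits uint) []byte {
	seal := lastBytes
	sealBits := lastBytesBits
	/* is_last = 0, data_nibbles = 11, reserved = 0, meta_nibbles = 00 */
	seal |= 0x6 << sealBits
	sealBits += 6

	out := make([]byte, 0, 3)
	out = append(out, byte(seal))
	if sealBits > 8 {
		out = append(out, byte(seal>>8))
	}
	if sealBits > 16 {
		out = append(out, byte(seal>>16))
	}
	return out[:(sealBits+7)>>3]
}

func main() {
	// 3 pending bits 0b101 plus the 6-bit seal give 9 bits, so 2 output bytes.
	fmt.Printf("% x\n", paddingBytes(0x5, 3))
	// No pending bits: the seal alone fits in a single byte.
	fmt.Printf("% x\n", paddingBytes(0, 0))
}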
@@ -945,7 +999,7 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
}

for {
-if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
+if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
injectBytePaddingBlock(s)
continue
}
@@ -957,6 +1011,9 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
var block_size uint = brotli_min_size_t(block_size_limit, *available_in)
var is_last bool = (*available_in == block_size) && (op == int(operationFinish))
var force_flush bool = (*available_in == block_size) && (op == int(operationFlush))
+var max_out_size uint = 2*block_size + 503
+var storage []byte = nil
+var storage_ix uint = uint(s.last_bytes_bits_)
var table_size uint
var table []int

@@ -965,18 +1022,25 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
continue
}

+storage = s.getStorage(int(max_out_size))
+
+storage[0] = byte(s.last_bytes_)
+storage[1] = byte(s.last_bytes_ >> 8)
table = getHashTable(s, s.params.quality, block_size, &table_size)

if s.params.quality == fastOnePassCompressionQuality {
-compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &s.bw)
+compressFragmentFast(*next_in, block_size, is_last, table, table_size, s.cmd_depths_[:], s.cmd_bits_[:], &s.cmd_code_numbits_, s.cmd_code_[:], &storage_ix, storage)
} else {
-compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &s.bw)
+compressFragmentTwoPass(*next_in, block_size, is_last, command_buf, literal_buf, table, table_size, &storage_ix, storage)
}

*next_in = (*next_in)[block_size:]
*available_in -= block_size
-s.writeOutput(s.bw.dst)
-s.bw.dst = s.bw.dst[:0]
+var out_bytes uint = storage_ix >> 3
+s.writeOutput(storage[:out_bytes])
+
+s.last_bytes_ = uint16(storage[storage_ix>>3])
+s.last_bytes_bits_ = byte(storage_ix & 7)

if force_flush {
s.stream_state_ = streamFlushRequested
@@ -1010,7 +1074,7 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
}

for {
-if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
+if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
injectBytePaddingBlock(s)
continue
}
@@ -1024,9 +1088,8 @@ func processMetadata(s *Writer, available_in *uint, next_in *[]byte) bool {
}

if s.stream_state_ == streamMetadataHead {
-writeMetadataHeader(s, uint(s.remaining_metadata_bytes_))
-s.writeOutput(s.bw.dst)
-s.bw.dst = s.bw.dst[:0]
+n := writeMetadataHeader(s, uint(s.remaining_metadata_bytes_), s.tiny_buf_.u8[:])
+s.writeOutput(s.tiny_buf_.u8[:n])
s.stream_state_ = streamMetadataBody
continue
} else {
@@ -1112,7 +1175,7 @@ func encoderCompressStream(s *Writer, op int, available_in *uint, next_in *[]byt
continue
}

-if s.stream_state_ == streamFlushRequested && s.bw.nbits&7 != 0 {
+if s.stream_state_ == streamFlushRequested && s.last_bytes_bits_ != 0 {
injectBytePaddingBlock(s)
continue
}
@@ -778,9 +778,8 @@ var kStaticDistanceCodeDepth = [64]byte{

var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7}

-func storeStaticCodeLengthCode(bw *bitWriter) {
-bw.writeBits(32, 0x55555554)
-bw.writeBits(8, 0xFF)
+func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) {
+writeBits(40, 0x0000FF55555554, storage_ix, storage)
}

var kZeroRepsBits = [numCommandSymbols]uint64{
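The restored storeStaticCodeLengthCode uses a single 40-bit write where the reverted bitWriter version used two writes (32 bits, then 8 bits). Because the bits are packed least-significant-bit first, emitting 32 bits of 0x55555554 followed by 8 bits of 0xFF produces the same bitstream as emitting 40 bits of the combined value 0x0000FF55555554, as this small check illustrates (illustration only, not package code):

package main

import "fmt"

func main() {
	// With an LSB-first writer, the second chunk lands in the bits just
	// above the first one, so the two-call form and the 40-bit form encode
	// the same value.
	combined := uint64(0x55555554) | uint64(0xFF)<<32
	fmt.Printf("0x%014X\n", combined) // 0x0000FF55555554
	fmt.Println(combined == 0x0000FF55555554)
}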
@@ -4318,10 +4317,9 @@ var kStaticCommandCodeBits = [numCommandSymbols]uint16{
2047,
}

-func storeStaticCommandHuffmanTree(bw *bitWriter) {
-bw.writeBits(32, 0x16307003)
-bw.writeBits(24, 0x926244)
-bw.writeBits(3, 0x00000000)
+func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) {
+writeBits(56, 0x92624416307003, storage_ix, storage)
+writeBits(3, 0x00000000, storage_ix, storage)
}

var kStaticDistanceCodeBits = [64]uint16{
@@ -4391,6 +4389,6 @@ var kStaticDistanceCodeBits = [64]uint16{
63,
}

-func storeStaticDistanceHuffmanTree(bw *bitWriter) {
-bw.writeBits(28, 0x0369DC03)
+func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) {
+writeBits(28, 0x0369DC03, storage_ix, storage)
}
write_bits.go (108 changed lines)
@@ -1,5 +1,7 @@
package brotli

+import "encoding/binary"
+
/* Copyright 2010 Google Inc. All Rights Reserved.

   Distributed under MIT license.
@@ -8,87 +10,43 @@ package brotli

/* Write bits into a byte array. */

-type bitWriter struct {
-dst []byte
-
-// Data waiting to be written is the low nbits of bits.
-bits uint64
-nbits uint
-}
-
-func (w *bitWriter) writeBits(nb uint, b uint64) {
-w.bits |= b << w.nbits
-w.nbits += nb
-if w.nbits >= 32 {
-bits := w.bits
-w.bits >>= 32
-w.nbits -= 32
-w.dst = append(w.dst,
-byte(bits),
-byte(bits>>8),
-byte(bits>>16),
-byte(bits>>24),
-)
-}
-}
-
-func (w *bitWriter) writeSingleBit(bit bool) {
-if bit {
-w.writeBits(1, 1)
-} else {
-w.writeBits(1, 0)
-}
-}
-
-func (w *bitWriter) jumpToByteBoundary() {
-dst := w.dst
-for w.nbits != 0 {
-dst = append(dst, byte(w.bits))
-w.bits >>= 8
-if w.nbits > 8 { // Avoid underflow
-w.nbits -= 8
-} else {
-w.nbits = 0
-}
-}
-w.bits = 0
-w.dst = dst
-}
-
-func (w *bitWriter) writeBytes(b []byte) {
-if w.nbits&7 != 0 {
-panic("writeBytes with unfinished bits")
-}
-for w.nbits != 0 {
-w.dst = append(w.dst, byte(w.bits))
-w.bits >>= 8
-w.nbits -= 8
-}
-w.dst = append(w.dst, b...)
-}
-
-func (w *bitWriter) getPos() uint {
-return uint(len(w.dst)<<3) + w.nbits
-}
-
-func (w *bitWriter) rewind(p uint) {
-w.bits = uint64(w.dst[p>>3] & byte((1<<(p&7))-1))
-w.nbits = p & 7
-w.dst = w.dst[:p>>3]
-}
-
-func (w *bitWriter) updateBits(n_bits uint, bits uint32, pos uint) {
-for n_bits > 0 {
-var byte_pos uint = pos >> 3
-var n_unchanged_bits uint = pos & 7
-var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
-var total_bits uint = n_unchanged_bits + n_changed_bits
-var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
-var unchanged_bits uint32 = uint32(w.dst[byte_pos]) & mask
-var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
-w.dst[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
-n_bits -= n_changed_bits
-bits >>= n_changed_bits
-pos += n_changed_bits
-}
-}
+/* This function writes bits into bytes in increasing addresses, and within
+   a byte least-significant-bit first.
+
+   The function can write up to 56 bits in one go with WriteBits
+   Example: let's assume that 3 bits (Rs below) have been written already:
+
+   BYTE-0     BYTE+1       BYTE+2
+
+   0000 0RRR    0000 0000    0000 0000
+
+   Now, we could write 5 or less bits in MSB by just sifting by 3
+   and OR'ing to BYTE-0.
+
+   For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
+   and locate the rest in BYTE+1, BYTE+2, etc. */
+func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
+/* This branch of the code can write up to 56 bits at a time,
+   7 bits are lost by being perhaps already in *p and at least
+   1 bit is needed to initialize the bit-stream ahead (i.e. if 7
+   bits are in *p and we write 57 bits, then the next write will
+   access a byte that was never initialized). */
+p := array[*pos>>3:]
+v := uint64(p[0])
+v |= bits << (*pos & 7)
+binary.LittleEndian.PutUint64(p, v)
+*pos += n_bits
+}
+
+func writeSingleBit(bit bool, pos *uint, array []byte) {
+if bit {
+writeBits(1, 1, pos, array)
+} else {
+writeBits(1, 0, pos, array)
+}
+}
+
+func writeBitsPrepareStorage(pos uint, array []byte) {
+assert(pos&7 == 0)
+array[pos>>3] = 0
+}
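The comment restored above describes the core trick: bits are packed least-significant-bit first, and each call stores a full 64-bit word at the current byte, so up to 56 bits can be appended per call as long as the buffer has at least 8 spare bytes past the write position. A self-contained sketch that mirrors the restored writeBits (renamed here so it is not mistaken for the package's exact code):

package main

import (
	"encoding/binary"
	"fmt"
)

// writeBitsLSB mirrors the restored writeBits above: the pending bits of the
// current byte are kept, the new bits are spliced in just above them, and the
// combined 64-bit value is stored back with a single little-endian write.
func writeBitsLSB(nBits uint, bits uint64, pos *uint, array []byte) {
	p := array[*pos>>3:]
	v := uint64(p[0])       // keep the bits already in the current byte
	v |= bits << (*pos & 7) // splice the new bits above them
	binary.LittleEndian.PutUint64(p, v)
	*pos += nBits
}

func main() {
	buf := make([]byte, 16)
	var pos uint
	writeBitsLSB(3, 0b101, &pos, buf)   // 3 bits: buf[0] = 0000 0101
	writeBitsLSB(5, 0b11011, &pos, buf) // 5 more bits, starting at bit 3 of buf[0]
	writeBitsLSB(12, 0xABC, &pos, buf)  // lands in buf[1] and buf[2]
	fmt.Printf("pos=%d bits, bytes=% x\n", pos, buf[:(pos+7)>>3])
}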