forked from mirror/brotli
Reuse buffers and objects using sync.Pool
This reduces the amount of garbage generated and relieves pressure on the GC. For a workload that does not reuse the Writer, the number of allocations drops from 31 to 9; for a workload that reuses the Writer (via Writer.Reset), it drops from 25 to 0.
parent e2c5f2109f · commit a01a7b12c9
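The change applies the same two idioms throughout the diff below: a package-level sync.Pool hands out reusable objects (hasherSearchResultPool, huffmanTreePool, blockEncoderPool, metaBlockPool), falling back to a fresh allocation when the pool is empty, and buffers are re-sliced instead of re-allocated whenever their capacity already suffices. A minimal, self-contained sketch of both idioms; the searchResult type and the grow helper are illustrative stand-ins, not part of this commit:

package main

import (
    "fmt"
    "sync"
)

// searchResult stands in for a pooled object such as hasherSearchResult.
type searchResult struct {
    length, distance uint
    score            uint64
}

// pool mirrors the package-level pools introduced by this commit.
var pool sync.Pool

func work() {
    // Get returns nil until something has been Put, so fall back to a
    // fresh allocation; the ", _" type assertion tolerates a nil result.
    sr, _ := pool.Get().(*searchResult)
    if sr == nil {
        sr = &searchResult{}
    }

    sr.score = 42 // ... use the object ...

    // Hand the object back so a later call can reuse it.
    pool.Put(sr)
}

// grow reuses buf's backing array when it is already big enough, matching
// the cap-check idiom used for depths_, bits_, histograms and context maps.
func grow(buf []byte, n int) []byte {
    if cap(buf) < n {
        return make([]byte, n)
    }
    return buf[:n]
}

func main() {
    work()
    b := grow(nil, 16)
    b = grow(b, 8) // re-slices in place, no new allocation
    fmt.Println(len(b), cap(b))
}

Note that objects taken from a pool keep whatever state they had when they were put back, which is why the diff resets fields (or re-slices to length zero) in getBlockEncoder, getMetaBlockSplit and initBlockSplit before reuse.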
@@ -1,5 +1,9 @@
 package brotli
 
+import (
+    "sync"
+)
+
 /* Copyright 2013 Google Inc. All Rights Reserved.
 
    Distributed under MIT license.
@@ -31,6 +35,8 @@ func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uin
     return distance + numDistanceShortCodes - 1
 }
 
+var hasherSearchResultPool sync.Pool
+
 func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
     var max_backward_limit uint = maxBackwardLimit(params.lgwin)
     var insert_length uint = *last_insert_len
@@ -52,8 +58,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
 
     /* Minimum score to accept a backward reference. */
     hasher.PrepareDistanceCache(dist_cache)
-    var sr2 hasherSearchResult
-    var sr hasherSearchResult
+    sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
+    if sr2 == nil {
+        sr2 = &hasherSearchResult{}
+    }
+    sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
+    if sr == nil {
+        sr = &hasherSearchResult{}
+    }
 
     for position+hasher.HashTypeLength() < pos_end {
         var max_length uint = pos_end - position
@@ -62,7 +74,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
         sr.len_code_delta = 0
         sr.distance = 0
         sr.score = kMinScore
-        hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, &sr)
+        hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
         if sr.score > kMinScore {
             /* Found a match. Let's look for something even better ahead. */
             var delayed_backward_references_in_row int = 0
@@ -78,14 +90,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
                 sr2.distance = 0
                 sr2.score = kMinScore
                 max_distance = brotli_min_size_t(position+1, max_backward_limit)
-                hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, &sr2)
+                hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
                 if sr2.score >= sr.score+cost_diff_lazy {
                     /* Ok, let's just write one byte for now and start a match from the
                        next byte. */
                     position++
 
                     insert_length++
-                    sr = sr2
+                    *sr = *sr2
                     delayed_backward_references_in_row++
                     if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
                         continue
@@ -167,4 +179,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
 
     insert_length += pos_end - position
     *last_insert_len = insert_length
+
+    hasherSearchResultPool.Put(sr)
+    hasherSearchResultPool.Put(sr2)
 }
@@ -88,17 +88,12 @@ const clustersPerBatch = 16
 func initBlockSplit(self *blockSplit) {
     self.num_types = 0
     self.num_blocks = 0
-    self.types = nil
-    self.lengths = nil
+    self.types = self.types[:0]
+    self.lengths = self.lengths[:0]
     self.types_alloc_size = 0
     self.lengths_alloc_size = 0
 }
 
-func destroyBlockSplit(self *blockSplit) {
-    self.types = nil
-    self.lengths = nil
-}
-
 func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
     {
         var literals_count uint = countLiterals(cmds)
@@ -1,6 +1,9 @@
 package brotli
 
-import "math"
+import (
+    "math"
+    "sync"
+)
 
 const maxHuffmanTreeSize = (2*numCommandSymbols + 1)
 
@@ -415,6 +418,8 @@ func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool {
     return v0.total_count_ < v1.total_count_
 }
 
+var huffmanTreePool sync.Pool
+
 func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
     var count uint = 0
     var symbols = [4]uint{0}
@@ -446,7 +451,13 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
     }
     {
         var max_tree_size uint = 2*length + 1
-        var tree []huffmanTree = make([]huffmanTree, max_tree_size)
+        tree, _ := huffmanTreePool.Get().(*[]huffmanTree)
+        if tree == nil || cap(*tree) < int(max_tree_size) {
+            tmp := make([]huffmanTree, max_tree_size)
+            tree = &tmp
+        } else {
+            *tree = (*tree)[:max_tree_size]
+        }
         var count_limit uint32
         for count_limit = 1; ; count_limit *= 2 {
             var node int = 0
@@ -455,9 +466,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
                 l--
                 if histogram[l] != 0 {
                     if histogram[l] >= count_limit {
-                        initHuffmanTree(&tree[node:][0], histogram[l], -1, int16(l))
+                        initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l))
                     } else {
-                        initHuffmanTree(&tree[node:][0], count_limit, -1, int16(l))
+                        initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l))
                     }
 
                     node++
@@ -471,7 +482,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
             var j int = n + 1
             var k int
 
-            sortHuffmanTreeItems(tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
+            sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
 
             /* The nodes are:
                [0, n): the sorted leaf nodes that we start with.
@@ -482,15 +493,15 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
                There will be (2n+1) elements at the end. */
             initHuffmanTree(&sentinel, math.MaxUint32, -1, -1)
 
-            tree[node] = sentinel
+            (*tree)[node] = sentinel
             node++
-            tree[node] = sentinel
+            (*tree)[node] = sentinel
             node++
 
             for k = n - 1; k > 0; k-- {
                 var left int
                 var right int
-                if tree[i].total_count_ <= tree[j].total_count_ {
+                if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
                     left = i
                     i++
                 } else {
@@ -498,7 +509,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
                     j++
                 }
 
-                if tree[i].total_count_ <= tree[j].total_count_ {
+                if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
                     right = i
                     i++
                 } else {
@@ -507,17 +518,17 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
                 }
 
                 /* The sentinel node becomes the parent node. */
-                tree[node-1].total_count_ = tree[left].total_count_ + tree[right].total_count_
+                (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_
 
-                tree[node-1].index_left_ = int16(left)
-                tree[node-1].index_right_or_value_ = int16(right)
+                (*tree)[node-1].index_left_ = int16(left)
+                (*tree)[node-1].index_right_or_value_ = int16(right)
 
                 /* Add back the last sentinel node. */
-                tree[node] = sentinel
+                (*tree)[node] = sentinel
                 node++
             }
 
-            if setDepth(2*n-1, tree, depth, 14) {
+            if setDepth(2*n-1, *tree, depth, 14) {
                 /* We need to pack the Huffman tree in 14 bits. If this was not
                    successful, add fake entities to the lowest values and retry. */
                 break
@@ -525,7 +536,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
             }
         }
 
-        tree = nil
+        huffmanTreePool.Put(tree)
     }
 
     convertBitDepthsToSymbols(depth, length, bits)
@@ -875,27 +886,37 @@ type blockEncoder struct {
     bits_ []uint16
 }
 
-func initBlockEncoder(self *blockEncoder, histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) {
+var blockEncoderPool sync.Pool
+
+func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder {
+    self, _ := blockEncoderPool.Get().(*blockEncoder)
+
+    if self != nil {
+        self.block_ix_ = 0
+        self.entropy_ix_ = 0
+        self.depths_ = self.depths_[:0]
+        self.bits_ = self.bits_[:0]
+    } else {
+        self = &blockEncoder{}
+    }
+
     self.histogram_length_ = histogram_length
     self.num_block_types_ = num_block_types
     self.block_types_ = block_types
     self.block_lengths_ = block_lengths
     self.num_blocks_ = num_blocks
     initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator)
-    self.block_ix_ = 0
     if num_blocks == 0 {
         self.block_len_ = 0
     } else {
         self.block_len_ = uint(block_lengths[0])
     }
-    self.entropy_ix_ = 0
-    self.depths_ = nil
-    self.bits_ = nil
+
+    return self
 }
 
 func cleanupBlockEncoder(self *blockEncoder) {
-    self.depths_ = nil
-    self.bits_ = nil
+    blockEncoderPool.Put(self)
 }
 
 /* Creates entropy codes of block lengths and block types and stores them
@@ -948,8 +969,16 @@ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, conte
 
 func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
     var table_size uint = histograms_size * self.histogram_length_
-    self.depths_ = make([]byte, table_size)
-    self.bits_ = make([]uint16, table_size)
+    if cap(self.depths_) < int(table_size) {
+        self.depths_ = make([]byte, table_size)
+    } else {
+        self.depths_ = self.depths_[:table_size]
+    }
+    if cap(self.bits_) < int(table_size) {
+        self.bits_ = make([]uint16, table_size)
+    } else {
+        self.bits_ = self.bits_[:table_size]
+    }
     {
         var i uint
         for i = 0; i < histograms_size; i++ {
@@ -961,8 +990,16 @@ func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogram
 
 func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
     var table_size uint = histograms_size * self.histogram_length_
-    self.depths_ = make([]byte, table_size)
-    self.bits_ = make([]uint16, table_size)
+    if cap(self.depths_) < int(table_size) {
+        self.depths_ = make([]byte, table_size)
+    } else {
+        self.depths_ = self.depths_[:table_size]
+    }
+    if cap(self.bits_) < int(table_size) {
+        self.bits_ = make([]uint16, table_size)
+    } else {
+        self.bits_ = self.bits_[:table_size]
+    }
     {
         var i uint
         for i = 0; i < histograms_size; i++ {
@@ -974,8 +1011,16 @@ func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogram
 
 func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
     var table_size uint = histograms_size * self.histogram_length_
-    self.depths_ = make([]byte, table_size)
-    self.bits_ = make([]uint16, table_size)
+    if cap(self.depths_) < int(table_size) {
+        self.depths_ = make([]byte, table_size)
+    } else {
+        self.depths_ = self.depths_[:table_size]
+    }
+    if cap(self.bits_) < int(table_size) {
+        self.bits_ = make([]uint16, table_size)
+    } else {
+        self.bits_ = self.bits_[:table_size]
+    }
     {
         var i uint
         for i = 0; i < histograms_size; i++ {
@@ -997,9 +1042,6 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
     var num_effective_distance_symbols uint32 = num_distance_symbols
     var tree []huffmanTree
     var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
-    var literal_enc blockEncoder
-    var command_enc blockEncoder
-    var distance_enc blockEncoder
     var dist *distanceParams = &params.dist
     if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols {
         num_effective_distance_symbols = numHistogramDistanceSymbols
@@ -1008,13 +1050,13 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
     storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
 
     tree = make([]huffmanTree, maxHuffmanTreeSize)
-    initBlockEncoder(&literal_enc, numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
-    initBlockEncoder(&command_enc, numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
-    initBlockEncoder(&distance_enc, uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
+    literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
+    command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
+    distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
 
-    buildAndStoreBlockSwitchEntropyCodes(&literal_enc, tree, storage_ix, storage)
-    buildAndStoreBlockSwitchEntropyCodes(&command_enc, tree, storage_ix, storage)
-    buildAndStoreBlockSwitchEntropyCodes(&distance_enc, tree, storage_ix, storage)
+    buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage)
+    buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage)
+    buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage)
 
     writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage)
     writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage)
@@ -1034,19 +1076,19 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
         encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage)
     }
 
-    buildAndStoreEntropyCodesLiteral(&literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
-    buildAndStoreEntropyCodesCommand(&command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
-    buildAndStoreEntropyCodesDistance(&distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
+    buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
+    buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
+    buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
     tree = nil
 
     for _, cmd := range commands {
         var cmd_code uint = uint(cmd.cmd_prefix_)
-        storeSymbol(&command_enc, cmd_code, storage_ix, storage)
+        storeSymbol(command_enc, cmd_code, storage_ix, storage)
         storeCommandExtra(&cmd, storage_ix, storage)
         if mb.literal_context_map_size == 0 {
             var j uint
             for j = uint(cmd.insert_len_); j != 0; j-- {
-                storeSymbol(&literal_enc, uint(input[pos&mask]), storage_ix, storage)
+                storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage)
                 pos++
             }
         } else {
@@ -1054,7 +1096,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
             for j = uint(cmd.insert_len_); j != 0; j-- {
                 var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
                 var literal byte = input[pos&mask]
-                storeSymbolWithContext(&literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
+                storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
                 prev_byte2 = prev_byte
                 prev_byte = literal
                 pos++
@@ -1070,10 +1112,10 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
             var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
             var distextra uint64 = uint64(cmd.dist_extra_)
             if mb.distance_context_map_size == 0 {
-                storeSymbol(&distance_enc, dist_code, storage_ix, storage)
+                storeSymbol(distance_enc, dist_code, storage_ix, storage)
             } else {
                 var context uint = uint(commandDistanceContext(&cmd))
-                storeSymbolWithContext(&distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
+                storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
             }
 
             writeBits(uint(distnumextra), distextra, storage_ix, storage)
@@ -1081,9 +1123,9 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
         }
     }
 
-    cleanupBlockEncoder(&distance_enc)
-    cleanupBlockEncoder(&command_enc)
-    cleanupBlockEncoder(&literal_enc)
+    cleanupBlockEncoder(distance_enc)
+    cleanupBlockEncoder(command_enc)
+    cleanupBlockEncoder(literal_enc)
     if is_last {
         jumpToByteBoundary(storage_ix, storage)
     }

encode.go (48 lines changed)
@@ -464,8 +464,7 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
     } else if params.quality < minQualityForBlockSplit {
         storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
     } else {
-        var mb metaBlockSplit
-        initMetaBlockSplit(&mb)
+        mb := getMetaBlockSplit()
         if params.quality < minQualityForHqBlockSplitting {
             var num_literal_contexts uint = 1
             var literal_context_map []uint32 = nil
@@ -473,9 +472,9 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
                 decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map)
             }
 
-            buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, &mb)
+            buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb)
         } else {
-            buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, &mb)
+            buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb)
         }
 
         if params.quality >= minQualityForOptimizeHistograms {
@@ -487,11 +486,11 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
                 num_effective_dist_codes = numHistogramDistanceSymbols
             }
 
-            optimizeHistograms(num_effective_dist_codes, &mb)
+            optimizeHistograms(num_effective_dist_codes, mb)
         }
 
-        storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, &mb, storage_ix, storage)
-        destroyMetaBlockSplit(&mb)
+        storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage)
+        freeMetaBlockSplit(mb)
     }
 
     if bytes+4 < *storage_ix>>3 {
@@ -772,9 +771,14 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
         return false
     }
 
-    if s.params.quality == fastTwoPassCompressionQuality && s.command_buf_ == nil {
-        s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
-        s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
+    if s.params.quality == fastTwoPassCompressionQuality {
+        if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) {
+            s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
+            s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
+        } else {
+            s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize]
+            s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize]
+        }
     }
 
     if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality {
@@ -975,29 +979,23 @@ func checkFlushComplete(s *Writer) {
 func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool {
     var block_size_limit uint = uint(1) << s.params.lgwin
     var buf_size uint = brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit))
-    var tmp_command_buf []uint32 = nil
     var command_buf []uint32 = nil
-    var tmp_literal_buf []byte = nil
     var literal_buf []byte = nil
     if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality {
         return false
     }
 
     if s.params.quality == fastTwoPassCompressionQuality {
-        if s.command_buf_ == nil && buf_size == kCompressFragmentTwoPassBlockSize {
-            s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
-            s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
+        if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) {
+            s.command_buf_ = make([]uint32, buf_size)
+            s.literal_buf_ = make([]byte, buf_size)
+        } else {
+            s.command_buf_ = s.command_buf_[:buf_size]
+            s.literal_buf_ = s.literal_buf_[:buf_size]
         }
 
-        if s.command_buf_ != nil {
-            command_buf = s.command_buf_
-            literal_buf = s.literal_buf_
-        } else {
-            tmp_command_buf = make([]uint32, buf_size)
-            tmp_literal_buf = make([]byte, buf_size)
-            command_buf = tmp_command_buf
-            literal_buf = tmp_literal_buf
-        }
+        command_buf = s.command_buf_
+        literal_buf = s.literal_buf_
     }
 
     for {
@ -1056,8 +1054,6 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
tmp_command_buf = nil
|
|
||||||
tmp_literal_buf = nil
|
|
||||||
checkFlushComplete(s)
|
checkFlushComplete(s)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
28
memory.go
28
memory.go
|
@@ -23,12 +23,18 @@ func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
         for new_size < r {
             new_size *= 2
         }
-        var new_array []byte = make([]byte, new_size)
-        if *c != 0 {
-            copy(new_array, (*a)[:*c])
-        }
 
-        *a = new_array
+        if cap(*a) < int(new_size) {
+            var new_array []byte = make([]byte, new_size)
+            if *c != 0 {
+                copy(new_array, (*a)[:*c])
+            }
+
+            *a = new_array
+        } else {
+            *a = (*a)[:new_size]
+        }
+
         *c = new_size
     }
 }
@@ -45,12 +51,16 @@ func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
             new_size *= 2
         }
 
-        new_array = make([]uint32, new_size)
-        if *c != 0 {
-            copy(new_array, (*a)[:*c])
-        }
+        if cap(*a) < int(new_size) {
+            new_array = make([]uint32, new_size)
+            if *c != 0 {
+                copy(new_array, (*a)[:*c])
+            }
 
-        *a = new_array
+            *a = new_array
+        } else {
+            *a = (*a)[:new_size]
+        }
         *c = new_size
     }
 }

metablock.go (98 lines changed)
@@ -1,5 +1,9 @@
 package brotli
 
+import (
+    "sync"
+)
+
 /* Copyright 2014 Google Inc. All Rights Reserved.
 
    Distributed under MIT license.
@@ -25,31 +29,30 @@ type metaBlockSplit struct {
     distance_histograms_size uint
 }
 
-func initMetaBlockSplit(mb *metaBlockSplit) {
-    initBlockSplit(&mb.literal_split)
-    initBlockSplit(&mb.command_split)
-    initBlockSplit(&mb.distance_split)
-    mb.literal_context_map = nil
-    mb.literal_context_map_size = 0
-    mb.distance_context_map = nil
-    mb.distance_context_map_size = 0
-    mb.literal_histograms = nil
-    mb.literal_histograms_size = 0
-    mb.command_histograms = nil
-    mb.command_histograms_size = 0
-    mb.distance_histograms = nil
-    mb.distance_histograms_size = 0
+var metaBlockPool sync.Pool
+
+func getMetaBlockSplit() *metaBlockSplit {
+    mb, _ := metaBlockPool.Get().(*metaBlockSplit)
+
+    if mb == nil {
+        mb = &metaBlockSplit{}
+    } else {
+        initBlockSplit(&mb.literal_split)
+        initBlockSplit(&mb.command_split)
+        initBlockSplit(&mb.distance_split)
+        mb.literal_context_map = mb.literal_context_map[:0]
+        mb.literal_context_map_size = 0
+        mb.distance_context_map = mb.distance_context_map[:0]
+        mb.distance_context_map_size = 0
+        mb.literal_histograms = mb.literal_histograms[:0]
+        mb.command_histograms = mb.command_histograms[:0]
+        mb.distance_histograms = mb.distance_histograms[:0]
+    }
+    return mb
 }
 
-func destroyMetaBlockSplit(mb *metaBlockSplit) {
-    destroyBlockSplit(&mb.literal_split)
-    destroyBlockSplit(&mb.command_split)
-    destroyBlockSplit(&mb.distance_split)
-    mb.literal_context_map = nil
-    mb.distance_context_map = nil
-    mb.literal_histograms = nil
-    mb.command_histograms = nil
-    mb.distance_histograms = nil
+func freeMetaBlockSplit(mb *metaBlockSplit) {
+    metaBlockPool.Put(mb)
 }
 
 func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) {
@@ -206,21 +209,30 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
     distance_histograms = make([]histogramDistance, distance_histograms_size)
     clearHistogramsDistance(distance_histograms, distance_histograms_size)
 
-    assert(mb.command_histograms == nil)
     mb.command_histograms_size = mb.command_split.num_types
-    mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
+    if cap(mb.command_histograms) < int(mb.command_histograms_size) {
+        mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
+    } else {
+        mb.command_histograms = mb.command_histograms[:mb.command_histograms_size]
+    }
     clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size)
 
     buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
     literal_context_modes = nil
 
-    assert(mb.literal_context_map == nil)
     mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
-    mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+    if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
+        mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+    } else {
+        mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
+    }
 
-    assert(mb.literal_histograms == nil)
     mb.literal_histograms_size = mb.literal_context_map_size
-    mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
+    if cap(mb.literal_histograms) < int(mb.literal_histograms_size) {
+        mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
+    } else {
+        mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size]
+    }
 
     clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map)
     literal_histograms = nil
@@ -236,13 +248,19 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
         }
     }
 
-    assert(mb.distance_context_map == nil)
     mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
-    mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
+    if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
+        mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
+    } else {
+        mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
+    }
 
-    assert(mb.distance_histograms == nil)
     mb.distance_histograms_size = mb.distance_context_map_size
-    mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
+    if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
+        mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
+    } else {
+        mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
+    }
 
     clusterHistogramsDistance(distance_histograms, mb.distance_context_map_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
     distance_histograms = nil
@@ -295,9 +313,12 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
     brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
     brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
     split.num_blocks = max_num_blocks
-    assert(*histograms == nil)
     *histograms_size = max_num_types * num_contexts
-    *histograms = make([]histogramLiteral, (*histograms_size))
+    if histograms == nil || cap(*histograms) < int(*histograms_size) {
+        *histograms = make([]histogramLiteral, (*histograms_size))
+    } else {
+        *histograms = (*histograms)[:*histograms_size]
+    }
     self.histograms_ = *histograms
 
     /* Clear only current histogram. */
@@ -450,9 +471,12 @@ func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, cont
 
 func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
     var i uint
-    assert(mb.literal_context_map == nil)
     mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
-    mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+    if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
+        mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
+    } else {
+        mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
+    }
 
     for i = 0; i < mb.literal_split.num_types; i++ {
         var offset uint32 = uint32(i * num_contexts)
@@ -43,9 +43,12 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
     brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
     brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
     self.split_.num_blocks = max_num_blocks
-    assert(*histograms == nil)
     *histograms_size = max_num_types
-    *histograms = make([]histogramCommand, (*histograms_size))
+    if histograms == nil || cap(*histograms) < int(*histograms_size) {
+        *histograms = make([]histogramCommand, (*histograms_size))
+    } else {
+        *histograms = (*histograms)[:*histograms_size]
+    }
     self.histograms_ = *histograms
 
     /* Clear only current histogram. */
@@ -43,9 +43,12 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
     brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
     brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
     self.split_.num_blocks = max_num_blocks
-    assert(*histograms == nil)
     *histograms_size = max_num_types
-    *histograms = make([]histogramDistance, (*histograms_size))
+    if histograms == nil || cap(*histograms) < int(*histograms_size) {
+        *histograms = make([]histogramDistance, *histograms_size)
+    } else {
+        *histograms = (*histograms)[:*histograms_size]
+    }
     self.histograms_ = *histograms
 
     /* Clear only current histogram. */
@@ -43,9 +43,12 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
     brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
     brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
     self.split_.num_blocks = max_num_blocks
-    assert(*histograms == nil)
     *histograms_size = max_num_types
-    *histograms = make([]histogramLiteral, (*histograms_size))
+    if histograms == nil || cap(*histograms) < int(*histograms_size) {
+        *histograms = make([]histogramLiteral, *histograms_size)
+    } else {
+        *histograms = (*histograms)[:*histograms_size]
+    }
     self.histograms_ = *histograms
 
     /* Clear only current histogram. */
@@ -44,11 +44,16 @@ const kSlackForEightByteHashingEverywhere uint = 7
 /* Allocates or re-allocates data_ to the given length + plus some slack
    region before and after. Fills the slack regions with zeros. */
 func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
-    var new_data []byte = make([]byte, (2 + uint(buflen) + kSlackForEightByteHashingEverywhere))
+    var new_data []byte
     var i uint
+    size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
+    if cap(rb.data_) < size {
+        new_data = make([]byte, size)
+    } else {
+        new_data = rb.data_[:size]
+    }
     if rb.data_ != nil {
         copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
-        rb.data_ = nil
     }
 
     rb.data_ = new_data
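The 25-to-0 figure in the commit message corresponds to a caller that keeps a single Writer and redirects it with Writer.Reset, so the buffers pooled above survive across compressions. A minimal usage sketch, assuming the package's public API as in the upstream module github.com/andybalholm/brotli (NewWriterLevel, Reset, Write, Close); the writerPool wrapper is illustrative and not part of this commit:

package main

import (
    "bytes"
    "sync"

    "github.com/andybalholm/brotli"
)

// writerPool reuses brotli.Writer values across calls; together with the
// internal pooling added in this commit, steady-state compression allocates
// almost nothing.
var writerPool = sync.Pool{
    New: func() interface{} { return brotli.NewWriterLevel(nil, brotli.DefaultCompression) },
}

func compress(dst *bytes.Buffer, data []byte) error {
    w := writerPool.Get().(*brotli.Writer)
    defer writerPool.Put(w)

    w.Reset(dst) // reuse the encoder state instead of building a new Writer
    if _, err := w.Write(data); err != nil {
        return err
    }
    return w.Close()
}

func main() {
    var buf bytes.Buffer
    _ = compress(&buf, []byte("hello, brotli"))
}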