Reuse buffers and objects using sync.Pool

This reduces the amount of garbage generated and relieves pressure on
the GC.

For a workload that does not reuse the Writer, the number of allocations goes from 31 to 9.
For a workload that reuses the Writer (via Writer.Reset), the number of allocations goes from 25 to 0.
This commit is contained in:
Erik Dubbelboer 2020-05-09 16:27:05 +02:00
parent e2c5f2109f
commit a01a7b12c9
10 changed files with 236 additions and 140 deletions

View File

@ -1,5 +1,9 @@
package brotli
import (
"sync"
)
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
@ -31,6 +35,8 @@ func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uin
return distance + numDistanceShortCodes - 1
}
var hasherSearchResultPool sync.Pool
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var insert_length uint = *last_insert_len
@ -52,8 +58,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
/* Minimum score to accept a backward reference. */
hasher.PrepareDistanceCache(dist_cache)
var sr2 hasherSearchResult
var sr hasherSearchResult
sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
if sr2 == nil {
sr2 = &hasherSearchResult{}
}
sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
if sr == nil {
sr = &hasherSearchResult{}
}
for position+hasher.HashTypeLength() < pos_end {
var max_length uint = pos_end - position
@ -62,7 +74,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
sr.len_code_delta = 0
sr.distance = 0
sr.score = kMinScore
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, &sr)
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
if sr.score > kMinScore {
/* Found a match. Let's look for something even better ahead. */
var delayed_backward_references_in_row int = 0
@ -78,14 +90,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
sr2.distance = 0
sr2.score = kMinScore
max_distance = brotli_min_size_t(position+1, max_backward_limit)
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, &sr2)
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
if sr2.score >= sr.score+cost_diff_lazy {
/* Ok, let's just write one byte for now and start a match from the
next byte. */
position++
insert_length++
sr = sr2
*sr = *sr2
delayed_backward_references_in_row++
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
continue
@ -167,4 +179,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
insert_length += pos_end - position
*last_insert_len = insert_length
hasherSearchResultPool.Put(sr)
hasherSearchResultPool.Put(sr2)
}

View File

@ -88,17 +88,12 @@ const clustersPerBatch = 16
func initBlockSplit(self *blockSplit) {
self.num_types = 0
self.num_blocks = 0
self.types = nil
self.lengths = nil
self.types = self.types[:0]
self.lengths = self.lengths[:0]
self.types_alloc_size = 0
self.lengths_alloc_size = 0
}
func destroyBlockSplit(self *blockSplit) {
self.types = nil
self.lengths = nil
}
func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
{
var literals_count uint = countLiterals(cmds)

View File

@ -1,6 +1,9 @@
package brotli
import "math"
import (
"math"
"sync"
)
const maxHuffmanTreeSize = (2*numCommandSymbols + 1)
@ -415,6 +418,8 @@ func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool {
return v0.total_count_ < v1.total_count_
}
var huffmanTreePool sync.Pool
func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
var count uint = 0
var symbols = [4]uint{0}
@ -446,7 +451,13 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
{
var max_tree_size uint = 2*length + 1
var tree []huffmanTree = make([]huffmanTree, max_tree_size)
tree, _ := huffmanTreePool.Get().(*[]huffmanTree)
if tree == nil || cap(*tree) < int(max_tree_size) {
tmp := make([]huffmanTree, max_tree_size)
tree = &tmp
} else {
*tree = (*tree)[:max_tree_size]
}
var count_limit uint32
for count_limit = 1; ; count_limit *= 2 {
var node int = 0
@ -455,9 +466,9 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
l--
if histogram[l] != 0 {
if histogram[l] >= count_limit {
initHuffmanTree(&tree[node:][0], histogram[l], -1, int16(l))
initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l))
} else {
initHuffmanTree(&tree[node:][0], count_limit, -1, int16(l))
initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l))
}
node++
@ -471,7 +482,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
var j int = n + 1
var k int
sortHuffmanTreeItems(tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1))
/* The nodes are:
[0, n): the sorted leaf nodes that we start with.
@ -482,15 +493,15 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
There will be (2n+1) elements at the end. */
initHuffmanTree(&sentinel, math.MaxUint32, -1, -1)
tree[node] = sentinel
(*tree)[node] = sentinel
node++
tree[node] = sentinel
(*tree)[node] = sentinel
node++
for k = n - 1; k > 0; k-- {
var left int
var right int
if tree[i].total_count_ <= tree[j].total_count_ {
if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
left = i
i++
} else {
@ -498,7 +509,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
j++
}
if tree[i].total_count_ <= tree[j].total_count_ {
if (*tree)[i].total_count_ <= (*tree)[j].total_count_ {
right = i
i++
} else {
@ -507,17 +518,17 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
/* The sentinel node becomes the parent node. */
tree[node-1].total_count_ = tree[left].total_count_ + tree[right].total_count_
(*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_
tree[node-1].index_left_ = int16(left)
tree[node-1].index_right_or_value_ = int16(right)
(*tree)[node-1].index_left_ = int16(left)
(*tree)[node-1].index_right_or_value_ = int16(right)
/* Add back the last sentinel node. */
tree[node] = sentinel
(*tree)[node] = sentinel
node++
}
if setDepth(2*n-1, tree, depth, 14) {
if setDepth(2*n-1, *tree, depth, 14) {
/* We need to pack the Huffman tree in 14 bits. If this was not
successful, add fake entities to the lowest values and retry. */
break
@ -525,7 +536,7 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_
}
}
tree = nil
huffmanTreePool.Put(tree)
}
convertBitDepthsToSymbols(depth, length, bits)
@ -875,27 +886,37 @@ type blockEncoder struct {
bits_ []uint16
}
func initBlockEncoder(self *blockEncoder, histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) {
var blockEncoderPool sync.Pool
func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder {
self, _ := blockEncoderPool.Get().(*blockEncoder)
if self != nil {
self.block_ix_ = 0
self.entropy_ix_ = 0
self.depths_ = self.depths_[:0]
self.bits_ = self.bits_[:0]
} else {
self = &blockEncoder{}
}
self.histogram_length_ = histogram_length
self.num_block_types_ = num_block_types
self.block_types_ = block_types
self.block_lengths_ = block_lengths
self.num_blocks_ = num_blocks
initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator)
self.block_ix_ = 0
if num_blocks == 0 {
self.block_len_ = 0
} else {
self.block_len_ = uint(block_lengths[0])
}
self.entropy_ix_ = 0
self.depths_ = nil
self.bits_ = nil
return self
}
func cleanupBlockEncoder(self *blockEncoder) {
self.depths_ = nil
self.bits_ = nil
blockEncoderPool.Put(self)
}
/* Creates entropy codes of block lengths and block types and stores them
@ -948,8 +969,16 @@ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, conte
func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
var table_size uint = histograms_size * self.histogram_length_
self.depths_ = make([]byte, table_size)
self.bits_ = make([]uint16, table_size)
if cap(self.depths_) < int(table_size) {
self.depths_ = make([]byte, table_size)
} else {
self.depths_ = self.depths_[:table_size]
}
if cap(self.bits_) < int(table_size) {
self.bits_ = make([]uint16, table_size)
} else {
self.bits_ = self.bits_[:table_size]
}
{
var i uint
for i = 0; i < histograms_size; i++ {
@ -961,8 +990,16 @@ func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogram
func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
var table_size uint = histograms_size * self.histogram_length_
self.depths_ = make([]byte, table_size)
self.bits_ = make([]uint16, table_size)
if cap(self.depths_) < int(table_size) {
self.depths_ = make([]byte, table_size)
} else {
self.depths_ = self.depths_[:table_size]
}
if cap(self.bits_) < int(table_size) {
self.bits_ = make([]uint16, table_size)
} else {
self.bits_ = self.bits_[:table_size]
}
{
var i uint
for i = 0; i < histograms_size; i++ {
@ -974,8 +1011,16 @@ func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogram
func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) {
var table_size uint = histograms_size * self.histogram_length_
self.depths_ = make([]byte, table_size)
self.bits_ = make([]uint16, table_size)
if cap(self.depths_) < int(table_size) {
self.depths_ = make([]byte, table_size)
} else {
self.depths_ = self.depths_[:table_size]
}
if cap(self.bits_) < int(table_size) {
self.bits_ = make([]uint16, table_size)
} else {
self.bits_ = self.bits_[:table_size]
}
{
var i uint
for i = 0; i < histograms_size; i++ {
@ -997,9 +1042,6 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
var num_effective_distance_symbols uint32 = num_distance_symbols
var tree []huffmanTree
var literal_context_lut contextLUT = getContextLUT(literal_context_mode)
var literal_enc blockEncoder
var command_enc blockEncoder
var distance_enc blockEncoder
var dist *distanceParams = &params.dist
if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols {
num_effective_distance_symbols = numHistogramDistanceSymbols
@ -1008,13 +1050,13 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage)
tree = make([]huffmanTree, maxHuffmanTreeSize)
initBlockEncoder(&literal_enc, numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
initBlockEncoder(&command_enc, numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
initBlockEncoder(&distance_enc, uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks)
command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks)
distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks)
buildAndStoreBlockSwitchEntropyCodes(&literal_enc, tree, storage_ix, storage)
buildAndStoreBlockSwitchEntropyCodes(&command_enc, tree, storage_ix, storage)
buildAndStoreBlockSwitchEntropyCodes(&distance_enc, tree, storage_ix, storage)
buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage)
buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage)
buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage)
writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage)
writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage)
@ -1034,19 +1076,19 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage)
}
buildAndStoreEntropyCodesLiteral(&literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
buildAndStoreEntropyCodesCommand(&command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
buildAndStoreEntropyCodesDistance(&distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage)
buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage)
buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage)
tree = nil
for _, cmd := range commands {
var cmd_code uint = uint(cmd.cmd_prefix_)
storeSymbol(&command_enc, cmd_code, storage_ix, storage)
storeSymbol(command_enc, cmd_code, storage_ix, storage)
storeCommandExtra(&cmd, storage_ix, storage)
if mb.literal_context_map_size == 0 {
var j uint
for j = uint(cmd.insert_len_); j != 0; j-- {
storeSymbol(&literal_enc, uint(input[pos&mask]), storage_ix, storage)
storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage)
pos++
}
} else {
@ -1054,7 +1096,7 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
for j = uint(cmd.insert_len_); j != 0; j-- {
var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut))
var literal byte = input[pos&mask]
storeSymbolWithContext(&literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits)
prev_byte2 = prev_byte
prev_byte = literal
pos++
@ -1070,10 +1112,10 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10
var distextra uint64 = uint64(cmd.dist_extra_)
if mb.distance_context_map_size == 0 {
storeSymbol(&distance_enc, dist_code, storage_ix, storage)
storeSymbol(distance_enc, dist_code, storage_ix, storage)
} else {
var context uint = uint(commandDistanceContext(&cmd))
storeSymbolWithContext(&distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits)
}
writeBits(uint(distnumextra), distextra, storage_ix, storage)
@ -1081,9 +1123,9 @@ func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_b
}
}
cleanupBlockEncoder(&distance_enc)
cleanupBlockEncoder(&command_enc)
cleanupBlockEncoder(&literal_enc)
cleanupBlockEncoder(distance_enc)
cleanupBlockEncoder(command_enc)
cleanupBlockEncoder(literal_enc)
if is_last {
jumpToByteBoundary(storage_ix, storage)
}

View File

@ -464,8 +464,7 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
} else if params.quality < minQualityForBlockSplit {
storeMetaBlockTrivial(data, uint(wrapped_last_flush_pos), bytes, mask, is_last, params, commands, storage_ix, storage)
} else {
var mb metaBlockSplit
initMetaBlockSplit(&mb)
mb := getMetaBlockSplit()
if params.quality < minQualityForHqBlockSplitting {
var num_literal_contexts uint = 1
var literal_context_map []uint32 = nil
@ -473,9 +472,9 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
decideOverLiteralContextModeling(data, uint(wrapped_last_flush_pos), bytes, mask, params.quality, params.size_hint, &num_literal_contexts, &literal_context_map)
}
buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, &mb)
buildMetaBlockGreedy(data, uint(wrapped_last_flush_pos), mask, prev_byte, prev_byte2, literal_context_lut, num_literal_contexts, literal_context_map, commands, mb)
} else {
buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, &mb)
buildMetaBlock(data, uint(wrapped_last_flush_pos), mask, &block_params, prev_byte, prev_byte2, commands, literal_context_mode, mb)
}
if params.quality >= minQualityForOptimizeHistograms {
@ -487,11 +486,11 @@ func writeMetaBlockInternal(data []byte, mask uint, last_flush_pos uint64, bytes
num_effective_dist_codes = numHistogramDistanceSymbols
}
optimizeHistograms(num_effective_dist_codes, &mb)
optimizeHistograms(num_effective_dist_codes, mb)
}
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, &mb, storage_ix, storage)
destroyMetaBlockSplit(&mb)
storeMetaBlock(data, uint(wrapped_last_flush_pos), bytes, mask, prev_byte, prev_byte2, is_last, &block_params, literal_context_mode, commands, mb, storage_ix, storage)
freeMetaBlockSplit(mb)
}
if bytes+4 < *storage_ix>>3 {
@ -772,9 +771,14 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
return false
}
if s.params.quality == fastTwoPassCompressionQuality && s.command_buf_ == nil {
s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
if s.params.quality == fastTwoPassCompressionQuality {
if s.command_buf_ == nil || cap(s.command_buf_) < int(kCompressFragmentTwoPassBlockSize) {
s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
} else {
s.command_buf_ = s.command_buf_[:kCompressFragmentTwoPassBlockSize]
s.literal_buf_ = s.literal_buf_[:kCompressFragmentTwoPassBlockSize]
}
}
if s.params.quality == fastOnePassCompressionQuality || s.params.quality == fastTwoPassCompressionQuality {
@ -975,29 +979,23 @@ func checkFlushComplete(s *Writer) {
func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[]byte) bool {
var block_size_limit uint = uint(1) << s.params.lgwin
var buf_size uint = brotli_min_size_t(kCompressFragmentTwoPassBlockSize, brotli_min_size_t(*available_in, block_size_limit))
var tmp_command_buf []uint32 = nil
var command_buf []uint32 = nil
var tmp_literal_buf []byte = nil
var literal_buf []byte = nil
if s.params.quality != fastOnePassCompressionQuality && s.params.quality != fastTwoPassCompressionQuality {
return false
}
if s.params.quality == fastTwoPassCompressionQuality {
if s.command_buf_ == nil && buf_size == kCompressFragmentTwoPassBlockSize {
s.command_buf_ = make([]uint32, kCompressFragmentTwoPassBlockSize)
s.literal_buf_ = make([]byte, kCompressFragmentTwoPassBlockSize)
if s.command_buf_ == nil || cap(s.command_buf_) < int(buf_size) {
s.command_buf_ = make([]uint32, buf_size)
s.literal_buf_ = make([]byte, buf_size)
} else {
s.command_buf_ = s.command_buf_[:buf_size]
s.literal_buf_ = s.literal_buf_[:buf_size]
}
if s.command_buf_ != nil {
command_buf = s.command_buf_
literal_buf = s.literal_buf_
} else {
tmp_command_buf = make([]uint32, buf_size)
tmp_literal_buf = make([]byte, buf_size)
command_buf = tmp_command_buf
literal_buf = tmp_literal_buf
}
command_buf = s.command_buf_
literal_buf = s.literal_buf_
}
for {
@ -1056,8 +1054,6 @@ func encoderCompressStreamFast(s *Writer, op int, available_in *uint, next_in *[
break
}
tmp_command_buf = nil
tmp_literal_buf = nil
checkFlushComplete(s)
return true
}

View File

@ -23,12 +23,18 @@ func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
for new_size < r {
new_size *= 2
}
var new_array []byte = make([]byte, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
if cap(*a) < int(new_size) {
var new_array []byte = make([]byte, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
}
*a = new_array
} else {
*a = (*a)[:new_size]
}
*a = new_array
*c = new_size
}
}
@ -45,12 +51,16 @@ func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
new_size *= 2
}
new_array = make([]uint32, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
}
if cap(*a) < int(new_size) {
new_array = make([]uint32, new_size)
if *c != 0 {
copy(new_array, (*a)[:*c])
}
*a = new_array
*a = new_array
} else {
*a = (*a)[:new_size]
}
*c = new_size
}
}

View File

@ -1,5 +1,9 @@
package brotli
import (
"sync"
)
/* Copyright 2014 Google Inc. All Rights Reserved.
Distributed under MIT license.
@ -25,31 +29,30 @@ type metaBlockSplit struct {
distance_histograms_size uint
}
func initMetaBlockSplit(mb *metaBlockSplit) {
initBlockSplit(&mb.literal_split)
initBlockSplit(&mb.command_split)
initBlockSplit(&mb.distance_split)
mb.literal_context_map = nil
mb.literal_context_map_size = 0
mb.distance_context_map = nil
mb.distance_context_map_size = 0
mb.literal_histograms = nil
mb.literal_histograms_size = 0
mb.command_histograms = nil
mb.command_histograms_size = 0
mb.distance_histograms = nil
mb.distance_histograms_size = 0
var metaBlockPool sync.Pool
func getMetaBlockSplit() *metaBlockSplit {
mb, _ := metaBlockPool.Get().(*metaBlockSplit)
if mb == nil {
mb = &metaBlockSplit{}
} else {
initBlockSplit(&mb.literal_split)
initBlockSplit(&mb.command_split)
initBlockSplit(&mb.distance_split)
mb.literal_context_map = mb.literal_context_map[:0]
mb.literal_context_map_size = 0
mb.distance_context_map = mb.distance_context_map[:0]
mb.distance_context_map_size = 0
mb.literal_histograms = mb.literal_histograms[:0]
mb.command_histograms = mb.command_histograms[:0]
mb.distance_histograms = mb.distance_histograms[:0]
}
return mb
}
func destroyMetaBlockSplit(mb *metaBlockSplit) {
destroyBlockSplit(&mb.literal_split)
destroyBlockSplit(&mb.command_split)
destroyBlockSplit(&mb.distance_split)
mb.literal_context_map = nil
mb.distance_context_map = nil
mb.literal_histograms = nil
mb.command_histograms = nil
mb.distance_histograms = nil
func freeMetaBlockSplit(mb *metaBlockSplit) {
metaBlockPool.Put(mb)
}
func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) {
@ -206,21 +209,30 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
distance_histograms = make([]histogramDistance, distance_histograms_size)
clearHistogramsDistance(distance_histograms, distance_histograms_size)
assert(mb.command_histograms == nil)
mb.command_histograms_size = mb.command_split.num_types
mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
if cap(mb.command_histograms) < int(mb.command_histograms_size) {
mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
} else {
mb.command_histograms = mb.command_histograms[:mb.command_histograms_size]
}
clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size)
buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
literal_context_modes = nil
assert(mb.literal_context_map == nil)
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
} else {
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
}
assert(mb.literal_histograms == nil)
mb.literal_histograms_size = mb.literal_context_map_size
mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
if cap(mb.literal_histograms) < int(mb.literal_histograms_size) {
mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
} else {
mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size]
}
clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map)
literal_histograms = nil
@ -236,13 +248,19 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
}
}
assert(mb.distance_context_map == nil)
mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
} else {
mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
}
assert(mb.distance_histograms == nil)
mb.distance_histograms_size = mb.distance_context_map_size
mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
} else {
mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
}
clusterHistogramsDistance(distance_histograms, mb.distance_context_map_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
distance_histograms = nil
@ -295,9 +313,12 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
split.num_blocks = max_num_blocks
assert(*histograms == nil)
*histograms_size = max_num_types * num_contexts
*histograms = make([]histogramLiteral, (*histograms_size))
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramLiteral, (*histograms_size))
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */
@ -450,9 +471,12 @@ func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, cont
func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
var i uint
assert(mb.literal_context_map == nil)
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
} else {
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
}
for i = 0; i < mb.literal_split.num_types; i++ {
var offset uint32 = uint32(i * num_contexts)

View File

@ -43,9 +43,12 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
assert(*histograms == nil)
*histograms_size = max_num_types
*histograms = make([]histogramCommand, (*histograms_size))
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramCommand, (*histograms_size))
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */

View File

@ -43,9 +43,12 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
assert(*histograms == nil)
*histograms_size = max_num_types
*histograms = make([]histogramDistance, (*histograms_size))
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramDistance, *histograms_size)
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */

View File

@ -43,9 +43,12 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
self.split_.num_blocks = max_num_blocks
assert(*histograms == nil)
*histograms_size = max_num_types
*histograms = make([]histogramLiteral, (*histograms_size))
if histograms == nil || cap(*histograms) < int(*histograms_size) {
*histograms = make([]histogramLiteral, *histograms_size)
} else {
*histograms = (*histograms)[:*histograms_size]
}
self.histograms_ = *histograms
/* Clear only current histogram. */

View File

@ -44,11 +44,16 @@ const kSlackForEightByteHashingEverywhere uint = 7
/* Allocates or re-allocates data_ to the given length + plus some slack
region before and after. Fills the slack regions with zeros. */
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
var new_data []byte = make([]byte, (2 + uint(buflen) + kSlackForEightByteHashingEverywhere))
var new_data []byte
var i uint
size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
if cap(rb.data_) < size {
new_data = make([]byte, size)
} else {
new_data = rb.data_[:size]
}
if rb.data_ != nil {
copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
rb.data_ = nil
}
rb.data_ = new_data