diff --git a/backward_references.go b/backward_references.go
index 1d4ce86..0ac5cb8 100644
--- a/backward_references.go
+++ b/backward_references.go
@@ -51,7 +51,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
 	var gap uint = 0
 	/* Set maximum distance, see section 9.1. of the spec. */
 
-	var kMinScore uint = scoreBase + 100
+	const kMinScore uint = scoreBase + 100
 
 	/* For speed up heuristics for random data. */
 
diff --git a/backward_references_hq.go b/backward_references_hq.go
index 2323285..5eac736 100644
--- a/backward_references_hq.go
+++ b/backward_references_hq.go
@@ -15,7 +15,7 @@ type zopfliNode struct {
 
 const maxEffectiveDistanceAlphabetSize = 544
 
-var kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */
+const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */
 
 var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}
 
diff --git a/block_splitter.go b/block_splitter.go
index 1fc1cd8..2ccff45 100644
--- a/block_splitter.go
+++ b/block_splitter.go
@@ -17,31 +17,21 @@ type blockSplit struct {
 	lengths_alloc_size uint
 }
 
-var kMaxLiteralHistograms uint = 100
-
-var kMaxCommandHistograms uint = 50
-
-var kLiteralBlockSwitchCost float64 = 28.1
-
-var kCommandBlockSwitchCost float64 = 13.5
-
-var kDistanceBlockSwitchCost float64 = 14.6
-
-var kLiteralStrideLength uint = 70
-
-var kCommandStrideLength uint = 40
-
-var kSymbolsPerLiteralHistogram uint = 544
-
-var kSymbolsPerCommandHistogram uint = 530
-
-var kSymbolsPerDistanceHistogram uint = 544
-
-var kMinLengthForBlockSplitting uint = 128
-
-var kIterMulForRefining uint = 2
-
-var kMinItersForRefining uint = 100
+const (
+	kMaxLiteralHistograms        uint    = 100
+	kMaxCommandHistograms        uint    = 50
+	kLiteralBlockSwitchCost      float64 = 28.1
+	kCommandBlockSwitchCost      float64 = 13.5
+	kDistanceBlockSwitchCost     float64 = 14.6
+	kLiteralStrideLength         uint    = 70
+	kCommandStrideLength         uint    = 40
+	kSymbolsPerLiteralHistogram  uint    = 544
+	kSymbolsPerCommandHistogram  uint    = 530
+	kSymbolsPerDistanceHistogram uint    = 544
+	kMinLengthForBlockSplitting  uint    = 128
+	kIterMulForRefining          uint    = 2
+	kMinItersForRefining         uint    = 100
+)
 
 func countLiterals(cmds []command, num_commands uint) uint {
 	var total_length uint = 0
diff --git a/compress_fragment.go b/compress_fragment.go
index 1f6520a..435898e 100644
--- a/compress_fragment.go
+++ b/compress_fragment.go
@@ -69,7 +69,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
 			histogram_total += uint(adjust)
 		}
 	} else {
-		var kSampleRate uint = 29
+		const kSampleRate uint = 29
 		for i = 0; i < input_size; i += kSampleRate {
 			histogram[input[i]]++
 		}
@@ -501,8 +501,8 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
 	var next_emit int = 0
 	var base_ip int = 0
 	var input int = 0
-	var kInputMarginBytes uint = windowGap
-	var kMinMatchLen uint = 5
+	const kInputMarginBytes uint = windowGap
+	const kMinMatchLen uint = 5
 	var metablock_start int = input
 	var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
 	var total_block_size uint = block_size
diff --git a/compress_fragment_two_pass.go b/compress_fragment_two_pass.go
index 5c03d39..b0078cd 100644
--- a/compress_fragment_two_pass.go
+++ b/compress_fragment_two_pass.go
@@ -14,7 +14,7 @@ import "encoding/binary"
    second pass we emit them into the bit stream using prefix codes built based
    on the actual command and literal byte histograms.
 */
-var kCompressFragmentTwoPassBlockSize uint = 1 << 17
+const kCompressFragmentTwoPassBlockSize uint = 1 << 17
 
 func hash1(p []byte, shift uint, length uint) uint32 {
 	var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32)
diff --git a/decode.go b/decode.go
index 1086110..ccf16f6 100644
--- a/decode.go
+++ b/decode.go
@@ -72,7 +72,7 @@ const huffmanTableMask = 0xFF
 /* We need the slack region for the following reasons:
    - doing up to two 16-byte copies for fast backward copying
    - inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */
-var kRingBufferWriteAheadSlack uint32 = 42
+const kRingBufferWriteAheadSlack uint32 = 42
 
 var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
 
@@ -1528,8 +1528,8 @@ func takeDistanceFromRingBuffer(s *Reader) {
 		s.distance_context = 1
 	} else {
 		var distance_code int = s.distance_code << 1
-		var kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B
-		var kDistanceShortCodeValueOffset uint32 = 0xFA5FA500
+		const kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B
+		const kDistanceShortCodeValueOffset uint32 = 0xFA5FA500
 		var v int = (s.dist_rb_idx + int(kDistanceShortCodeIndexOffset>>uint(distance_code))) & 0x3
 		/* kDistanceShortCodeIndexOffset has 2-bit values from LSB:
 		   3, 2, 1, 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 */
diff --git a/encode.go b/encode.go
index 4e26efb..c2e9c11 100644
--- a/encode.go
+++ b/encode.go
@@ -901,8 +901,8 @@ func shouldCompress_encode(data []byte, mask uint, last_flush_pos uint64, bytes
 	if num_commands < (bytes>>8)+2 {
 		if float64(num_literals) > 0.99*float64(bytes) {
 			var literal_histo = [256]uint32{0}
-			var kSampleRate uint32 = 13
-			var kMinEntropy float64 = 7.92
+			const kSampleRate uint32 = 13
+			const kMinEntropy float64 = 7.92
 			var bit_cost_threshold float64 = float64(bytes) * kMinEntropy / float64(kSampleRate)
 			var t uint = uint((uint32(bytes) + kSampleRate - 1) / kSampleRate)
 			var pos uint32 = uint32(last_flush_pos)
diff --git a/hash.go b/hash.go
index 63d61f1..003b433 100644
--- a/hash.go
+++ b/hash.go
@@ -31,11 +31,11 @@ type hasherHandle interface {
 
 type score_t uint
 
-var kCutoffTransformsCount uint32 = 10
+const kCutoffTransformsCount uint32 = 10
 
 /* 0, 12, 27, 23, 42, 63, 56, 48, 59, 64 */
 /* 0+0, 4+8, 8+19, 12+11, 16+26, 20+43, 24+32, 28+20, 32+27, 36+28 */
-var kCutoffTransforms uint64 = 0x071B520ADA2D3200
+const kCutoffTransforms uint64 = 0x071B520ADA2D3200
 
 type hasherSearchResult struct {
 	len uint
diff --git a/hash_rolling.go b/hash_rolling.go
index 569fbda..ad655a0 100644
--- a/hash_rolling.go
+++ b/hash_rolling.go
@@ -9,9 +9,9 @@ package brotli
 /* NOTE: this hasher does not search in the dictionary. It is used as
    backup-hasher, the main hasher already searches in it.
 */
-var kRollingHashMul32hashRolling uint32 = 69069
+const kRollingHashMul32 uint32 = 69069
 
-var kInvalidPosHashRolling uint32 = 0xffffffff
+const kInvalidPosHashRolling uint32 = 0xffffffff
 
 /* This hasher uses a longer forward length, but returning a higher value here
    will hurt compression by the main hasher when combined with a composite
@@ -57,7 +57,7 @@ func (h *hashRolling) Initialize(params *encoderParams) {
 	h.state = 0
 	h.next_ix = 0
 
-	h.factor = kRollingHashMul32hashRolling
+	h.factor = kRollingHashMul32
 
 	/* Compute the factor of the oldest byte to remove:
 	   factor**steps modulo 0xffffffff (the multiplications rely on 32-bit overflow) */
diff --git a/ringbuffer.go b/ringbuffer.go
index 6ea3254..693a3f6 100644
--- a/ringbuffer.go
+++ b/ringbuffer.go
@@ -42,11 +42,10 @@ func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
 	*(*uint32)(&rb.total_size_) = rb.size_ + rb.tail_size_
 }
 
+const kSlackForEightByteHashingEverywhere uint = 7
+
 /* Allocates or re-allocates data_ to the given length + plus some slack
    region before and after. Fills the slack regions with zeros. */
-
-var kSlackForEightByteHashingEverywhere uint = 7
-
 func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
 	var new_data []byte = make([]byte, (2 + uint(buflen) + kSlackForEightByteHashingEverywhere))
 	var i uint
diff --git a/static_dict.go b/static_dict.go
index 181337f..8e7492d 100644
--- a/static_dict.go
+++ b/static_dict.go
@@ -12,7 +12,7 @@ import "encoding/binary"
 
 const maxStaticDictionaryMatchLen = 37
 
-var kInvalidMatch uint32 = 0xFFFFFFF
+const kInvalidMatch uint32 = 0xFFFFFFF
 
 /* Copyright 2013 Google Inc. All Rights Reserved.
 
diff --git a/static_dict_lut.go b/static_dict_lut.go
index 997ced4..b33963e 100644
--- a/static_dict_lut.go
+++ b/static_dict_lut.go
@@ -14,9 +14,9 @@ type dictWord struct {
 	idx uint16
 }
 
-var kDictNumBits int = 15
+const kDictNumBits int = 15
 
-var kDictHashMul32 uint32 = 0x1E35A7BD
+const kDictHashMul32 uint32 = 0x1E35A7BD
 
 var kStaticDictionaryBuckets = [32768]uint16{
 	1,
diff --git a/transform.go b/transform.go
index 0bb00d7..7ad085c 100644
--- a/transform.go
+++ b/transform.go
@@ -60,7 +60,7 @@ func transformSuffix(t *transforms, I int) []byte {
 }
 
 /* RFC 7932 transforms string data */
-var kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000"
+const kPrefixSuffix string = "\001 \002, \010 of the \004 of \002s \001.\005 and \004 " + "in \001\"\004 to \002\">\001\n\002. \001]\005 for \003 a \006 " + "that \001'\006 with \006 from \004 by \001(\006. T" + "he \004 on \004 as \004 is \004ing \002\n\t\001:\003ed " + "\002=\"\004 at \003ly \001,\002='\005.com/\007. This \005" + " not \003er \003al \004ful \004ive \005less \004es" + "t \004ize \002\xc2\xa0\004ous \005 the \002e \000"
 
 var kPrefixSuffixMap = [50]uint16{
 	0x00,
diff --git a/utf8_util.go b/utf8_util.go
index 8de0438..f86de3d 100644
--- a/utf8_util.go
+++ b/utf8_util.go
@@ -8,7 +8,7 @@ package brotli
 
 /* Heuristics for deciding about the UTF8-ness of strings.
 */
-var kMinUTF8Ratio float64 = 0.75
+const kMinUTF8Ratio float64 = 0.75
 
 /* Returns 1 if at least min_fraction of the bytes between pos and pos + length
    in the (data, mask) ring-buffer is UTF8-encoded, otherwise
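
Reviewer note, not part of the patch: every hunk above applies the same mechanical change, turning package-level var declarations of values that are never reassigned into const declarations, grouped into a single const block where several of them sit together (as in block_splitter.go). Below is a minimal standalone sketch of the pattern; the two constant values are borrowed from the encode.go hunk, while the surrounding program and the 4096-byte input size are made up purely for illustration.

package main

import "fmt"

// Before: mutable package-level variables. Nothing stops other code in the
// package from reassigning them, and each occupies a real memory slot.
//
//	var kSampleRate uint32 = 13
//	var kMinEntropy float64 = 7.92

// After: typed constants, fixed at compile time and impossible to reassign.
// Related values can share one grouped block, as block_splitter.go now does.
const (
	kSampleRate uint32  = 13
	kMinEntropy float64 = 7.92
)

func main() {
	// Call sites need no changes: the constants participate in expressions
	// exactly as the old variables did (compare shouldCompress_encode).
	bytes := 4096 // hypothetical input size, for illustration only
	bitCostThreshold := float64(bytes) * kMinEntropy / float64(kSampleRate)
	fmt.Println(bitCostThreshold)
}

The practical upshot is the one the patch relies on: a const cannot be written to at runtime, and constant subexpressions can be folded by the compiler, with no change in behavior at any call site.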