Start converting hasher functions to methods.

Andy Balholm 2019-03-08 15:10:41 -08:00
parent 6a14da654a
commit bbbdedf380
17 changed files with 212 additions and 332 deletions
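Every file in this commit applies the same mechanical change: a free function that took a HasherHandle and immediately type-asserted it through a SelfHXX helper becomes a pointer-receiver method, so the assertion disappears and call sites go through the interface. A minimal self-contained sketch of the before/after shape (Hasher, Params, and windowMask are illustrative stand-ins, not the package's real definitions):

package main

import "fmt"

type Params struct{ lgwin uint }

// Stand-in for the package's HasherHandle interface.
type Hasher interface {
    Initialize(params *Params)
}

type H2 struct{ windowMask uint }

// Old style: a free function plus a type assertion on the handle.
func InitializeH2(handle Hasher, params *Params) {
    self := handle.(*H2) // what the SelfH2 helper did
    self.windowMask = (1 << params.lgwin) - 1
}

// New style: a pointer-receiver method; the assertion disappears.
func (h *H2) Initialize(params *Params) {
    h.windowMask = (1 << params.lgwin) - 1
}

func main() {
    var h Hasher = new(H2)
    h.Initialize(&Params{lgwin: 22}) // dynamic dispatch, no type switch
    fmt.Println(h.(*H2).windowMask)
}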

@@ -1598,7 +1598,7 @@ func BrotliCompressBufferQuality10(lgwin int, input_size uint, input_buffer []by
     var path_size uint
     var new_cmd_alloc_size uint
     BrotliInitZopfliNodes(nodes, block_size+1)
-    StitchToPreviousBlockH10(hasher, block_size, block_start, input_buffer, mask)
+    hasher.StitchToPreviousBlock(block_size, block_start, input_buffer, mask)
     path_size = BrotliZopfliComputeShortestPath(block_size, block_start, input_buffer, mask, &params, dist_cache[:], hasher, nodes)
     /* We allocate a command buffer in the first iteration of this loop that

h10.go (23 changed lines)

@@ -45,20 +45,18 @@ func ForestH10(self *H10) []uint32 {
     return []uint32(self.forest)
 }
-func InitializeH10(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *H10 = SelfH10(handle)
-    self.window_mask_ = (1 << params.lgwin) - 1
-    self.invalid_pos_ = uint32(0 - self.window_mask_)
+func (h *H10) Initialize(params *BrotliEncoderParams) {
+    h.window_mask_ = (1 << params.lgwin) - 1
+    h.invalid_pos_ = uint32(0 - h.window_mask_)
     var num_nodes uint = uint(1) << params.lgwin
-    self.forest = make([]uint32, 2*num_nodes)
+    h.forest = make([]uint32, 2*num_nodes)
 }
-func PrepareH10(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H10 = SelfH10(handle)
-    var invalid_pos uint32 = self.invalid_pos_
+func (h *H10) Prepare(one_shot bool, input_size uint, data []byte) {
+    var invalid_pos uint32 = h.invalid_pos_
     var i uint32
     for i = 0; i < 1<<17; i++ {
-        self.buckets_[i] = invalid_pos
+        h.buckets_[i] = invalid_pos
     }
 }
@@ -261,8 +259,7 @@ func StoreRangeH10(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     }
 }
-func StitchToPreviousBlockH10(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
-    var self *H10 = SelfH10(handle)
+func (h *H10) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH10()-1 && position >= 128 {
         var i_start uint = position - 128 + 1
         var i_end uint = brotli_min_size_t(position, i_start+num_bytes)
@@ -276,12 +273,12 @@ func StitchToPreviousBlockH10(handle HasherHandle, num_bytes uint, position uint
                Furthermore, we have to make sure that we don't look further back
                from the start of the next block than the window size, otherwise we
                could access already overwritten areas of the ring-buffer. */
-            var max_backward uint = self.window_mask_ - brotli_max_size_t(BROTLI_WINDOW_GAP-1, position-i)
+            var max_backward uint = h.window_mask_ - brotli_max_size_t(BROTLI_WINDOW_GAP-1, position-i)
             /* We know that i + 128 <= position + num_bytes, i.e. the
                end of the current block and that we have at least
                128 tail in the ring-buffer. */
-            StoreAndFindMatchesH10(self, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
+            StoreAndFindMatchesH10(h, ringbuffer, i, ringbuffer_mask, 128, max_backward, nil, nil)
         }
     }
 }
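The max_backward clamp in the last hunk keeps the matcher from reading ring-buffer regions that may already be overwritten. A tiny self-contained illustration of the arithmetic (the constants here are made up for the example, not brotli's):

package main

import "fmt"

func maxSize(a, b uint) uint {
    if a > b {
        return a
    }
    return b
}

func main() {
    const windowGap = 16 // stand-in for BROTLI_WINDOW_GAP
    var windowMask uint = 1<<22 - 1
    position, i := uint(5000), uint(4900)
    // The farther i lags behind position, the less of the window remains usable.
    maxBackward := windowMask - maxSize(windowGap-1, position-i)
    fmt.Println(maxBackward)
}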

h2.go (19 changed lines)

@@ -43,18 +43,17 @@ func SelfH2(handle HasherHandle) *H2 {
     return handle.(*H2)
 }
-func InitializeH2(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H2) Initialize(params *BrotliEncoderParams) {
 }
-func PrepareH2(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H2 = SelfH2(handle)
+func (h *H2) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (4 << 16) >> 7
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
         var i uint
         for i = 0; i < input_size; i++ {
             var key uint32 = HashBytesH2(data[i:])
-            self.buckets_[key] = 0
+            h.buckets_[key] = 0
         }
     } else {
         /* It is not strictly necessary to fill this buffer here, but
@@ -62,8 +61,8 @@ func PrepareH2(handle HasherHandle, one_shot bool, input_size uint, data []byte)
            (but correct). This is because random data would cause the
            system to find accidentally good backward references here and there. */
         var i int
-        for i = 0; i < len(self.buckets_); i++ {
-            self.buckets_[i] = 0
+        for i = 0; i < len(h.buckets_); i++ {
+            h.buckets_[i] = 0
         }
     }
 }
@@ -85,15 +84,15 @@ func StoreRangeH2(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
     }
 }
-func StitchToPreviousBlockH2(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H2) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH2()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH2(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH2(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH2(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH2(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH2(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH2(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h3.go (19 changed lines)

@@ -39,11 +39,10 @@ func SelfH3(handle HasherHandle) *H3 {
     return handle.(*H3)
 }
-func InitializeH3(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H3) Initialize(params *BrotliEncoderParams) {
 }
-func PrepareH3(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H3 = SelfH3(handle)
+func (h *H3) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (4 << 16) >> 7
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -51,7 +50,7 @@ func PrepareH3(handle HasherHandle, one_shot bool, input_size uint, data []byte)
         for i = 0; i < input_size; i++ {
             var key uint32 = HashBytesH3(data[i:])
             for i := 0; i < int(2); i++ {
-                self.buckets_[key:][i] = 0
+                h.buckets_[key:][i] = 0
             }
         }
     } else {
@@ -60,8 +59,8 @@ func PrepareH3(handle HasherHandle, one_shot bool, input_size uint, data []byte)
            (but correct). This is because random data would cause the
            system to find accidentally good backward references here and there. */
         var i int
-        for i = 0; i < len(self.buckets_); i++ {
-            self.buckets_[i] = 0
+        for i = 0; i < len(h.buckets_); i++ {
+            h.buckets_[i] = 0
         }
     }
 }
@@ -83,15 +82,15 @@ func StoreRangeH3(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
     }
 }
-func StitchToPreviousBlockH3(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H3) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH3()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH3(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH3(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH3(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH3(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH3(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH3(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h35.go (41 changed lines)

@@ -42,42 +42,40 @@ func SelfH35(handle HasherHandle) *H35 {
     return handle.(*H35)
 }
-func InitializeH35(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *H35 = SelfH35(handle)
-    self.ha = nil
-    self.hb = nil
-    self.params = params
+func (h *H35) Initialize(params *BrotliEncoderParams) {
+    h.ha = nil
+    h.hb = nil
+    h.params = params
 }
 /* TODO: Initialize of the hashers is defered to Prepare (and params
    remembered here) because we don't get the one_shot and input_size params
    here that are needed to know the memory size of them. Instead provide
    those params to all hashers InitializeH35 */
-func PrepareH35(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H35 = SelfH35(handle)
-    if self.ha == nil {
+func (h *H35) Prepare(one_shot bool, input_size uint, data []byte) {
+    if h.ha == nil {
         var common_a *HasherCommon
         var common_b *HasherCommon
-        self.ha = new(H3)
-        common_a = self.ha.Common()
-        common_a.params = self.params.hasher
+        h.ha = new(H3)
+        common_a = h.ha.Common()
+        common_a.params = h.params.hasher
         common_a.is_prepared_ = false
         common_a.dict_num_lookups = 0
         common_a.dict_num_matches = 0
-        InitializeH3(self.ha, self.params)
+        h.ha.Initialize(h.params)
-        self.hb = new(HROLLING_FAST)
-        common_b = self.hb.Common()
-        common_b.params = self.params.hasher
+        h.hb = new(HROLLING_FAST)
+        common_b = h.hb.Common()
+        common_b.params = h.params.hasher
         common_b.is_prepared_ = false
         common_b.dict_num_lookups = 0
         common_b.dict_num_matches = 0
-        InitializeHROLLING_FAST(self.hb, self.params)
+        h.hb.Initialize(h.params)
     }
-    PrepareH3(self.ha, one_shot, input_size, data)
-    PrepareHROLLING_FAST(self.hb, one_shot, input_size, data)
+    h.ha.Prepare(one_shot, input_size, data)
+    h.hb.Prepare(one_shot, input_size, data)
 }
 func StoreH35(handle HasherHandle, data []byte, mask uint, ix uint) {
@@ -92,10 +90,9 @@ func StoreRangeH35(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     StoreRangeHROLLING_FAST(self.hb, data, mask, ix_start, ix_end)
 }
-func StitchToPreviousBlockH35(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-    var self *H35 = SelfH35(handle)
-    StitchToPreviousBlockH3(self.ha, num_bytes, position, ringbuffer, ring_buffer_mask)
-    StitchToPreviousBlockHROLLING_FAST(self.hb, num_bytes, position, ringbuffer, ring_buffer_mask)
+func (h *H35) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+    h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+    h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
 }
 func PrepareDistanceCacheH35(handle HasherHandle, distance_cache []int) {
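The TODO above Prepare explains the lazy construction: Initialize doesn't receive one_shot or input_size, so the composite hashers only record params and defer building their sub-hashers to the first Prepare call. A self-contained sketch of that pattern (type and field names are illustrative):

package main

import "fmt"

type Params struct{ bucketBits uint }

type Hasher interface {
    Initialize(params *Params)
    Prepare(one_shot bool, input_size uint, data []byte)
}

type inner struct{ buckets []uint32 }

func (in *inner) Initialize(params *Params) {
    in.buckets = make([]uint32, 1<<params.bucketBits)
}

func (in *inner) Prepare(one_shot bool, input_size uint, data []byte) {}

// Mirrors H35/H55/H65: Initialize only records params; the sub-hasher
// is built on the first Prepare, when one_shot/input_size are known.
type composite struct {
    ha     Hasher
    params *Params
}

func (c *composite) Initialize(params *Params) {
    c.ha = nil // allocation deferred, per the TODO
    c.params = params
}

func (c *composite) Prepare(one_shot bool, input_size uint, data []byte) {
    if c.ha == nil {
        c.ha = new(inner)
        c.ha.Initialize(c.params)
    }
    c.ha.Prepare(one_shot, input_size, data)
}

func main() {
    var h Hasher = &composite{}
    h.Initialize(&Params{bucketBits: 4})
    h.Prepare(true, 3, []byte("abc"))
    fmt.Println("sub-hasher ready:", h.(*composite).ha != nil)
}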

h4.go (19 changed lines)

@@ -39,11 +39,10 @@ func SelfH4(handle HasherHandle) *H4 {
     return handle.(*H4)
 }
-func InitializeH4(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H4) Initialize(params *BrotliEncoderParams) {
 }
-func PrepareH4(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H4 = SelfH4(handle)
+func (h *H4) Prepare(one_shot bool, input_size uint, data []byte) {
    var partial_prepare_threshold uint = (4 << 17) >> 7
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -51,7 +50,7 @@ func PrepareH4(handle HasherHandle, one_shot bool, input_size uint, data []byte)
         for i = 0; i < input_size; i++ {
             var key uint32 = HashBytesH4(data[i:])
             for i := 0; i < int(4); i++ {
-                self.buckets_[key:][i] = 0
+                h.buckets_[key:][i] = 0
             }
         }
     } else {
@@ -60,8 +59,8 @@ func PrepareH4(handle HasherHandle, one_shot bool, input_size uint, data []byte)
            (but correct). This is because random data would cause the
            system to find accidentally good backward references here and there. */
         var i int
-        for i = 0; i < len(self.buckets_); i++ {
-            self.buckets_[i] = 0
+        for i = 0; i < len(h.buckets_); i++ {
+            h.buckets_[i] = 0
         }
     }
 }
@@ -83,15 +82,15 @@ func StoreRangeH4(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
     }
 }
-func StitchToPreviousBlockH4(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H4) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH4()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH4(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH4(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH4(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH4(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH4(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH4(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h40.go (29 changed lines)

@@ -53,18 +53,17 @@ func SelfH40(handle HasherHandle) *H40 {
     return handle.(*H40)
 }
-func InitializeH40(handle HasherHandle, params *BrotliEncoderParams) {
+func (h *H40) Initialize(params *BrotliEncoderParams) {
     var q uint
     if params.quality > 6 {
         q = 7
     } else {
         q = 8
     }
-    SelfH40(handle).max_hops = q << uint(params.quality-4)
+    h.max_hops = q << uint(params.quality-4)
 }
-func PrepareH40(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H40 = SelfH40(handle)
+func (h *H40) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (1 << 15) >> 6
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -73,24 +72,24 @@ func PrepareH40(handle HasherHandle, one_shot bool, input_size uint, data []byte
             var bucket uint = HashBytesH40(data[i:])
             /* See InitEmpty comment. */
-            self.addr[bucket] = 0xCCCCCCCC
+            h.addr[bucket] = 0xCCCCCCCC
-            self.head[bucket] = 0xCCCC
+            h.head[bucket] = 0xCCCC
         }
     } else {
         /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position
            processed by hasher never reaches 3GB + 64M; this makes all new chains
            to be terminated after the first node. */
         var i int
-        for i = 0; i < len(self.addr); i++ {
-            self.addr[i] = 0xCCCCCCCC
+        for i = 0; i < len(h.addr); i++ {
+            h.addr[i] = 0xCCCCCCCC
         }
-        self.head = [1 << 15]uint16{}
+        h.head = [1 << 15]uint16{}
     }
-    self.tiny_hash = [65536]byte{}
-    self.free_slot_idx = [1]uint16{}
+    h.tiny_hash = [65536]byte{}
+    h.free_slot_idx = [1]uint16{}
 }
 /* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
@@ -120,15 +119,15 @@ func StoreRangeH40(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     }
 }
-func StitchToPreviousBlockH40(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+func (h *H40) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
     if num_bytes >= HashTypeLengthH40()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH40(handle, ringbuffer, ring_buffer_mask, position-3)
+        StoreH40(h, ringbuffer, ring_buffer_mask, position-3)
-        StoreH40(handle, ringbuffer, ring_buffer_mask, position-2)
-        StoreH40(handle, ringbuffer, ring_buffer_mask, position-1)
+        StoreH40(h, ringbuffer, ring_buffer_mask, position-2)
+        StoreH40(h, ringbuffer, ring_buffer_mask, position-1)
     }
 }
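The 0xCCCCCCCC fill in Prepare is a sentinel: since the position processed by the hasher wraps before reaching 3GB + 64M, no real chain entry ever equals it, so a chain walk terminates on it. A minimal sketch of a sentinel-terminated chain walk (a flat map stands in for H40's actual bank/slot layout):

package main

import "fmt"

const endOfChain uint32 = 0xCCCCCCCC // unreachable as a real position

func main() {
    // chain[pos] -> previous position with the same bucket hash, or the sentinel.
    chain := map[uint32]uint32{100: 40, 40: endOfChain}
    for pos := uint32(100); pos != endOfChain; pos = chain[pos] {
        fmt.Println("candidate match position:", pos)
    }
}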

h41.go (29 changed lines)

@@ -53,18 +53,17 @@ func SelfH41(handle HasherHandle) *H41 {
     return handle.(*H41)
 }
-func InitializeH41(handle HasherHandle, params *BrotliEncoderParams) {
+func (h *H41) Initialize(params *BrotliEncoderParams) {
     var tmp uint
     if params.quality > 6 {
         tmp = 7
     } else {
         tmp = 8
     }
-    SelfH41(handle).max_hops = tmp << uint(params.quality-4)
+    h.max_hops = tmp << uint(params.quality-4)
 }
-func PrepareH41(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H41 = SelfH41(handle)
+func (h *H41) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (1 << 15) >> 6
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -73,24 +72,24 @@ func PrepareH41(handle HasherHandle, one_shot bool, input_size uint, data []byte
             var bucket uint = HashBytesH41(data[i:])
             /* See InitEmpty comment. */
-            self.addr[bucket] = 0xCCCCCCCC
+            h.addr[bucket] = 0xCCCCCCCC
-            self.head[bucket] = 0xCCCC
+            h.head[bucket] = 0xCCCC
         }
     } else {
         /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position
            processed by hasher never reaches 3GB + 64M; this makes all new chains
            to be terminated after the first node. */
         var i int
-        for i = 0; i < len(self.addr); i++ {
-            self.addr[i] = 0xCCCCCCCC
+        for i = 0; i < len(h.addr); i++ {
+            h.addr[i] = 0xCCCCCCCC
         }
-        self.head = [1 << 15]uint16{}
+        h.head = [1 << 15]uint16{}
     }
-    self.tiny_hash = [65536]byte{}
-    self.free_slot_idx = [1]uint16{}
+    h.tiny_hash = [65536]byte{}
+    h.free_slot_idx = [1]uint16{}
 }
 /* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
@@ -120,15 +119,15 @@ func StoreRangeH41(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     }
 }
-func StitchToPreviousBlockH41(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+func (h *H41) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
     if num_bytes >= HashTypeLengthH41()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH41(handle, ringbuffer, ring_buffer_mask, position-3)
+        StoreH41(h, ringbuffer, ring_buffer_mask, position-3)
-        StoreH41(handle, ringbuffer, ring_buffer_mask, position-2)
-        StoreH41(handle, ringbuffer, ring_buffer_mask, position-1)
+        StoreH41(h, ringbuffer, ring_buffer_mask, position-2)
+        StoreH41(h, ringbuffer, ring_buffer_mask, position-1)
     }
 }

h42.go (29 changed lines)

@@ -53,18 +53,17 @@ func SelfH42(handle HasherHandle) *H42 {
     return handle.(*H42)
 }
-func InitializeH42(handle HasherHandle, params *BrotliEncoderParams) {
+func (h *H42) Initialize(params *BrotliEncoderParams) {
     var tmp uint
     if params.quality > 6 {
         tmp = 7
     } else {
         tmp = 8
     }
-    SelfH42(handle).max_hops = tmp << uint(params.quality-4)
+    h.max_hops = tmp << uint(params.quality-4)
 }
-func PrepareH42(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H42 = SelfH42(handle)
+func (h *H42) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (1 << 15) >> 6
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -73,24 +72,24 @@ func PrepareH42(handle HasherHandle, one_shot bool, input_size uint, data []byte
             var bucket uint = HashBytesH42(data[i:])
             /* See InitEmpty comment. */
-            self.addr[bucket] = 0xCCCCCCCC
+            h.addr[bucket] = 0xCCCCCCCC
-            self.head[bucket] = 0xCCCC
+            h.head[bucket] = 0xCCCC
         }
     } else {
         /* Fill |addr| array with 0xCCCCCCCC value. Because of wrapping, position
            processed by hasher never reaches 3GB + 64M; this makes all new chains
            to be terminated after the first node. */
         var i int
-        for i = 0; i < len(self.addr); i++ {
-            self.addr[i] = 0xCCCCCCCC
+        for i = 0; i < len(h.addr); i++ {
+            h.addr[i] = 0xCCCCCCCC
         }
-        self.head = [1 << 15]uint16{}
+        h.head = [1 << 15]uint16{}
     }
-    self.tiny_hash = [65536]byte{}
-    self.free_slot_idx = [512]uint16{}
+    h.tiny_hash = [65536]byte{}
+    h.free_slot_idx = [512]uint16{}
 }
 /* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
@@ -120,15 +119,15 @@ func StoreRangeH42(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     }
 }
-func StitchToPreviousBlockH42(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+func (h *H42) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
     if num_bytes >= HashTypeLengthH42()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH42(handle, ringbuffer, ring_buffer_mask, position-3)
+        StoreH42(h, ringbuffer, ring_buffer_mask, position-3)
-        StoreH42(handle, ringbuffer, ring_buffer_mask, position-2)
-        StoreH42(handle, ringbuffer, ring_buffer_mask, position-1)
+        StoreH42(h, ringbuffer, ring_buffer_mask, position-2)
+        StoreH42(h, ringbuffer, ring_buffer_mask, position-1)
    }
 }

h5.go (35 changed lines)

@@ -52,30 +52,27 @@ func BucketsH5(self *H5) []uint32 {
     return []uint32(self.buckets)
 }
-func InitializeH5(handle HasherHandle, params *BrotliEncoderParams) {
-    var common *HasherCommon = handle.Common()
-    var self *H5 = SelfH5(handle)
-    self.hash_shift_ = 32 - common.params.bucket_bits
-    self.bucket_size_ = uint(1) << uint(common.params.bucket_bits)
-    self.block_size_ = uint(1) << uint(common.params.block_bits)
-    self.block_mask_ = uint32(self.block_size_ - 1)
-    self.num = make([]uint16, self.bucket_size_)
-    self.buckets = make([]uint32, self.block_size_*self.bucket_size_)
+func (h *H5) Initialize(params *BrotliEncoderParams) {
+    h.hash_shift_ = 32 - h.params.bucket_bits
+    h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
+    h.block_size_ = uint(1) << uint(h.params.block_bits)
+    h.block_mask_ = uint32(h.block_size_ - 1)
+    h.num = make([]uint16, h.bucket_size_)
+    h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
 }
-func PrepareH5(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H5 = SelfH5(handle)
-    var num []uint16 = NumH5(self)
-    var partial_prepare_threshold uint = self.bucket_size_ >> 6
+func (h *H5) Prepare(one_shot bool, input_size uint, data []byte) {
+    var num []uint16 = h.num
+    var partial_prepare_threshold uint = h.bucket_size_ >> 6
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
         var i uint
         for i = 0; i < input_size; i++ {
-            var key uint32 = HashBytesH5(data[i:], self.hash_shift_)
+            var key uint32 = HashBytesH5(data[i:], h.hash_shift_)
             num[key] = 0
         }
     } else {
-        for i := 0; i < int(self.bucket_size_); i++ {
+        for i := 0; i < int(h.bucket_size_); i++ {
             num[i] = 0
         }
     }
@@ -100,15 +97,15 @@ func StoreRangeH5(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
     }
 }
-func StitchToPreviousBlockH5(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H5) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH5()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH5(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH5(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH5(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH5(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH5(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH5(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h54.go (17 changed lines)

@@ -39,11 +39,10 @@ func SelfH54(handle HasherHandle) *H54 {
     return handle.(*H54)
 }
-func InitializeH54(handle HasherHandle, params *BrotliEncoderParams) {
+func (*H54) Initialize(params *BrotliEncoderParams) {
 }
-func PrepareH54(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H54 = SelfH54(handle)
+func (h *H54) Prepare(one_shot bool, input_size uint, data []byte) {
     var partial_prepare_threshold uint = (4 << 20) >> 7
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
@@ -51,7 +50,7 @@ func PrepareH54(handle HasherHandle, one_shot bool, input_size uint, data []byte)
         for i = 0; i < input_size; i++ {
             var key uint32 = HashBytesH54(data[i:])
             for i := 0; i < int(4); i++ {
-                self.buckets_[key:][i] = 0
+                h.buckets_[key:][i] = 0
             }
         }
     } else {
@@ -59,7 +58,7 @@ func PrepareH54(handle HasherHandle, one_shot bool, input_size uint, data []byte)
            not filling will make the results of the compression stochastic
            (but correct). This is because random data would cause the
            system to find accidentally good backward references here and there. */
-        self.buckets_ = [(1 << 20) + 4]uint32{}
+        h.buckets_ = [(1 << 20) + 4]uint32{}
     }
 }
@@ -80,15 +79,15 @@ func StoreRangeH54(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     }
 }
-func StitchToPreviousBlockH54(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H54) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH54()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH54(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH54(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH54(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH54(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH54(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH54(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h55.go (41 changed lines)

@@ -40,42 +40,40 @@ func SelfH55(handle HasherHandle) *H55 {
     return handle.(*H55)
 }
-func InitializeH55(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *H55 = SelfH55(handle)
-    self.ha = nil
-    self.hb = nil
-    self.params = params
+func (h *H55) Initialize(params *BrotliEncoderParams) {
+    h.ha = nil
+    h.hb = nil
+    h.params = params
 }
 /* TODO: Initialize of the hashers is defered to Prepare (and params
    remembered here) because we don't get the one_shot and input_size params
    here that are needed to know the memory size of them. Instead provide
    those params to all hashers InitializeH55 */
-func PrepareH55(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H55 = SelfH55(handle)
-    if self.ha == nil {
+func (h *H55) Prepare(one_shot bool, input_size uint, data []byte) {
+    if h.ha == nil {
         var common_a *HasherCommon
         var common_b *HasherCommon
-        self.ha = new(H54)
-        common_a = self.ha.Common()
-        common_a.params = self.params.hasher
+        h.ha = new(H54)
+        common_a = h.ha.Common()
+        common_a.params = h.params.hasher
         common_a.is_prepared_ = false
         common_a.dict_num_lookups = 0
         common_a.dict_num_matches = 0
-        InitializeH54(self.ha, self.params)
+        h.ha.Initialize(h.params)
-        self.hb = new(HROLLING_FAST)
-        common_b = self.hb.Common()
-        common_b.params = self.params.hasher
+        h.hb = new(HROLLING_FAST)
+        common_b = h.hb.Common()
+        common_b.params = h.params.hasher
         common_b.is_prepared_ = false
         common_b.dict_num_lookups = 0
         common_b.dict_num_matches = 0
-        InitializeHROLLING_FAST(self.hb, self.params)
+        h.hb.Initialize(h.params)
     }
-    PrepareH54(self.ha, one_shot, input_size, data)
-    PrepareHROLLING_FAST(self.hb, one_shot, input_size, data)
+    h.ha.Prepare(one_shot, input_size, data)
+    h.hb.Prepare(one_shot, input_size, data)
 }
 func StoreH55(handle HasherHandle, data []byte, mask uint, ix uint) {
@@ -90,10 +88,9 @@ func StoreRangeH55(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     StoreRangeHROLLING_FAST(self.hb, data, mask, ix_start, ix_end)
 }
-func StitchToPreviousBlockH55(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-    var self *H55 = SelfH55(handle)
-    StitchToPreviousBlockH54(self.ha, num_bytes, position, ringbuffer, ring_buffer_mask)
-    StitchToPreviousBlockHROLLING_FAST(self.hb, num_bytes, position, ringbuffer, ring_buffer_mask)
+func (h *H55) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+    h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+    h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
 }
 func PrepareDistanceCacheH55(handle HasherHandle, distance_cache []int) {

h6.go (37 changed lines)

@@ -53,31 +53,28 @@ func BucketsH6(self *H6) []uint32 {
     return []uint32(self.buckets)
 }
-func InitializeH6(handle HasherHandle, params *BrotliEncoderParams) {
-    var common *HasherCommon = handle.Common()
-    var self *H6 = SelfH6(handle)
-    self.hash_shift_ = 64 - common.params.bucket_bits
-    self.hash_mask_ = (^(uint64(0))) >> uint(64-8*common.params.hash_len)
-    self.bucket_size_ = uint(1) << uint(common.params.bucket_bits)
-    self.block_size_ = uint(1) << uint(common.params.block_bits)
-    self.block_mask_ = uint32(self.block_size_ - 1)
-    self.num = make([]uint16, self.bucket_size_)
-    self.buckets = make([]uint32, self.block_size_*self.bucket_size_)
+func (h *H6) Initialize(params *BrotliEncoderParams) {
+    h.hash_shift_ = 64 - h.params.bucket_bits
+    h.hash_mask_ = (^(uint64(0))) >> uint(64-8*h.params.hash_len)
+    h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
+    h.block_size_ = uint(1) << uint(h.params.block_bits)
+    h.block_mask_ = uint32(h.block_size_ - 1)
+    h.num = make([]uint16, h.bucket_size_)
+    h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
 }
-func PrepareH6(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H6 = SelfH6(handle)
-    var num []uint16 = NumH6(self)
-    var partial_prepare_threshold uint = self.bucket_size_ >> 6
+func (h *H6) Prepare(one_shot bool, input_size uint, data []byte) {
+    var num []uint16 = h.num
+    var partial_prepare_threshold uint = h.bucket_size_ >> 6
     /* Partial preparation is 100 times slower (per socket). */
     if one_shot && input_size <= partial_prepare_threshold {
         var i uint
         for i = 0; i < input_size; i++ {
-            var key uint32 = HashBytesH6(data[i:], self.hash_mask_, self.hash_shift_)
+            var key uint32 = HashBytesH6(data[i:], h.hash_mask_, h.hash_shift_)
             num[key] = 0
         }
     } else {
-        for i := 0; i < int(self.bucket_size_); i++ {
+        for i := 0; i < int(h.bucket_size_); i++ {
             num[i] = 0
         }
     }
@@ -102,15 +99,15 @@ func StoreRangeH6(handle HasherHandle, data []byte, mask uint, ix_start uint, ix
     }
 }
-func StitchToPreviousBlockH6(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
+func (h *H6) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
     if num_bytes >= HashTypeLengthH6()-1 && position >= 3 {
         /* Prepare the hashes for three last bytes of the last write.
            These could not be calculated before, since they require knowledge
            of both the previous and the current block. */
-        StoreH6(handle, ringbuffer, ringbuffer_mask, position-3)
+        StoreH6(h, ringbuffer, ringbuffer_mask, position-3)
-        StoreH6(handle, ringbuffer, ringbuffer_mask, position-2)
-        StoreH6(handle, ringbuffer, ringbuffer_mask, position-1)
+        StoreH6(h, ringbuffer, ringbuffer_mask, position-2)
+        StoreH6(h, ringbuffer, ringbuffer_mask, position-1)
     }
 }

h65.go (41 changed lines)

@@ -40,42 +40,40 @@ func SelfH65(handle HasherHandle) *H65 {
     return handle.(*H65)
 }
-func InitializeH65(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *H65 = SelfH65(handle)
-    self.ha = nil
-    self.hb = nil
-    self.params = params
+func (h *H65) Initialize(params *BrotliEncoderParams) {
+    h.ha = nil
+    h.hb = nil
+    h.params = params
 }
 /* TODO: Initialize of the hashers is defered to Prepare (and params
    remembered here) because we don't get the one_shot and input_size params
    here that are needed to know the memory size of them. Instead provide
    those params to all hashers InitializeH65 */
-func PrepareH65(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *H65 = SelfH65(handle)
-    if self.ha == nil {
+func (h *H65) Prepare(one_shot bool, input_size uint, data []byte) {
+    if h.ha == nil {
         var common_a *HasherCommon
         var common_b *HasherCommon
-        self.ha = new(H6)
-        common_a = self.ha.Common()
-        common_a.params = self.params.hasher
+        h.ha = new(H6)
+        common_a = h.ha.Common()
+        common_a.params = h.params.hasher
         common_a.is_prepared_ = false
         common_a.dict_num_lookups = 0
         common_a.dict_num_matches = 0
-        InitializeH6(self.ha, self.params)
+        h.ha.Initialize(h.params)
-        self.hb = new(HROLLING)
-        common_b = self.hb.Common()
-        common_b.params = self.params.hasher
+        h.hb = new(HROLLING)
+        common_b = h.hb.Common()
+        common_b.params = h.params.hasher
         common_b.is_prepared_ = false
         common_b.dict_num_lookups = 0
         common_b.dict_num_matches = 0
-        InitializeHROLLING(self.hb, self.params)
+        h.hb.Initialize(h.params)
     }
-    PrepareH6(self.ha, one_shot, input_size, data)
-    PrepareHROLLING(self.hb, one_shot, input_size, data)
+    h.ha.Prepare(one_shot, input_size, data)
+    h.hb.Prepare(one_shot, input_size, data)
 }
 func StoreH65(handle HasherHandle, data []byte, mask uint, ix uint) {
@@ -90,10 +88,9 @@ func StoreRangeH65(handle HasherHandle, data []byte, mask uint, ix_start uint, i
     StoreRangeHROLLING(self.hb, data, mask, ix_start, ix_end)
 }
-func StitchToPreviousBlockH65(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-    var self *H65 = SelfH65(handle)
-    StitchToPreviousBlockH6(self.ha, num_bytes, position, ringbuffer, ring_buffer_mask)
-    StitchToPreviousBlockHROLLING(self.hb, num_bytes, position, ringbuffer, ring_buffer_mask)
+func (h *H65) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
+    h.ha.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
+    h.hb.StitchToPreviousBlock(num_bytes, position, ringbuffer, ring_buffer_mask)
 }
 func PrepareDistanceCacheH65(handle HasherHandle, distance_cache []int) {

hash.go (101 changed lines)

@@ -31,6 +31,9 @@ func (h *HasherCommon) Common() *HasherCommon {
 type HasherHandle interface {
     Common() *HasherCommon
+    Initialize(params *BrotliEncoderParams)
+    Prepare(one_shot bool, input_size uint, data []byte)
+    StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint)
 }
 type score_t uint
@@ -280,75 +283,13 @@ func HasherSetup(handle *HasherHandle, params *BrotliEncoderParams, data []byte,
         *handle = self
         common = self.Common()
         common.params = params.hasher
-        switch common.params.type_ {
-        case 2:
-            InitializeH2(*handle, params)
-        case 3:
-            InitializeH3(*handle, params)
-        case 4:
-            InitializeH4(*handle, params)
-        case 5:
-            InitializeH5(*handle, params)
-        case 6:
-            InitializeH6(*handle, params)
-        case 40:
-            InitializeH40(*handle, params)
-        case 41:
-            InitializeH41(*handle, params)
-        case 42:
-            InitializeH42(*handle, params)
-        case 54:
-            InitializeH54(*handle, params)
-        case 35:
-            InitializeH35(*handle, params)
-        case 55:
-            InitializeH55(*handle, params)
-        case 65:
-            InitializeH65(*handle, params)
-        case 10:
-            InitializeH10(*handle, params)
-        default:
-            break
-        }
         HasherReset(*handle)
+        self.Initialize(params)
     }
     self = *handle
     common = self.Common()
     if !common.is_prepared_ {
-        switch common.params.type_ {
-        case 2:
-            PrepareH2(self, one_shot, input_size, data)
-        case 3:
-            PrepareH3(self, one_shot, input_size, data)
-        case 4:
-            PrepareH4(self, one_shot, input_size, data)
-        case 5:
-            PrepareH5(self, one_shot, input_size, data)
-        case 6:
-            PrepareH6(self, one_shot, input_size, data)
-        case 40:
-            PrepareH40(self, one_shot, input_size, data)
-        case 41:
-            PrepareH41(self, one_shot, input_size, data)
-        case 42:
-            PrepareH42(self, one_shot, input_size, data)
-        case 54:
-            PrepareH54(self, one_shot, input_size, data)
-        case 35:
-            PrepareH35(self, one_shot, input_size, data)
-        case 55:
-            PrepareH55(self, one_shot, input_size, data)
-        case 65:
-            PrepareH65(self, one_shot, input_size, data)
-        case 10:
-            PrepareH10(self, one_shot, input_size, data)
-        default:
-            break
-        }
+        self.Prepare(one_shot, input_size, data)
         if position == 0 {
             common.dict_num_lookups = 0
@@ -363,35 +304,5 @@ func InitOrStitchToPreviousBlock(handle *HasherHandle, data []byte, mask uint, p
     var self HasherHandle
     HasherSetup(handle, params, data, position, input_size, is_last)
     self = *handle
-    switch self.Common().params.type_ {
-    case 2:
-        StitchToPreviousBlockH2(self, input_size, position, data, mask)
-    case 3:
-        StitchToPreviousBlockH3(self, input_size, position, data, mask)
-    case 4:
-        StitchToPreviousBlockH4(self, input_size, position, data, mask)
-    case 5:
-        StitchToPreviousBlockH5(self, input_size, position, data, mask)
-    case 6:
-        StitchToPreviousBlockH6(self, input_size, position, data, mask)
-    case 40:
-        StitchToPreviousBlockH40(self, input_size, position, data, mask)
-    case 41:
-        StitchToPreviousBlockH41(self, input_size, position, data, mask)
-    case 42:
-        StitchToPreviousBlockH42(self, input_size, position, data, mask)
-    case 54:
-        StitchToPreviousBlockH54(self, input_size, position, data, mask)
-    case 35:
-        StitchToPreviousBlockH35(self, input_size, position, data, mask)
-    case 55:
-        StitchToPreviousBlockH55(self, input_size, position, data, mask)
-    case 65:
-        StitchToPreviousBlockH65(self, input_size, position, data, mask)
-    case 10:
-        StitchToPreviousBlockH10(self, input_size, position, data, mask)
-    default:
-        break
-    }
+    self.StitchToPreviousBlock(input_size, position, data, mask)
 }
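With Initialize, Prepare, and StitchToPreviousBlock on the interface, the three 13-way type switches above collapse into single method calls. A condensed, self-contained sketch of that dispatch change (two hasher types instead of thirteen; the names are stand-ins):

package main

import "fmt"

type Hasher interface {
    Prepare(one_shot bool, input_size uint, data []byte)
}

type H2 struct{}
type H3 struct{}

func (*H2) Prepare(one_shot bool, input_size uint, data []byte) { fmt.Println("H2.Prepare") }
func (*H3) Prepare(one_shot bool, input_size uint, data []byte) { fmt.Println("H3.Prepare") }

// Before: every call site switched on a numeric hasher type tag.
func prepareByTag(self Hasher, type_ int, data []byte) {
    switch type_ {
    case 2:
        self.(*H2).Prepare(true, uint(len(data)), data)
    case 3:
        self.(*H3).Prepare(true, uint(len(data)), data)
    }
}

// After: the interface value dispatches to the right concrete type itself.
func prepare(self Hasher, data []byte) {
    self.Prepare(true, uint(len(data)), data)
}

func main() {
    prepare(new(H3), []byte("abc"))        // prints H3.Prepare
    prepareByTag(new(H2), 2, []byte("ab")) // prints H2.Prepare
}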

@@ -54,39 +54,37 @@ func SelfHROLLING(handle HasherHandle) *HROLLING {
     return handle.(*HROLLING)
 }
-func InitializeHROLLING(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *HROLLING = SelfHROLLING(handle)
+func (h *HROLLING) Initialize(params *BrotliEncoderParams) {
     var i uint
-    self.state = 0
-    self.next_ix = 0
+    h.state = 0
+    h.next_ix = 0
-    self.factor = kRollingHashMul32HROLLING
+    h.factor = kRollingHashMul32HROLLING
     /* Compute the factor of the oldest byte to remove: factor**steps modulo
        0xffffffff (the multiplications rely on 32-bit overflow) */
-    self.factor_remove = 1
+    h.factor_remove = 1
     for i = 0; i < 32; i += 1 {
-        self.factor_remove *= self.factor
+        h.factor_remove *= h.factor
     }
-    self.table = make([]uint32, 16777216)
+    h.table = make([]uint32, 16777216)
     for i = 0; i < 16777216; i++ {
-        self.table[i] = kInvalidPosHROLLING
+        h.table[i] = kInvalidPosHROLLING
     }
 }
-func PrepareHROLLING(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *HROLLING = SelfHROLLING(handle)
+func (h *HROLLING) Prepare(one_shot bool, input_size uint, data []byte) {
     var i uint
     /* Too small size, cannot use this hasher. */
     if input_size < 32 {
         return
     }
-    self.state = 0
+    h.state = 0
     for i = 0; i < 32; i += 1 {
-        self.state = HashRollingFunctionInitialHROLLING(self.state, data[i], self.factor)
+        h.state = HashRollingFunctionInitialHROLLING(h.state, data[i], h.factor)
     }
 }
@@ -96,8 +94,7 @@ func StoreHROLLING(handle HasherHandle, data []byte, mask uint, ix uint) {
 func StoreRangeHROLLING(handle HasherHandle, data []byte, mask uint, ix_start uint, ix_end uint) {
 }
-func StitchToPreviousBlockHROLLING(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-    var self *HROLLING = SelfHROLLING(handle)
+func (h *HROLLING) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
     var position_masked uint
     /* In this case we must re-initialize the hasher from scratch from the
        current position. */
@@ -120,8 +117,8 @@ func StitchToPreviousBlockHROLLING(handle HasherHandle, num_bytes uint, position
         available = ring_buffer_mask - position_masked
     }
-    PrepareHROLLING(handle, false, available, ringbuffer[position&ring_buffer_mask:])
-    self.next_ix = position
+    h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:])
+    h.next_ix = position
 }
 func PrepareDistanceCacheHROLLING(handle HasherHandle, distance_cache *int) {

@@ -52,39 +52,37 @@ func SelfHROLLING_FAST(handle HasherHandle) *HROLLING_FAST {
     return handle.(*HROLLING_FAST)
 }
-func InitializeHROLLING_FAST(handle HasherHandle, params *BrotliEncoderParams) {
-    var self *HROLLING_FAST = SelfHROLLING_FAST(handle)
+func (h *HROLLING_FAST) Initialize(params *BrotliEncoderParams) {
     var i uint
-    self.state = 0
-    self.next_ix = 0
+    h.state = 0
+    h.next_ix = 0
-    self.factor = kRollingHashMul32HROLLING_FAST
+    h.factor = kRollingHashMul32HROLLING_FAST
     /* Compute the factor of the oldest byte to remove: factor**steps modulo
        0xffffffff (the multiplications rely on 32-bit overflow) */
-    self.factor_remove = 1
+    h.factor_remove = 1
     for i = 0; i < 32; i += 4 {
-        self.factor_remove *= self.factor
+        h.factor_remove *= h.factor
     }
-    self.table = make([]uint32, 16777216)
+    h.table = make([]uint32, 16777216)
     for i = 0; i < 16777216; i++ {
-        self.table[i] = kInvalidPosHROLLING_FAST
+        h.table[i] = kInvalidPosHROLLING_FAST
     }
 }
-func PrepareHROLLING_FAST(handle HasherHandle, one_shot bool, input_size uint, data []byte) {
-    var self *HROLLING_FAST = SelfHROLLING_FAST(handle)
+func (h *HROLLING_FAST) Prepare(one_shot bool, input_size uint, data []byte) {
     var i uint
     /* Too small size, cannot use this hasher. */
     if input_size < 32 {
         return
     }
-    self.state = 0
+    h.state = 0
     for i = 0; i < 32; i += 4 {
-        self.state = HashRollingFunctionInitialHROLLING_FAST(self.state, data[i], self.factor)
+        h.state = HashRollingFunctionInitialHROLLING_FAST(h.state, data[i], h.factor)
     }
 }
@@ -94,8 +92,7 @@ func StoreHROLLING_FAST(handle HasherHandle, data []byte, mask uint, ix uint) {
 func StoreRangeHROLLING_FAST(handle HasherHandle, data []byte, mask uint, ix_start uint, ix_end uint) {
 }
-func StitchToPreviousBlockHROLLING_FAST(handle HasherHandle, num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
-    var self *HROLLING_FAST = SelfHROLLING_FAST(handle)
+func (h *HROLLING_FAST) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ring_buffer_mask uint) {
     var position_masked uint
     /* In this case we must re-initialize the hasher from scratch from the
        current position. */
@@ -118,8 +115,8 @@ func StitchToPreviousBlockHROLLING_FAST(handle HasherHandle, num_bytes uint, pos
         available = ring_buffer_mask - position_masked
     }
-    PrepareHROLLING_FAST(handle, false, available, ringbuffer[position&ring_buffer_mask:])
-    self.next_ix = position
+    h.Prepare(false, available, ringbuffer[position&ring_buffer_mask:])
+    h.next_ix = position
 }
 func PrepareDistanceCacheHROLLING_FAST(handle HasherHandle, distance_cache *int) {
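For the rolling hashers, StitchToPreviousBlock now re-seeds the hash state by calling h.Prepare on the bytes at the current position. A tiny self-contained sketch of the seeding loop that Prepare runs (the multiplier is a stand-in, not brotli's kRollingHashMul32 constant):

package main

import "fmt"

const factor uint32 = 0x41a7 // illustrative multiplier only

// seed computes the initial rolling state over the first n bytes, the same
// shape as HashRollingFunctionInitialHROLLING applied in a loop.
func seed(data []byte, n int) uint32 {
    var state uint32
    for i := 0; i < n; i++ {
        state = state*factor + uint32(data[i]) // relies on 32-bit wraparound
    }
    return state
}

func main() {
    buf := []byte("0123456789abcdefghijklmnopqrstuv") // 32 bytes
    fmt.Printf("%08x\n", seed(buf, 32))
}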