forked from mirror/go-json

Merge pull request #113 from goccy/feature/improve-decoder-performance

Dramatically improve decoder performance

This commit is contained in commit f9327d5010.
@@ -0,0 +1,112 @@
package json

import (
    "fmt"
    "reflect"
    "sync/atomic"
    "unsafe"
)

const (
    maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib
)

var (
    cachedOpcodeSets       []*opcodeSet
    cachedOpcodeMap        unsafe.Pointer // map[uintptr]*opcodeSet
    existsCachedOpcodeSets bool
    cachedDecoder          []decoder
    cachedDecoderMap       unsafe.Pointer // map[uintptr]decoder
    existsCachedDecoder    bool
    baseTypeAddr           uintptr
)

//go:linkname typelinks reflect.typelinks
func typelinks() ([]unsafe.Pointer, [][]int32)

//go:linkname rtypeOff reflect.rtypeOff
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer

func setupCodec() error {
    sections, offsets := typelinks()
    if len(sections) != 1 {
        return fmt.Errorf("failed to get sections")
    }
    if len(offsets) != 1 {
        return fmt.Errorf("failed to get offsets")
    }
    section := sections[0]
    offset := offsets[0]
    var (
        min uintptr = uintptr(^uint(0))
        max uintptr = 0
    )
    for i := 0; i < len(offset); i++ {
        typ := (*rtype)(rtypeOff(section, offset[i]))
        addr := uintptr(unsafe.Pointer(typ))
        if min > addr {
            min = addr
        }
        if max < addr {
            max = addr
        }
        if typ.Kind() == reflect.Ptr {
            addr = uintptr(unsafe.Pointer(typ.Elem()))
            if min > addr {
                min = addr
            }
            if max < addr {
                max = addr
            }
        }
    }
    addrRange := max - min
    if addrRange == 0 {
        return fmt.Errorf("failed to get address range of types")
    }
    if addrRange > maxAcceptableTypeAddrRange {
        return fmt.Errorf("too big address range %d", addrRange)
    }
    cachedOpcodeSets = make([]*opcodeSet, addrRange)
    existsCachedOpcodeSets = true
    cachedDecoder = make([]decoder, addrRange)
    existsCachedDecoder = true
    baseTypeAddr = min
    return nil
}

func init() {
    _ = setupCodec()
}

func loadOpcodeMap() map[uintptr]*opcodeSet {
    p := atomic.LoadPointer(&cachedOpcodeMap)
    return *(*map[uintptr]*opcodeSet)(unsafe.Pointer(&p))
}

func storeOpcodeSet(typ uintptr, set *opcodeSet, m map[uintptr]*opcodeSet) {
    newOpcodeMap := make(map[uintptr]*opcodeSet, len(m)+1)
    newOpcodeMap[typ] = set

    for k, v := range m {
        newOpcodeMap[k] = v
    }

    atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap)))
}

func loadDecoderMap() map[uintptr]decoder {
    p := atomic.LoadPointer(&cachedDecoderMap)
    return *(*map[uintptr]decoder)(unsafe.Pointer(&p))
}

func storeDecoder(typ uintptr, dec decoder, m map[uintptr]decoder) {
    newDecoderMap := make(map[uintptr]decoder, len(m)+1)
    newDecoderMap[typ] = dec

    for k, v := range m {
        newDecoderMap[k] = v
    }

    atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap)))
}
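The new file above sizes a plain slice to the span of all runtime type descriptor addresses, so a compiled decoder or opcode set can be fetched by the offset `typeptr - baseTypeAddr` instead of a map lookup. A minimal sketch of that idea follows; names and the string stand-ins for decoders are hypothetical, not the library's API.

// Illustrative sketch (not part of the diff): a dense, index-based type cache.
package main

import (
    "fmt"
    "unsafe"
)

// emptyInterface mirrors the runtime layout of interface{}; reading its typ
// field gives the address of a value's type descriptor.
type emptyInterface struct {
    typ unsafe.Pointer
    ptr unsafe.Pointer
}

func typeAddr(v interface{}) uintptr {
    return uintptr((*emptyInterface)(unsafe.Pointer(&v)).typ)
}

func main() {
    type A struct{ X int }
    type B struct{ Y string }

    addrs := []uintptr{typeAddr(A{}), typeAddr(B{})}
    min, max := addrs[0], addrs[0]
    for _, a := range addrs {
        if a < min {
            min = a
        }
        if a > max {
            max = a
        }
    }

    // The cache is a slice indexed by (typeAddr - min): no hashing, no map.
    cache := make([]string, max-min+1)
    cache[addrs[0]-min] = "decoder for A"
    cache[addrs[1]-min] = "decoder for B"

    fmt.Println(cache[typeAddr(A{})-min]) // decoder for A
}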
decode.go (45 changed lines)
@@ -5,7 +5,6 @@ import (
    "io"
    "reflect"
    "strconv"
    "sync"
    "unsafe"
)

@@ -25,31 +24,11 @@ type Decoder struct {
    structTypeToDecoder map[uintptr]decoder
}

type decoderMap struct {
    sync.Map
}

func (m *decoderMap) get(k uintptr) decoder {
    if v, ok := m.Load(k); ok {
        return v.(decoder)
    }
    return nil
}

func (m *decoderMap) set(k uintptr, dec decoder) {
    m.Store(k, dec)
}

var (
    cachedDecoder     decoderMap
    unmarshalJSONType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
    unmarshalTextType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)

func init() {
    cachedDecoder = decoderMap{}
}

const (
    nul = '\000'
)

@@ -89,15 +68,9 @@ func (d *Decoder) decode(src []byte, header *interfaceHeader) error {
    if err := d.validateType(copiedType, ptr); err != nil {
        return err
    }
    dec := cachedDecoder.get(typeptr)
    if dec == nil {
        d.structTypeToDecoder = map[uintptr]decoder{}
        compiledDec, err := d.compileHead(copiedType)
        if err != nil {
            return err
        }
        cachedDecoder.set(typeptr, compiledDec)
        dec = compiledDec
    dec, err := d.compileToGetDecoder(typeptr, typ)
    if err != nil {
        return err
    }
    if _, err := dec.decode(src, 0, header.ptr); err != nil {
        return err

@@ -154,15 +127,9 @@ func (d *Decoder) Decode(v interface{}) error {
        return err
    }

    dec := cachedDecoder.get(typeptr)
    if dec == nil {
        d.structTypeToDecoder = map[uintptr]decoder{}
        compiledDec, err := d.compileHead(typ)
        if err != nil {
            return err
        }
        cachedDecoder.set(typeptr, compiledDec)
        dec = compiledDec
    dec, err := d.compileToGetDecoder(typeptr, typ)
    if err != nil {
        return err
    }
    if err := d.prepareForDecode(); err != nil {
        return err
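The removed decoderMap wrapped sync.Map. Its replacement, the loadDecoderMap/storeDecoder pair in the new codec file above, publishes an immutable map through an atomic pointer and copies it on every insert. A simplified sketch of that copy-on-write pattern, with hypothetical names and strings standing in for decoders:

// Illustrative sketch (not part of the diff): a copy-on-write map behind an
// atomic pointer. Readers never lock; writers copy, add one entry, publish.
package main

import (
    "fmt"
    "sync/atomic"
    "unsafe"
)

var cached unsafe.Pointer // *map[uintptr]string

func load() map[uintptr]string {
    p := atomic.LoadPointer(&cached)
    if p == nil {
        return nil
    }
    return *(*map[uintptr]string)(p)
}

func store(k uintptr, v string, old map[uintptr]string) {
    m := make(map[uintptr]string, len(old)+1)
    m[k] = v
    for key, val := range old {
        m[key] = val
    }
    // Simplified: store a pointer to the new map variable rather than the
    // map header trick used in the real code.
    atomic.StorePointer(&cached, unsafe.Pointer(&m))
}

func main() {
    store(1, "decoder for T1", load())
    store(2, "decoder for T2", load())
    fmt.Println(load()[1], load()[2])
}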
@@ -6,6 +6,21 @@ import (
    "unsafe"
)

func (d *Decoder) compileToGetDecoderSlowPath(typeptr uintptr, typ *rtype) (decoder, error) {
    decoderMap := loadDecoderMap()
    if dec, exists := decoderMap[typeptr]; exists {
        return dec, nil
    }

    d.structTypeToDecoder = map[uintptr]decoder{}
    dec, err := d.compileHead(typ)
    if err != nil {
        return nil, err
    }
    storeDecoder(typeptr, dec, decoderMap)
    return dec, nil
}

func (d *Decoder) compileHead(typ *rtype) (decoder, error) {
    switch {
    case rtype_ptrTo(typ).Implements(unmarshalJSONType):

@@ -245,6 +260,8 @@ func (d *Decoder) removeConflictFields(fieldMap map[string]*structFieldSet, conf
    dec:         v.dec,
    offset:      baseOffset + v.offset,
    isTaggedKey: v.isTaggedKey,
    key:         k,
    keyLen:      int64(len(k)),
}
fieldMap[k] = fieldSet
lower := strings.ToLower(k)

@@ -257,6 +274,7 @@ func (d *Decoder) removeConflictFields(fieldMap map[string]*structFieldSet, conf
    if v.isTaggedKey {
        // conflict tag key
        delete(fieldMap, k)
        delete(fieldMap, strings.ToLower(k))
        conflictedMap[k] = struct{}{}
        conflictedMap[strings.ToLower(k)] = struct{}{}
    }

@@ -266,6 +284,8 @@ func (d *Decoder) removeConflictFields(fieldMap map[string]*structFieldSet, conf
    dec:         v.dec,
    offset:      baseOffset + v.offset,
    isTaggedKey: v.isTaggedKey,
    key:         k,
    keyLen:      int64(len(k)),
}
fieldMap[k] = fieldSet
lower := strings.ToLower(k)

@@ -275,6 +295,7 @@ func (d *Decoder) removeConflictFields(fieldMap map[string]*structFieldSet, conf
    } else {
        // conflict tag key
        delete(fieldMap, k)
        delete(fieldMap, strings.ToLower(k))
        conflictedMap[k] = struct{}{}
        conflictedMap[strings.ToLower(k)] = struct{}{}
    }

@@ -328,6 +349,8 @@ func (d *Decoder) compileStruct(typ *rtype, structName, fieldName string) (decod
    dec:         newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec),
    offset:      field.Offset,
    isTaggedKey: v.isTaggedKey,
    key:         k,
    keyLen:      int64(len(k)),
}
fieldMap[k] = fieldSet
lower := strings.ToLower(k)

@@ -340,6 +363,7 @@ func (d *Decoder) compileStruct(typ *rtype, structName, fieldName string) (decod
    if v.isTaggedKey {
        // conflict tag key
        delete(fieldMap, k)
        delete(fieldMap, strings.ToLower(k))
        conflictedMap[k] = struct{}{}
        conflictedMap[strings.ToLower(k)] = struct{}{}
    }

@@ -349,6 +373,8 @@ func (d *Decoder) compileStruct(typ *rtype, structName, fieldName string) (decod
    dec:         newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec),
    offset:      field.Offset,
    isTaggedKey: v.isTaggedKey,
    key:         k,
    keyLen:      int64(len(k)),
}
fieldMap[k] = fieldSet
lower := strings.ToLower(k)

@@ -358,6 +384,7 @@ func (d *Decoder) compileStruct(typ *rtype, structName, fieldName string) (decod
    } else {
        // conflict tag key
        delete(fieldMap, k)
        delete(fieldMap, strings.ToLower(k))
        conflictedMap[k] = struct{}{}
        conflictedMap[strings.ToLower(k)] = struct{}{}
    }

@@ -369,22 +396,27 @@ func (d *Decoder) compileStruct(typ *rtype, structName, fieldName string) (decod
    if tag.isString {
        dec = newWrappedStringDecoder(dec, structName, field.Name)
    }
    fieldSet := &structFieldSet{dec: dec, offset: field.Offset, isTaggedKey: tag.isTaggedKey}
    var key string
    if tag.key != "" {
        fieldMap[tag.key] = fieldSet
        lower := strings.ToLower(tag.key)
        if _, exists := fieldMap[lower]; !exists {
            fieldMap[lower] = fieldSet
        }
        key = tag.key
    } else {
        fieldMap[field.Name] = fieldSet
        lower := strings.ToLower(field.Name)
        if _, exists := fieldMap[lower]; !exists {
            fieldMap[lower] = fieldSet
        }
        key = field.Name
    }
    fieldSet := &structFieldSet{
        dec:         dec,
        offset:      field.Offset,
        isTaggedKey: tag.isTaggedKey,
        key:         key,
        keyLen:      int64(len(key)),
    }
    fieldMap[key] = fieldSet
    lower := strings.ToLower(key)
    if _, exists := fieldMap[lower]; !exists {
        fieldMap[lower] = fieldSet
    }
    }
}
delete(d.structTypeToDecoder, typeptr)
structDec.tryOptimize()
return structDec, nil
}
@@ -0,0 +1,22 @@
// +build !race

package json

func (d *Decoder) compileToGetDecoder(typeptr uintptr, typ *rtype) (decoder, error) {
    if !existsCachedDecoder {
        return d.compileToGetDecoderSlowPath(typeptr, typ)
    }

    index := typeptr - baseTypeAddr
    if dec := cachedDecoder[index]; dec != nil {
        return dec, nil
    }

    d.structTypeToDecoder = map[uintptr]decoder{}
    dec, err := d.compileHead(typ)
    if err != nil {
        return nil, err
    }
    cachedDecoder[index] = dec
    return dec, nil
}
@@ -0,0 +1,31 @@
// +build race

package json

import "sync"

var decMu sync.RWMutex

func (d *Decoder) compileToGetDecoder(typeptr uintptr, typ *rtype) (decoder, error) {
    if !existsCachedDecoder {
        return d.compileToGetDecoderSlowPath(typeptr, typ)
    }

    index := typeptr - baseTypeAddr
    decMu.RLock()
    if dec := cachedDecoder[index]; dec != nil {
        decMu.RUnlock()
        return dec, nil
    }
    decMu.RUnlock()

    d.structTypeToDecoder = map[uintptr]decoder{}
    dec, err := d.compileHead(typ)
    if err != nil {
        return nil, err
    }
    decMu.Lock()
    cachedDecoder[index] = dec
    decMu.Unlock()
    return dec, nil
}
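The two files above provide the same compileToGetDecoder under opposite build tags: the plain build uses unsynchronized slice accesses on the fast path, while the race build wraps them in an RWMutex so the race detector stays quiet. A rough sketch of the resulting two-tier lookup, with hypothetical names and strings standing in for decoders:

// Illustrative sketch (not part of the diff): fast path indexes a slice by
// (typeptr - base); if that cache was not set up, fall back to the slow path.
package main

import "fmt"

var (
    existsCache bool
    base        uintptr
    cache       []string
)

func compile(typeptr uintptr) string { return fmt.Sprintf("compiled decoder for %#x", typeptr) }

// slowPath would consult the copy-on-write map from the codec file.
func slowPath(typeptr uintptr) string { return compile(typeptr) }

func getDecoder(typeptr uintptr) string {
    if !existsCache {
        return slowPath(typeptr)
    }
    idx := typeptr - base
    if dec := cache[idx]; dec != "" {
        return dec // fast path: one slice load
    }
    dec := compile(typeptr)
    cache[idx] = dec
    return dec
}

func main() {
    existsCache, base, cache = true, 0x1000, make([]string, 64)
    fmt.Println(getDecoder(0x1010)) // compiled on first use
    fmt.Println(getDecoder(0x1010)) // served from the slice afterwards
}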
@@ -1,5 +1,7 @@
package json

import "unsafe"

var (
    isWhiteSpace = [256]bool{}
)

@@ -11,6 +13,10 @@ func init() {
    isWhiteSpace['\r'] = true
}

func char(ptr unsafe.Pointer, offset int64) byte {
    return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset)))
}

func skipWhiteSpace(buf []byte, cursor int64) int64 {
LOOP:
    if isWhiteSpace[buf[cursor]] {
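The new char helper reads a byte through unsafe.Pointer, which skips the slice bounds check that buf[cursor] would perform. Judging from the nul ('\000') checks throughout this diff, the decoder appears to rely on a NUL-terminated buffer so that scanning loops stop at the sentinel instead of running past the data; that assumption is what makes the unchecked reads tolerable. A small sketch:

// Illustrative sketch (not part of the diff): unchecked byte reads over a
// NUL-terminated buffer, mirroring the char() helper above.
package main

import (
    "fmt"
    "unsafe"
)

func char(ptr unsafe.Pointer, offset int64) byte {
    return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset)))
}

func main() {
    buf := append([]byte(`{"a":1}`), '\000') // sentinel byte, assumed convention
    b := unsafe.Pointer(&buf[0])
    for i := int64(0); char(b, i) != '\000'; i++ {
        fmt.Printf("%c", char(b, i))
    }
    fmt.Println()
}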
@@ -130,8 +130,9 @@ ERROR:
}

func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
    b := (*sliceHeader)(unsafe.Pointer(&buf)).data
    for {
        switch buf[cursor] {
        switch char(b, cursor) {
        case ' ', '\n', '\t', '\r':
            cursor++
            continue

@@ -139,14 +140,14 @@ func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error)
            start := cursor
            cursor++
        LOOP:
            if numTable[buf[cursor]] {
            if numTable[char(b, cursor)] {
                cursor++
                goto LOOP
            }
            num := buf[start:cursor]
            return num, cursor, nil
        default:
            return nil, 0, d.typeError([]byte{buf[cursor]}, cursor)
            return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor)
        }
    }
}
@@ -85,11 +85,11 @@ func (s *stream) read() bool {

func (s *stream) skipWhiteSpace() {
LOOP:
    c := s.char()
    if isWhiteSpace[c] {
    switch s.char() {
    case ' ', '\n', '\t', '\r':
        s.cursor++
        goto LOOP
    } else if c == nul {
    case nul:
        if s.read() {
            goto LOOP
        }
@@ -35,7 +35,7 @@ func (d *stringDecoder) decodeStream(s *stream, p unsafe.Pointer) error {
    if err != nil {
        return err
    }
    *(*string)(p) = string(bytes)
    *(*string)(p) = *(*string)(unsafe.Pointer(&bytes))
    s.reset()
    return nil
}

@@ -249,11 +249,12 @@ func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, err
    case '"':
        cursor++
        start := cursor
        b := (*sliceHeader)(unsafe.Pointer(&buf)).data
        for {
            switch buf[cursor] {
            switch char(b, cursor) {
            case '\\':
                cursor++
                switch buf[cursor] {
                switch char(b, cursor) {
                case '"':
                    buf[cursor] = '"'
                    buf = append(buf[:cursor-1], buf[cursor:]...)
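The first hunk above swaps string(bytes), which copies, for a reinterpretation of the byte slice header as a string header, so the decoded string shares the buffer. A sketch of that conversion and its caveat:

// Illustrative sketch (not part of the diff): zero-copy []byte to string.
// Safe only if the bytes are never mutated after the conversion.
package main

import (
    "fmt"
    "unsafe"
)

func b2s(b []byte) string {
    return *(*string)(unsafe.Pointer(&b))
}

func main() {
    raw := []byte("hello")
    s := b2s(raw) // no allocation, shares raw's backing array
    fmt.Println(s)
}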
decode_struct.go (573 changed lines)
@@ -2,6 +2,9 @@ package json

import (
    "fmt"
    "math"
    "sort"
    "strings"
    "unsafe"
)
@@ -9,24 +12,541 @@ type structFieldSet struct {
    dec         decoder
    offset      uintptr
    isTaggedKey bool
    key         string
    keyLen      int64
}

type structDecoder struct {
    fieldMap   map[string]*structFieldSet
    keyDecoder *stringDecoder
    structName string
    fieldName  string
    fieldMap         map[string]*structFieldSet
    stringDecoder    *stringDecoder
    structName       string
    fieldName        string
    isTriedOptimize  bool
    keyBitmapInt8    [][256]int8
    keyBitmapInt16   [][256]int16
    sortedFieldSets  []*structFieldSet
    keyDecoder       func(*structDecoder, []byte, int64) (int64, *structFieldSet, error)
    keyStreamDecoder func(*structDecoder, *stream) (*structFieldSet, string, error)
}

var (
    bitHashTable      [64]int
    largeToSmallTable [256]byte
)

func init() {
    hash := uint64(0x03F566ED27179461)
    for i := 0; i < 64; i++ {
        bitHashTable[hash>>58] = i
        hash <<= 1
    }
    for i := 0; i < 256; i++ {
        c := i
        if 'A' <= c && c <= 'Z' {
            c += 'a' - 'A'
        }
        largeToSmallTable[i] = byte(c)
    }
}
|
||||
return &structDecoder{
|
||||
fieldMap: fieldMap,
|
||||
keyDecoder: newStringDecoder(structName, fieldName),
|
||||
structName: structName,
|
||||
fieldName: fieldName,
|
||||
fieldMap: fieldMap,
|
||||
stringDecoder: newStringDecoder(structName, fieldName),
|
||||
structName: structName,
|
||||
fieldName: fieldName,
|
||||
keyDecoder: decodeKey,
|
||||
keyStreamDecoder: decodeKeyStream,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
allowOptimizeMaxKeyLen = 64
|
||||
allowOptimizeMaxFieldLen = 16
|
||||
)
|
||||
|
||||
func (d *structDecoder) tryOptimize() {
|
||||
if d.isTriedOptimize {
|
||||
return
|
||||
}
|
||||
fieldMap := map[string]*structFieldSet{}
|
||||
conflicted := map[string]struct{}{}
|
||||
for k, v := range d.fieldMap {
|
||||
key := strings.ToLower(k)
|
||||
if key != k {
|
||||
// already exists same key (e.g. Hello and HELLO has same lower case key
|
||||
if _, exists := conflicted[key]; exists {
|
||||
d.isTriedOptimize = true
|
||||
return
|
||||
}
|
||||
conflicted[key] = struct{}{}
|
||||
}
|
||||
if field, exists := fieldMap[key]; exists {
|
||||
if field != v {
|
||||
d.isTriedOptimize = true
|
||||
return
|
||||
}
|
||||
}
|
||||
fieldMap[key] = v
|
||||
}
|
||||
|
||||
if len(fieldMap) > allowOptimizeMaxFieldLen {
|
||||
d.isTriedOptimize = true
|
||||
return
|
||||
}
|
||||
|
||||
var maxKeyLen int
|
||||
sortedKeys := []string{}
|
||||
for key := range fieldMap {
|
||||
keyLen := len(key)
|
||||
if keyLen > allowOptimizeMaxKeyLen {
|
||||
d.isTriedOptimize = true
|
||||
return
|
||||
}
|
||||
if maxKeyLen < keyLen {
|
||||
maxKeyLen = keyLen
|
||||
}
|
||||
sortedKeys = append(sortedKeys, key)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
if len(sortedKeys) <= 8 {
|
||||
keyBitmap := make([][256]int8, maxKeyLen)
|
||||
for i, key := range sortedKeys {
|
||||
for j := 0; j < len(key); j++ {
|
||||
c := key[j]
|
||||
keyBitmap[j][c] |= (1 << uint(i))
|
||||
}
|
||||
d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key])
|
||||
}
|
||||
d.keyBitmapInt8 = keyBitmap
|
||||
d.keyDecoder = decodeKeyByBitmapInt8
|
||||
d.keyStreamDecoder = decodeKeyByBitmapInt8Stream
|
||||
} else {
|
||||
keyBitmap := make([][256]int16, maxKeyLen)
|
||||
for i, key := range sortedKeys {
|
||||
for j := 0; j < len(key); j++ {
|
||||
c := key[j]
|
||||
keyBitmap[j][c] |= (1 << uint(i))
|
||||
}
|
||||
d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key])
|
||||
}
|
||||
d.keyBitmapInt16 = keyBitmap
|
||||
d.keyDecoder = decodeKeyByBitmapInt16
|
||||
d.keyStreamDecoder = decodeKeyByBitmapInt16Stream
|
||||
}
|
||||
}
|
||||
|
||||
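tryOptimize builds a per-byte-position bitmap: for each position in a key, each (lower-cased) byte maps to a bitset of fields that could still match. While scanning an incoming key, the decoder ANDs those bitsets together, so the surviving bit identifies the field without hashing the key. A toy version of that narrowing step:

// Illustrative sketch (not part of the diff): per-position bitmaps narrowing
// the candidate fields while a key is scanned byte by byte.
package main

import (
    "fmt"
    "math"
)

func main() {
    keys := []string{"id", "name"} // sorted keys; bit i corresponds to keys[i]
    maxLen := 4
    bitmap := make([][256]int8, maxLen)
    for i, key := range keys {
        for j := 0; j < len(key); j++ {
            bitmap[j][key[j]] |= 1 << uint(i)
        }
    }

    match := func(key string) int8 {
        cur := int8(math.MaxInt8) // all candidates alive, as in the real decoder
        for j := 0; j < len(key) && j < maxLen; j++ {
            cur &= bitmap[j][key[j]]
        }
        return cur
    }
    // "id" keeps bit 0, "name" keeps bit 1, "age" drops to 0 (no candidate).
    fmt.Printf("%b %b %b\n", match("id"), match("name"), match("age"))
}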
func decodeKeyByBitmapInt8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
    var (
        field  *structFieldSet
        curBit int8 = math.MaxInt8
    )
    b := (*sliceHeader)(unsafe.Pointer(&buf)).data
    for {
        switch char(b, cursor) {
        case ' ', '\n', '\t', '\r':
            cursor++
        case '"':
            cursor++
            c := char(b, cursor)
            switch c {
            case '"':
                cursor++
                return cursor, field, nil
            case nul:
                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
            }
            keyIdx := 0
            bitmap := d.keyBitmapInt8
            keyBitmapLen := len(bitmap)
            start := cursor
            for {
                c := char(b, cursor)
                switch c {
                case '"':
                    x := uint64(curBit & -curBit)
                    fieldSetIndex := bitHashTable[(x*0x03F566ED27179461)>>58]
                    field = d.sortedFieldSets[fieldSetIndex]
                    keyLen := cursor - start
                    cursor++
                    if keyLen < field.keyLen {
                        // early match
                        return cursor, nil, nil
                    }
                    return cursor, field, nil
                case nul:
                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                default:
                    if keyIdx >= keyBitmapLen {
                        for {
                            cursor++
                            switch char(b, cursor) {
                            case '"':
                                cursor++
                                return cursor, field, nil
                            case '\\':
                                cursor++
                                if char(b, cursor) == nul {
                                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                                }
                            case nul:
                                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                            }
                        }
                    }
                    curBit &= bitmap[keyIdx][largeToSmallTable[c]]
                    if curBit == 0 {
                        for {
                            cursor++
                            switch char(b, cursor) {
                            case '"':
                                cursor++
                                return cursor, field, nil
                            case '\\':
                                cursor++
                                if char(b, cursor) == nul {
                                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                                }
                            case nul:
                                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                            }
                        }
                    }
                    keyIdx++
                }
                cursor++
            }
        default:
            return cursor, nil, errNotAtBeginningOfValue(cursor)
        }
    }
}
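A note on the "early match" branch above: a key that is a strict prefix of a field key keeps its candidate bit alive through every scanned position, so it is the keyLen < field.keyLen comparison that finally rejects it. A tiny illustration of the case being guarded against:

// Illustrative sketch (not part of the diff): why the length check is needed.
package main

import "fmt"

func main() {
    field := "id"
    key := "i" // every scanned byte matches "id", but the key is shorter
    fmt.Println(len(key) < len(field)) // true -> treated as an unknown field
}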
func decodeKeyByBitmapInt16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
    var (
        field  *structFieldSet
        curBit int16 = math.MaxInt16
    )
    b := (*sliceHeader)(unsafe.Pointer(&buf)).data
    for {
        switch char(b, cursor) {
        case ' ', '\n', '\t', '\r':
            cursor++
        case '"':
            cursor++
            c := char(b, cursor)
            switch c {
            case '"':
                cursor++
                return cursor, field, nil
            case nul:
                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
            }
            keyIdx := 0
            bitmap := d.keyBitmapInt16
            keyBitmapLen := len(bitmap)
            start := cursor
            for {
                c := char(b, cursor)
                switch c {
                case '"':
                    x := uint64(curBit & -curBit)
                    fieldSetIndex := bitHashTable[(x*0x03F566ED27179461)>>58]
                    field = d.sortedFieldSets[fieldSetIndex]
                    keyLen := cursor - start
                    cursor++
                    if keyLen < field.keyLen {
                        // early match
                        return cursor, nil, nil
                    }
                    return cursor, field, nil
                case nul:
                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                default:
                    if keyIdx >= keyBitmapLen {
                        for {
                            cursor++
                            switch char(b, cursor) {
                            case '"':
                                cursor++
                                return cursor, field, nil
                            case '\\':
                                cursor++
                                if char(b, cursor) == nul {
                                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                                }
                            case nul:
                                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                            }
                        }
                    }
                    curBit &= bitmap[keyIdx][largeToSmallTable[c]]
                    if curBit == 0 {
                        for {
                            cursor++
                            switch char(b, cursor) {
                            case '"':
                                cursor++
                                return cursor, field, nil
                            case '\\':
                                cursor++
                                if char(b, cursor) == nul {
                                    return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                                }
                            case nul:
                                return 0, nil, errUnexpectedEndOfJSON("string", cursor)
                            }
                        }
                    }
                    keyIdx++
                }
                cursor++
            }
        default:
            return cursor, nil, errNotAtBeginningOfValue(cursor)
        }
    }
}
func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) {
    key, c, err := d.stringDecoder.decodeByte(buf, cursor)
    if err != nil {
        return 0, nil, err
    }
    cursor = c
    k := *(*string)(unsafe.Pointer(&key))
    field, exists := d.fieldMap[k]
    if !exists {
        return cursor, nil, nil
    }
    return cursor, field, nil
}
func decodeKeyByBitmapInt8Stream(d *structDecoder, s *stream) (*structFieldSet, string, error) {
    var (
        field  *structFieldSet
        curBit int8 = math.MaxInt8
    )
    for {
        switch s.char() {
        case ' ', '\n', '\t', '\r':
            s.cursor++
        case nul:
            if s.read() {
                continue
            }
            return nil, "", errNotAtBeginningOfValue(s.totalOffset())
        case '"':
            s.cursor++
        FIRST_CHAR:
            start := s.cursor
            switch s.char() {
            case '"':
                s.cursor++
                return field, "", nil
            case nul:
                if s.read() {
                    goto FIRST_CHAR
                }
                return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
            }
            keyIdx := 0
            bitmap := d.keyBitmapInt8
            keyBitmapLen := len(bitmap)
            for {
                c := s.char()
                switch c {
                case '"':
                    x := uint64(curBit & -curBit)
                    fieldSetIndex := bitHashTable[(x*0x03F566ED27179461)>>58]
                    field = d.sortedFieldSets[fieldSetIndex]
                    keyLen := s.cursor - start
                    s.cursor++
                    if keyLen < field.keyLen {
                        // early match
                        return nil, field.key, nil
                    }
                    return field, field.key, nil
                case nul:
                    if s.read() {
                        continue
                    }
                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                default:
                    if keyIdx >= keyBitmapLen {
                        for {
                            s.cursor++
                            switch s.char() {
                            case '"':
                                b := s.buf[start:s.cursor]
                                key := *(*string)(unsafe.Pointer(&b))
                                s.cursor++
                                return field, key, nil
                            case '\\':
                                s.cursor++
                                if s.char() == nul {
                                    if !s.read() {
                                        return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                    }
                                }
                            case nul:
                                if !s.read() {
                                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                }
                            }
                        }
                    }
                    curBit &= bitmap[keyIdx][largeToSmallTable[c]]
                    if curBit == 0 {
                        for {
                            s.cursor++
                            switch s.char() {
                            case '"':
                                b := s.buf[start:s.cursor]
                                key := *(*string)(unsafe.Pointer(&b))
                                s.cursor++
                                return field, key, nil
                            case '\\':
                                s.cursor++
                                if s.char() == nul {
                                    if !s.read() {
                                        return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                    }
                                }
                            case nul:
                                if !s.read() {
                                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                }
                            }
                        }
                    }
                    keyIdx++
                }
                s.cursor++
            }
        default:
            return nil, "", errNotAtBeginningOfValue(s.totalOffset())
        }
    }
}
func decodeKeyByBitmapInt16Stream(d *structDecoder, s *stream) (*structFieldSet, string, error) {
    var (
        field  *structFieldSet
        curBit int16 = math.MaxInt16
    )
    for {
        switch s.char() {
        case ' ', '\n', '\t', '\r':
            s.cursor++
        case nul:
            if s.read() {
                continue
            }
            return nil, "", errNotAtBeginningOfValue(s.totalOffset())
        case '"':
            s.cursor++
        FIRST_CHAR:
            start := s.cursor
            switch s.char() {
            case '"':
                s.cursor++
                return field, "", nil
            case nul:
                if s.read() {
                    goto FIRST_CHAR
                }
                return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
            }
            keyIdx := 0
            bitmap := d.keyBitmapInt16
            keyBitmapLen := len(bitmap)
            for {
                c := s.char()
                switch c {
                case '"':
                    x := uint64(curBit & -curBit)
                    fieldSetIndex := bitHashTable[(x*0x03F566ED27179461)>>58]
                    field = d.sortedFieldSets[fieldSetIndex]
                    keyLen := s.cursor - start
                    s.cursor++
                    if keyLen < field.keyLen {
                        // early match
                        return nil, field.key, nil
                    }
                    return field, field.key, nil
                case nul:
                    if s.read() {
                        continue
                    }
                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                default:
                    if keyIdx >= keyBitmapLen {
                        for {
                            s.cursor++
                            switch s.char() {
                            case '"':
                                b := s.buf[start:s.cursor]
                                key := *(*string)(unsafe.Pointer(&b))
                                s.cursor++
                                return field, key, nil
                            case '\\':
                                s.cursor++
                                if s.char() == nul {
                                    if !s.read() {
                                        return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                    }
                                }
                            case nul:
                                if !s.read() {
                                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                }
                            }
                        }
                    }
                    curBit &= bitmap[keyIdx][largeToSmallTable[c]]
                    if curBit == 0 {
                        for {
                            s.cursor++
                            switch s.char() {
                            case '"':
                                b := s.buf[start:s.cursor]
                                key := *(*string)(unsafe.Pointer(&b))
                                s.cursor++
                                return field, key, nil
                            case '\\':
                                s.cursor++
                                if s.char() == nul {
                                    if !s.read() {
                                        return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                    }
                                }
                            case nul:
                                if !s.read() {
                                    return nil, "", errUnexpectedEndOfJSON("string", s.totalOffset())
                                }
                            }
                        }
                    }
                    keyIdx++
                }
                s.cursor++
            }
        default:
            return nil, "", errNotAtBeginningOfValue(s.totalOffset())
        }
    }
}
func decodeKeyStream(d *structDecoder, s *stream) (*structFieldSet, string, error) {
    key, err := d.stringDecoder.decodeStreamByte(s)
    if err != nil {
        return nil, "", err
    }
    k := *(*string)(unsafe.Pointer(&key))
    return d.fieldMap[k], k, nil
}
func (d *structDecoder) decodeStream(s *stream, p unsafe.Pointer) error {
    s.skipWhiteSpace()
    switch s.char() {

@@ -49,14 +569,11 @@ func (d *structDecoder) decodeStream(s *stream, p unsafe.Pointer) error {
    }
    for {
        s.reset()
        key, err := d.keyDecoder.decodeStreamByte(s)
        field, key, err := d.keyStreamDecoder(d, s)
        if err != nil {
            return err
        }
        s.skipWhiteSpace()
        if s.char() == nul {
            s.read()
        }
        if s.char() != ':' {
            return errExpected("colon after object key", s.totalOffset())
        }

@@ -66,23 +583,18 @@ func (d *structDecoder) decodeStream(s *stream, p unsafe.Pointer) error {
                return errExpected("object value after colon", s.totalOffset())
            }
        }
        k := *(*string)(unsafe.Pointer(&key))
        field, exists := d.fieldMap[k]
        if exists {
        if field != nil {
            if err := field.dec.decodeStream(s, unsafe.Pointer(uintptr(p)+field.offset)); err != nil {
                return err
            }
        } else if s.disallowUnknownFields {
            return fmt.Errorf("json: unknown field %q", k)
            return fmt.Errorf("json: unknown field %q", key)
        } else {
            if err := s.skipValue(); err != nil {
                return err
            }
        }
        s.skipWhiteSpace()
        if s.char() == nul {
            s.read()
        }
        c := s.char()
        if c == '}' {
            s.cursor++

@@ -98,9 +610,9 @@ func (d *structDecoder) decodeStream(s *stream, p unsafe.Pointer) error {
func (d *structDecoder) decode(buf []byte, cursor int64, p unsafe.Pointer) (int64, error) {
    buflen := int64(len(buf))
    cursor = skipWhiteSpace(buf, cursor)
    switch buf[cursor] {
    b := (*sliceHeader)(unsafe.Pointer(&buf)).data
    switch char(b, cursor) {
    case 'n':
        buflen := int64(len(buf))
        if cursor+3 >= buflen {
            return 0, errUnexpectedEndOfJSON("null", cursor)
        }

@@ -123,23 +635,20 @@ func (d *structDecoder) decode(buf []byte, cursor int64, p unsafe.Pointer) (int6
            return 0, errUnexpectedEndOfJSON("object", cursor)
        }
        cursor++
        for ; cursor < buflen; cursor++ {
            key, c, err := d.keyDecoder.decodeByte(buf, cursor)
        for {
            c, field, err := d.keyDecoder(d, buf, cursor)
            if err != nil {
                return 0, err
            }
            cursor = c
            cursor = skipWhiteSpace(buf, cursor)
            if buf[cursor] != ':' {
            cursor = skipWhiteSpace(buf, c)
            if char(b, cursor) != ':' {
                return 0, errExpected("colon after object key", cursor)
            }
            cursor++
            if cursor >= buflen {
                return 0, errExpected("object value after colon", cursor)
            }
            k := *(*string)(unsafe.Pointer(&key))
            field, exists := d.fieldMap[k]
            if exists {
            if field != nil {
                c, err := field.dec.decode(buf, cursor, unsafe.Pointer(uintptr(p)+field.offset))
                if err != nil {
                    return 0, err

@@ -153,13 +662,13 @@ func (d *structDecoder) decode(buf []byte, cursor int64, p unsafe.Pointer) (int6
                cursor = c
            }
            cursor = skipWhiteSpace(buf, cursor)
            if buf[cursor] == '}' {
            if char(b, cursor) == '}' {
                cursor++
                return cursor, nil
            }
            if buf[cursor] != ',' {
            if char(b, cursor) != ',' {
                return 0, errExpected("comma after object element", cursor)
            }
            cursor++
        }
        return cursor, nil
    }
@@ -5,7 +5,6 @@ import (
    "fmt"
    "reflect"
    "strings"
    "sync/atomic"
    "unsafe"
)

@@ -22,74 +21,10 @@ type opcodeSet struct {
}

var (
    marshalJSONType = reflect.TypeOf((*Marshaler)(nil)).Elem()
    marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
    cachedOpcode unsafe.Pointer // map[uintptr]*opcodeSet
    baseTypeAddr uintptr
    cachedOpcodeSets []*opcodeSet
    existsCachedOpcodeSets bool
    marshalJSONType = reflect.TypeOf((*Marshaler)(nil)).Elem()
    marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
)

const (
    maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib
)

//go:linkname typelinks reflect.typelinks
func typelinks() ([]unsafe.Pointer, [][]int32)

//go:linkname rtypeOff reflect.rtypeOff
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer

func setupOpcodeSets() error {
    sections, offsets := typelinks()
    if len(sections) != 1 {
        return fmt.Errorf("failed to get sections")
    }
    if len(offsets) != 1 {
        return fmt.Errorf("failed to get offsets")
    }
    section := sections[0]
    offset := offsets[0]
    var (
        min uintptr = uintptr(^uint(0))
        max uintptr = 0
    )
    for i := 0; i < len(offset); i++ {
        typ := (*rtype)(rtypeOff(section, offset[i]))
        addr := uintptr(unsafe.Pointer(typ))
        if min > addr {
            min = addr
        }
        if max < addr {
            max = addr
        }
        if typ.Kind() == reflect.Ptr {
            addr = uintptr(unsafe.Pointer(typ.Elem()))
            if min > addr {
                min = addr
            }
            if max < addr {
                max = addr
            }
        }
    }
    addrRange := max - min
    if addrRange == 0 {
        return fmt.Errorf("failed to get address range of types")
    }
    if addrRange > maxAcceptableTypeAddrRange {
        return fmt.Errorf("too big address range %d", addrRange)
    }
    cachedOpcodeSets = make([]*opcodeSet, addrRange)
    existsCachedOpcodeSets = true
    baseTypeAddr = min
    return nil
}

func init() {
    _ = setupOpcodeSets()
}

func encodeCompileToGetCodeSetSlowPath(typeptr uintptr) (*opcodeSet, error) {
    opcodeMap := loadOpcodeMap()
    if codeSet, exists := opcodeMap[typeptr]; exists {

@@ -117,22 +52,6 @@ func encodeCompileToGetCodeSetSlowPath(typeptr uintptr) (*opcodeSet, error) {
        return codeSet, nil
    }

func loadOpcodeMap() map[uintptr]*opcodeSet {
    p := atomic.LoadPointer(&cachedOpcode)
    return *(*map[uintptr]*opcodeSet)(unsafe.Pointer(&p))
}

func storeOpcodeSet(typ uintptr, set *opcodeSet, m map[uintptr]*opcodeSet) {
    newOpcodeMap := make(map[uintptr]*opcodeSet, len(m)+1)
    newOpcodeMap[typ] = set

    for k, v := range m {
        newOpcodeMap[k] = v
    }

    atomic.StorePointer(&cachedOpcode, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap)))
}

func encodeCompileHead(ctx *encodeCompileContext) (*opcode, error) {
    typ := ctx.typ
    switch {