use glide and refactor (#281)

siddontang 2017-03-12 18:05:35 +08:00 committed by GitHub
parent 380835a0ca
commit 3970f8e4f4
229 changed files with 27109 additions and 16840 deletions

View File

@ -1,7 +1,7 @@
language: go
go:
- 1.5
- 1.6
- 1.7
script:
- make travis
- make test

View File

@ -11,44 +11,39 @@ export LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH
export GO_BUILD_TAGS
GO=GO15VENDOREXPERIMENT="1" go
PKGS=$(shell $(GO) list ./... | grep -v "cmd")
all: build
all: build
build:
$(GO) build -o bin/ledis-server -tags '$(GO_BUILD_TAGS)' cmd/ledis-server/*
$(GO) build -o bin/ledis-cli -tags '$(GO_BUILD_TAGS)' cmd/ledis-cli/*
rm -rf vendor && ln -s _vendor/vendor vendor
go build -o bin/ledis-benchmark -tags '$(GO_BUILD_TAGS)' cmd/ledis-benchmark/*
go build -o bin/ledis-dump -tags '$(GO_BUILD_TAGS)' cmd/ledis-dump/*
go build -o bin/ledis-load -tags '$(GO_BUILD_TAGS)' cmd/ledis-load/*
go build -o bin/ledis-repair -tags '$(GO_BUILD_TAGS)' cmd/ledis-repair/*
rm -rf vendor
build_all: build
$(GO) build -o bin/ledis-benchmark -tags '$(GO_BUILD_TAGS)' cmd/ledis-benchmark/*
$(GO) build -o bin/ledis-dump -tags '$(GO_BUILD_TAGS)' cmd/ledis-dump/*
$(GO) build -o bin/ledis-load -tags '$(GO_BUILD_TAGS)' cmd/ledis-load/*
$(GO) build -o bin/ledis-repair -tags '$(GO_BUILD_TAGS)' cmd/ledis-repair/*
test:
# use vendor for test
@rm -rf vendor && ln -s cmd/vendor vendor
@$(GO) test --race -tags '$(GO_BUILD_TAGS)' $(PKGS)
@rm -rf vendor
rm -rf vendor && ln -s _vendor/vendor vendor
go test --race -tags '$(GO_BUILD_TAGS)' -timeout 2m ./...
rm -rf vendor
clean:
$(GO) clean -i ./...
go clean -i ./...
fmt:
gofmt -w -s . 2>&1 | grep -vE 'vendor' | awk '{print} END{if(NR>0) {exit 1}}'
deps:
# see https://github.com/coreos/etcd/blob/master/scripts/updatedep.sh
rm -rf Godeps vendor cmd/vendor
mkdir -p cmd/vendor
ln -s cmd/vendor vendor
godep save ./...
rm -rf cmd/Godeps
rm vendor
mv Godeps cmd/
travis:
@rm -rf vendor && ln -s cmd/vendor vendor
@$(GO) test --race -tags '$(GO_BUILD_TAGS)' $(PKGS)
update_vendor:
which glide >/dev/null || curl https://glide.sh/get | sh
which glide-vc || go get -v -u github.com/sgotti/glide-vc
rm -r vendor && mv _vendor/vendor vendor || true
rm -rf _vendor
ifdef PKG
glide get --strip-vendor --skip-test ${PKG}
else
glide update --strip-vendor --skip-test
endif
@echo "removing test files"
glide vc --only-code --no-tests
mkdir -p _vendor
mv vendor _vendor/vendor

View File

@ -34,9 +34,6 @@ Create a workspace and checkout ledisdb source
cd src/github.com/siddontang/ledisdb
#install godep and be sure godep command can be found in $PATH
go get github.com/tools/godep
#set build and run environment
source dev.sh
@ -180,7 +177,7 @@ See [Clients](https://github.com/siddontang/ledisdb/wiki/Clients) to find or con
## Requirement
+ Go version >= 1.5
+ Go version >= 1.6
## Related Repos

View File

@ -225,6 +225,9 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return mismatch(rv, "map", mapping)
}
@ -267,6 +270,9 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
@ -292,6 +298,9 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
@ -305,12 +314,16 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if rv.IsNil() {
rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
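The patched unifySlice no longer allocates unconditionally: an existing slice is reused whenever its capacity covers the data, and its length is reset to match. A minimal sketch of what that means for callers, assuming the usual BurntSushi/toml Decode entry point (the config struct and TOML snippet are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type config struct {
	Ports []int `toml:"ports"`
}

func main() {
	c := config{Ports: make([]int, 1, 8)} // pre-allocated slice
	// With the change above, unifySlice reuses c.Ports (cap 8 >= 3)
	// and just resets its length instead of allocating a new slice.
	if _, err := toml.Decode(`ports = [6380, 6381, 6382]`, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Ports) // [6380 6381 6382]
}
```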

View File

@ -306,19 +306,30 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexporded fields
if f.PkgPath != "" {
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
frv := eindirect(frv)
t := frv.Type()
if t.Kind() != reflect.Struct {
encPanic(errAnonNonStruct)
t := f.Type
switch t.Kind() {
case reflect.Struct:
addFields(t, frv, f.Index)
continue
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
addFields(t, frv, f.Index)
} else if typeIsHash(tomlTypeOfGo(frv)) {
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
@ -336,15 +347,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
continue
}
keyName := sft.Tag.Get("toml")
if keyName == "-" {
tag := sft.Tag.Get("toml")
if tag == "-" {
continue
}
keyName, opts := getOptions(tag)
if keyName == "" {
keyName = sft.Name
}
keyName, opts := getOptions(keyName)
if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
continue
} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
@ -457,34 +467,22 @@ func getOptions(keyName string) (string, map[string]struct{}) {
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if rv.Int() == 0 {
return true
}
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if rv.Uint() == 0 {
return true
}
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
if rv.Float() == 0.0 {
return true
}
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.String:
if len(strings.TrimSpace(rv.String())) == 0 {
return true
}
case reflect.Array, reflect.Slice, reflect.Map:
if rv.Len() == 0 {
return true
}
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
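With the getOptions split above, the struct tag is parsed once into a key name plus an option set, so omitempty and omitzero compose with a custom key. A hedged sketch (struct and values invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string   `toml:"host"`
	Port int      `toml:"port,omitzero"`  // skipped while zero
	Tags []string `toml:"tags,omitempty"` // skipped while empty
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(server{Host: "localhost"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // only host = "localhost" is emitted
}
```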

View File

@ -272,8 +272,6 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
case isWhitespace(r):
return lexTableNameStart
default:
return lexBareTableName
}
@ -560,7 +558,6 @@ func lexMultilineRawString(lx *lexer) stateFn {
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
lx.next()
return lexMultilineString
} else {
lx.backup()
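The dropped lx.next() fixes line-ending backslashes in multiline basic strings: the lexer used to consume one extra character after the escaped newline. A small check, with a made-up document:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		S string `toml:"s"`
	}
	// A trailing backslash inside """...""" trims the newline and the
	// following whitespace; the old lexer ate one extra character.
	doc := `s = """one \
    two"""`
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "one two"
}
```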

View File

@ -81,7 +81,7 @@ func (p *parser) next() item {
}
func (p *parser) bug(format string, v ...interface{}) {
log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}
func (p *parser) expect(typ itemType) item {
@ -216,7 +216,7 @@ func (p *parser) value(it item) (interface{}, tomlType) {
case itemDatetime:
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
if err != nil {
p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
@ -401,7 +401,7 @@ func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:len(s)]
return s[1:]
}
func stripEscapedWhitespace(s string) string {
@ -481,12 +481,7 @@ func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
// BUG(burntsushi)
// I honestly don't understand how this works. I can't seem
// to find a way to make this fail. I figured this would fail on invalid
// UTF-8 characters like U+DCFF, but it doesn't.
if !utf8.ValidString(string(rune(hex))) {
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
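The switch from ValidString to ValidRune is what makes the old "can't make this fail" check meaningful: converting an invalid rune to a string substitutes U+FFFD first, which is always valid UTF-8. A quick demonstration:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// A UTF-16 surrogate half is not a valid rune on its own.
	fmt.Println(utf8.ValidRune(0xDCFF)) // false
	// Converting an invalid rune to a string silently substitutes
	// U+FFFD, so the old ValidString check could never fail.
	fmt.Println(utf8.ValidString(string(rune(0xDCFF)))) // true
}
```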

View File

@ -92,10 +92,10 @@ func typeFields(t reflect.Type) []field {
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
name := sf.Tag.Get("toml")
name, _ := getOptions(sf.Tag.Get("toml"))
if name == "-" {
continue
}

View File

@ -35,7 +35,7 @@ type digest struct {
crc uint64
}
func New() hash.Hash {
func New() hash.Hash64 {
return &digest{}
}
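Returning hash.Hash64 is a small but useful API widening: callers get Sum64 directly instead of re-decoding the Sum(nil) bytes. A sketch, assuming this is the vendored github.com/cupcake/rdb/crc64 package:

```go
package main

import (
	"fmt"

	"github.com/cupcake/rdb/crc64" // assumed import path of this vendored package
)

func main() {
	h := crc64.New()
	h.Write([]byte("REDIS0007"))
	// hash.Hash64 adds Sum64; previously only Sum([]byte) was exposed.
	fmt.Printf("%016x\n", h.Sum64())
}
```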

View File

@ -20,6 +20,10 @@ type Decoder interface {
// StartDatabase is called when database n starts.
// Once a database starts, another database will not start until EndDatabase is called.
StartDatabase(n int)
// AUX field
Aux(key, value []byte)
// ResizeDB hint
ResizeDatabase(dbSize, expiresSize uint32)
// Set is called once for each string key.
Set(key, value []byte, expiry int64)
// StartHash is called at the beginning of a hash.
@ -38,6 +42,7 @@ type Decoder interface {
EndSet(key []byte)
// StartList is called at the beginning of a list.
// Rpush will be called exactly length times before EndList.
// If the length of the list is not known, then length is -1
StartList(key []byte, length, expiry int64)
// Rpush is called once for each value in a list.
Rpush(key, value []byte)
@ -102,11 +107,12 @@ const (
TypeZSet ValueType = 3
TypeHash ValueType = 4
TypeHashZipmap ValueType = 9
TypeListZiplist ValueType = 10
TypeSetIntset ValueType = 11
TypeZSetZiplist ValueType = 12
TypeHashZiplist ValueType = 13
TypeHashZipmap ValueType = 9
TypeListZiplist ValueType = 10
TypeSetIntset ValueType = 11
TypeZSetZiplist ValueType = 12
TypeHashZiplist ValueType = 13
TypeListQuicklist ValueType = 14
)
const (
@ -115,6 +121,8 @@ const (
rdb32bitLen = 2
rdbEncVal = 3
rdbFlagAux = 0xfa
rdbFlagResizeDB = 0xfb
rdbFlagExpiryMS = 0xfc
rdbFlagExpiry = 0xfd
rdbFlagSelectDB = 0xfe
@ -154,6 +162,26 @@ func (d *decode) decode() error {
return err
}
switch objType {
case rdbFlagAux:
auxKey, err := d.readString()
if err != nil {
return err
}
auxVal, err := d.readString()
if err != nil {
return err
}
d.event.Aux(auxKey, auxVal)
case rdbFlagResizeDB:
dbSize, _, err := d.readLength()
if err != nil {
return err
}
expiresSize, _, err := d.readLength()
if err != nil {
return err
}
d.event.ResizeDatabase(dbSize, expiresSize)
case rdbFlagExpiryMS:
_, err := io.ReadFull(d.r, d.intBuf)
if err != nil {
@ -188,6 +216,7 @@ func (d *decode) decode() error {
if err != nil {
return err
}
expiry = 0
}
}
@ -216,6 +245,16 @@ func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error {
d.event.Rpush(key, value)
}
d.event.EndList(key)
case TypeListQuicklist:
length, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartList(key, int64(-1), expiry)
for i := uint32(0); i < length; i++ {
d.readZiplist(key, 0, false)
}
d.event.EndList(key)
case TypeSet:
cardinality, _, err := d.readLength()
if err != nil {
@ -269,7 +308,7 @@ func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error {
case TypeHashZipmap:
return d.readZipmap(key, expiry)
case TypeListZiplist:
return d.readZiplist(key, expiry)
return d.readZiplist(key, expiry, true)
case TypeSetIntset:
return d.readIntset(key, expiry)
case TypeZSetZiplist:
@ -378,7 +417,7 @@ func readZipmapItemLength(buf *sliceBuffer, readFree bool) (int, int, error) {
return int(b), int(free), err
}
func (d *decode) readZiplist(key []byte, expiry int64) error {
func (d *decode) readZiplist(key []byte, expiry int64, addListEvents bool) error {
ziplist, err := d.readString()
if err != nil {
return err
@ -388,7 +427,9 @@ func (d *decode) readZiplist(key []byte, expiry int64) error {
if err != nil {
return err
}
d.event.StartList(key, length, expiry)
if addListEvents {
d.event.StartList(key, length, expiry)
}
for i := int64(0); i < length; i++ {
entry, err := readZiplistEntry(buf)
if err != nil {
@ -396,7 +437,9 @@ func (d *decode) readZiplist(key []byte, expiry int64) error {
}
d.event.Rpush(key, entry)
}
d.event.EndList(key)
if addListEvents {
d.event.EndList(key)
}
return nil
}
@ -585,7 +628,7 @@ func (d *decode) checkHeader() error {
}
version, _ := strconv.ParseInt(string(header[5:]), 10, 64)
if version < 1 || version > 6 {
if version < 1 || version > 7 {
return fmt.Errorf("rdb: invalid RDB version number %d", version)
}
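Decoder implementations gain two callbacks (Aux and ResizeDatabase) plus quicklist support; decoders that embed NopDecoder keep compiling unchanged. A minimal sketch of a consumer, assuming the upstream cupcake/rdb package layout and a hypothetical dump.rdb input:

```go
package main

import (
	"fmt"
	"os"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

// auxPrinter embeds NopDecoder so it only has to implement the
// callbacks it cares about, here the new Aux hook.
type auxPrinter struct {
	nopdecoder.NopDecoder
}

func (auxPrinter) Aux(key, value []byte) {
	fmt.Printf("aux %s = %s\n", key, value)
}

func main() {
	f, err := os.Open("dump.rdb") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := rdb.Decode(f, auxPrinter{}); err != nil {
		panic(err)
	}
}
```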

View File

@ -5,6 +5,8 @@ type NopDecoder struct{}
func (d NopDecoder) StartRDB() {}
func (d NopDecoder) StartDatabase(n int) {}
func (d NopDecoder) Aux(key, value []byte) {}
func (d NopDecoder) ResizeDatabase(dbSize, expiresSize uint32) {}
func (d NopDecoder) EndDatabase(n int) {}
func (d NopDecoder) EndRDB() {}
func (d NopDecoder) Set(key, value []byte, expiry int64) {}

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris
// +build darwin dragonfly freebsd linux openbsd solaris netbsd
package mmap
@ -35,7 +35,7 @@ func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
}
func flush(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, addr, len, syscall.MS_SYNC)
_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
if errno != 0 {
return syscall.Errno(errno)
}

View File

@ -73,7 +73,20 @@ func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
func flush(addr, len uintptr) error {
errno := syscall.FlushViewOfFile(addr, len)
return os.NewSyscallError("FlushViewOfFile", errno)
if errno != nil {
return os.NewSyscallError("FlushViewOfFile", errno)
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
errno = syscall.FlushFileBuffers(handle)
return os.NewSyscallError("FlushFileBuffers", errno)
}
func lock(addr, len uintptr) error {
@ -88,13 +101,18 @@ func unlock(addr, len uintptr) error {
func unmap(addr, len uintptr) error {
flush(addr, len)
// Lock the UnmapViewOfFile along with the handleMap deletion.
// As soon as we unmap the view, the OS is free to give the
// same addr to another new map. We don't want another goroutine
// to insert and remove the same addr into handleMap while
// we're trying to remove our old addr/handle pair.
handleLock.Lock()
defer handleLock.Unlock()
err := syscall.UnmapViewOfFile(addr)
if err != nil {
return err
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
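On Windows, Flush now follows FlushViewOfFile with FlushFileBuffers on the backing handle, so flushed pages actually reach the disk rather than only the OS file cache. A usage sketch against the edsrzf/mmap-go API (file name invented):

```go
package main

import (
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	f, err := os.OpenFile("data.bin", os.O_RDWR, 0644) // hypothetical file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	m, err := mmap.Map(f, mmap.RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer m.Unmap()

	copy(m, []byte("hi"))
	// On Windows this now implies FlushFileBuffers as well.
	if err := m.Flush(); err != nil {
		panic(err)
	}
}
```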

View File

@ -0,0 +1,8 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
const _SYS_MSYNC = 277
const _MS_SYNC = 0x04

_vendor/vendor/github.com/edsrzf/mmap-go/msync_unix.go (generated, vendored; new file, 14 lines)
View File

@ -0,0 +1,14 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris
package mmap
import (
"syscall"
)
const _SYS_MSYNC = syscall.SYS_MSYNC
const _MS_SYNC = syscall.MS_SYNC

View File

@ -17,6 +17,8 @@ var (
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
@ -40,96 +42,33 @@ func decodedLen(src []byte) (blockLen, headerLen int, err error) {
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
return nil, ErrCorrupt
}
// NewReader returns a new Reader that decompresses from r, using the framing
@ -138,12 +77,12 @@ func Decode(dst, src []byte) ([]byte, error) {
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader than can read Snappy-compressed bytes.
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
@ -165,9 +104,9 @@ func (r *Reader) Reset(reader io.Reader) {
r.readHeader = false
}
func (r *Reader) readFull(p []byte) (ok bool) {
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
@ -186,7 +125,7 @@ func (r *Reader) Read(p []byte) (int, error) {
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4]) {
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
@ -213,7 +152,7 @@ func (r *Reader) Read(p []byte) (int, error) {
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf) {
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
@ -246,13 +185,17 @@ func (r *Reader) Read(p []byte) (int, error) {
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf) {
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if !r.readFull(r.decoded[:n]) {
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
@ -268,7 +211,7 @@ func (r *Reader) Read(p []byte) (int, error) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)]) {
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
@ -287,7 +230,7 @@ func (r *Reader) Read(p []byte) (int, error) {
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen]) {
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
}
}
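The pure Go fallback and the new amd64 assembly share the decode entry point above, so nothing changes for callers. A round-trip sketch against the public API:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, hello, hello, world")
	enc := snappy.Encode(nil, src) // nil dst: Encode allocates
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
		len(src), len(enc), string(dec) == string(src))
}
```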

View File

@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

_vendor/vendor/github.com/golang/snappy/decode_amd64.s (generated, vendored; new file, 490 lines)
View File

@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET

_vendor/vendor/github.com/golang/snappy/decode_other.go (generated, vendored; new file, 101 lines)
View File

@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}

_vendor/vendor/github.com/golang/snappy/encode.go (generated, vendored; new file, 285 lines)
View File

@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}
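Given the deprecation note on NewWriter above, a buffered writer plus Close is the intended pattern. A short sketch (output file name invented):

```go
package main

import (
	"os"

	"github.com/golang/snappy"
)

func main() {
	f, err := os.Create("payload.sz") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	w := snappy.NewBufferedWriter(f)
	defer w.Close() // Close flushes the internal buffer
	if _, err := w.Write([]byte("many small writes are fine here")); err != nil {
		panic(err)
	}
}
```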

View File

@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)

_vendor/vendor/github.com/golang/snappy/encode_amd64.s (generated, vendored; new file, 730 lines)
View File

@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET
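The XOR-and-BSF sequence in inlineExtendMatchBSF has a compact Go analogue. A minimal sketch, assuming a hypothetical matchLen8 helper (math/bits needs Go 1.9+, newer than the toolchains this repo targets):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen8 reports how many of the first 8 bytes of a and b are equal:
// XOR the two 8-byte words, find the least significant set bit (what BSFQ
// does in the assembly above), and shift right by 3 to turn a bit index
// into a byte index, relying on little-endian byte order.
func matchLen8(a, b []byte) int {
	x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if x == 0 {
		return 8 // all eight bytes match
	}
	return bits.TrailingZeros64(x) >> 3
}

func main() {
	fmt.Println(matchLen8([]byte("snappy!!"), []byte("snapPY!!"))) // prints 4
}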

_vendor/vendor/github.com/golang/snappy/encode_other.go (new vendored file, 238 lines)
View File

@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
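The three header cases above are easy to check in isolation. A minimal sketch, with headerFor as a hypothetical helper and tagLiteral copied from the format constants:

package main

import "fmt"

const tagLiteral = 0x00

// headerFor returns the literal-chunk header for a literal of length n+1,
// mirroring emitLiteral's three cases: 1-byte, 2-byte, and 3-byte headers.
func headerFor(n uint) []byte {
	switch {
	case n < 60:
		return []byte{uint8(n)<<2 | tagLiteral}
	case n < 1<<8:
		return []byte{60<<2 | tagLiteral, uint8(n)}
	default:
		return []byte{61<<2 | tagLiteral, uint8(n), uint8(n >> 8)}
	}
}

func main() {
	fmt.Println(headerFor(5))    // [20]
	fmt.Println(headerFor(100))  // [240 100]
	fmt.Println(headerFor(1000)) // [244 232 3]
}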
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
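To make the 64±4 arithmetic in the comment concrete, here is the byte cost of the two ways to split a length-67 copy (a worked example, not code from the package):

package main

import "fmt"

func main() {
	// A tagCopy2 op costs 3 bytes; a tagCopy1 op costs 2 bytes but requires
	// length >= 4. So for a single 67-byte copy:
	split60plus7 := 3 + 2 // 60 as tagCopy2, 7 as tagCopy1
	split64plus3 := 3 + 3 // 64 as tagCopy2, 3 forced into another tagCopy2
	fmt.Println(split60plus7, "<", split64plus3) // 5 < 6, so emitCopy peels off 60
}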
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
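hash takes the top bits of the multiplicative product, so shift doubles as the table-size control: for the full 1<<14-entry table, shift is 32-14 = 18 and every result indexes the table without masking. A quick standalone check:

package main

import "fmt"

func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	const shift = 18 // 32 - log2(maxTableSize) = 32 - 14
	for _, u := range []uint32{0, 0x12345678, 0xdeadbeef, 0xffffffff} {
		h := hash(u, shift)
		fmt.Println(h, h < 1<<14) // always true: a 32-bit value >> 18 fits in 14 bits
	}
}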
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (i.e. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}
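Callers never reach encodeBlock directly; they go through the package's block-format Encode/Decode API. A usage sketch (not part of this diff):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("ledisdb "), 100)
	enc := snappy.Encode(nil, src)      // nil dst: Encode allocates for us
	dec, err := snappy.Decode(nil, enc) // round-trip back to the original
	if err != nil {
		panic(err)
	}
	fmt.Println(len(src), "->", len(enc), "bytes; roundtrip ok:", bytes.Equal(src, dec))
}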

View File

@ -6,7 +6,7 @@
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
@ -32,7 +32,10 @@ Lempel-Ziv compression algorithms. In particular:
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer supported.
- For l == 3, this tag is a legacy format that is no longer issued by most
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
@ -46,9 +49,25 @@ const (
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (

View File

@ -37,3 +37,5 @@ type termios struct {
Ispeed int32
Ospeed int32
}
const cursorColumn = false

View File

@ -7,7 +7,6 @@ package liner
import (
"bufio"
"bytes"
"container/ring"
"errors"
"fmt"
@ -32,6 +31,8 @@ type commonState struct {
multiLineMode bool
cursorRows int
maxRows int
shouldRestart ShouldRestart
needRefresh bool
}
// TabStyle is used to select how tab completions are displayed.
@ -58,7 +59,12 @@ var ErrPromptAborted = errors.New("prompt aborted")
// platform is normally supported, but stdout has been redirected
var ErrNotTerminalOutput = errors.New("standard output is not a terminal")
// Max elements to save on the killring
// ErrInvalidPrompt is returned from Prompt or PasswordPrompt if the
// prompt contains any unprintable runes (including substrings that could
// be colour codes on some platforms).
var ErrInvalidPrompt = errors.New("invalid prompt")
// KillRingMax is the max number of elements to save on the killring.
const KillRingMax = 60
// HistoryLimit is the maximum number of entries saved in the scrollback history.
@ -133,6 +139,13 @@ func (s *State) AppendHistory(item string) {
}
}
// ClearHistory clears the scrollback history.
func (s *State) ClearHistory() {
s.historyMutex.Lock()
defer s.historyMutex.Unlock()
s.history = nil
}
// Returns the history lines starting with prefix
func (s *State) getHistoryByPrefix(prefix string) (ph []string) {
for _, h := range s.history {
@ -215,6 +228,16 @@ func (s *State) SetMultiLineMode(mlmode bool) {
s.multiLineMode = mlmode
}
// ShouldRestart is passed the error generated by readNext and returns true if
// the read should be restarted, or false if the error should be returned.
type ShouldRestart func(err error) bool
// SetShouldRestart sets the restart function that Liner will call to determine
// whether to retry the call to, or return the error returned by, readNext.
func (s *State) SetShouldRestart(f ShouldRestart) {
s.shouldRestart = f
}
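A sketch of how a caller might wire up the new hook; the error predicate is illustrative, since what readNext returns for an interrupted read is platform-dependent:

package main

import (
	"fmt"
	"strings"

	"github.com/peterh/liner"
)

func main() {
	s := liner.NewLiner()
	defer s.Close()

	// Retry reads that fail with an interrupted-system-call style error
	// instead of aborting the prompt; the string match is a stand-in for
	// whatever predicate the application actually needs.
	s.SetShouldRestart(func(err error) bool {
		return strings.Contains(err.Error(), "interrupted")
	})

	if line, err := s.Prompt("ledis> "); err == nil {
		fmt.Println("read:", line)
	}
}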
func (s *State) promptUnsupported(p string) (string, error) {
if !s.inputRedirected || !s.terminalSupported {
fmt.Print(p)
@ -223,5 +246,5 @@ func (s *State) promptUnsupported(p string) (string, error) {
if err != nil {
return "", err
}
return string(bytes.TrimSpace(linebuf)), nil
return string(linebuf), nil
}

View File

@ -31,11 +31,6 @@ type State struct {
// NewLiner initializes a new *State, and sets the terminal into raw mode. To
// restore the terminal to its previous state, call State.Close().
//
// Note if you are still using Go 1.0: NewLiner handles SIGWINCH, so it will
// leak a channel every time you call it. Therefore, it is recommended that you
// upgrade to a newer release of Go, or ensure that NewLiner is only called
// once.
func NewLiner() *State {
var s State
s.r = bufio.NewReader(os.Stdin)
@ -67,8 +62,7 @@ func NewLiner() *State {
}
if !s.outputRedirected {
s.getColumns()
s.outputRedirected = s.columns <= 0
s.outputRedirected = !s.getColumns()
}
return &s
@ -88,8 +82,12 @@ func (s *State) startPrompt() {
s.restartPrompt()
}
func (s *State) inputWaiting() bool {
return len(s.next) > 0
}
func (s *State) restartPrompt() {
next := make(chan nexter)
next := make(chan nexter, 200)
go func() {
for {
var n nexter
@ -127,8 +125,6 @@ func (s *State) nextPending(timeout <-chan time.Time) (rune, error) {
s.pending = s.pending[1:]
return rv, errTimedOut
}
// not reached
return 0, nil
}
func (s *State) readNext() (interface{}, error) {
@ -350,7 +346,7 @@ func (s *State) readNext() (interface{}, error) {
// Close returns the terminal to its previous mode
func (s *State) Close() error {
stopSignal(s.winch)
signal.Stop(s.winch)
if !s.inputRedirected {
s.origMode.ApplyMode()
}

View File

@ -37,3 +37,7 @@ type termios struct {
Ispeed uintptr
Ospeed uintptr
}
// Terminal.app needs a column for the cursor when the input line is at the
// bottom of the window.
const cursorColumn = true

View File

@ -24,3 +24,5 @@ const (
type termios struct {
syscall.Termios
}
const cursorColumn = false

View File

@ -10,13 +10,14 @@ import (
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
procReadConsoleInput = kernel32.NewProc("ReadConsoleInputW")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
procReadConsoleInput = kernel32.NewProc("ReadConsoleInputW")
procGetNumberOfConsoleInputEvents = kernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
)
// These names are from the Win32 api, so they use underscores (contrary to
@ -147,6 +148,21 @@ const (
modKeys = shiftPressed | leftAltPressed | rightAltPressed | leftCtrlPressed | rightCtrlPressed
)
// inputWaiting only returns true if the next call to readNext will return immediately.
func (s *State) inputWaiting() bool {
var num uint32
ok, _, _ := procGetNumberOfConsoleInputEvents.Call(uintptr(s.handle), uintptr(unsafe.Pointer(&num)))
if ok == 0 {
// call failed, so we cannot guarantee a non-blocking readNext
return false
}
// during a "paste" input events are always an odd number, and
// the last one results in a blocking readNext, so return false
// when num is 1 or 0.
return num > 1
}
func (s *State) readNext() (interface{}, error) {
if s.repeat > 0 {
s.repeat--
@ -168,6 +184,9 @@ func (s *State) readNext() (interface{}, error) {
if input.eventType == window_buffer_size_event {
xy := (*coord)(unsafe.Pointer(&input.blob[0]))
s.columns = int(xy.x)
if s.columns > 1 {
s.columns--
}
return winch, nil
}
if input.eventType != key_event {
@ -260,7 +279,6 @@ func (s *State) readNext() (interface{}, error) {
}
return s.key, nil
}
return unknown, nil
}
// Close returns the terminal to its previous mode

View File

@ -90,11 +90,11 @@ const (
)
func (s *State) refresh(prompt []rune, buf []rune, pos int) error {
s.needRefresh = false
if s.multiLineMode {
return s.refreshMultiLine(prompt, buf, pos)
} else {
return s.refreshSingleLine(prompt, buf, pos)
}
return s.refreshSingleLine(prompt, buf, pos)
}
func (s *State) refreshSingleLine(prompt []rune, buf []rune, pos int) error {
@ -387,8 +387,6 @@ func (s *State) tabComplete(p []rune, line []rune, pos int) ([]rune, int, interf
}
return []rune(head + pick + tail), hl + utf8.RuneCountInString(pick), next, nil
}
// Not reached
return line, pos, rune(esc), nil
}
// reverse intelligent search, implements a bash-like history search.
@ -556,14 +554,26 @@ func (s *State) yank(p []rune, text []rune, pos int) ([]rune, int, interface{},
}
}
}
return line, pos, esc, nil
}
// Prompt displays p and returns a line of user input, not including a trailing
// newline character. An io.EOF error is returned if the user signals end-of-file
// by pressing Ctrl-D. Prompt allows line editing if the terminal supports it.
func (s *State) Prompt(prompt string) (string, error) {
return s.PromptWithSuggestion(prompt, "", 0)
}
// PromptWithSuggestion displays prompt and an editable text with the cursor at
// the given position. The cursor will be set to the end of the line if the given
// position is negative or greater than the length of the text. Returns a line of user input, not
// including a trailing newline character. An io.EOF error is returned if the user
// signals end-of-file by pressing Ctrl-D.
func (s *State) PromptWithSuggestion(prompt string, text string, pos int) (string, error) {
for _, r := range prompt {
if unicode.Is(unicode.C, r) {
return "", ErrInvalidPrompt
}
}
if s.inputRedirected || !s.terminalSupported {
return s.promptUnsupported(prompt)
}
@ -574,24 +584,37 @@ func (s *State) Prompt(prompt string) (string, error) {
s.historyMutex.RLock()
defer s.historyMutex.RUnlock()
s.startPrompt()
defer s.stopPrompt()
s.getColumns()
fmt.Print(prompt)
p := []rune(prompt)
var line []rune
pos := 0
var line = []rune(text)
historyEnd := ""
prefixHistory := s.getHistoryByPrefix(string(line))
historyPos := len(prefixHistory)
var historyPrefix []string
historyPos := 0
historyStale := true
historyAction := false // used to mark history related actions
killAction := 0 // used to mark kill related actions
defer s.stopPrompt()
if pos < 0 || len(text) < pos {
pos = len(text)
}
if len(line) > 0 {
s.refresh(p, line, pos)
}
restart:
s.startPrompt()
s.getColumns()
mainLoop:
for {
next, err := s.readNext()
haveNext:
if err != nil {
if s.shouldRestart != nil && s.shouldRestart(err) {
goto restart
}
return "", err
}
@ -600,6 +623,9 @@ mainLoop:
case rune:
switch v {
case cr, lf:
if s.needRefresh {
s.refresh(p, line, pos)
}
if s.multiLineMode {
s.resetMultiLine(p, line, pos)
}
@ -607,21 +633,21 @@ mainLoop:
break mainLoop
case ctrlA: // Start of line
pos = 0
s.refresh(p, line, pos)
s.needRefresh = true
case ctrlE: // End of line
pos = len(line)
s.refresh(p, line, pos)
s.needRefresh = true
case ctrlB: // left
if pos > 0 {
pos -= len(getSuffixGlyphs(line[:pos], 1))
s.refresh(p, line, pos)
s.needRefresh = true
} else {
fmt.Print(beep)
}
case ctrlF: // right
if pos < len(line) {
pos += len(getPrefixGlyphs(line[pos:], 1))
s.refresh(p, line, pos)
s.needRefresh = true
} else {
fmt.Print(beep)
}
@ -640,7 +666,7 @@ mainLoop:
} else {
n := len(getPrefixGlyphs(line[pos:], 1))
line = append(line[:pos], line[pos+n:]...)
s.refresh(p, line, pos)
s.needRefresh = true
}
case ctrlK: // delete remainder of line
if pos >= len(line) {
@ -654,32 +680,42 @@ mainLoop:
killAction = 2 // Mark that there was a kill action
line = line[:pos]
s.refresh(p, line, pos)
s.needRefresh = true
}
case ctrlP: // up
historyAction = true
if historyStale {
historyPrefix = s.getHistoryByPrefix(string(line))
historyPos = len(historyPrefix)
historyStale = false
}
if historyPos > 0 {
if historyPos == len(prefixHistory) {
if historyPos == len(historyPrefix) {
historyEnd = string(line)
}
historyPos--
line = []rune(prefixHistory[historyPos])
line = []rune(historyPrefix[historyPos])
pos = len(line)
s.refresh(p, line, pos)
s.needRefresh = true
} else {
fmt.Print(beep)
}
case ctrlN: // down
historyAction = true
if historyPos < len(prefixHistory) {
if historyStale {
historyPrefix = s.getHistoryByPrefix(string(line))
historyPos = len(historyPrefix)
historyStale = false
}
if historyPos < len(historyPrefix) {
historyPos++
if historyPos == len(prefixHistory) {
if historyPos == len(historyPrefix) {
line = []rune(historyEnd)
} else {
line = []rune(prefixHistory[historyPos])
line = []rune(historyPrefix[historyPos])
}
pos = len(line)
s.refresh(p, line, pos)
s.needRefresh = true
} else {
fmt.Print(beep)
}
@ -697,11 +733,11 @@ mainLoop:
copy(line[pos-len(prev):], next)
copy(line[pos-len(prev)+len(next):], scratch)
pos += len(next)
s.refresh(p, line, pos)
s.needRefresh = true
}
case ctrlL: // clear screen
s.eraseScreen()
s.refresh(p, line, pos)
s.needRefresh = true
case ctrlC: // reset
fmt.Println("^C")
if s.multiLineMode {
@ -721,7 +757,7 @@ mainLoop:
n := len(getSuffixGlyphs(line[:pos], 1))
line = append(line[:pos-n], line[pos:]...)
pos -= n
s.refresh(p, line, pos)
s.needRefresh = true
}
case ctrlU: // Erase line before cursor
if killAction > 0 {
@ -733,7 +769,7 @@ mainLoop:
killAction = 2 // Mark that there was some killing
line = line[pos:]
pos = 0
s.refresh(p, line, pos)
s.needRefresh = true
case ctrlW: // Erase word
if pos == 0 {
fmt.Print(beep)
@ -770,13 +806,13 @@ mainLoop:
}
killAction = 2 // Mark that there was some killing
s.refresh(p, line, pos)
s.needRefresh = true
case ctrlY: // Paste from Yank buffer
line, pos, next, err = s.yank(p, line, pos)
goto haveNext
case ctrlR: // Reverse Search
line, pos, next, err = s.reverseISearch(line, pos)
s.refresh(p, line, pos)
s.needRefresh = true
goto haveNext
case tab: // Tab completion
line, pos, next, err = s.tabComplete(p, line, pos)
@ -791,14 +827,16 @@ mainLoop:
case 0, 28, 29, 30, 31:
fmt.Print(beep)
default:
if pos == len(line) && !s.multiLineMode && countGlyphs(p)+countGlyphs(line) < s.columns-1 {
if pos == len(line) && !s.multiLineMode &&
len(p)+len(line) < s.columns*4 && // Avoid countGlyphs on large lines
countGlyphs(p)+countGlyphs(line) < s.columns-1 {
line = append(line, v)
fmt.Printf("%c", v)
pos++
} else {
line = append(line[:pos], append([]rune{v}, line[pos:]...)...)
pos++
s.refresh(p, line, pos)
s.needRefresh = true
}
}
case action:
@ -866,24 +904,34 @@ mainLoop:
}
case up:
historyAction = true
if historyStale {
historyPrefix = s.getHistoryByPrefix(string(line))
historyPos = len(historyPrefix)
historyStale = false
}
if historyPos > 0 {
if historyPos == len(prefixHistory) {
if historyPos == len(historyPrefix) {
historyEnd = string(line)
}
historyPos--
line = []rune(prefixHistory[historyPos])
line = []rune(historyPrefix[historyPos])
pos = len(line)
} else {
fmt.Print(beep)
}
case down:
historyAction = true
if historyPos < len(prefixHistory) {
if historyStale {
historyPrefix = s.getHistoryByPrefix(string(line))
historyPos = len(historyPrefix)
historyStale = false
}
if historyPos < len(historyPrefix) {
historyPos++
if historyPos == len(prefixHistory) {
if historyPos == len(historyPrefix) {
line = []rune(historyEnd)
} else {
line = []rune(prefixHistory[historyPos])
line = []rune(historyPrefix[historyPos])
}
pos = len(line)
} else {
@ -907,11 +955,13 @@ mainLoop:
s.cursorRows = 1
}
}
s.needRefresh = true
}
if s.needRefresh && !s.inputWaiting() {
s.refresh(p, line, pos)
}
if !historyAction {
prefixHistory = s.getHistoryByPrefix(string(line))
historyPos = len(prefixHistory)
historyStale = true
}
if killAction > 0 {
killAction--
@ -923,6 +973,11 @@ mainLoop:
// PasswordPrompt displays p, and then waits for user input. The input typed by
// the user is not displayed in the terminal.
func (s *State) PasswordPrompt(prompt string) (string, error) {
for _, r := range prompt {
if unicode.Is(unicode.C, r) {
return "", ErrInvalidPrompt
}
}
if !s.terminalSupported {
return "", errors.New("liner: function not supported in this terminal")
}
@ -933,8 +988,10 @@ func (s *State) PasswordPrompt(prompt string) (string, error) {
return "", ErrNotTerminalOutput
}
s.startPrompt()
defer s.stopPrompt()
restart:
s.startPrompt()
s.getColumns()
fmt.Print(prompt)
@ -946,6 +1003,9 @@ mainLoop:
for {
next, err := s.readNext()
if err != nil {
if s.shouldRestart != nil && s.shouldRestart(err) {
goto restart
}
return "", err
}
@ -953,6 +1013,9 @@ mainLoop:
case rune:
switch v {
case cr, lf:
if s.needRefresh {
s.refresh(p, line, pos)
}
if s.multiLineMode {
s.resetMultiLine(p, line, pos)
}
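The PromptWithSuggestion entry point added above pre-fills the edit buffer. A usage sketch (the prompt text and suggestion are illustrative):

package main

import (
	"fmt"

	"github.com/peterh/liner"
)

func main() {
	s := liner.NewLiner()
	defer s.Close()

	// Start editing with "127.0.0.1:6380" already in the buffer; per the doc
	// comment, a negative pos places the cursor at the end of the suggestion.
	addr, err := s.PromptWithSuggestion("addr> ", "127.0.0.1:6380", -1)
	if err == nil {
		fmt.Println("connecting to", addr)
	}
}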

View File

@ -48,14 +48,18 @@ type winSize struct {
xpixel, ypixel uint16
}
func (s *State) getColumns() {
func (s *State) getColumns() bool {
var ws winSize
ok, _, _ := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdout),
syscall.TIOCGWINSZ, uintptr(unsafe.Pointer(&ws)))
if ok < 0 {
s.columns = 80
if int(ok) < 0 {
return false
}
s.columns = int(ws.col)
if cursorColumn && s.columns > 1 {
s.columns--
}
return true
}
func (s *State) checkOutput() {

View File

@ -69,4 +69,8 @@ func (s *State) getColumns() {
var sbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(s.hOut), uintptr(unsafe.Pointer(&sbi)))
s.columns = int(sbi.dwSize.x)
if s.columns > 1 {
// Windows 10 needs a spare column for the cursor
s.columns--
}
}

View File

@ -25,6 +25,12 @@ var doubleWidth = []*unicode.RangeTable{
func countGlyphs(s []rune) int {
n := 0
for _, r := range s {
// speed up the common case
if r < 127 {
n++
continue
}
switch {
case unicode.IsOneOf(zeroWidth, r):
case unicode.IsOneOf(doubleWidth, r):
@ -39,6 +45,10 @@ func countGlyphs(s []rune) int {
func countMultiLineGlyphs(s []rune, columns int, start int) int {
n := start
for _, r := range s {
if r < 127 {
n++
continue
}
switch {
case unicode.IsOneOf(zeroWidth, r):
case unicode.IsOneOf(doubleWidth, r):
@ -58,6 +68,11 @@ func countMultiLineGlyphs(s []rune, columns int, start int) int {
func getPrefixGlyphs(s []rune, num int) []rune {
p := 0
for n := 0; n < num && p < len(s); p++ {
// speed up the common case
if s[p] < 127 {
n++
continue
}
if !unicode.IsOneOf(zeroWidth, s[p]) {
n++
}
@ -71,6 +86,11 @@ func getPrefixGlyphs(s []rune, num int) []rune {
func getSuffixGlyphs(s []rune, num int) []rune {
p := len(s)
for n := 0; n < num && p > 0; p-- {
// speed up the common case
if s[p-1] < 127 {
n++
continue
}
if !unicode.IsOneOf(zeroWidth, s[p-1]) {
n++
}
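The r < 127 fast path added throughout these helpers skips the range-table lookups for plain ASCII, the common case for shell input. A minimal standalone sketch of the same idea; the zero- and double-width tables below are illustrative subsets, not liner's actual tables:

package main

import (
	"fmt"
	"unicode"
)

// width mirrors the shape of countGlyphs' per-rune logic: ASCII returns
// before any range-table lookup is attempted.
func width(r rune) int {
	if r < 127 {
		return 1 // printable ASCII: no table lookup needed
	}
	switch {
	case unicode.Is(unicode.Mn, r): // combining marks (illustrative zero-width subset)
		return 0
	case unicode.Is(unicode.Han, r): // CJK ideographs (illustrative double-width subset)
		return 2
	}
	return 1
}

func main() {
	n := 0
	for _, r := range "ledis数据库" {
		n += width(r)
	}
	fmt.Println(n) // 5 ASCII columns + 3 double-width runes = 11
}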

Some files were not shown because too many files have changed in this diff.