redis/internal/proto/elastic_reader.go

package proto

import (
	"bytes"
	"errors"
	"io"
)

const defaultBufSize = 4096

// ElasticBufReader is like bufio.Reader but instead of returning ErrBufferFull
// it automatically grows the buffer.
type ElasticBufReader struct {
	buf  []byte
	rd   io.Reader // reader provided by the client
	r, w int       // buf read and write positions
	err  error
}
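
// NewElasticBufReader returns a new ElasticBufReader reading from rd.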
func NewElasticBufReader(rd io.Reader) *ElasticBufReader {
	return &ElasticBufReader{
		rd: rd,
	}
}
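
// Reset discards any buffered data, clears any pending error, and switches
// the reader to read from rd.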
func (b *ElasticBufReader) Reset(rd io.Reader) {
	b.rd = rd
	b.r, b.w = 0, 0
	b.err = nil
}
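
// Buffer returns the underlying buffer.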
func (b *ElasticBufReader) Buffer() []byte {
	return b.buf
}
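
// ResetBuffer replaces the underlying buffer and resets the reader state.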
func (b *ElasticBufReader) ResetBuffer(buf []byte) {
	b.buf = buf
	b.r, b.w = 0, 0
	b.err = nil
}

// Buffered returns the number of bytes that can be read from the current buffer.
func (b *ElasticBufReader) Buffered() int {
	return b.w - b.r
}
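
// Bytes returns the unread portion of the buffer.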
func (b *ElasticBufReader) Bytes() []byte {
	return b.buf[b.r:b.w]
}

var errNegativeRead = errors.New("bufio: reader returned negative count from Read")

// fill reads a new chunk into the buffer.
func (b *ElasticBufReader) fill() {
	// Slide existing data to beginning.
	if b.r > 0 {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	if b.w >= len(b.buf) {
		panic("bufio: tried to fill full buffer")
	}

	// Read new data: try a limited number of times.
	const maxConsecutiveEmptyReads = 100
	for i := maxConsecutiveEmptyReads; i > 0; i-- {
		n, err := b.rd.Read(b.buf[b.w:])
		if n < 0 {
			panic(errNegativeRead)
		}
		b.w += n
		if err != nil {
			b.err = err
			return
		}
		if n > 0 {
			return
		}
	}

	b.err = io.ErrNoProgress
}
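
// readErr returns the pending read error, if any, and clears it.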
func (b *ElasticBufReader) readErr() error {
	err := b.err
	b.err = nil
	return err
}
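
// ReadSlice reads until the first occurrence of delim, returning a slice that
// points into the internal buffer. Unlike bufio.Reader.ReadSlice, it grows the
// buffer instead of returning ErrBufferFull.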
func (b *ElasticBufReader) ReadSlice(delim byte) (line []byte, err error) {
	for {
		// Search buffer.
		if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
			line = b.buf[b.r : b.r+i+1]
			b.r += i + 1
			break
		}

		// Pending error?
		if b.err != nil {
			line = b.buf[b.r:b.w]
			b.r = b.w
			err = b.readErr()
			break
		}

		// Buffer full?
		if b.Buffered() >= len(b.buf) {
			b.grow(len(b.buf) + defaultBufSize)
		}

		b.fill() // buffer is not full
	}
	return
}
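
// ReadLine reads a single line, dropping the trailing "\r\n" or "\n" if present.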
func (b *ElasticBufReader) ReadLine() (line []byte, err error) {
	line, err = b.ReadSlice('\n')
	if len(line) == 0 {
		if err != nil {
			line = nil
		}
		return
	}
	err = nil

	if line[len(line)-1] == '\n' {
		drop := 1
		if len(line) > 1 && line[len(line)-2] == '\r' {
			drop = 2
		}
		line = line[:len(line)-drop]
	}
	return
}
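
// ReadByte reads and returns a single byte.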
func (b *ElasticBufReader) ReadByte() (byte, error) {
	for b.r == b.w {
		if b.err != nil {
			return 0, b.readErr()
		}
		b.fill() // buffer is empty
	}
	c := b.buf[b.r]
	b.r++
	return c, nil
}
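
// ReadN returns the next n bytes, growing the buffer as needed. If an error
// occurs before n bytes are buffered, it returns what is available together
// with the error.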
func (b *ElasticBufReader) ReadN(n int) ([]byte, error) {
	b.grow(n)
	for b.Buffered() < n {
		// Pending error?
		if b.err != nil {
			buf := b.buf[b.r:b.w]
			b.r = b.w
			return buf, b.readErr()
		}

		b.fill()
	}

	buf := b.buf[b.r : b.r+n]
	b.r += n
	return buf, nil
}
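
// grow ensures the buffer can hold at least n bytes, sliding unread data to
// the beginning and extending the buffer if necessary.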
func (b *ElasticBufReader) grow(n int) {
	if b.w-b.r >= n {
		return
	}

	// Slide existing data to beginning.
	if b.r > 0 {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	// Extend buffer if needed.
	if d := n - len(b.buf); d > 0 {
		b.buf = append(b.buf, make([]byte, d)...)
	}
}
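
// Read implements io.Reader. It drains the internal buffer first and reads
// from the underlying reader only when the buffer is empty.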
func (b *ElasticBufReader) Read(p []byte) (n int, err error) {
	if len(p) == 0 {
		return 0, b.readErr()
	}

	if b.r != b.w {
		// copy as much as we can
		n = copy(p, b.buf[b.r:b.w])
		b.r += n
		return n, nil
	}

	if b.err != nil {
		return 0, b.readErr()
	}

	n, b.err = b.rd.Read(p)
	return n, b.readErr()
}