Merged in kortschak/av/migrate/ring (pull request #22)

ring: migrate package to utils/ring

Approved-by: kortschak <dan@kortschak.io>
kortschak 2018-06-07 10:58:32 +00:00
commit 273f04cf87
4 changed files with 1 addition and 791 deletions


@@ -46,8 +46,8 @@ import (
"bitbucket.org/ausocean/av/generator"
"bitbucket.org/ausocean/av/parser"
"bitbucket.org/ausocean/av/ring"
"bitbucket.org/ausocean/av/rtmp"
"bitbucket.org/ausocean/utils/ring"
)
// Misc constants


@@ -1,24 +0,0 @@
# Readme
Package ring provides a ring buffer of io.ReadWriters.
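# Usage
A minimal sketch of typical use (not part of the original package documentation). The frame contents and timeouts are illustrative only, and the import path is the pre-migration `av/ring` path; this commit moves the package to `bitbucket.org/ausocean/utils/ring`.
```go
package main

import (
	"bytes"
	"log"
	"time"

	"bitbucket.org/ausocean/av/ring" // moved to bitbucket.org/ausocean/utils/ring by this commit
)

func main() {
	// Five elements of 1 KiB each; writes wait up to 10ms for a free element.
	b := ring.NewBuffer(5, 1024, 10*time.Millisecond)

	// Producer side: write frames, then flush the partially filled element.
	if _, err := b.Write([]byte("frame1")); err != nil {
		log.Println("write:", err)
	}
	b.Flush()
	b.Close() // No more writes; readers may still drain the buffer.

	// Consumer side: drain elements until io.EOF.
	var dst bytes.Buffer
	for {
		chunk, err := b.Next(10 * time.Millisecond)
		if err != nil {
			break // io.EOF once the buffer is closed and drained, or ErrTimeout.
		}
		if _, err := chunk.WriteTo(&dst); err != nil {
			log.Println("writeto:", err)
		}
		chunk.Close() // Release the element back to the buffer.
	}
	log.Printf("read back %q", dst.String())
}
```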
# Author
Dan Kortschak <dan@ausocean.org>
# License
ring is Copyright (C) 2017 the Australian Ocean Lab (AusOcean)
It is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with revid in gpl.txt. If not, see [GNU licenses](http://www.gnu.org/licenses/).


@@ -1,277 +0,0 @@
/*
NAME
ring.go - a Buffer data structure with concurrent read and write
functionality
DESCRIPTION
See Readme.md
AUTHOR
Dan Kortschak <dan@ausocean.org>
LICENSE
ring.go is Copyright (C) 2017 the Australian Ocean Lab (AusOcean)
It is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with revid in gpl.txt. If not, see http://www.gnu.org/licenses.
*/
// Package ring provides a ring buffer of io.ReadWriters.
package ring
import (
"errors"
"io"
"time"
)
var (
ErrTimeout = errors.New("ring: buffer cycle timeout")
ErrDropped = errors.New("ring: dropped old write")
ErrStall = errors.New("ring: unable to dump old write")
ErrTooLong = errors.New("ring: write too long for buffer element")
)
// Buffer implements a ring buffer.
//
// The buffer has a writable head and a readable tail with a queue from the head
// to the tail. Concurrent read and write operations are safe.
type Buffer struct {
head, tail *Chunk
full, empty chan *Chunk
timeout time.Duration
}
// NewBuffer returns a Buffer with len elements of the given size. The timeout
// parameter specifies how long a write operation will wait for a free element
// before reclaiming the oldest unread element and reporting ErrDropped.
func NewBuffer(len, size int, timeout time.Duration) *Buffer {
if len <= 0 || size <= 0 {
return nil
}
b := Buffer{
full: make(chan *Chunk, len),
empty: make(chan *Chunk, len),
timeout: timeout,
}
for i := 0; i < len; i++ {
b.empty <- newChunk(make([]byte, 0, size))
}
return &b
}
// Len returns the number of full buffer elements.
func (b *Buffer) Len() int {
return len(b.full)
}
// Write writes the bytes in p to the current or next available element of the ring buffer.
// It returns the number of bytes written and any error.
// If no element can be gained within the timeout or stolen from the queue, ErrStall is returned,
// and if len(p) is greater than the buffer's element size, ErrTooLong is returned. If a
// write was successful but a previous write was dropped, ErrDropped is returned.
//
// Write is safe to use concurrently with Read, but may not be used concurrently with another
// write operation.
func (b *Buffer) Write(p []byte) (int, error) {
var dropped bool
if b.head == nil {
timer := time.NewTimer(b.timeout)
select {
case <-timer.C:
select {
case b.head = <-b.full:
b.head.reset()
dropped = true
default:
// This should never happen.
return 0, ErrStall
}
case b.head = <-b.empty:
timer.Stop()
}
}
if len(p) > b.head.cap() {
return 0, ErrTooLong
}
if len(p) > b.head.cap()-b.head.Len() {
b.full <- b.head
b.head = nil
return b.Write(p)
}
n, err := b.head.write(p)
if b.head.cap()-b.head.Len() == 0 {
b.full <- b.head
b.head = nil
}
if dropped && err == nil {
err = ErrDropped
}
return n, err
}
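// The following helper is not part of the original file; it is a sketch of how
// a caller might act on Write's documented results. The name writeFrame is
// hypothetical.
func writeFrame(b *Buffer, frame []byte) (dropped bool, err error) {
	_, err = b.Write(frame)
	switch err {
	case nil:
		// Frame stored; nothing was lost.
		return false, nil
	case ErrDropped:
		// Frame stored, but an older unread element was discarded to make room.
		return true, nil
	default:
		// ErrTooLong, ErrStall or another failure: the frame was not stored.
		return false, err
	}
}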
// Flush puts the currently writing element of the buffer into the queue for reading. Flush
// is idempotent.
//
// Flush is safe to use concurrently with Read, but may not be used concurrently with another
// write operation.
func (b *Buffer) Flush() {
if b.head == nil {
return
}
b.full <- b.head
b.head = nil
}
// Close closes the buffer. The buffer may not be written to after a call to Close, but can
// be drained by calls to Read.
//
// Close is safe to use concurrently with Read, but may not be used concurrently with another
// write operation.
func (b *Buffer) Close() error {
b.Flush()
close(b.full)
return nil
}
// Next gets the next element from the queue ready for reading, returning ErrTimeout if no
// element is available within the timeout. If the Buffer has been closed Next returns io.EOF.
//
// It is the responsibility of the caller to close the returned Chunk.
//
// Next is safe to use concurrently with write operations, but may not be used concurrently with
// another Read call or Next call. A goroutine calling Next must not call Flush or Close.
func (b *Buffer) Next(timeout time.Duration) (*Chunk, error) {
if b.tail == nil {
timer := time.NewTimer(timeout)
var ok bool
select {
case <-timer.C:
return nil, ErrTimeout
case b.tail, ok = <-b.full:
timer.Stop()
if !ok {
return nil, io.EOF
}
}
}
b.tail.owner = b
return b.tail, nil
}
// Read reads bytes from the current tail of the ring buffer into p and returns the number of
// bytes read and any error.
//
// Read is safe to use concurrently with write operations, but may not be used concurrently with
// another Read call or Next call. A goroutine calling Read must not call Flush or Close.
func (b *Buffer) Read(p []byte) (int, error) {
if b.tail == nil {
return 0, io.EOF
}
n, err := b.tail.read(p)
if b.tail.Len() == 0 {
b.tail.reset()
b.empty <- b.tail
b.tail = nil
}
return n, err
}
// Chunk is a simplified version of a byte buffer that cannot grow beyond its original
// capacity, with a modified WriteTo method that allows multiple calls without
// consuming the buffered data.
type Chunk struct {
buf []byte
off int
owner *Buffer
}
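// newChunk returns a Chunk backed by buf, with its length reset to zero.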
func newChunk(buf []byte) *Chunk {
return &Chunk{buf: buf[:0]}
}
// Len returns the number of bytes held in the chunk.
func (b *Chunk) Len() int {
return len(b.buf) - b.off
}
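// cap returns the capacity of the chunk's underlying buffer.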
func (b *Chunk) cap() int {
return cap(b.buf)
}
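// reset clears the chunk's contents and read offset so it can be reused.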
func (b *Chunk) reset() {
b.buf = b.buf[:0]
b.off = 0
}
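// write appends p to the chunk, copying only as much as fits in the remaining
// capacity, and returns ErrTooLong if p does not fit completely.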
func (b *Chunk) write(p []byte) (n int, err error) {
if len(p) > cap(b.buf)-len(b.buf) {
err = ErrTooLong
}
l := len(b.buf)
m := l + len(p)
if m > cap(b.buf) {
m = cap(b.buf)
}
b.buf = b.buf[:m]
n = copy(b.buf[l:], p)
return n, err
}
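// read copies unread bytes from the chunk into p, advancing the read offset.
// It returns io.EOF when no unread data remains.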
func (b *Chunk) read(p []byte) (n int, err error) {
if b.Len() <= 0 {
if len(p) == 0 {
return 0, nil
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
return n, nil
}
// WriteTo writes data to w until there is no more data to write or an error occurs.
// The return value n is the number of bytes written. Any error encountered during the
// write is also returned. Repeated calls to WriteTo will write the same data until
// the Chunk's Close method is called.
//
// WriteTo will panic if the Chunk has not been obtained through a call to Buffer.Next or
// has been closed. WriteTo must be used in the same goroutine as the call to Next.
func (b *Chunk) WriteTo(w io.Writer) (n int64, err error) {
if b.owner == nil || b.owner.tail != b {
panic("ring: invalid use of ring buffer chunk")
}
_n, err := w.Write(b.buf)
if _n > len(b.buf) {
panic("ring: invalid byte count")
}
if _n != len(b.buf) {
return int64(_n), io.ErrShortWrite
}
return int64(_n), nil
}
// Close closes the Chunk, resetting its data and releasing it back to the Buffer. A Chunk
// may not be used after it has been closed. Close must be used in the same goroutine as
// the call to Next.
func (b *Chunk) Close() error {
if b.owner == nil || b.owner.tail != b {
panic("ring: invalid use of ring buffer chunk")
}
b.reset()
b.owner.tail = nil
empty := b.owner.empty
b.owner = nil
empty <- b
return nil
}
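// The sketch below is not part of the original file; it illustrates the
// repeated-call semantics of WriteTo documented above by retrying a chunk
// against an unreliable io.Writer. The name retryWrite is hypothetical.
func retryWrite(c *Chunk, w io.Writer, attempts int) error {
	var err error
	for i := 0; i < attempts; i++ {
		// Each call writes the same data; the chunk is only consumed by Close.
		if _, err = c.WriteTo(w); err == nil {
			return c.Close()
		}
	}
	return err
}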


@@ -1,489 +0,0 @@
/*
NAME
ring_test.go - a test suite using the Go testing library to test the functionality of the
Buffer structure
DESCRIPTION
See README.md
AUTHOR
Dan Kortschak <dan@ausocean.org>
LICENSE
ring_test.go is Copyright (C) 2017 the Australian Ocean Lab (AusOcean)
It is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
It is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with revid in gpl.txt. If not, see http://www.gnu.org/licenses.
*/
package ring
import (
"bytes"
"io"
"io/ioutil"
"reflect"
"strings"
"sync"
"testing"
"time"
)
var roundTripTests = []struct {
name string
len int
size int
timeout time.Duration
nextTimeout time.Duration
data [][]string
readDelay time.Duration
writeDelay time.Duration
}{
{
name: "happy",
len: 2, size: 50,
timeout: 100 * time.Millisecond,
nextTimeout: 100 * time.Millisecond,
data: [][]string{
{"frame1", "frame2", "frame3", "frame4"},
{"frame5", "frame6"},
{"frame5", "frame6", "frame7"},
{"frame8", "frame9", "frame10"},
{"frame11"},
{"frame12", "frame13"},
{"frame14", "frame15", "frame16", "frame17"},
},
},
{
name: "slow write",
len: 2, size: 50,
timeout: 100 * time.Millisecond,
nextTimeout: 100 * time.Millisecond,
data: [][]string{
{"frame1", "frame2", "frame3", "frame4"},
{"frame5", "frame6"},
{"frame5", "frame6", "frame7"},
{"frame8", "frame9", "frame10"},
{"frame11"},
{"frame12", "frame13"},
{"frame14", "frame15", "frame16", "frame17"},
},
writeDelay: 500 * time.Millisecond,
},
{
name: "slow read",
len: 2, size: 50,
timeout: 100 * time.Millisecond,
nextTimeout: 100 * time.Millisecond,
data: [][]string{
{"frame1", "frame2", "frame3", "frame4"},
{"frame5", "frame6"},
{"frame5", "frame6", "frame7"},
{"frame8", "frame9", "frame10"},
{"frame11"},
{"frame12", "frame13"},
{"frame14", "frame15", "frame16", "frame17"},
},
readDelay: 500 * time.Millisecond,
},
}
func TestRoundTrip(t *testing.T) {
const maxTimeouts = 100
for _, test := range roundTripTests {
b := NewBuffer(test.len, test.size, test.timeout)
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
for _, c := range test.data {
var dropped int
for _, f := range c {
time.Sleep(test.writeDelay) // Simulate slow data capture.
_, err := b.Write([]byte(f))
switch err {
case nil:
dropped = 0
case ErrDropped:
if dropped > maxTimeouts {
t.Errorf("too many write drops for %q", test.name)
return
}
dropped++
default:
t.Errorf("unexpected write error for %q: %v", test.name, err)
return
}
}
b.Flush()
}
b.Close()
}()
go func() {
buf := make([]byte, 1<<10)
defer wg.Done()
var got []string
var timeouts int
elements:
for {
_, err := b.Next(test.nextTimeout)
switch err {
case nil:
timeouts = 0
case ErrTimeout:
if timeouts > maxTimeouts {
t.Errorf("too many timeouts for %q", test.name)
return
}
timeouts++
case io.EOF:
break elements
default:
t.Errorf("unexpected read error for %q: %v", test.name, err)
return
}
reads:
for {
n, err := b.Read(buf)
if n != 0 {
time.Sleep(test.readDelay) // Simulate slow data processing.
got = append(got, string(buf[:n]))
}
switch err {
case nil:
case io.EOF:
break reads
default:
t.Errorf("unexpected read error for %q: %v", test.name, err)
return
}
}
}
var want []string
for _, c := range test.data {
want = append(want, strings.Join(c, ""))
}
if test.readDelay == 0 {
if !reflect.DeepEqual(got, want) {
t.Errorf("unexpected round-trip result for %q:\ngot: %#v\nwant:%#v", test.name, got, want)
}
} else {
// We may have dropped writes in this case.
// So just check that we can consume every
// received element with reference to what
// was sent.
// TODO(kortschak): Check that the number of
// missing elements matches the number of
// dropped writes.
var sidx, ridx int
var recd string
for ridx, recd = range got {
for ; sidx < len(want); sidx++ {
if recd == want[sidx] {
break
}
}
}
if ridx != len(got)-1 {
t.Errorf("unexpected round-trip result for %q (unexplained element received):\ngot: %#v\nwant:%#v", test.name, got, want)
}
}
}()
wg.Wait()
}
}
func TestRoundTripWriterTo(t *testing.T) {
const maxTimeouts = 100
for _, test := range roundTripTests {
b := NewBuffer(test.len, test.size, test.timeout)
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
for _, c := range test.data {
var dropped int
for _, f := range c {
time.Sleep(test.writeDelay) // Simulate slow data capture.
_, err := b.Write([]byte(f))
switch err {
case nil:
dropped = 0
case ErrDropped:
if dropped > maxTimeouts {
t.Errorf("too many write drops for %q", test.name)
return
}
dropped++
default:
t.Errorf("unexpected write error for %q: %v", test.name, err)
return
}
}
b.Flush()
}
b.Close()
}()
go func() {
var buf bytes.Buffer
defer wg.Done()
var got []string
var timeouts int
elements:
for {
chunk, err := b.Next(test.nextTimeout)
switch err {
case nil:
timeouts = 0
case ErrTimeout:
if timeouts > maxTimeouts {
t.Errorf("too many timeouts for %q", test.name)
return
}
timeouts++
continue
case io.EOF:
break elements
default:
t.Errorf("unexpected read error for %q: %v", test.name, err)
return
}
n, err := chunk.WriteTo(&buf)
if n != 0 {
time.Sleep(test.readDelay) // Simulate slow data processing.
got = append(got, buf.String())
buf.Reset()
}
if err != nil {
t.Errorf("unexpected writeto error for %q: %v", test.name, err)
return
}
err = chunk.Close()
if err != nil {
t.Errorf("unexpected close error for %q: %v", test.name, err)
return
}
}
var want []string
for _, c := range test.data {
want = append(want, strings.Join(c, ""))
}
if test.readDelay == 0 {
if !reflect.DeepEqual(got, want) {
t.Errorf("unexpected round-trip result for %q:\ngot: %#v\nwant:%#v", test.name, got, want)
}
} else {
// We may have dropped writes in this case.
// So just check that we can consume every
// received element with reference to what
// was sent.
// TODO(kortschak): Check that the number of
// missing elements matches the number of
// dropped writes.
var sidx, ridx int
var recd string
for ridx, recd = range got {
for ; sidx < len(want); sidx++ {
if recd == want[sidx] {
break
}
}
}
if ridx != len(got)-1 {
t.Errorf("unexpected round-trip result for %q (unexplained element received):\ngot: %#v\nwant:%#v", test.name, got, want)
}
}
}()
wg.Wait()
}
}
func BenchmarkRoundTrip(b *testing.B) {
const (
maxTimeouts = 100
len = 50
size = 150e3
timeout = 10 * time.Millisecond
frameLen = 30e3
writeDelay = 20 * time.Millisecond
readDelay = 50 * time.Millisecond
)
// Allocated prior to timer reset since it is an
// amortised cost.
rb := NewBuffer(len, size, timeout)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var timeouts int
elements:
for {
_, err := rb.Next(timeout)
switch err {
case nil:
timeouts = 0
case ErrTimeout:
if timeouts > maxTimeouts {
b.Error("too many timeouts")
return
}
timeouts++
case io.EOF:
break elements
default:
b.Errorf("unexpected read error: %v", err)
return
}
_, err = ioutil.ReadAll(rb)
time.Sleep(readDelay) // Simulate slow data processing.
if err != nil {
b.Errorf("unexpected read error: %v", err)
return
}
}
}()
data := make([]byte, frameLen)
b.ResetTimer()
b.SetBytes(frameLen)
var dropped int
for i := 0; i < b.N; i++ {
time.Sleep(writeDelay) // Simulate slow data capture.
_, err := rb.Write(data)
switch err {
case nil:
dropped = 0
case ErrDropped:
if dropped > maxTimeouts {
b.Error("too many write drops")
return
}
dropped++
default:
b.Errorf("unexpected write error: %v", err)
return
}
}
rb.Close()
wg.Wait()
}
func BenchmarkRoundTripWriterTo(b *testing.B) {
const (
maxTimeouts = 100
len = 50
size = 150e3
timeout = 10 * time.Millisecond
frameLen = 30e3
writeDelay = 20 * time.Millisecond
readDelay = 50 * time.Millisecond
)
// Allocated prior to timer reset since it is an
// amortised cost.
rb := NewBuffer(len, size, timeout)
// This is hoisted here to ensure the allocation
// is not counted since this is outside the control
// of the ring buffer.
buf := bytes.NewBuffer(make([]byte, 0, size+1))
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var timeouts int
elements:
for {
chunk, err := rb.Next(timeout)
switch err {
case nil:
timeouts = 0
case ErrTimeout:
if timeouts > maxTimeouts {
b.Error("too many timeouts")
return
}
timeouts++
continue
case io.EOF:
break elements
default:
b.Errorf("unexpected read error: %v", err)
return
}
n, err := chunk.WriteTo(buf)
if n != 0 {
time.Sleep(readDelay) // Simulate slow data processing.
buf.Reset()
}
if err != nil {
b.Errorf("unexpected writeto error: %v", err)
return
}
err = chunk.Close()
if err != nil {
b.Errorf("unexpected close error: %v", err)
return
}
}
}()
data := make([]byte, frameLen)
b.ResetTimer()
b.SetBytes(frameLen)
var dropped int
for i := 0; i < b.N; i++ {
time.Sleep(writeDelay) // Simulate slow data capture.
_, err := rb.Write(data)
switch err {
case nil:
dropped = 0
case ErrDropped:
if dropped > maxTimeouts {
b.Error("too many write drops")
return
}
dropped++
default:
b.Errorf("unexpected write error: %v", err)
return
}
}
rb.Close()
wg.Wait()
}