/*
NAME
  mtsSender_test.go

DESCRIPTION
  mtsSender_test.go contains tests that validate the functionality of the
  mtsSender under senders.go. Tests include checks that the mtsSender is
  segmenting sends correctly, and also that it can correct discontinuities.

AUTHORS
  Saxon A. Nelson-Milton <saxon@ausocean.org>

LICENSE
  mtsSender_test.go is Copyright (C) 2017-2019 the Australian Ocean Lab (AusOcean)

  It is free software: you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the
  Free Software Foundation, either version 3 of the License, or (at your
  option) any later version.

  It is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  for more details.

  You should have received a copy of the GNU General Public License
  in gpl.txt. If not, see http://www.gnu.org/licenses.
*/

package revid

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/Comcast/gots/packet"
	"github.com/Comcast/gots/pes"

	"bitbucket.org/ausocean/av/stream/mts"
	"bitbucket.org/ausocean/av/stream/mts/meta"
	"bitbucket.org/ausocean/utils/logger"
	"bitbucket.org/ausocean/utils/ring"
)

// Ring buffer sizes and read/write timeouts.
const (
	rbSize        = 100
	rbElementSize = 150000
	wTimeout      = 10 * time.Millisecond
	rTimeout      = 10 * time.Millisecond
)

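// errSendFailed is returned by test senders simulating a failed send.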
var errSendFailed = errors.New("send failed")

// sender simulates the sending of video data, creating a discontinuity at
// the send nominated by discontinuityAt if testDiscontinuities is true.
type sender struct {
	buf                 [][]byte
	testDiscontinuities bool
	discontinuityAt     int
	currentPkt          int
}

// send drops d and returns an error if testDiscontinuities is true and the
// current packet is the one nominated by discontinuityAt; otherwise a copy
// of d is appended to the sender's buf.
func (ts *sender) send(d []byte) error {
	if ts.testDiscontinuities && ts.currentPkt == ts.discontinuityAt {
		ts.currentPkt++
		return errSendFailed
	}
	cpy := make([]byte, len(d))
	copy(cpy, d)
	ts.buf = append(ts.buf, cpy)
	ts.currentPkt++
	return nil
}

// log implements the logging func required by some of the structs used in
// these tests.
func log(lvl int8, msg string, args ...interface{}) {
	var l string
	switch lvl {
	case logger.Warning:
		l = "warning"
	case logger.Debug:
		l = "debug"
	case logger.Info:
		l = "info"
	case logger.Error:
		l = "error"
	case logger.Fatal:
		l = "fatal"
	}
	msg = l + ": " + msg
	for i := 0; i < len(args); i++ {
		msg += " %v"
	}
	fmt.Printf(msg, args...)
}

// TestMtsSenderSegment ensures that the mtsSender correctly segments data
// into clips based on the positioning of PSI in the mtsEncoder's output
// stream.
func TestMtsSenderSegment(t *testing.T) {
	mts.Meta = meta.New()

	// Create the ring buffer, sender, loadSender and the MPEG-TS encoder.
	tstSender := &sender{}
	loadSender := newMtsSender(tstSender, false, log)
	rb := ring.NewBuffer(rbSize, rbElementSize, wTimeout)
	encoder := mts.NewEncoder((*buffer)(rb), 25)

	// Turn time based PSI writing off for encoder.
	const psiSendCount = 10
	encoder.TimeBasedPsi(false, psiSendCount)

	const noOfPacketsToWrite = 100
	for i := 0; i < noOfPacketsToWrite; i++ {
		// Insert a payload so that we can check that segmentation works
		// correctly; the packet number is used as the payload.
		encoder.Write([]byte{byte(i)})
		rb.Flush()

		for {
			next, err := rb.Next(rTimeout)
			if err != nil {
				break
			}

			err = loadSender.load(next.Bytes())
			if err != nil {
				t.Fatalf("Unexpected err: %v\n", err)
			}

			err = loadSender.send()
			if err != nil {
				t.Fatalf("Unexpected err: %v\n", err)
			}
			loadSender.release()
			next.Close()
			next = nil
		}
	}

	result := tstSender.buf
	expectData := 0
	for clipNo, clip := range result {
		t.Logf("Checking clip: %v\n", clipNo)

		// Check that the clip is of expected length.
		clipLen := len(clip)
		if clipLen != psiSendCount*mts.PacketSize {
			t.Fatalf("Clip %v is not correct length. Got: %v Want: %v\n Clip: %v\n", clipNo, clipLen, psiSendCount*mts.PacketSize, clip)
		}

		// Also check that the first packet is a PAT.
		firstPkt := clip[:mts.PacketSize]
		var pkt packet.Packet
		copy(pkt[:], firstPkt)
		pid := pkt.PID()
		if pid != mts.PatPid {
			t.Fatalf("First packet of clip %v is not PAT, but rather: %v\n", clipNo, pid)
		}

		// Check that the clip data is okay.
		for i := 0; i < len(clip); i += mts.PacketSize {
			copy(pkt[:], clip[i:i+mts.PacketSize])
			if pkt.PID() == mts.VideoPid {
				payload, err := pkt.Payload()
				if err != nil {
					t.Fatalf("Unexpected err: %v\n", err)
				}

				// Parse PES from the MTS payload.
				pes, err := pes.NewPESHeader(payload)
				if err != nil {
					t.Fatalf("Unexpected err: %v\n", err)
				}

				// Get the data from the PES packet and convert to an int.
				data := int8(pes.Data()[0])

				// Check against the expected data value.
				if data != int8(expectData) {
					t.Errorf("Did not get expected pkt data. ClipNo: %v, pktNoInClip: %v, Got: %v, want: %v\n", clipNo, i/mts.PacketSize, data, expectData)
				}
				expectData++
			}
		}
	}
}

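// TestMtsSenderDiscontinuity checks that a failed send results in the
// discontinuity indicator being set in the adaptation field of the first
// packet (the PAT) of the next successfully sent clip.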
func TestMtsSenderDiscontinuity(t *testing.T) {
	mts.Meta = meta.New()

	// Create the ring buffer, sender, loadSender and the MPEG-TS encoder.
	const clipWithDiscontinuity = 3
	tstSender := &sender{testDiscontinuities: true, discontinuityAt: clipWithDiscontinuity}
	loadSender := newMtsSender(tstSender, false, log)
	rb := ring.NewBuffer(rbSize, rbElementSize, wTimeout)
	encoder := mts.NewEncoder((*buffer)(rb), 25)

	// Turn time based PSI writing off for encoder.
	const psiSendCount = 10
	encoder.TimeBasedPsi(false, psiSendCount)

	const noOfPacketsToWrite = 100
	for i := 0; i < noOfPacketsToWrite; i++ {
		// Our payload will just be the packet number.
		encoder.Write([]byte{byte(i)})
		rb.Flush()

		for {
			next, err := rb.Next(rTimeout)
			if err != nil {
				break
			}

			err = loadSender.load(next.Bytes())
			if err != nil {
				t.Fatalf("Unexpected err: %v\n", err)
			}

			loadSender.send()
			loadSender.release()
			next.Close()
			next = nil
		}
	}

	result := tstSender.buf

	// First check that we have one less clip than we would expect from an
	// uninterrupted stream: every psiSendCount payload packets are preceded
	// by two PSI packets (PAT and PMT), each clip holds psiSendCount packets,
	// and the simulated send failure drops exactly one clip.
	expectedLen := (((noOfPacketsToWrite/psiSendCount)*2 + noOfPacketsToWrite) / psiSendCount) - 1
	gotLen := len(result)
	if gotLen != expectedLen {
		t.Errorf("Did not get expected number of clips. Got: %v, want: %v\n", gotLen, expectedLen)
	}

	// Now check that the discontinuity indicator is set in the PAT of the
	// clip at clipWithDiscontinuity.
	disconClip := result[clipWithDiscontinuity]
	firstPkt := disconClip[:mts.PacketSize]
	var pkt packet.Packet
	copy(pkt[:], firstPkt)
	discon, err := (*packet.AdaptationField)(&pkt).Discontinuity()
	if err != nil {
		t.Fatalf("Unexpected err: %v\n", err)
	}

	if !discon {
		t.Fatalf("Did not get discontinuity indicator for PAT")
	}
}

// TestNewMultiSender checks that newMultiSender performs as expected both
// when an active function is not provided, and when one is.
func TestNewMultiSender(t *testing.T) {
	// First test without giving an 'active' function.
	_, err := newMultiSender(nil, nil)
	if err == nil {
		t.Fatal("did not get expected error")
	}

	// Now test with an active function provided.
	_, err = newMultiSender(nil, func() bool { return true })
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

// dummyLoadSender is a loadSender implementation that allows us to simulate
// the behaviour of a loadSender and check that it performs as expected.
type dummyLoadSender struct {
	data        []byte
	buf         [][]byte
	failOnSend  bool
	failHandled bool
	retry       bool
	mu          sync.Mutex
}

// newDummyLoadSender returns a pointer to a new dummyLoadSender.
func newDummyLoadSender(fail bool, retry bool) *dummyLoadSender {
	return &dummyLoadSender{failOnSend: fail, failHandled: true, retry: retry}
}

// load takes a byte slice and assigns it to the dummyLoadSender's data slice.
func (s *dummyLoadSender) load(d []byte) error {
	s.data = d
	return nil
}

// send appends data to dummyLoadSender's buf slice, but only if failOnSend is
// false. If failOnSend is true, the data is not written to buf and
// errSendFailed is returned, simulating a failed send.
func (s *dummyLoadSender) send() error {
	if !s.getFailOnSend() {
		s.buf = append(s.buf, s.data)
		return nil
	}
	s.failHandled = false
	return errSendFailed
}

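// getFailOnSend provides synchronised access to failOnSend, which may be
// changed by a test while a send is in flight on another routine.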
func (s *dummyLoadSender) getFailOnSend() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.failOnSend
}

// release sets dummyLoadSender's data slice to nil. data can be checked to
// see if release has been called at the right time.
func (s *dummyLoadSender) release() {
	s.data = nil
}

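// close is a no-op, implemented to satisfy the loadSender interface.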
func (s *dummyLoadSender) close() error { return nil }

// handleSendFail simply sets the failHandled flag to true. This can be checked
// to see if handleSendFail has been called by the multiSender at the right time.
func (s *dummyLoadSender) handleSendFail(err error) error {
	s.failHandled = true
	return nil
}

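// retrySend reports whether this sender should retry a failed send.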
func (s *dummyLoadSender) retrySend() bool { return s.retry }

// TestMultiSenderWrite checks that we can do basic writing to multiple senders
// using the multiSender.
func TestMultiSenderWrite(t *testing.T) {
	senders := []loadSender{
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
	}
	ms, err := newMultiSender(senders, func() bool { return true })
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Perform some multiSender writes.
	const noOfWrites = 5
	for i := byte(0); i < noOfWrites; i++ {
		ms.Write([]byte{i})
	}

	// Check that the senders got the data correctly from the writes.
	for i := byte(0); i < noOfWrites; i++ {
		for j, dest := range ms.senders {
			got := dest.(*dummyLoadSender).buf[i][0]
			if got != i {
				t.Errorf("Did not get expected result for sender: %v. \nGot: %v\nWant: %v\n", j, got, i)
			}
		}
	}
}

// TestMultiSenderNotActiveNoRetry checks that if the active func passed to
// newMultiSender returns false before a write, then we return from Write as
// expected without the senders sending anything.
func TestMultiSenderNotActiveNoRetry(t *testing.T) {
	senders := []loadSender{
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
	}

	// This will allow us to simulate a change in the running state of the
	// multiSender's 'owner'.
	active := true
	activeFunc := func() bool {
		return active
	}

	ms, err := newMultiSender(senders, activeFunc)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// We will perform two writes. We expect the second write not to complete,
	// i.e. the senders should not send anything on this write.
	ms.Write([]byte{0x00})
	active = false
	ms.Write([]byte{0x01})

	// Check that the senders only sent data once.
	for _, dest := range ms.senders {
		if len(dest.(*dummyLoadSender).buf) != 1 {
			t.Errorf("length of sender buf is not 1 as expected")
		}
	}
}

// TestMultiSenderNotActiveRetry checks that we correctly return from a call
// to multiSender.Write when the active callback func returns false during
// repeated send retries.
func TestMultiSenderNotActiveRetry(t *testing.T) {
	// The sender fails every send and is set to retry, so Write can only
	// return once the active func reports false.
	senders := []loadSender{
		newDummyLoadSender(true, true),
	}

	// Active will simulate the running state of the multiSender's 'owner'.
	active := true

	// We will run ms.Write in a routine, so we need some sync.
	var mu sync.Mutex

	// Once we change the state of the fake owner (under the mutex), activeFunc
	// will return false and we expect the ms.Write method to return from the
	// continuous send retry state.
	activeFunc := func() bool {
		mu.Lock()
		defer mu.Unlock()
		return active
	}

	ms, err := newMultiSender(senders, activeFunc)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// We run this in the background so that we can change the running state
	// during the write. We then expect done to be true after some period of
	// time. done is guarded by mu to avoid a race with the check below.
	done := false
	go func() {
		ms.Write([]byte{0x00})
		mu.Lock()
		done = true
		mu.Unlock()
	}()

	// Wait for half a second and then change the active state.
	time.Sleep(500 * time.Millisecond)
	mu.Lock()
	active = false
	mu.Unlock()

	// Wait half a second for the routine to return and check that done is true.
	time.Sleep(500 * time.Millisecond)
	mu.Lock()
	defer mu.Unlock()
	if !done {
		t.Fatal("multiSender.Write did not return as expected with active=false")
	}
}

// TestMultiSenderFailNoRetry checks that behaviour is as expected when a sender
// fails at a send and does not retry.
func TestMultiSenderFailNoRetry(t *testing.T) {
	senders := []loadSender{
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
		newDummyLoadSender(false, false),
	}

	ms, err := newMultiSender(senders, func() bool { return true })
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Perform a write that should succeed for all senders.
	ms.Write([]byte{0x00})

	// Make the second sender fail a send.
	const failedSenderIdx = 1
	failedSender := ms.senders[failedSenderIdx].(*dummyLoadSender)
	failedSender.failOnSend = true
	ms.Write([]byte{0x01})

	// Check that handleSendFail was called.
	if !failedSender.failHandled {
		t.Fatal("the failed send was not handled")
	}

	// Now for the next send we don't want to fail.
	failedSender.failOnSend = false
	ms.Write([]byte{0x02})

	// Check the number of slices sent for each sender and also check data.
	for i, sender := range ms.senders {
		// First check the number of slices sent for this sender.
		wantLen := 3
		if i == failedSenderIdx {
			wantLen = 2
		}
		curSender := sender.(*dummyLoadSender)
		gotLen := len(curSender.buf)
		if gotLen != wantLen {
			t.Errorf("did not get expected number of sends for sender: %v. \nGot: %v\nWant: %v\n", i, gotLen, wantLen)
		}

		// Now check the data itself.
		wantData := [][]byte{{0x00}, {0x01}, {0x02}}
		if i == failedSenderIdx {
			wantData = [][]byte{{0x00}, {0x02}}
		}
		gotData := curSender.buf
		if !reflect.DeepEqual(gotData, wantData) {
			t.Errorf("unexpected data sent through sender idx: %v. \nGot: %v\nWant: %v\n", i, gotData, wantData)
		}
	}
}

// TestMultiSenderFailRetry checks that if a sender is set to retry on failed
// sends, it does so repeatedly until it can successfully send.
func TestMultiSenderFailRetry(t *testing.T) {
	// NB: This is only being tested with one sender - this is AusOcean's use case.
	senders := []loadSender{newDummyLoadSender(false, true)}
	ms, err := newMultiSender(senders, func() bool { return true })
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Perform one write with a successful send.
	ms.Write([]byte{0x00})

	// Now cause the sender to fail on the next write.
	failedSender := ms.senders[0].(*dummyLoadSender)
	failedSender.failOnSend = true

	// Wrap the next write in a routine. It will keep retrying the send until
	// we set failOnSend back to false. done is guarded by mu so that the
	// check below does not race with this routine.
	var mu sync.Mutex
	done := false
	go func() {
		ms.Write([]byte{0x01})
		mu.Lock()
		done = true
		mu.Unlock()
	}()

	// Now set failOnSend back to false.
	failedSender.mu.Lock()
	failedSender.failOnSend = false
	failedSender.mu.Unlock()

	// Sleep and then check that we've successfully returned from the write.
	time.Sleep(10 * time.Millisecond)
	mu.Lock()
	writeDone := done
	mu.Unlock()
	if !writeDone {
		t.Fatal("did not exit write when send was successful")
	}

	// Write one last time.
	ms.Write([]byte{0x02})

	// Check that all the data is there.
	got := failedSender.buf
	want := [][]byte{{0x00}, {0x01}, {0x02}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("sender did not send expected data. \nGot: %v\nWant: %v\n", got, want)
	}
}