More graceful Tile38 shutdown

tidwall 2022-09-25 06:28:17 -07:00
parent f2c3b3924a
commit 906824323b
19 changed files with 298 additions and 111 deletions


@@ -27,13 +27,21 @@ func (conn *AMQPConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > amqpExpiresAfter {
-			conn.ex = true
 			conn.close()
+			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *AMQPConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *AMQPConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()
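Every stateful endpoint in this commit (AMQP, Disque, gRPC, Kafka, MQTT, NATS, PubSub, Redis, SQS) gets the same three edits: the expired flag is set only after close() runs, the redundant conn != nil guard is dropped (close() already checks it), and a mutex-guarded ExpireNow() is added so the manager can force expiration during shutdown. A condensed, self-contained sketch of the resulting shape — fakeConn and expiresAfter are illustrative stand-ins, not names from this commit:

package main

import (
	"fmt"
	"sync"
	"time"
)

const expiresAfter = 30 * time.Second

type fakeConn struct {
	mu sync.Mutex
	t  time.Time // time of last use
	ex bool      // expired flag
}

// Expired lazily expires the connection after a period of inactivity.
func (c *fakeConn) Expired() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.ex && time.Since(c.t) > expiresAfter {
		c.close() // close first, then mark expired
		c.ex = true
	}
	return c.ex
}

// ExpireNow forces the connection to expire, as during server shutdown.
func (c *fakeConn) ExpireNow() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.close()
	c.ex = true
}

// close releases the underlying resources; callers must hold mu.
// The nil check lives here, and only here.
func (c *fakeConn) close() {
	// e.g. if c.conn != nil { c.conn.Close(); c.conn = nil }
}

func main() {
	c := &fakeConn{t: time.Now()}
	c.ExpireNow()
	fmt.Println(c.Expired()) // true
}

Routing both the lazy and the forced path through one close keeps the resource-release logic in a single place.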


@@ -33,15 +33,21 @@ func (conn *DisqueConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > disqueExpiresAfter {
-			if conn.conn != nil {
 			conn.close()
-			}
 			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *DisqueConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *DisqueConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()


@@ -6,6 +6,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/streadway/amqp"
@@ -136,6 +137,7 @@ type Endpoint struct {
 // Conn is an endpoint connection
 type Conn interface {
+	ExpireNow()
 	Expired() bool
 	Send(val string) error
 }
@@ -145,6 +147,8 @@ type Manager struct {
 	mu        sync.RWMutex
 	conns     map[string]Conn
 	publisher LocalPublisher
+	shutdown  int32          // atomic bool
+	wg        sync.WaitGroup // run wait group
 }
 
 // NewManager returns a new manager
@@ -153,13 +157,29 @@ func NewManager(publisher LocalPublisher) *Manager {
 		conns:     make(map[string]Conn),
 		publisher: publisher,
 	}
-	go epc.Run()
+	epc.wg.Add(1)
+	go epc.run()
 	return epc
 }
 
+func (epc *Manager) Shutdown() {
+	defer epc.wg.Wait()
+	atomic.StoreInt32(&epc.shutdown, 1)
+	// expire the connections
+	epc.mu.Lock()
+	defer epc.mu.Unlock()
+	for _, conn := range epc.conns {
+		conn.ExpireNow()
+	}
+}
+
 // Run starts the managing of endpoints
-func (epc *Manager) Run() {
+func (epc *Manager) run() {
+	defer epc.wg.Done()
 	for {
+		if atomic.LoadInt32(&epc.shutdown) != 0 {
+			return
+		}
 		time.Sleep(time.Second)
 		func() {
 			epc.mu.Lock()
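The ordering inside Shutdown is the subtle part: defer epc.wg.Wait() is registered first, so it runs last, after the atomic flag is raised and every connection is force-expired; Shutdown therefore returns only once run has observed the flag and exited. The same handshake as a standalone sketch (the manager type and sleep interval here are illustrative, not taken from the commit):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type manager struct {
	shutdown int32 // atomic bool
	wg       sync.WaitGroup
}

func newManager() *manager {
	m := &manager{}
	m.wg.Add(1) // Add before the goroutine starts, never inside it
	go m.run()
	return m
}

func (m *manager) run() {
	defer m.wg.Done()
	for {
		if atomic.LoadInt32(&m.shutdown) != 0 {
			return
		}
		// ... poll and sweep expired connections here ...
		time.Sleep(100 * time.Millisecond)
	}
}

// Shutdown signals run to stop and waits for it to return.
func (m *manager) Shutdown() {
	defer m.wg.Wait() // runs last: block until run() has exited
	atomic.StoreInt32(&m.shutdown, 1)
	// ... ExpireNow() every tracked connection here ...
}

func main() {
	m := newManager()
	m.Shutdown()
	fmt.Println("manager stopped cleanly")
}

Calling Add inside run instead of the constructor would race with an immediate Shutdown, which could call Wait before the goroutine had registered itself.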


@@ -28,6 +28,10 @@ func (conn *EvenHubConn) Expired() bool {
 	return false
 }
 
+// ExpireNow forces the connection to expire
+func (conn *EvenHubConn) ExpireNow() {
+}
+
 // Send sends a message
 func (conn *EvenHubConn) Send(msg string) error {
 	hub, err := eventhub.NewHubFromConnectionString(conn.ep.EventHub.ConnectionString)


@@ -36,14 +36,21 @@ func (conn *GRPCConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > grpcExpiresAfter {
-			if conn.conn != nil {
 			conn.close()
-			}
 			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *GRPCConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *GRPCConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()


@@ -38,6 +38,10 @@ func (conn *HTTPConn) Expired() bool {
 	return false
 }
 
+// ExpireNow forces the connection to expire
+func (conn *HTTPConn) ExpireNow() {
+}
+
 // Send sends a message
 func (conn *HTTPConn) Send(msg string) error {
 	req, err := http.NewRequest("POST", conn.ep.Original, bytes.NewBufferString(msg))


@@ -34,15 +34,21 @@ func (conn *KafkaConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > kafkaExpiresAfter {
-			if conn.conn != nil {
 			conn.close()
-			}
 			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *KafkaConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *KafkaConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()


@@ -23,6 +23,10 @@ func (conn *LocalConn) Expired() bool {
 	return false
 }
 
+// ExpireNow forces the connection to expire
+func (conn *LocalConn) ExpireNow() {
+}
+
 // Send sends a message
 func (conn *LocalConn) Send(msg string) error {
 	conn.publisher.Publish(conn.ep.Local.Channel, msg)


@@ -40,12 +40,19 @@ func (conn *MQTTConn) Expired() bool {
 	return conn.ex
 }
 
+// ExpireNow forces the connection to expire
+func (conn *MQTTConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
+
 func (conn *MQTTConn) close() {
 	if conn.conn != nil {
 		if conn.conn.IsConnected() {
 			conn.conn.Disconnect(250)
 		}
 		conn.conn = nil
 	}
 }


@@ -32,15 +32,21 @@ func (conn *NATSConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > natsExpiresAfter {
-			if conn.conn != nil {
 			conn.close()
-			}
 			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *NATSConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *NATSConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()


@@ -83,13 +83,21 @@ func (conn *PubSubConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > pubsubExpiresAfter {
-			conn.ex = true
 			conn.close()
+			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *PubSubConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func newPubSubConn(ep Endpoint) *PubSubConn {
 	return &PubSubConn{
 		ep: ep,


@@ -32,15 +32,21 @@ func (conn *RedisConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > redisExpiresAfter {
-			if conn.conn != nil {
 			conn.close()
-			}
 			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *RedisConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *RedisConn) close() {
 	if conn.conn != nil {
 		conn.conn.Close()


@@ -39,13 +39,21 @@ func (conn *SQSConn) Expired() bool {
 	defer conn.mu.Unlock()
 	if !conn.ex {
 		if time.Since(conn.t) > sqsExpiresAfter {
-			conn.ex = true
 			conn.close()
+			conn.ex = true
 		}
 	}
 	return conn.ex
 }
+
+// ExpireNow forces the connection to expire
+func (conn *SQSConn) ExpireNow() {
+	conn.mu.Lock()
+	defer conn.mu.Unlock()
+	conn.close()
+	conn.ex = true
+}
 
 func (conn *SQSConn) close() {
 	if conn.svc != nil {
 		conn.svc = nil


@@ -1,6 +1,7 @@
 package server
 
 import (
+	"sync"
 	"time"
 
 	"github.com/tidwall/tile38/internal/collection"
@@ -12,20 +13,15 @@ const bgExpireDelay = time.Second / 10
 
 // backgroundExpiring deletes expired items from the database.
 // It's executes every 1/10 of a second.
-func (s *Server) backgroundExpiring() {
-	for {
-		if s.stopServer.on() {
-			return
-		}
-		func() {
-			s.mu.Lock()
-			defer s.mu.Unlock()
-			now := time.Now()
-			s.backgroundExpireObjects(now)
-			s.backgroundExpireHooks(now)
-		}()
-		time.Sleep(bgExpireDelay)
-	}
+func (s *Server) backgroundExpiring(wg *sync.WaitGroup) {
+	defer wg.Done()
+	s.loopUntilServerStops(bgExpireDelay, func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+		now := time.Now()
+		s.backgroundExpireObjects(now)
+		s.backgroundExpireHooks(now)
+	})
 }
 
 func (s *Server) backgroundExpireObjects(now time.Time) {


@@ -21,10 +21,16 @@ type liveBuffer struct {
 	cond *sync.Cond
 }
 
-func (s *Server) processLives() {
-	defer s.lwait.Done()
+func (s *Server) processLives(wg *sync.WaitGroup) {
+	defer wg.Done()
+	var done abool
+	wg.Add(1)
 	go func() {
+		defer wg.Done()
 		for {
+			if done.on() {
+				break
+			}
 			s.lcond.Broadcast()
 			time.Sleep(time.Second / 4)
 		}
@@ -33,6 +39,7 @@ func (s *Server) processLives() {
 	defer s.lcond.L.Unlock()
 	for {
 		if s.stopServer.on() {
+			done.set(true)
 			return
 		}
 		for len(s.lstack) > 0 {
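processLives needs two flags because it runs two goroutines: the main loop blocks on a condition variable, and a helper wakes it every quarter second; when the main loop sees stopServer it sets the local done flag so the helper exits too, and both are now counted in the caller's WaitGroup. A standalone sketch of that choreography, with the standard library's atomic.Bool standing in for tile38's abool helper:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

func main() {
	var (
		wg         sync.WaitGroup
		stopServer atomic.Bool // server-wide stop flag
		done       atomic.Bool // local: tells the broadcaster to quit
		mu         sync.Mutex
		cond       = sync.NewCond(&mu)
	)

	// Broadcaster: wakes the waiter every quarter second so it can
	// re-check the stop flag instead of blocking in Wait forever.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for !done.Load() {
			cond.Broadcast()
			time.Sleep(time.Second / 4)
		}
	}()

	// Waiter: the processing loop; blocks on the condition variable.
	wg.Add(1)
	go func() {
		defer wg.Done()
		mu.Lock()
		defer mu.Unlock()
		for {
			if stopServer.Load() {
				done.Store(true) // let the broadcaster exit as well
				return
			}
			// ... drain pending live-query work here ...
			cond.Wait() // woken by the broadcaster
		}
	}()

	time.Sleep(time.Second / 2) // simulate uptime
	stopServer.Store(true)
	wg.Wait() // both goroutines exit within ~250ms
	fmt.Println("live processing stopped")
}

The waiter must be the one to set done: it exits first, and the broadcaster, no longer needed for wake-ups, follows on its next pass.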


@@ -79,6 +79,7 @@ type Server struct {
 	started time.Time
 	config  *Config
 	epc     *endpoint.Manager
+	ln      net.Listener // server listener
 
 	// env opts
 	geomParseOpts geojson.ParseOptions
@@ -114,7 +115,6 @@ type Server struct {
 	lstack    []*commandDetails
 	lives     map[*liveBuffer]bool
 	lcond     *sync.Cond
-	lwait     sync.WaitGroup
 	fcup      bool // follow caught up
 	fcuponce  bool // follow caught up once
 	shrinking bool // aof shrinking flag
@@ -165,6 +165,9 @@ type Options struct {
 	// QueueFileName allows for custom queue.db file path
 	QueueFileName string
+
+	// Shutdown allows for shutting down the server.
+	Shutdown <-chan bool
 }
 
 // Serve starts a new tile38 server
@@ -180,6 +183,15 @@ func Serve(opts Options) error {
 	}
 	log.Infof("Server started, Tile38 version %s, git %s", core.Version, core.GitSHA)
 
+	defer func() {
+		log.Warn("Server has shutdown, bye now")
+		if false {
+			// prints the stack, looking for running goroutines.
+			buf := make([]byte, 10000)
+			n := runtime.Stack(buf, true)
+			println(string(buf[:n]))
+		}
+	}()
 
 	// Initialize the s
 	s := &Server{
@@ -210,6 +222,7 @@ func Serve(opts Options) error {
 	}
 	s.epc = endpoint.NewManager(s)
+	defer s.epc.Shutdown()
 	s.luascripts = s.newScriptMap()
 	s.luapool = s.newPool()
 	defer s.luapool.Shutdown()
@@ -279,6 +292,13 @@ func Serve(opts Options) error {
 		nerr <- s.netServe()
 	}()
 
+	go func() {
+		<-opts.Shutdown
+		s.stopServer.set(true)
+		log.Warnf("Shutting down...")
+		s.ln.Close()
+	}()
+
 	// Load the queue before the aof
 	qdb, err := buntdb.Open(opts.QueueFileName)
 	if err != nil {
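Options.Shutdown is a receive-only channel, so an embedding program stops the server by sending on (or closing) it; the goroutine above sets the stop flag and closes the listener, which unwinds Serve. A hypothetical host program wiring OS signals to that channel — only the Shutdown field is taken from this commit; the other Options fields and values are assumptions, and note that internal packages are importable only from within the tile38 module itself, as its own cmd package does:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/tidwall/tile38/internal/server"
)

func main() {
	shutdown := make(chan bool)

	// Translate SIGINT/SIGTERM into a send on the shutdown channel.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		shutdown <- true
	}()

	// Serve blocks until the server stops. After a shutdown send, the
	// accept loop returns nil rather than the listener's close error.
	err := server.Serve(server.Options{
		Host:     "127.0.0.1", // assumed field
		Port:     9851,        // assumed field
		Dir:      "data",      // assumed field
		Shutdown: shutdown,
	})
	if err != nil {
		log.Fatal(err)
	}
}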
@@ -324,32 +344,60 @@
 	}
 
 	// Start background routines
+	var bgwg sync.WaitGroup
+
 	if s.config.followHost() != "" {
-		go s.follow(s.config.followHost(), s.config.followPort(),
-			s.followc.get())
+		bgwg.Add(1)
+		go func() {
+			defer bgwg.Done()
+			s.follow(s.config.followHost(), s.config.followPort(),
+				s.followc.get())
+		}()
 	}
 
+	var mln net.Listener
 	if opts.MetricsAddr != "" {
 		log.Infof("Listening for metrics at: %s", opts.MetricsAddr)
+		mln, err = net.Listen("tcp", opts.MetricsAddr)
+		if err != nil {
+			return err
+		}
+		bgwg.Add(1)
 		go func() {
-			http.HandleFunc("/", s.MetricsIndexHandler)
-			http.HandleFunc("/metrics", s.MetricsHandler)
-			log.Fatal(http.ListenAndServe(opts.MetricsAddr, nil))
+			defer bgwg.Done()
+			smux := http.NewServeMux()
+			smux.HandleFunc("/", s.MetricsIndexHandler)
+			smux.HandleFunc("/metrics", s.MetricsHandler)
+			err := http.Serve(mln, smux)
+			if err != nil {
+				if !s.stopServer.on() {
+					log.Fatalf("metrics server: %s", err)
+				}
+			}
 		}()
 	}
 
-	s.lwait.Add(1)
-	go s.processLives()
-	go s.watchOutOfMemory()
-	go s.watchLuaStatePool()
-	go s.watchAutoGC()
-	go s.backgroundExpiring()
-	go s.backgroundSyncAOF()
+	bgwg.Add(1)
+	go s.processLives(&bgwg)
+	bgwg.Add(1)
+	go s.watchOutOfMemory(&bgwg)
+	bgwg.Add(1)
+	go s.watchLuaStatePool(&bgwg)
+	bgwg.Add(1)
+	go s.watchAutoGC(&bgwg)
+	bgwg.Add(1)
+	go s.backgroundExpiring(&bgwg)
+	bgwg.Add(1)
+	go s.backgroundSyncAOF(&bgwg)
 
 	defer func() {
+		log.Debug("Stopping background routines")
 		// Stop background routines
 		s.followc.add(1) // this will force any follow communication to die
 		s.stopServer.set(true)
-		s.lwait.Wait()
+		if mln != nil {
+			mln.Close() // Stop the metrics server
+		}
+		bgwg.Wait()
 	}()
 
 	// Server is now loaded and ready. Wait for network error messages.
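The metrics endpoint previously used http.ListenAndServe with the global DefaultServeMux, which offers no way to stop it. Binding an explicit listener and a private ServeMux gives the shutdown path something to close, and the stopServer check downgrades the resulting Serve error from fatal to expected. The same idiom in isolation (a sketch; the handler, address, and panic-instead-of-log choices are illustrative):

package main

import (
	"fmt"
	"net"
	"net/http"
	"sync/atomic"
	"time"
)

func main() {
	var stopServer atomic.Bool

	mln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		defer close(done)
		smux := http.NewServeMux() // private mux: no global state
		smux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "# metrics here")
		})
		if err := http.Serve(mln, smux); err != nil {
			if !stopServer.Load() {
				panic(err) // unexpected: the listener failed while running
			}
			// expected: listener closed during shutdown
		}
	}()

	time.Sleep(100 * time.Millisecond) // simulate uptime
	stopServer.Store(true)
	mln.Close() // unblocks http.Serve
	<-done
	fmt.Println("metrics server stopped")
}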
@@ -384,16 +432,37 @@ func (s *Server) netServe() error {
 	if err != nil {
 		return err
 	}
-	defer ln.Close()
+
+	var wg sync.WaitGroup
+	defer func() {
+		log.Debug("Closing client connections...")
+		s.connsmu.RLock()
+		for _, c := range s.conns {
+			c.closer.Close()
+		}
+		s.connsmu.RUnlock()
+		wg.Wait()
+		ln.Close()
+		log.Debug("Client connection closed")
+	}()
+	s.ln = ln
 
 	log.Infof("Ready to accept connections at %s", ln.Addr())
 	var clientID int64
 	for {
 		conn, err := ln.Accept()
 		if err != nil {
-			return err
+			if s.stopServer.on() {
+				return nil
+			}
+			log.Warn(err)
+			time.Sleep(time.Second / 5)
+			continue
 		}
+		wg.Add(1)
 		go func(conn net.Conn) {
+			defer wg.Done()
 			// open connection
 			// create the client
 			client := new(Client)
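netServe now owns client-connection cleanup: closing s.ln from the shutdown goroutine unblocks Accept, the stop flag turns that error into a clean nil return, and a WaitGroup keeps the function from returning while client goroutines are still unwinding. A minimal standalone sketch of the idiom (names and the per-connection body are illustrative):

package main

import (
	"fmt"
	"net"
	"sync"
	"sync/atomic"
	"time"
)

func serve(ln net.Listener, stop *atomic.Bool) error {
	var wg sync.WaitGroup
	defer wg.Wait() // wait for in-flight connections before returning
	for {
		conn, err := ln.Accept()
		if err != nil {
			if stop.Load() {
				return nil // deliberate shutdown, not an error
			}
			return err
		}
		wg.Add(1)
		go func(conn net.Conn) {
			defer wg.Done()
			defer conn.Close()
			fmt.Fprintln(conn, "bye") // stand-in for real client handling
		}(conn)
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	var stop atomic.Bool
	go func() {
		time.Sleep(100 * time.Millisecond)
		stop.Store(true) // set the flag *before* closing
		ln.Close()       // unblocks Accept
	}()
	fmt.Println("serve returned:", serve(ln, &stop))
}

Setting the flag before closing the listener is what makes the nil return reliable; the other order would briefly report shutdown as a genuine accept failure.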
@@ -617,20 +686,16 @@ func (conn *liveConn) SetWriteDeadline(deadline time.Time) error {
 	panic("not supported")
 }
 
-func (s *Server) watchAutoGC() {
-	t := time.NewTicker(time.Second)
-	defer t.Stop()
+func (s *Server) watchAutoGC(wg *sync.WaitGroup) {
+	defer wg.Done()
 	start := time.Now()
-	for range t.C {
-		if s.stopServer.on() {
-			return
-		}
+	s.loopUntilServerStops(time.Second, func() {
 		autoGC := s.config.autoGC()
 		if autoGC == 0 {
-			continue
+			return
 		}
 		if time.Since(start) < time.Second*time.Duration(autoGC) {
-			continue
+			return
 		}
 		var mem1, mem2 runtime.MemStats
 		runtime.ReadMemStats(&mem1)
@@ -645,7 +710,7 @@
 			"alloc: %v, heap_alloc: %v, heap_released: %v",
 			mem2.Alloc, mem2.HeapAlloc, mem2.HeapReleased)
 		start = time.Now()
-	}
+	})
 }
 
 func (s *Server) checkOutOfMemory() {
@@ -667,38 +732,43 @@
 	s.outOfMemory.set(int(mem.HeapAlloc) > s.config.maxMemory())
 }
 
-func (s *Server) watchOutOfMemory() {
-	t := time.NewTicker(time.Second * 2)
-	defer t.Stop()
-	for range t.C {
-		s.checkOutOfMemory()
-	}
-}
-
-func (s *Server) watchLuaStatePool() {
-	t := time.NewTicker(time.Second * 10)
-	defer t.Stop()
-	for range t.C {
-		func() {
-			s.luapool.Prune()
-		}()
-	}
-}
-
-// backgroundSyncAOF ensures that the aof buffer is does not grow too big.
-func (s *Server) backgroundSyncAOF() {
-	t := time.NewTicker(time.Second)
-	defer t.Stop()
-	for range t.C {
-		if s.stopServer.on() {
-			return
-		}
-		func() {
-			s.mu.Lock()
-			defer s.mu.Unlock()
-			s.flushAOF(true)
-		}()
-	}
+func (s *Server) loopUntilServerStops(dur time.Duration, op func()) {
+	var last time.Time
+	for {
+		if s.stopServer.on() {
+			return
+		}
+		now := time.Now()
+		if now.Sub(last) > dur {
+			op()
+			last = now
+		}
+		time.Sleep(time.Second / 5)
+	}
+}
+
+func (s *Server) watchOutOfMemory(wg *sync.WaitGroup) {
+	defer wg.Done()
+	s.loopUntilServerStops(time.Second*4, func() {
+		s.checkOutOfMemory()
+	})
+}
+
+func (s *Server) watchLuaStatePool(wg *sync.WaitGroup) {
+	defer wg.Done()
+	s.loopUntilServerStops(time.Second*10, func() {
+		s.luapool.Prune()
+	})
+}
+
+// backgroundSyncAOF ensures that the aof buffer is does not grow too big.
+func (s *Server) backgroundSyncAOF(wg *sync.WaitGroup) {
+	defer wg.Done()
+	s.loopUntilServerStops(time.Second, func() {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+		s.flushAOF(true)
+	})
 }
 
 func isReservedFieldName(field string) bool {
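loopUntilServerStops replaces the per-goroutine tickers: each background task keeps its own cadence (the dur between op calls), but every loop sleeps in fifth-of-a-second slices and re-checks stopServer, so shutdown latency is bounded at roughly 200 ms per goroutine no matter how long the nominal period is. A runnable demonstration of that property — the helper mirrors the commit, the surrounding scaffolding is illustrative:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var stopServer atomic.Bool

// loopUntilServerStops runs op at most once per dur, polling the stop
// flag between short sleeps so shutdown is never blocked on a long timer.
func loopUntilServerStops(dur time.Duration, op func()) {
	var last time.Time
	for {
		if stopServer.Load() {
			return
		}
		now := time.Now()
		if now.Sub(last) > dur {
			op()
			last = now
		}
		time.Sleep(time.Second / 5)
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Nominally a 10-second job, like watchLuaStatePool.
		loopUntilServerStops(10*time.Second, func() {
			fmt.Println("pruning...")
		})
	}()

	time.Sleep(300 * time.Millisecond) // simulate uptime
	start := time.Now()
	stopServer.Store(true)
	wg.Wait()
	fmt.Printf("worker exited %v after the stop flag\n", time.Since(start))
}

The trade-off is a little background wake-up churn (five polls per second per goroutine) in exchange for a hard upper bound on shutdown time.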


@@ -9,9 +9,8 @@ cd tests
 go test -coverpkg=../internal/server -coverprofile=/tmp/coverage.out $GOTEST
 
-# go test \
-#   -coverpkg=../internal/... -coverprofile=/tmp/coverage.out \
-#   -v . -v ../... $GOTEST
+# go test -coverpkg=../internal/... -coverprofile=/tmp/coverage.out \
+#   -v ./... $GOTEST
 
 go tool cover -html=/tmp/coverage.out -o /tmp/coverage.html
 echo "details: file:///tmp/coverage.html"


@@ -39,6 +39,7 @@ type mockServer struct {
 	conn     redis.Conn
 	ioJSON   bool
 	dir      string
+	shutdown chan bool
 }
 
 func (mc *mockServer) readAOF() ([]byte, error) {
@@ -71,8 +72,10 @@ func mockOpenServer(opts MockServerOptions) (*mockServer, error) {
 	logOutput := io.Discard
 	if os.Getenv("PRINTLOG") == "1" {
 		logOutput = os.Stderr
+		tlog.Level = 3
 	}
-	s := &mockServer{port: port, dir: dir}
+	shutdown := make(chan bool)
+	s := &mockServer{port: port, dir: dir, shutdown: shutdown}
 	tlog.SetOutput(logOutput)
 	var ferrt int32 // atomic flag for when ferr has been set
 	var ferr error  // ferr for when the server fails to start
@@ -84,11 +87,14 @@ func mockOpenServer(opts MockServerOptions) (*mockServer, error) {
 		UseHTTP:    true,
 		DevMode:    true,
 		AppendOnly: true,
+		Shutdown:   shutdown,
+		ShowDebugMessages: true,
 	}
 	if opts.Metrics {
 		sopts.MetricsAddr = ":4321"
 	}
-	if err := server.Serve(sopts); err != nil {
+	err := server.Serve(sopts)
+	if err != nil {
 		ferr = err
 		atomic.StoreInt32(&ferrt, 1)
 	}
@@ -133,6 +139,7 @@ func (s *mockServer) waitForStartup(ferr *error, ferrt *int32) error {
 }
 
 func (mc *mockServer) Close() {
+	mc.shutdown <- true
 	if mc.conn != nil {
 		mc.conn.Close()
 	}
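Since the shutdown channel is unbuffered, the mc.shutdown <- true in Close blocks until the server's shutdown watcher has received it, so a test cannot fire the signal and race ahead believing it may have been dropped. The rendezvous in miniature (illustrative sketch, not code from the commit):

package main

import "fmt"

func main() {
	shutdown := make(chan bool) // unbuffered, like the mock server's
	done := make(chan struct{})

	// Stands in for the Serve-side goroutine that watches opts.Shutdown.
	go func() {
		<-shutdown // receive the signal
		fmt.Println("server: beginning graceful shutdown")
		close(done)
	}()

	// Stands in for mockServer.Close: the send blocks until the watcher
	// above has received it, so the signal cannot be lost.
	shutdown <- true
	<-done
	fmt.Println("test: server acknowledged shutdown")
}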


@@ -26,6 +26,18 @@ const (
 	white  = "\x1b[37m"
 )
 
+// type mockTest struct {
+// }
+
+// func mockTestInit() *mockTest {
+// 	mt := &mockTest{}
+// 	return mt
+// }
+
+// func (mt *mockTest) Cleanup() {
+// }
+
 func TestAll(t *testing.T) {
 	mockCleanup(false)
 	defer mockCleanup(false)
@@ -45,6 +57,8 @@ func TestAll(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	// log.Printf("Waiting a second for everything to cleanly start...")
+	// time.Sleep(time.Second * 2)
 	defer mc.Close()
 
 	runSubTest(t, "keys", mc, subTestKeys)