diff --git a/vendor/github.com/nats-io/go-nats/LICENSE b/vendor/github.com/nats-io/go-nats/LICENSE new file mode 100644 index 00000000..9798d4ef --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2012-2017 Apcera Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats/README.md b/vendor/github.com/nats-io/go-nats/README.md new file mode 100644 index 00000000..ae6868c5 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/README.md @@ -0,0 +1,350 @@ +# NATS - Go Client +A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). + +[![License MIT](https://img.shields.io/badge/License-MIT-blue.svg)](http://opensource.org/licenses/MIT) +[![Go Report Card](https://goreportcard.com/badge/github.com/nats-io/go-nats)](https://goreportcard.com/report/github.com/nats-io/go-nats) [![Build Status](https://travis-ci.org/nats-io/go-nats.svg?branch=master)](http://travis-ci.org/nats-io/go-nats) [![GoDoc](https://godoc.org/github.com/nats-io/go-nats?status.svg)](http://godoc.org/github.com/nats-io/go-nats) [![Coverage Status](https://coveralls.io/repos/nats-io/go-nats/badge.svg?branch=master)](https://coveralls.io/r/nats-io/go-nats?branch=master) + +## Installation + +```bash +# Go client +go get github.com/nats-io/go-nats + +# Server +go get github.com/nats-io/gnatsd +``` + +## Basic Usage + +```go + +nc, _ := nats.Connect(nats.DefaultURL) + +// Simple Publisher +nc.Publish("foo", []byte("Hello World")) + +// Simple Async Subscriber +nc.Subscribe("foo", func(m *nats.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +// Simple Sync Subscriber +sub, err := nc.SubscribeSync("foo") +m, err := sub.NextMsg(timeout) + +// Channel Subscriber +ch := make(chan *nats.Msg, 64) +sub, err := nc.ChanSubscribe("foo", ch) +msg := <- ch + +// Unsubscribe +sub.Unsubscribe() + +// Requests +msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) + +// Replies +nc.Subscribe("help", func(m *Msg) { + nc.Publish(m.Reply, []byte("I can help!")) +}) + +// Close connection +nc, _ := nats.Connect("nats://localhost:4222") +nc.Close(); +``` + +## Encoded Connections + +```go + +nc, _ := nats.Connect(nats.DefaultURL) +c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +defer c.Close() + +// Simple Publisher +c.Publish("foo", "Hello World") + +// Simple Async Subscriber +c.Subscribe("foo", func(s string) { + fmt.Printf("Received a message: %s\n", s) +}) + +// EncodedConn can Publish any raw Go 
type using the registered Encoder +type person struct { + Name string + Address string + Age int +} + +// Go type Subscriber +c.Subscribe("hello", func(p *person) { + fmt.Printf("Received a person: %+v\n", p) +}) + +me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"} + +// Go type Publisher +c.Publish("hello", me) + +// Unsubscribe +sub, err := c.Subscribe("foo", nil) +... +sub.Unsubscribe() + +// Requests +var response string +err := c.Request("help", "help me", &response, 10*time.Millisecond) +if err != nil { + fmt.Printf("Request failed: %v\n", err) +} + +// Replying +c.Subscribe("help", func(subj, reply string, msg string) { + c.Publish(reply, "I can help!") +}) + +// Close connection +c.Close(); +``` + +## TLS + +```go +// tls as a scheme will enable secure connections by default. This will also verify the server name. +nc, err := nats.Connect("tls://nats.demo.io:4443") + +// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup. +// We provide a helper method to make this case easier. +nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem")) + +// If the server requires client certificate, there is an helper function for that too: +cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem") +nc, err = nats.Connect("tls://localhost:4443", cert) + +// You can also supply a complete tls.Config + +certFile := "./configs/certs/client-cert.pem" +keyFile := "./configs/certs/client-key.pem" +cert, err := tls.LoadX509KeyPair(certFile, keyFile) +if err != nil { + t.Fatalf("error parsing X509 certificate/key pair: %v", err) +} + +config := &tls.Config{ + ServerName: opts.Host, + Certificates: []tls.Certificate{cert}, + RootCAs: pool, + MinVersion: tls.VersionTLS12, +} + +nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config)) +if err != nil { + t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err) +} + +``` + +## Using Go Channels (netchan) + +```go +nc, _ := nats.Connect(nats.DefaultURL) +ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +defer ec.Close() + +type person struct { + Name string + Address string + Age int +} + +recvCh := make(chan *person) +ec.BindRecvChan("hello", recvCh) + +sendCh := make(chan *person) +ec.BindSendChan("hello", sendCh) + +me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"} + +// Send via Go channels +sendCh <- me + +// Receive via Go channels +who := <- recvCh +``` + +## Wildcard Subscriptions + +```go + +// "*" matches any token, at any level of the subject. +nc.Subscribe("foo.*.baz", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +nc.Subscribe("foo.bar.*", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +// ">" matches any length of the tail of a subject, and can only be the last token +// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22' +nc.Subscribe("foo.>", func(m *Msg) { + fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data)); +}) + +// Matches all of the above +nc.Publish("foo.bar.baz", []byte("Hello World")) + +``` + +## Queue Groups + +```go +// All subscriptions with the same queue name will form a queue group. +// Each message will be delivered to only one subscriber per queue group, +// using queuing semantics. You can have as many queue groups as you wish. +// Normal subscribers will continue to work as expected. 
+ +nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) { + received += 1; +}) + +``` + +## Advanced Usage + +```go + +// Flush connection to server, returns when all messages have been processed. +nc.Flush() +fmt.Println("All clear!") + +// FlushTimeout specifies a timeout value as well. +err := nc.FlushTimeout(1*time.Second) +if err != nil { + fmt.Println("All clear!") +} else { + fmt.Println("Flushed timed out!") +} + +// Auto-unsubscribe after MAX_WANTED messages received +const MAX_WANTED = 10 +sub, err := nc.Subscribe("foo") +sub.AutoUnsubscribe(MAX_WANTED) + +// Multiple connections +nc1 := nats.Connect("nats://host1:4222") +nc2 := nats.Connect("nats://host2:4222") + +nc1.Subscribe("foo", func(m *Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +nc2.Publish("foo", []byte("Hello World!")); + +``` + +## Clustered Usage + +```go + +var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224" + +nc, err := nats.Connect(servers) + +// Optionally set ReconnectWait and MaxReconnect attempts. +// This example means 10 seconds total per backend. +nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second)) + +// Optionally disable randomization of the server pool +nc, err = nats.Connect(servers, nats.DontRandomize()) + +// Setup callbacks to be notified on disconnects, reconnects and connection closed. +nc, err = nats.Connect(servers, + nats.DisconnectHandler(func(nc *nats.Conn) { + fmt.Printf("Got disconnected!\n") + }), + nats.ReconnectHandler(func(_ *nats.Conn) { + fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl()) + }), + nats.ClosedHandler(func(nc *nats.Conn) { + fmt.Printf("Connection closed. Reason: %q\n", nc.LastError()) + }) +) + +// When connecting to a mesh of servers with auto-discovery capabilities, +// you may need to provide a username/password or token in order to connect +// to any server in that mesh when authentication is required. +// Instead of providing the credentials in the initial URL, you will use +// new option setters: +nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar")) + +// For token based authentication: +nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken")) + +// You can even pass the two at the same time in case one of the server +// in the mesh requires token instead of user name and password. +nc, err = nats.Connect("nats://localhost:4222", + nats.UserInfo("foo", "bar"), + nats.Token("S3cretT0ken")) + +// Note that if credentials are specified in the initial URLs, they take +// precedence on the credentials specfied through the options. +// For instance, in the connect call below, the client library will use +// the user "my" and password "pwd" to connect to locahost:4222, however, +// it will use username "foo" and password "bar" when (re)connecting to +// a different server URL that it got as part of the auto-discovery. 
+nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar")) + +``` + +## Context support (+Go 1.7) + +```go +ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) +defer cancel() + +nc, err := nats.Connect(nats.DefaultURL) + +// Request with context +msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar")) + +// Synchronous subscriber with context +sub, err := nc.SubscribeSync("foo") +msg, err := sub.NextMsgWithContext(ctx) + +// Encoded Request with context +c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +type request struct { + Message string `json:"message"` +} +type response struct { + Code int `json:"code"` +} +req := &request{Message: "Hello"} +resp := &response{} +err := c.RequestWithContext(ctx, "foo", req, resp) +``` + +## License + +(The MIT License) + +Copyright (c) 2012-2017 Apcera Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/github.com/nats-io/go-nats/TODO.md b/vendor/github.com/nats-io/go-nats/TODO.md new file mode 100644 index 00000000..213aaeca --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/TODO.md @@ -0,0 +1,26 @@ + +- [ ] Better constructors, options handling +- [ ] Functions for callback settings after connection created. +- [ ] Better options for subscriptions. Slow Consumer state settable, Go routines vs Inline. +- [ ] Move off of channels for subscribers, use syncPool linkedLists, etc with highwater. +- [ ] Test for valid subjects on publish and subscribe? +- [ ] SyncSubscriber and Next for EncodedConn +- [ ] Fast Publisher? +- [ ] pooling for structs used? leaky bucket? +- [ ] Timeout 0 should work as no timeout +- [x] Ping timer +- [x] Name in Connect for gnatsd +- [x] Asynchronous error handling +- [x] Parser rewrite +- [x] Reconnect +- [x] Hide Lock +- [x] Easier encoder interface +- [x] QueueSubscribeSync +- [x] Make nats specific errors prefixed with 'nats:' +- [x] API test for closed connection +- [x] TLS/SSL +- [x] Stats collection +- [x] Disconnect detection +- [x] Optimized Publish (coalescing) +- [x] Do Examples via Go style +- [x] Standardized Errors diff --git a/vendor/github.com/nats-io/go-nats/context.go b/vendor/github.com/nats-io/go-nats/context.go new file mode 100644 index 00000000..be6ada4a --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/context.go @@ -0,0 +1,166 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. + +// +build go1.7 + +// A Go client for the NATS messaging system (https://nats.io). 
+package nats + +import ( + "context" + "fmt" + "reflect" +) + +// RequestWithContext takes a context, a subject and payload +// in bytes and request expecting a single response. +func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { + if ctx == nil { + return nil, ErrInvalidContext + } + if nc == nil { + return nil, ErrInvalidConnection + } + + nc.mu.Lock() + // If user wants the old style. + if nc.Opts.UseOldRequestStyle { + nc.mu.Unlock() + return nc.oldRequestWithContext(ctx, subj, data) + } + + // Do setup for the new style. + if nc.respMap == nil { + // _INBOX wildcard + nc.respSub = fmt.Sprintf("%s.*", NewInbox()) + nc.respMap = make(map[string]chan *Msg) + } + // Create literal Inbox and map to a chan msg. + mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respToken(respInbox) + nc.respMap[token] = mch + createSub := nc.respMux == nil + ginbox := nc.respSub + nc.mu.Unlock() + + if createSub { + // Make sure scoped subscription is setup only once. + var err error + nc.respSetup.Do(func() { err = nc.createRespMux(ginbox) }) + if err != nil { + return nil, err + } + } + + err := nc.PublishRequest(subj, respInbox, data) + if err != nil { + return nil, err + } + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-ctx.Done(): + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ctx.Err() + } + + return msg, nil +} + +// oldRequestWithContext utilizes inbox and subscription per request. +func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { + inbox := NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.PublishRequest(subj, inbox, data) + if err != nil { + return nil, err + } + + return s.NextMsgWithContext(ctx) +} + +// NextMsgWithContext takes a context and returns the next message +// available to a synchronous subscriber, blocking until it is delivered +// or context gets canceled. +func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { + if ctx == nil { + return nil, ErrInvalidContext + } + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState() + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + err := s.processNextMsgDelivered(msg) + if err != nil { + return nil, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + + return msg, nil +} + +// RequestWithContext will create an Inbox and perform a Request +// using the provided cancellation context with the Inbox reply +// for the data v. A response will be decoded into the vPtrResponse. 
+func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v interface{}, vPtr interface{}) error { + if ctx == nil { + return ErrInvalidContext + } + + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.RequestWithContext(ctx, subject, b) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err := c.Enc.Decode(m.Subject, m.Data, vPtr) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/nats-io/go-nats/enc.go b/vendor/github.com/nats-io/go-nats/enc.go new file mode 100644 index 00000000..291b7826 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/enc.go @@ -0,0 +1,249 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + // Default Encoders + . "github.com/nats-io/go-nats/encoders/builtin" +) + +// Encoder interface is for all register encoders +type Encoder interface { + Encode(subject string, v interface{}) ([]byte, error) + Decode(subject string, data []byte, vPtr interface{}) error +} + +var encMap map[string]Encoder +var encLock sync.Mutex + +// Indexe names into the Registered Encoders. +const ( + JSON_ENCODER = "json" + GOB_ENCODER = "gob" + DEFAULT_ENCODER = "default" +) + +func init() { + encMap = make(map[string]Encoder) + // Register json, gob and default encoder + RegisterEncoder(JSON_ENCODER, &JsonEncoder{}) + RegisterEncoder(GOB_ENCODER, &GobEncoder{}) + RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{}) +} + +// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to +// a nats server and have an extendable encoder system that will encode and decode messages +// from raw Go types. +type EncodedConn struct { + Conn *Conn + Enc Encoder +} + +// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered +// encoder. +func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { + if c == nil { + return nil, errors.New("nats: Nil Connection") + } + if c.IsClosed() { + return nil, ErrConnectionClosed + } + ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} + if ec.Enc == nil { + return nil, fmt.Errorf("No encoder registered for '%s'", encType) + } + return ec, nil +} + +// RegisterEncoder will register the encType with the given Encoder. Useful for customization. +func RegisterEncoder(encType string, enc Encoder) { + encLock.Lock() + defer encLock.Unlock() + encMap[encType] = enc +} + +// EncoderForType will return the registered Encoder for the encType. +func EncoderForType(encType string) Encoder { + encLock.Lock() + defer encLock.Unlock() + return encMap[encType] +} + +// Publish publishes the data argument to the given subject. The data argument +// will be encoded using the associated encoder. +func (c *EncodedConn) Publish(subject string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, _EMPTY_, b) +} + +// PublishRequest will perform a Publish() expecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, reply, b) +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply for the data v. 
A response will be +// decoded into the vPtrResponse. +func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.Request(subject, b, timeout) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err = c.Enc.Decode(m.Subject, m.Data, vPtr) + } + return err +} + +// Handler is a specific callback used for Subscribe. It is generalized to +// an interface{}, but we will discover its format and arguments at runtime +// and perform the correct callback, including de-marshaling JSON strings +// back into the appropriate struct based on the signature of the Handler. +// +// Handlers are expected to have one of four signatures. +// +// type person struct { +// Name string `json:"name,omitempty"` +// Age uint `json:"age,omitempty"` +// } +// +// handler := func(m *Msg) +// handler := func(p *person) +// handler := func(subject string, o *obj) +// handler := func(subject, reply string, o *obj) +// +// These forms allow a callback to request a raw Msg ptr, where the processing +// of the message from the wire is untouched. Process a JSON representation +// and demarshal it into the given struct, e.g. person. +// There are also variants where the callback wants either the subject, or the +// subject and the reply subject. +type Handler interface{} + +// Dissect the cb Handler's signature +func argInfo(cb Handler) (reflect.Type, int) { + cbType := reflect.TypeOf(cb) + if cbType.Kind() != reflect.Func { + panic("nats: Handler needs to be a func") + } + numArgs := cbType.NumIn() + if numArgs == 0 { + return nil, numArgs + } + return cbType.In(numArgs - 1), numArgs +} + +var emptyMsgType = reflect.TypeOf(&Msg{}) + +// Subscribe will create a subscription on the given subject and process incoming +// messages using the specified Handler. The Handler should be a func that matches +// a signature from the description of Handler from above. +func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, _EMPTY_, cb) +} + +// QueueSubscribe will create a queue subscription on the given subject and process +// incoming messages using the specified Handler. The Handler should be a func that +// matches a signature from the description of Handler from above. +func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, queue, cb) +} + +// Internal implementation that all public functions will use. 
+func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { + if cb == nil { + return nil, errors.New("nats: Handler required for EncodedConn Subscription") + } + argType, numArgs := argInfo(cb) + if argType == nil { + return nil, errors.New("nats: Handler requires at least one argument") + } + + cbValue := reflect.ValueOf(cb) + wantsRaw := (argType == emptyMsgType) + + natsCB := func(m *Msg) { + var oV []reflect.Value + if wantsRaw { + oV = []reflect.Value{reflect.ValueOf(m)} + } else { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { + c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) + } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + + // Callback Arity + switch numArgs { + case 1: + oV = []reflect.Value{oPtr} + case 2: + subV := reflect.ValueOf(m.Subject) + oV = []reflect.Value{subV, oPtr} + case 3: + subV := reflect.ValueOf(m.Subject) + replyV := reflect.ValueOf(m.Reply) + oV = []reflect.Value{subV, replyV, oPtr} + } + + } + cbValue.Call(oV) + } + + return c.Conn.subscribe(subject, queue, natsCB, nil) +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { + return c.Conn.FlushTimeout(timeout) +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (c *EncodedConn) Flush() error { + return c.Conn.Flush() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush(), etc. +func (c *EncodedConn) Close() { + c.Conn.Close() +} + +// LastError reports the last error encountered via the Connection. +func (c *EncodedConn) LastError() error { + return c.Conn.err +} diff --git a/vendor/github.com/nats-io/go-nats/enc_test.go b/vendor/github.com/nats-io/go-nats/enc_test.go new file mode 100644 index 00000000..ada5b024 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/enc_test.go @@ -0,0 +1,257 @@ +package nats_test + +import ( + "fmt" + "testing" + "time" + + . "github.com/nats-io/go-nats" + "github.com/nats-io/go-nats/encoders/protobuf" + "github.com/nats-io/go-nats/encoders/protobuf/testdata" +) + +// Since we import above nats packages, we need to have a different +// const name than TEST_PORT that we used on the other packages. 
+const ENC_TEST_PORT = 8268 + +var options = Options{ + Url: fmt.Sprintf("nats://localhost:%d", ENC_TEST_PORT), + AllowReconnect: true, + MaxReconnect: 10, + ReconnectWait: 100 * time.Millisecond, + Timeout: DefaultTimeout, +} + +//////////////////////////////////////////////////////////////////////////////// +// Encoded connection tests +//////////////////////////////////////////////////////////////////////////////// + +func TestPublishErrorAfterSubscribeDecodeError(t *testing.T) { + ts := RunServerOnPort(ENC_TEST_PORT) + defer ts.Shutdown() + opts := options + nc, _ := opts.Connect() + defer nc.Close() + c, _ := NewEncodedConn(nc, JSON_ENCODER) + + //Test message type + type Message struct { + Message string + } + const testSubj = "test" + + c.Subscribe(testSubj, func(msg *Message) {}) + + //Publish invalid json to catch decode error in subscription callback + c.Publish(testSubj, `foo`) + c.Flush() + + //Next publish should be successful + if err := c.Publish(testSubj, Message{"2"}); err != nil { + t.Error("Fail to send correct json message after decode error in subscription") + } +} + +func TestPublishErrorAfterInvalidPublishMessage(t *testing.T) { + ts := RunServerOnPort(ENC_TEST_PORT) + defer ts.Shutdown() + opts := options + nc, _ := opts.Connect() + defer nc.Close() + c, _ := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) + const testSubj = "test" + + c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}) + + //Publish invalid protobuff message to catch decode error + c.Publish(testSubj, "foo") + + //Next publish with valid protobuf message should be successful + if err := c.Publish(testSubj, &testdata.Person{Name: "Anatolii"}); err != nil { + t.Error("Fail to send correct protobuf message after invalid message publishing", err) + } +} + +func TestVariousFailureConditions(t *testing.T) { + ts := RunServerOnPort(ENC_TEST_PORT) + defer ts.Shutdown() + + dch := make(chan bool) + + opts := options + opts.AsyncErrorCB = func(_ *Conn, _ *Subscription, e error) { + dch <- true + } + nc, _ := opts.Connect() + nc.Close() + + if _, err := NewEncodedConn(nil, protobuf.PROTOBUF_ENCODER); err == nil { + t.Fatal("Expected an error") + } + + if _, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER); err == nil || err != ErrConnectionClosed { + t.Fatalf("Wrong error: %v instead of %v", err, ErrConnectionClosed) + } + + nc, _ = opts.Connect() + defer nc.Close() + + if _, err := NewEncodedConn(nc, "foo"); err == nil { + t.Fatal("Expected an error") + } + + c, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) + if err != nil { + t.Fatalf("Unable to create encoded connection: %v", err) + } + defer c.Close() + + if _, err := c.Subscribe("bar", func(subj, obj string) {}); err != nil { + t.Fatalf("Unable to create subscription: %v", err) + } + + if err := c.Publish("bar", &testdata.Person{Name: "Ivan"}); err != nil { + t.Fatalf("Unable to publish: %v", err) + } + + if err := Wait(dch); err != nil { + t.Fatal("Did not get the async error callback") + } + + if err := c.PublishRequest("foo", "bar", "foo"); err == nil { + t.Fatal("Expected an error") + } + + if err := c.Request("foo", "foo", nil, 2*time.Second); err == nil { + t.Fatal("Expected an error") + } + + nc.Close() + + if err := c.PublishRequest("foo", "bar", &testdata.Person{Name: "Ivan"}); err == nil { + t.Fatal("Expected an error") + } + + resp := &testdata.Person{} + if err := c.Request("foo", &testdata.Person{Name: "Ivan"}, resp, 2*time.Second); err == nil { + t.Fatal("Expected an error") + } + + if _, err := c.Subscribe("foo", nil); err == 
nil { + t.Fatal("Expected an error") + } + + if _, err := c.Subscribe("foo", func() {}); err == nil { + t.Fatal("Expected an error") + } + + func() { + defer func() { + if r := recover(); r == nil { + t.Fatal("Expected an error") + } + }() + if _, err := c.Subscribe("foo", "bar"); err == nil { + t.Fatal("Expected an error") + } + }() +} + +func TestRequest(t *testing.T) { + ts := RunServerOnPort(ENC_TEST_PORT) + defer ts.Shutdown() + + dch := make(chan bool) + + opts := options + nc, _ := opts.Connect() + defer nc.Close() + + c, err := NewEncodedConn(nc, protobuf.PROTOBUF_ENCODER) + if err != nil { + t.Fatalf("Unable to create encoded connection: %v", err) + } + defer c.Close() + + sentName := "Ivan" + recvName := "Kozlovic" + + if _, err := c.Subscribe("foo", func(_, reply string, p *testdata.Person) { + if p.Name != sentName { + t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) + } + c.Publish(reply, &testdata.Person{Name: recvName}) + dch <- true + }); err != nil { + t.Fatalf("Unable to create subscription: %v", err) + } + if _, err := c.Subscribe("foo", func(_ string, p *testdata.Person) { + if p.Name != sentName { + t.Fatalf("Got wrong name: %v instead of %v", p.Name, sentName) + } + dch <- true + }); err != nil { + t.Fatalf("Unable to create subscription: %v", err) + } + + if err := c.Publish("foo", &testdata.Person{Name: sentName}); err != nil { + t.Fatalf("Unable to publish: %v", err) + } + + if err := Wait(dch); err != nil { + t.Fatal("Did not get message") + } + if err := Wait(dch); err != nil { + t.Fatal("Did not get message") + } + + response := &testdata.Person{} + if err := c.Request("foo", &testdata.Person{Name: sentName}, response, 2*time.Second); err != nil { + t.Fatalf("Unable to publish: %v", err) + } + if response == nil { + t.Fatal("No response received") + } else if response.Name != recvName { + t.Fatalf("Wrong response: %v instead of %v", response.Name, recvName) + } + + if err := Wait(dch); err != nil { + t.Fatal("Did not get message") + } + if err := Wait(dch); err != nil { + t.Fatal("Did not get message") + } + + c2, err := NewEncodedConn(nc, GOB_ENCODER) + if err != nil { + t.Fatalf("Unable to create encoded connection: %v", err) + } + defer c2.Close() + + if _, err := c2.QueueSubscribe("bar", "baz", func(m *Msg) { + response := &Msg{Subject: m.Reply, Data: []byte(recvName)} + c2.Conn.PublishMsg(response) + dch <- true + }); err != nil { + t.Fatalf("Unable to create subscription: %v", err) + } + + mReply := Msg{} + if err := c2.Request("bar", &Msg{Data: []byte(sentName)}, &mReply, 2*time.Second); err != nil { + t.Fatalf("Unable to send request: %v", err) + } + if string(mReply.Data) != recvName { + t.Fatalf("Wrong reply: %v instead of %v", string(mReply.Data), recvName) + } + + if err := Wait(dch); err != nil { + t.Fatal("Did not get message") + } + + if c.LastError() != nil { + t.Fatalf("Unexpected connection error: %v", c.LastError()) + } + if c2.LastError() != nil { + t.Fatalf("Unexpected connection error: %v", c2.LastError()) + } +} diff --git a/vendor/github.com/nats-io/go-nats/example_test.go b/vendor/github.com/nats-io/go-nats/example_test.go new file mode 100644 index 00000000..64a65867 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/example_test.go @@ -0,0 +1,266 @@ +package nats_test + +import ( + "fmt" + "time" + + "github.com/nats-io/go-nats" +) + +// Shows different ways to create a Conn +func ExampleConnect() { + + nc, _ := nats.Connect(nats.DefaultURL) + nc.Close() + + nc, _ = 
nats.Connect("nats://derek:secretpassword@demo.nats.io:4222") + nc.Close() + + nc, _ = nats.Connect("tls://derek:secretpassword@demo.nats.io:4443") + nc.Close() + + opts := nats.Options{ + AllowReconnect: true, + MaxReconnect: 10, + ReconnectWait: 5 * time.Second, + Timeout: 1 * time.Second, + } + + nc, _ = opts.Connect() + nc.Close() +} + +// This Example shows an asynchronous subscriber. +func ExampleConn_Subscribe() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + nc.Subscribe("foo", func(m *nats.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) + }) +} + +// This Example shows a synchronous subscriber. +func ExampleConn_SubscribeSync() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + sub, _ := nc.SubscribeSync("foo") + m, err := sub.NextMsg(1 * time.Second) + if err == nil { + fmt.Printf("Received a message: %s\n", string(m.Data)) + } else { + fmt.Println("NextMsg timed out.") + } +} + +func ExampleSubscription_NextMsg() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + sub, _ := nc.SubscribeSync("foo") + m, err := sub.NextMsg(1 * time.Second) + if err == nil { + fmt.Printf("Received a message: %s\n", string(m.Data)) + } else { + fmt.Println("NextMsg timed out.") + } +} + +func ExampleSubscription_Unsubscribe() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + sub, _ := nc.SubscribeSync("foo") + // ... + sub.Unsubscribe() +} + +func ExampleConn_Publish() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + nc.Publish("foo", []byte("Hello World!")) +} + +func ExampleConn_PublishMsg() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} + nc.PublishMsg(msg) +} + +func ExampleConn_Flush() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} + for i := 0; i < 1000; i++ { + nc.PublishMsg(msg) + } + err := nc.Flush() + if err == nil { + // Everything has been processed by the server for nc *Conn. + } +} + +func ExampleConn_FlushTimeout() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + msg := &nats.Msg{Subject: "foo", Reply: "bar", Data: []byte("Hello World!")} + for i := 0; i < 1000; i++ { + nc.PublishMsg(msg) + } + // Only wait for up to 1 second for Flush + err := nc.FlushTimeout(1 * time.Second) + if err == nil { + // Everything has been processed by the server for nc *Conn. 
+ } +} + +func ExampleConn_Request() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + nc.Subscribe("foo", func(m *nats.Msg) { + nc.Publish(m.Reply, []byte("I will help you")) + }) + nc.Request("foo", []byte("help"), 50*time.Millisecond) +} + +func ExampleConn_QueueSubscribe() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + received := 0 + + nc.QueueSubscribe("foo", "worker_group", func(_ *nats.Msg) { + received++ + }) +} + +func ExampleSubscription_AutoUnsubscribe() { + nc, _ := nats.Connect(nats.DefaultURL) + defer nc.Close() + + received, wanted, total := 0, 10, 100 + + sub, _ := nc.Subscribe("foo", func(_ *nats.Msg) { + received++ + }) + sub.AutoUnsubscribe(wanted) + + for i := 0; i < total; i++ { + nc.Publish("foo", []byte("Hello")) + } + nc.Flush() + + fmt.Printf("Received = %d", received) +} + +func ExampleConn_Close() { + nc, _ := nats.Connect(nats.DefaultURL) + nc.Close() +} + +// Shows how to wrap a Conn into an EncodedConn +func ExampleNewEncodedConn() { + nc, _ := nats.Connect(nats.DefaultURL) + c, _ := nats.NewEncodedConn(nc, "json") + c.Close() +} + +// EncodedConn can publish virtually anything just +// by passing it in. The encoder will be used to properly +// encode the raw Go type +func ExampleEncodedConn_Publish() { + nc, _ := nats.Connect(nats.DefaultURL) + c, _ := nats.NewEncodedConn(nc, "json") + defer c.Close() + + type person struct { + Name string + Address string + Age int + } + + me := &person{Name: "derek", Age: 22, Address: "85 Second St"} + c.Publish("hello", me) +} + +// EncodedConn's subscribers will automatically decode the +// wire data into the requested Go type using the Decode() +// method of the registered Encoder. The callback signature +// can also vary to include additional data, such as subject +// and reply subjects. +func ExampleEncodedConn_Subscribe() { + nc, _ := nats.Connect(nats.DefaultURL) + c, _ := nats.NewEncodedConn(nc, "json") + defer c.Close() + + type person struct { + Name string + Address string + Age int + } + + c.Subscribe("hello", func(p *person) { + fmt.Printf("Received a person! %+v\n", p) + }) + + c.Subscribe("hello", func(subj, reply string, p *person) { + fmt.Printf("Received a person on subject %s! %+v\n", subj, p) + }) + + me := &person{Name: "derek", Age: 22, Address: "85 Second St"} + c.Publish("hello", me) +} + +// BindSendChan() allows binding of a Go channel to a nats +// subject for publish operations. The Encoder attached to the +// EncodedConn will be used for marshaling. +func ExampleEncodedConn_BindSendChan() { + nc, _ := nats.Connect(nats.DefaultURL) + c, _ := nats.NewEncodedConn(nc, "json") + defer c.Close() + + type person struct { + Name string + Address string + Age int + } + + ch := make(chan *person) + c.BindSendChan("hello", ch) + + me := &person{Name: "derek", Age: 22, Address: "85 Second St"} + ch <- me +} + +// BindRecvChan() allows binding of a Go channel to a nats +// subject for subscribe operations. The Encoder attached to the +// EncodedConn will be used for un-marshaling. 
+func ExampleEncodedConn_BindRecvChan() { + nc, _ := nats.Connect(nats.DefaultURL) + c, _ := nats.NewEncodedConn(nc, "json") + defer c.Close() + + type person struct { + Name string + Address string + Age int + } + + ch := make(chan *person) + c.BindRecvChan("hello", ch) + + me := &person{Name: "derek", Age: 22, Address: "85 Second St"} + c.Publish("hello", me) + + // Receive the publish directly on a channel + who := <-ch + + fmt.Printf("%v says hello!\n", who) +} diff --git a/vendor/github.com/nats-io/go-nats/nats.go b/vendor/github.com/nats-io/go-nats/nats.go new file mode 100644 index 00000000..fbb86c03 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/nats.go @@ -0,0 +1,2980 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/url" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/nats-io/go-nats/util" + "github.com/nats-io/nuid" +) + +// Default Constants +const ( + Version = "1.3.1" + DefaultURL = "nats://localhost:4222" + DefaultPort = 4222 + DefaultMaxReconnect = 60 + DefaultReconnectWait = 2 * time.Second + DefaultTimeout = 2 * time.Second + DefaultPingInterval = 2 * time.Minute + DefaultMaxPingOut = 2 + DefaultMaxChanLen = 8192 // 8k + DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB + RequestChanLen = 8 + LangString = "go" +) + +// STALE_CONNECTION is for detection and proper handling of stale connections. +const STALE_CONNECTION = "stale connection" + +// PERMISSIONS_ERR is for when nats server subject authorization has failed. +const PERMISSIONS_ERR = "permissions violation" + +// AUTHORIZATION_ERR is for when nats server user authorization has failed. 
+const AUTHORIZATION_ERR = "authorization violation" + +// Errors +var ( + ErrConnectionClosed = errors.New("nats: connection closed") + ErrSecureConnRequired = errors.New("nats: secure connection required") + ErrSecureConnWanted = errors.New("nats: secure connection not available") + ErrBadSubscription = errors.New("nats: invalid subscription") + ErrTypeSubscription = errors.New("nats: invalid subscription type") + ErrBadSubject = errors.New("nats: invalid subject") + ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") + ErrTimeout = errors.New("nats: timeout") + ErrBadTimeout = errors.New("nats: timeout invalid") + ErrAuthorization = errors.New("nats: authorization violation") + ErrNoServers = errors.New("nats: no servers available for connection") + ErrJsonParse = errors.New("nats: connect message, json parse error") + ErrChanArg = errors.New("nats: argument needs to be a channel type") + ErrMaxPayload = errors.New("nats: maximum payload exceeded") + ErrMaxMessages = errors.New("nats: maximum messages delivered") + ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") + ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") + ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") + ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") + ErrInvalidConnection = errors.New("nats: invalid connection") + ErrInvalidMsg = errors.New("nats: invalid message or message nil") + ErrInvalidArg = errors.New("nats: invalid argument") + ErrInvalidContext = errors.New("nats: invalid context") + ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) +) + +// GetDefaultOptions returns default configuration options for the client. +func GetDefaultOptions() Options { + return Options{ + AllowReconnect: true, + MaxReconnect: DefaultMaxReconnect, + ReconnectWait: DefaultReconnectWait, + Timeout: DefaultTimeout, + PingInterval: DefaultPingInterval, + MaxPingsOut: DefaultMaxPingOut, + SubChanLen: DefaultMaxChanLen, + ReconnectBufSize: DefaultReconnectBufSize, + } +} + +// DEPRECATED: Use GetDefaultOptions() instead. +// DefaultOptions is not safe for use by multiple clients. +// For details see #308. +var DefaultOptions = GetDefaultOptions() + +// Status represents the state of the connection. +type Status int + +const ( + DISCONNECTED = Status(iota) + CONNECTED + CLOSED + RECONNECTING + CONNECTING +) + +// ConnHandler is used for asynchronous events such as +// disconnected and closed connections. +type ConnHandler func(*Conn) + +// ErrHandler is used to process asynchronous errors encountered +// while processing inbound messages. +type ErrHandler func(*Conn, *Subscription, error) + +// asyncCB is used to preserve order for async callbacks. +type asyncCB func() + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// Options can be used to create a customized connection. +type Options struct { + + // Url represents a single NATS server url to which the client + // will be connecting. If the Servers option is also set, it + // then becomes the first server in the Servers array. + Url string + + // Servers is a configured set of servers which this client + // will use when attempting to connect. + Servers []string + + // NoRandomize configures whether we will randomize the + // server pool. + NoRandomize bool + + // Name is an optional name label which will be sent to the server + // on CONNECT to identify the client. 
+ Name string + + // Verbose signals the server to send an OK ack for commands + // successfully processed by the server. + Verbose bool + + // Pedantic signals the server whether it should be doing further + // validation of subjects. + Pedantic bool + + // Secure enables TLS secure connections that skip server + // verification by default. NOT RECOMMENDED. + Secure bool + + // TLSConfig is a custom TLS configuration to use for secure + // transports. + TLSConfig *tls.Config + + // AllowReconnect enables reconnection logic to be used when we + // encounter a disconnect from the current server. + AllowReconnect bool + + // MaxReconnect sets the number of reconnect attempts that will be + // tried before giving up. If negative, then it will never give up + // trying to reconnect. + MaxReconnect int + + // ReconnectWait sets the time to backoff after attempting a reconnect + // to a server that we were already connected to previously. + ReconnectWait time.Duration + + // Timeout sets the timeout for a Dial operation on a connection. + Timeout time.Duration + + // FlusherTimeout is the maximum time to wait for the flusher loop + // to be able to finish writing to the underlying connection. + FlusherTimeout time.Duration + + // PingInterval is the period at which the client will be sending ping + // commands to the server, disabled if 0 or negative. + PingInterval time.Duration + + // MaxPingsOut is the maximum number of pending ping commands that can + // be awaiting a response before raising an ErrStaleConnection error. + MaxPingsOut int + + // ClosedCB sets the closed handler that is called when a client will + // no longer be connected. + ClosedCB ConnHandler + + // DisconnectedCB sets the disconnected handler that is called + // whenever the connection is disconnected. + DisconnectedCB ConnHandler + + // ReconnectedCB sets the reconnected handler called whenever + // the connection is successfully reconnected. + ReconnectedCB ConnHandler + + // DiscoveredServersCB sets the callback that is invoked whenever a new + // server has joined the cluster. + DiscoveredServersCB ConnHandler + + // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) + AsyncErrorCB ErrHandler + + // ReconnectBufSize is the size of the backing bufio during reconnect. + // Once this has been exhausted publish operations will return an error. + ReconnectBufSize int + + // SubChanLen is the size of the buffered channel used between the socket + // Go routine and the message delivery for SyncSubscriptions. + // NOTE: This does not affect AsyncSubscriptions which are + // dictated by PendingLimits() + SubChanLen int + + // User sets the username to be used when connecting to the server. + User string + + // Password sets the password to be used when connecting to a server. + Password string + + // Token sets the token to be used when connecting to a server. + Token string + + // Dialer allows a custom Dialer when forming connections. + Dialer *net.Dialer + + // UseOldRequestStyle forces the old method of Requests that utilize + // a new Inbox and a new Subscription for each request. + UseOldRequestStyle bool +} + +const ( + // Scratch storage for assembling protocol headers + scratchSize = 512 + + // The size of the bufio reader/writer on top of the socket. + defaultBufSize = 32768 + + // The buffered size of the flush "kick" channel + flushChanSize = 1024 + + // Default server pool size + srvPoolSize = 4 + + // Channel size for the async callback handler. 
+ asyncCBChanSize = 32 + + // NUID size + nuidSize = 22 +) + +// A Conn represents a bare connection to a nats-server. +// It can send and receive []byte payloads. +type Conn struct { + // Keep all members for which we use atomic at the beginning of the + // struct and make sure they are all 64bits (or use padding if necessary). + // atomic.* functions crash on 32bit machines if operand is not aligned + // at 64bit. See https://github.com/golang/go/issues/599 + Statistics + mu sync.Mutex + Opts Options + wg *sync.WaitGroup + url *url.URL + conn net.Conn + srvPool []*srv + urls map[string]struct{} // Keep track of all known URLs (used by processInfo) + bw *bufio.Writer + pending *bytes.Buffer + fch chan struct{} + info serverInfo + ssid int64 + subsMu sync.RWMutex + subs map[int64]*Subscription + ach chan asyncCB + pongs []chan struct{} + scratch [scratchSize]byte + status Status + initc bool // true if the connection is performing the initial connect + err error + ps *parseState + ptmr *time.Timer + pout int + + // New style response handler + respSub string // The wildcard subject + respMux *Subscription // A single response subscription + respMap map[string]chan *Msg // Request map for the response msg channels + respSetup sync.Once // Ensures response subscription occurs once +} + +// A Subscription represents interest in a given subject. +type Subscription struct { + mu sync.Mutex + sid int64 + + // Subject that represents this subscription. This can be different + // than the received subject inside a Msg if this is a wildcard. + Subject string + + // Optional queue group name. If present, all subscriptions with the + // same name will form a distributed queue, and each message will + // only be processed by one member of the group. + Queue string + + delivered uint64 + max uint64 + conn *Conn + mcb MsgHandler + mch chan *Msg + closed bool + sc bool + connClosed bool + + // Type of Subscription + typ SubscriptionType + + // Async linked list + pHead *Msg + pTail *Msg + pCond *sync.Cond + + // Pending stats, async subscriptions, high-speed etc. + pMsgs int + pBytes int + pMsgsMax int + pBytesMax int + pMsgsLimit int + pBytesLimit int + dropped int +} + +// Msg is a structure used by Subscribers and PublishMsg(). +type Msg struct { + Subject string + Reply string + Data []byte + Sub *Subscription + next *Msg +} + +// Tracks various stats received and sent on this connection, +// including counts for messages and bytes. +type Statistics struct { + InMsgs uint64 + OutMsgs uint64 + InBytes uint64 + OutBytes uint64 + Reconnects uint64 +} + +// Tracks individual backend servers. +type srv struct { + url *url.URL + didConnect bool + reconnects int + lastAttempt time.Time + isImplicit bool +} + +type serverInfo struct { + Id string `json:"server_id"` + Host string `json:"host"` + Port uint `json:"port"` + Version string `json:"version"` + AuthRequired bool `json:"auth_required"` + TLSRequired bool `json:"tls_required"` + MaxPayload int64 `json:"max_payload"` + ConnectURLs []string `json:"connect_urls,omitempty"` +} + +const ( + // clientProtoZero is the original client protocol from 2009. + // http://nats.io/documentation/internals/nats-protocol/ + /* clientProtoZero */ _ = iota + // clientProtoInfo signals a client can receive more then the original INFO block. + // This can be used to update clients on other cluster members, etc. 
+ clientProtoInfo +) + +type connectInfo struct { + Verbose bool `json:"verbose"` + Pedantic bool `json:"pedantic"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` + TLS bool `json:"tls_required"` + Name string `json:"name"` + Lang string `json:"lang"` + Version string `json:"version"` + Protocol int `json:"protocol"` +} + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// Connect will attempt to connect to the NATS system. +// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 +// Comma separated arrays are also supported, e.g. urlA, urlB. +// Options start with the defaults but can be overridden. +func Connect(url string, options ...Option) (*Conn, error) { + opts := GetDefaultOptions() + opts.Servers = processUrlString(url) + for _, opt := range options { + if err := opt(&opts); err != nil { + return nil, err + } + } + return opts.Connect() +} + +// Options that can be passed to Connect. + +// Name is an Option to set the client name. +func Name(name string) Option { + return func(o *Options) error { + o.Name = name + return nil + } +} + +// Secure is an Option to enable TLS secure connections that skip server verification by default. +// Pass a TLS Configuration for proper TLS. +func Secure(tls ...*tls.Config) Option { + return func(o *Options) error { + o.Secure = true + // Use of variadic just simplifies testing scenarios. We only take the first one. + // fixme(DLC) - Could panic if more than one. Could also do TLS option. + if len(tls) > 1 { + return ErrMultipleTLSConfigs + } + if len(tls) == 1 { + o.TLSConfig = tls[0] + } + return nil + } +} + +// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. If Secure is +// not already set this will set it as well. +func RootCAs(file ...string) Option { + return func(o *Options) error { + pool := x509.NewCertPool() + for _, f := range file { + rootPEM, err := ioutil.ReadFile(f) + if err != nil || rootPEM == nil { + return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err) + } + ok := pool.AppendCertsFromPEM(rootPEM) + if !ok { + return fmt.Errorf("nats: failed to parse root certificate from %q", f) + } + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.RootCAs = pool + o.Secure = true + return nil + } +} + +// ClientCert is a helper option to provide the client certificate from a file. If Secure is +// not already set this will set it as well +func ClientCert(certFile, keyFile string) Option { + return func(o *Options) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return fmt.Errorf("nats: error loading client certificate: %v", err) + } + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("nats: error parsing client certificate: %v", err) + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.Certificates = []tls.Certificate{cert} + o.Secure = true + return nil + } +} + +// NoReconnect is an Option to turn off reconnect behavior. +func NoReconnect() Option { + return func(o *Options) error { + o.AllowReconnect = false + return nil + } +} + +// DontRandomize is an Option to turn off randomizing the server pool. 
+func DontRandomize() Option { + return func(o *Options) error { + o.NoRandomize = true + return nil + } +} + +// ReconnectWait is an Option to set the wait time between reconnect attempts. +func ReconnectWait(t time.Duration) Option { + return func(o *Options) error { + o.ReconnectWait = t + return nil + } +} + +// MaxReconnects is an Option to set the maximum number of reconnect attempts. +func MaxReconnects(max int) Option { + return func(o *Options) error { + o.MaxReconnect = max + return nil + } +} + +// Timeout is an Option to set the timeout for Dial on a connection. +func Timeout(t time.Duration) Option { + return func(o *Options) error { + o.Timeout = t + return nil + } +} + +// DisconnectHandler is an Option to set the disconnected handler. +func DisconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DisconnectedCB = cb + return nil + } +} + +// ReconnectHandler is an Option to set the reconnected handler. +func ReconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ReconnectedCB = cb + return nil + } +} + +// ClosedHandler is an Option to set the closed handler. +func ClosedHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ClosedCB = cb + return nil + } +} + +// DiscoveredServersHandler is an Option to set the new servers handler. +func DiscoveredServersHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DiscoveredServersCB = cb + return nil + } +} + +// ErrHandler is an Option to set the async error handler. +func ErrorHandler(cb ErrHandler) Option { + return func(o *Options) error { + o.AsyncErrorCB = cb + return nil + } +} + +// UserInfo is an Option to set the username and password to +// use when not included directly in the URLs. +func UserInfo(user, password string) Option { + return func(o *Options) error { + o.User = user + o.Password = password + return nil + } +} + +// Token is an Option to set the token to use when not included +// directly in the URLs. +func Token(token string) Option { + return func(o *Options) error { + o.Token = token + return nil + } +} + +// Dialer is an Option to set the dialer which will be used when +// attempting to establish a connection. +func Dialer(dialer *net.Dialer) Option { + return func(o *Options) error { + o.Dialer = dialer + return nil + } +} + +// UseOldRequestyStyle is an Option to force usage of the old Request style. +func UseOldRequestStyle() Option { + return func(o *Options) error { + o.UseOldRequestStyle = true + return nil + } +} + +// Handler processing + +// SetDisconnectHandler will set the disconnect event handler. +func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DisconnectedCB = dcb +} + +// SetReconnectHandler will set the reconnect event handler. +func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ReconnectedCB = rcb +} + +// SetDiscoveredServersHandler will set the discovered servers handler. +func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DiscoveredServersCB = dscb +} + +// SetClosedHandler will set the reconnect event handler. +func (nc *Conn) SetClosedHandler(cb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ClosedCB = cb +} + +// SetErrHandler will set the async error handler. 
+func (nc *Conn) SetErrorHandler(cb ErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.AsyncErrorCB = cb +} + +// Process the url string argument to Connect. Return an array of +// urls, even if only one. +func processUrlString(url string) []string { + urls := strings.Split(url, ",") + for i, s := range urls { + urls[i] = strings.TrimSpace(s) + } + return urls +} + +// Connect will attempt to connect to a NATS server with multiple options. +func (o Options) Connect() (*Conn, error) { + nc := &Conn{Opts: o} + + // Some default options processing. + if nc.Opts.MaxPingsOut == 0 { + nc.Opts.MaxPingsOut = DefaultMaxPingOut + } + // Allow old default for channel length to work correctly. + if nc.Opts.SubChanLen == 0 { + nc.Opts.SubChanLen = DefaultMaxChanLen + } + // Default ReconnectBufSize + if nc.Opts.ReconnectBufSize == 0 { + nc.Opts.ReconnectBufSize = DefaultReconnectBufSize + } + // Ensure that Timeout is not 0 + if nc.Opts.Timeout == 0 { + nc.Opts.Timeout = DefaultTimeout + } + + // Allow custom Dialer for connecting using DialTimeout by default + if nc.Opts.Dialer == nil { + nc.Opts.Dialer = &net.Dialer{ + Timeout: nc.Opts.Timeout, + } + } + + if err := nc.setupServerPool(); err != nil { + return nil, err + } + + // Create the async callback channel. + nc.ach = make(chan asyncCB, asyncCBChanSize) + + if err := nc.connect(); err != nil { + return nil, err + } + + // Spin up the async cb dispatcher on success + go nc.asyncDispatch() + + return nc, nil +} + +const ( + _CRLF_ = "\r\n" + _EMPTY_ = "" + _SPC_ = " " + _PUB_P_ = "PUB " +) + +const ( + _OK_OP_ = "+OK" + _ERR_OP_ = "-ERR" + _PONG_OP_ = "PONG" + _INFO_OP_ = "INFO" +) + +const ( + conProto = "CONNECT %s" + _CRLF_ + pingProto = "PING" + _CRLF_ + pongProto = "PONG" + _CRLF_ + subProto = "SUB %s %s %d" + _CRLF_ + unsubProto = "UNSUB %d %s" + _CRLF_ + okProto = _OK_OP_ + _CRLF_ +) + +// Return the currently selected server +func (nc *Conn) currentServer() (int, *srv) { + for i, s := range nc.srvPool { + if s == nil { + continue + } + if s.url == nc.url { + return i, s + } + } + return -1, nil +} + +// Pop the current server and put onto the end of the list. Select head of list as long +// as number of reconnect attempts under MaxReconnect. +func (nc *Conn) selectNextServer() (*srv, error) { + i, s := nc.currentServer() + if i < 0 { + return nil, ErrNoServers + } + sp := nc.srvPool + num := len(sp) + copy(sp[i:num-1], sp[i+1:num]) + maxReconnect := nc.Opts.MaxReconnect + if maxReconnect < 0 || s.reconnects < maxReconnect { + nc.srvPool[num-1] = s + } else { + nc.srvPool = sp[0 : num-1] + } + if len(nc.srvPool) <= 0 { + nc.url = nil + return nil, ErrNoServers + } + nc.url = nc.srvPool[0].url + return nc.srvPool[0], nil +} + +// Will assign the correct server to the nc.Url +func (nc *Conn) pickServer() error { + nc.url = nil + if len(nc.srvPool) <= 0 { + return ErrNoServers + } + for _, s := range nc.srvPool { + if s != nil { + nc.url = s.url + return nil + } + } + return ErrNoServers +} + +const tlsScheme = "tls" + +// Create the server pool using the options given. +// We will place a Url option first, followed by any +// Server Options. We will randomize the server pool unless +// the NoRandomize flag is set. 
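+// As a rough illustration of how the Url and Servers options feed this pool
+// (a sketch only; the addresses are placeholders):
+//
+//	opts := nats.GetDefaultOptions()
+//	opts.Url = "nats://primary:4222"              // always tried first
+//	opts.Servers = []string{"nats://backup:4222"} // randomized unless NoRandomize is set
+//	nc, err := opts.Connect()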
+func (nc *Conn) setupServerPool() error { + nc.srvPool = make([]*srv, 0, srvPoolSize) + nc.urls = make(map[string]struct{}, srvPoolSize) + + // Create srv objects from each url string in nc.Opts.Servers + // and add them to the pool + for _, urlString := range nc.Opts.Servers { + if err := nc.addURLToPool(urlString, false); err != nil { + return err + } + } + + // Randomize if allowed to + if !nc.Opts.NoRandomize { + nc.shufflePool() + } + + // Normally, if this one is set, Options.Servers should not be, + // but we always allowed that, so continue to do so. + if nc.Opts.Url != _EMPTY_ { + // Add to the end of the array + if err := nc.addURLToPool(nc.Opts.Url, false); err != nil { + return err + } + // Then swap it with first to guarantee that Options.Url is tried first. + last := len(nc.srvPool) - 1 + if last > 0 { + nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] + } + } else if len(nc.srvPool) <= 0 { + // Place default URL if pool is empty. + if err := nc.addURLToPool(DefaultURL, false); err != nil { + return err + } + } + + // Check for Scheme hint to move to TLS mode. + for _, srv := range nc.srvPool { + if srv.url.Scheme == tlsScheme { + // FIXME(dlc), this is for all in the pool, should be case by case. + nc.Opts.Secure = true + if nc.Opts.TLSConfig == nil { + nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + } + } + + return nc.pickServer() +} + +// addURLToPool adds an entry to the server pool +func (nc *Conn) addURLToPool(sURL string, implicit bool) error { + u, err := url.Parse(sURL) + if err != nil { + return err + } + s := &srv{url: u, isImplicit: implicit} + nc.srvPool = append(nc.srvPool, s) + nc.urls[u.Host] = struct{}{} + return nil +} + +// shufflePool swaps randomly elements in the server pool +func (nc *Conn) shufflePool() { + if len(nc.srvPool) <= 1 { + return + } + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + for i := range nc.srvPool { + j := r.Intn(i + 1) + nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] + } +} + +// createConn will connect to the server and wrap the appropriate +// bufio structures. It will do the right thing when an existing +// connection is in place. +func (nc *Conn) createConn() (err error) { + if nc.Opts.Timeout < 0 { + return ErrBadTimeout + } + if _, cur := nc.currentServer(); cur == nil { + return ErrNoServers + } else { + cur.lastAttempt = time.Now() + } + + dialer := nc.Opts.Dialer + nc.conn, err = dialer.Dial("tcp", nc.url.Host) + if err != nil { + return err + } + + // No clue why, but this stalls and kills performance on Mac (Mavericks). + // https://code.google.com/p/go/issues/detail?id=6930 + //if ip, ok := nc.conn.(*net.TCPConn); ok { + // ip.SetReadBuffer(defaultBufSize) + //} + + if nc.pending != nil && nc.bw != nil { + // Move to pending buffer. + nc.bw.Flush() + } + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) + return nil +} + +// makeTLSConn will wrap an existing Conn using TLS +func (nc *Conn) makeTLSConn() { + // Allow the user to configure their own tls.Config structure, otherwise + // default to InsecureSkipVerify. + // TODO(dlc) - We should make the more secure version the default. 
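+	//
+	// An illustrative caller-side configuration (sketch only; the file names
+	// are placeholders, not defaults shipped with the library):
+	//
+	//	nc, err := nats.Connect("tls://demo:4222",
+	//		nats.RootCAs("./ca.pem"),
+	//		nats.ClientCert("./client-cert.pem", "./client-key.pem"),
+	//	)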
+ if nc.Opts.TLSConfig != nil { + tlsCopy := util.CloneTLSConfig(nc.Opts.TLSConfig) + // If its blank we will override it with the current host + if tlsCopy.ServerName == _EMPTY_ { + h, _, _ := net.SplitHostPort(nc.url.Host) + tlsCopy.ServerName = h + } + nc.conn = tls.Client(nc.conn, tlsCopy) + } else { + nc.conn = tls.Client(nc.conn, &tls.Config{InsecureSkipVerify: true}) + } + conn := nc.conn.(*tls.Conn) + conn.Handshake() + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) +} + +// waitForExits will wait for all socket watcher Go routines to +// be shutdown before proceeding. +func (nc *Conn) waitForExits(wg *sync.WaitGroup) { + // Kick old flusher forcefully. + select { + case nc.fch <- struct{}{}: + default: + } + + // Wait for any previous go routines. + if wg != nil { + wg.Wait() + } +} + +// spinUpGoRoutines will launch the Go routines responsible for +// reading and writing to the socket. This will be launched via a +// go routine itself to release any locks that may be held. +// We also use a WaitGroup to make sure we only start them on a +// reconnect when the previous ones have exited. +func (nc *Conn) spinUpGoRoutines() { + // Make sure everything has exited. + nc.waitForExits(nc.wg) + + // Create a new waitGroup instance for this run. + nc.wg = &sync.WaitGroup{} + // We will wait on both. + nc.wg.Add(2) + + // Spin up the readLoop and the socket flusher. + go nc.readLoop(nc.wg) + go nc.flusher(nc.wg) + + nc.mu.Lock() + if nc.Opts.PingInterval > 0 { + if nc.ptmr == nil { + nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) + } else { + nc.ptmr.Reset(nc.Opts.PingInterval) + } + } + nc.mu.Unlock() +} + +// Report the connected server's Url +func (nc *Conn) ConnectedUrl() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.url.String() +} + +// Report the connected server's Id +func (nc *Conn) ConnectedServerId() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Id +} + +// Low level setup for structs, etc +func (nc *Conn) setup() { + nc.subs = make(map[int64]*Subscription) + nc.pongs = make([]chan struct{}, 0, 8) + + nc.fch = make(chan struct{}, flushChanSize) + + // Setup scratch outbound buffer for PUB + pub := nc.scratch[:len(_PUB_P_)] + copy(pub, _PUB_P_) +} + +// Process a connected connection and initialize properly. +func (nc *Conn) processConnectInit() error { + + // Set out deadline for the whole connect process + nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) + defer nc.conn.SetDeadline(time.Time{}) + + // Set our status to connecting. + nc.status = CONNECTING + + // Process the INFO protocol received from the server + err := nc.processExpectedInfo() + if err != nil { + return err + } + + // Send the CONNECT protocol along with the initial PING protocol. + // Wait for the PONG response (or any error that we get from the server). + err = nc.sendConnect() + if err != nil { + return err + } + + // Reset the number of PING sent out + nc.pout = 0 + + go nc.spinUpGoRoutines() + + return nil +} + +// Main connect function. Will connect to the nats-server +func (nc *Conn) connect() error { + var returnedErr error + + // Create actual socket connection + // For first connect we walk all servers in the pool and try + // to connect immediately. + nc.mu.Lock() + nc.initc = true + // The pool may change inside the loop iteration due to INFO protocol. 
+	for i := 0; i < len(nc.srvPool); i++ {
+		nc.url = nc.srvPool[i].url
+
+		if err := nc.createConn(); err == nil {
+			// This was moved out of processConnectInit() because
+			// that function is now invoked from doReconnect() too.
+			nc.setup()
+
+			err = nc.processConnectInit()
+
+			if err == nil {
+				nc.srvPool[i].didConnect = true
+				nc.srvPool[i].reconnects = 0
+				returnedErr = nil
+				break
+			} else {
+				returnedErr = err
+				nc.mu.Unlock()
+				nc.close(DISCONNECTED, false)
+				nc.mu.Lock()
+				nc.url = nil
+			}
+		} else {
+			// Cancel out default connection refused, will trigger the
+			// No servers error conditional
+			if matched, _ := regexp.Match(`connection refused`, []byte(err.Error())); matched {
+				returnedErr = nil
+			}
+		}
+	}
+	nc.initc = false
+	defer nc.mu.Unlock()
+
+	if returnedErr == nil && nc.status != CONNECTED {
+		returnedErr = ErrNoServers
+	}
+	return returnedErr
+}
+
+// This will check to see if the connection should be
+// secure. This can be dictated from either end and should
+// only be called after the INFO protocol has been received.
+func (nc *Conn) checkForSecure() error {
+	// Check to see if we need to engage TLS
+	o := nc.Opts
+
+	// Check for mismatch in setups
+	if o.Secure && !nc.info.TLSRequired {
+		return ErrSecureConnWanted
+	} else if nc.info.TLSRequired && !o.Secure {
+		return ErrSecureConnRequired
+	}
+
+	// Need to rewrap with bufio
+	if o.Secure {
+		nc.makeTLSConn()
+	}
+	return nil
+}
+
+// processExpectedInfo will look for the expected first INFO message
+// sent when a connection is established. The lock should be held entering.
+func (nc *Conn) processExpectedInfo() error {
+
+	c := &control{}
+
+	// Read the protocol
+	err := nc.readOp(c)
+	if err != nil {
+		return err
+	}
+
+	// The nats protocol should always send INFO first.
+	if c.op != _INFO_OP_ {
+		return ErrNoInfoReceived
+	}
+
+	// Parse the protocol
+	if err := nc.processInfo(c.args); err != nil {
+		return err
+	}
+
+	return nc.checkForSecure()
+}
+
+// Sends a protocol control message by queuing into the bufio writer
+// and kicking the flush Go routine. These writes are protected.
+func (nc *Conn) sendProto(proto string) {
+	nc.mu.Lock()
+	nc.bw.WriteString(proto)
+	nc.kickFlusher()
+	nc.mu.Unlock()
+}
+
+// Generate a connect protocol message, issuing user/password if
+// applicable. The lock is assumed to be held upon entering.
+func (nc *Conn) connectProto() (string, error) {
+	o := nc.Opts
+	var user, pass, token string
+	u := nc.url.User
+	if u != nil {
+		// if no password, assume username is authToken
+		if _, ok := u.Password(); !ok {
+			token = u.Username()
+		} else {
+			user = u.Username()
+			pass, _ = u.Password()
+		}
+	} else {
+		// Take from options (possibly all empty strings)
+		user = nc.Opts.User
+		pass = nc.Opts.Password
+		token = nc.Opts.Token
+	}
+	cinfo := connectInfo{o.Verbose, o.Pedantic,
+		user, pass, token,
+		o.Secure, o.Name, LangString, Version, clientProtoInfo}
+	b, err := json.Marshal(cinfo)
+	if err != nil {
+		return _EMPTY_, ErrJsonParse
+	}
+	return fmt.Sprintf(conProto, b), nil
+}
+
+// normalizeErr removes the -ERR prefix, trims spaces and removes the quotes.
+func normalizeErr(line string) string {
+	s := strings.ToLower(strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_)))
+	s = strings.TrimLeft(strings.TrimRight(s, "'"), "'")
+	return s
+}
+
+// Send a connect protocol message to the server, issue user/password if
+// applicable. Will wait for a flush to return from the server for error
+// processing.
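+// For orientation, the exchange on the wire looks roughly like this
+// (an abbreviated sketch; the CONNECT payload fields are truncated):
+//
+//	-> CONNECT {"verbose":false,"pedantic":false,"tls_required":false,...}\r\n
+//	-> PING\r\n
+//	<- PONG\r\n          (or -ERR '...' on failure)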
+func (nc *Conn) sendConnect() error { + + // Construct the CONNECT protocol string + cProto, err := nc.connectProto() + if err != nil { + return err + } + + // Write the protocol into the buffer + _, err = nc.bw.WriteString(cProto) + if err != nil { + return err + } + + // Add to the buffer the PING protocol + _, err = nc.bw.WriteString(pingProto) + if err != nil { + return err + } + + // Flush the buffer + err = nc.bw.Flush() + if err != nil { + return err + } + + // Now read the response from the server. + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + + // If opts.Verbose is set, handle +OK + if nc.Opts.Verbose && line == okProto { + // Read the rest now... + line, err = br.ReadString('\n') + if err != nil { + return err + } + } + + // We expect a PONG + if line != pongProto { + // But it could be something else, like -ERR + + // Since we no longer use ReadLine(), trim the trailing "\r\n" + line = strings.TrimRight(line, "\r\n") + + // If it's a server error... + if strings.HasPrefix(line, _ERR_OP_) { + // Remove -ERR, trim spaces and quotes, and convert to lower case. + line = normalizeErr(line) + return errors.New("nats: " + line) + } + + // Notify that we got an unexpected protocol. + return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, line) + } + + // This is where we are truly connected. + nc.status = CONNECTED + + return nil +} + +// A control protocol line. +type control struct { + op, args string +} + +// Read a control line and process the intended op. +func (nc *Conn) readOp(c *control) error { + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + parseControl(line, c) + return nil +} + +// Parse a control line from the server. +func parseControl(line string, c *control) { + toks := strings.SplitN(line, _SPC_, 2) + if len(toks) == 1 { + c.op = strings.TrimSpace(toks[0]) + c.args = _EMPTY_ + } else if len(toks) == 2 { + c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + } else { + c.op = _EMPTY_ + } +} + +// flushReconnectPending will push the pending items that were +// gathered while we were in a RECONNECTING state to the socket. +func (nc *Conn) flushReconnectPendingItems() { + if nc.pending == nil { + return + } + if nc.pending.Len() > 0 { + nc.bw.Write(nc.pending.Bytes()) + } +} + +// Try to reconnect using the option parameters. +// This function assumes we are allowed to reconnect. +func (nc *Conn) doReconnect() { + // We want to make sure we have the other watchers shutdown properly + // here before we proceed past this point. + nc.mu.Lock() + wg := nc.wg + nc.mu.Unlock() + nc.waitForExits(wg) + + // FIXME(dlc) - We have an issue here if we have + // outstanding flush points (pongs) and they were not + // sent out, but are still in the pipe. + + // Hold the lock manually and release where needed below, + // can't do defer here. + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any errors. + nc.err = nil + + // Perform appropriate callback if needed for a disconnect. + if nc.Opts.DisconnectedCB != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + + for len(nc.srvPool) > 0 { + cur, err := nc.selectNextServer() + if err != nil { + nc.err = err + break + } + + sleepTime := int64(0) + + // Sleep appropriate amount of time before the + // connection attempt if connecting to same server + // we just got disconnected from.. 
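+		//
+		// The wait used here comes from Options.ReconnectWait. A caller-side
+		// sketch of tuning this backoff (values are placeholders):
+		//
+		//	nc, _ := nats.Connect(nats.DefaultURL,
+		//		nats.ReconnectWait(2*time.Second),
+		//		nats.MaxReconnects(60),
+		//		nats.ReconnectHandler(func(c *nats.Conn) {
+		//			log.Printf("reconnected to %s", c.ConnectedUrl())
+		//		}),
+		//	)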
+ if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait { + sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt)) + } + + // On Windows, createConn() will take more than a second when no + // server is running at that address. So it could be that the + // time elapsed between reconnect attempts is always > than + // the set option. Release the lock to give a chance to a parallel + // nc.Close() to break the loop. + nc.mu.Unlock() + if sleepTime <= 0 { + runtime.Gosched() + } else { + time.Sleep(time.Duration(sleepTime)) + } + nc.mu.Lock() + + // Check if we have been closed first. + if nc.isClosed() { + break + } + + // Mark that we tried a reconnect + cur.reconnects++ + + // Try to create a new connection + err = nc.createConn() + + // Not yet connected, retry... + // Continue to hold the lock + if err != nil { + nc.err = nil + continue + } + + // We are reconnected + nc.Reconnects++ + + // Process connect logic + if nc.err = nc.processConnectInit(); nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Clear out server stats for the server we connected to.. + cur.didConnect = true + cur.reconnects = 0 + + // Send existing subscription state + nc.resendSubscriptions() + + // Now send off and clear pending buffer + nc.flushReconnectPendingItems() + + // Flush the buffer + nc.err = nc.bw.Flush() + if nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Done with the pending buffer + nc.pending = nil + + // This is where we are truly connected. + nc.status = CONNECTED + + // Queue up the reconnect callback. + if nc.Opts.ReconnectedCB != nil { + nc.ach <- func() { nc.Opts.ReconnectedCB(nc) } + } + + // Release lock here, we will return below. + nc.mu.Unlock() + + // Make sure to flush everything + nc.Flush() + + return + } + + // Call into close.. We have no servers left.. + if nc.err == nil { + nc.err = ErrNoServers + } + nc.mu.Unlock() + nc.Close() +} + +// processOpErr handles errors from reading or parsing the protocol. +// The lock should not be held entering this function. +func (nc *Conn) processOpErr(err error) { + nc.mu.Lock() + if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { + nc.mu.Unlock() + return + } + + if nc.Opts.AllowReconnect && nc.status == CONNECTED { + // Set our new status + nc.status = RECONNECTING + if nc.ptmr != nil { + nc.ptmr.Stop() + } + if nc.conn != nil { + nc.bw.Flush() + nc.conn.Close() + nc.conn = nil + } + + // Create a new pending buffer to underpin the bufio Writer while + // we are reconnecting. + nc.pending = &bytes.Buffer{} + nc.bw = bufio.NewWriterSize(nc.pending, nc.Opts.ReconnectBufSize) + + go nc.doReconnect() + nc.mu.Unlock() + return + } + + nc.status = DISCONNECTED + nc.err = err + nc.mu.Unlock() + nc.Close() +} + +// Marker to close the channel to kick out the Go routine. +func (nc *Conn) closeAsyncFunc() asyncCB { + return func() { + nc.mu.Lock() + if nc.ach != nil { + close(nc.ach) + nc.ach = nil + } + nc.mu.Unlock() + } +} + +// asyncDispatch is responsible for calling any async callbacks +func (nc *Conn) asyncDispatch() { + // snapshot since they can change from underneath of us. + nc.mu.Lock() + ach := nc.ach + nc.mu.Unlock() + + // Loop on the channel and process async callbacks. + for { + if f, ok := <-ach; !ok { + return + } else { + f() + } + } +} + +// readLoop() will sit on the socket reading and processing the +// protocol from the server. It will dispatch appropriately based +// on the op type. 
+func (nc *Conn) readLoop(wg *sync.WaitGroup) { + // Release the wait group on exit + defer wg.Done() + + // Create a parseState if needed. + nc.mu.Lock() + if nc.ps == nil { + nc.ps = &parseState{} + } + nc.mu.Unlock() + + // Stack based buffer. + b := make([]byte, defaultBufSize) + + for { + // FIXME(dlc): RWLock here? + nc.mu.Lock() + sb := nc.isClosed() || nc.isReconnecting() + if sb { + nc.ps = &parseState{} + } + conn := nc.conn + nc.mu.Unlock() + + if sb || conn == nil { + break + } + + n, err := conn.Read(b) + if err != nil { + nc.processOpErr(err) + break + } + + if err := nc.parse(b[:n]); err != nil { + nc.processOpErr(err) + break + } + } + // Clear the parseState here.. + nc.mu.Lock() + nc.ps = nil + nc.mu.Unlock() +} + +// waitForMsgs waits on the conditional shared with readLoop and processMsg. +// It is used to deliver messages to asynchronous subscribers. +func (nc *Conn) waitForMsgs(s *Subscription) { + var closed bool + var delivered, max uint64 + + for { + s.mu.Lock() + if s.pHead == nil && !s.closed { + s.pCond.Wait() + } + // Pop the msg off the list + m := s.pHead + if m != nil { + s.pHead = m.next + if s.pHead == nil { + s.pTail = nil + } + s.pMsgs-- + s.pBytes -= len(m.Data) + } + mcb := s.mcb + max = s.max + closed = s.closed + if !s.closed { + s.delivered++ + delivered = s.delivered + } + s.mu.Unlock() + + if closed { + break + } + + // Deliver the message. + if m != nil && (max == 0 || delivered <= max) { + mcb(m) + } + // If we have hit the max for delivered msgs, remove sub. + if max > 0 && delivered >= max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + break + } + } +} + +// processMsg is called by parse and will place the msg on the +// appropriate channel/pending queue for processing. If the channel is full, +// or the pending queue is over the pending limits, the connection is +// considered a slow consumer. +func (nc *Conn) processMsg(data []byte) { + // Don't lock the connection to avoid server cutting us off if the + // flusher is holding the connection lock, trying to send to the server + // that is itself trying to send data to us. + nc.subsMu.RLock() + + // Stats + nc.InMsgs++ + nc.InBytes += uint64(len(data)) + + sub := nc.subs[nc.ps.ma.sid] + if sub == nil { + nc.subsMu.RUnlock() + return + } + + // Copy them into string + subj := string(nc.ps.ma.subject) + reply := string(nc.ps.ma.reply) + + // Doing message create outside of the sub's lock to reduce contention. + // It's possible that we end-up not using the message, but that's ok. + + // FIXME(dlc): Need to copy, should/can do COW? + msgPayload := make([]byte, len(data)) + copy(msgPayload, data) + + // FIXME(dlc): Should we recycle these containers? + m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub} + + sub.mu.Lock() + + // Subscription internal stats (applicable only for non ChanSubscription's) + if sub.typ != ChanSubscription { + sub.pMsgs++ + if sub.pMsgs > sub.pMsgsMax { + sub.pMsgsMax = sub.pMsgs + } + sub.pBytes += len(m.Data) + if sub.pBytes > sub.pBytesMax { + sub.pBytesMax = sub.pBytes + } + + // Check for a Slow Consumer + if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || + (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { + goto slowConsumer + } + } + + // We have two modes of delivery. One is the channel, used by channel + // subscribers and syncSubscribers, the other is a linked list for async. 
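+	//
+	// In caller terms (an illustrative sketch; subject names are placeholders),
+	// nc.ChanSubscribe and nc.SubscribeSync end up on the channel path below,
+	// while nc.Subscribe with a MsgHandler uses the pending list drained by
+	// waitForMsgs:
+	//
+	//	ch := make(chan *nats.Msg, 64)
+	//	nc.ChanSubscribe("updates", ch)             // channel delivery
+	//	nc.Subscribe("updates", func(m *nats.Msg) { // async pending-list delivery
+	//		log.Printf("got %q", m.Data)
+	//	})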
+ if sub.mch != nil { + select { + case sub.mch <- m: + default: + goto slowConsumer + } + } else { + // Push onto the async pList + if sub.pHead == nil { + sub.pHead = m + sub.pTail = m + sub.pCond.Signal() + } else { + sub.pTail.next = m + sub.pTail = m + } + } + + // Clear SlowConsumer status. + sub.sc = false + + sub.mu.Unlock() + nc.subsMu.RUnlock() + return + +slowConsumer: + sub.dropped++ + sc := !sub.sc + sub.sc = true + // Undo stats from above + if sub.typ != ChanSubscription { + sub.pMsgs-- + sub.pBytes -= len(m.Data) + } + sub.mu.Unlock() + nc.subsMu.RUnlock() + if sc { + // Now we need connection's lock and we may end-up in the situation + // that we were trying to avoid, except that in this case, the client + // is already experiencing client-side slow consumer situation. + nc.mu.Lock() + nc.err = ErrSlowConsumer + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) } + } + nc.mu.Unlock() + } +} + +// processPermissionsViolation is called when the server signals a subject +// permissions violation on either publish or subscribe. +func (nc *Conn) processPermissionsViolation(err string) { + nc.mu.Lock() + // create error here so we can pass it as a closure to the async cb dispatcher. + e := errors.New("nats: " + err) + nc.err = e + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, e) } + } + nc.mu.Unlock() +} + +// processAuthorizationViolation is called when the server signals a user +// authorization violation. +func (nc *Conn) processAuthorizationViolation(err string) { + nc.mu.Lock() + nc.err = ErrAuthorization + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, ErrAuthorization) } + } + nc.mu.Unlock() +} + +// flusher is a separate Go routine that will process flush requests for the write +// bufio. This allows coalescing of writes to the underlying socket. +func (nc *Conn) flusher(wg *sync.WaitGroup) { + // Release the wait group + defer wg.Done() + + // snapshot the bw and conn since they can change from underneath of us. + nc.mu.Lock() + bw := nc.bw + conn := nc.conn + fch := nc.fch + flusherTimeout := nc.Opts.FlusherTimeout + nc.mu.Unlock() + + if conn == nil || bw == nil { + return + } + + for { + if _, ok := <-fch; !ok { + return + } + nc.mu.Lock() + + // Check to see if we should bail out. + if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn { + nc.mu.Unlock() + return + } + if bw.Buffered() > 0 { + // Allow customizing how long we should wait for a flush to be done + // to prevent unhealthy connections blocking the client for too long. + if flusherTimeout > 0 { + conn.SetWriteDeadline(time.Now().Add(flusherTimeout)) + } + + if err := bw.Flush(); err != nil { + if nc.err == nil { + nc.err = err + } + } + conn.SetWriteDeadline(time.Time{}) + } + nc.mu.Unlock() + } +} + +// processPing will send an immediate pong protocol response to the +// server. The server uses this mechanism to detect dead clients. +func (nc *Conn) processPing() { + nc.sendProto(pongProto) +} + +// processPong is used to process responses to the client's ping +// messages. We use pings for the flush mechanism as well. +func (nc *Conn) processPong() { + var ch chan struct{} + + nc.mu.Lock() + if len(nc.pongs) > 0 { + ch = nc.pongs[0] + nc.pongs = nc.pongs[1:] + } + nc.pout = 0 + nc.mu.Unlock() + if ch != nil { + ch <- struct{}{} + } +} + +// processOK is a placeholder for processing OK messages. 
+func (nc *Conn) processOK() {
+	// do nothing
+}
+
+// processInfo is used to parse the info messages sent
+// from the server.
+// This function may update the server pool.
+func (nc *Conn) processInfo(info string) error {
+	if info == _EMPTY_ {
+		return nil
+	}
+	if err := json.Unmarshal([]byte(info), &nc.info); err != nil {
+		return err
+	}
+	urls := nc.info.ConnectURLs
+	if len(urls) > 0 {
+		added := false
+		// If randomization is allowed, shuffle the received array, not the
+		// entire pool. We want to preserve the pool's order up to this point
+		// (this would otherwise be problematic for the (re)connect loop).
+		if !nc.Opts.NoRandomize {
+			for i := range urls {
+				j := rand.Intn(i + 1)
+				urls[i], urls[j] = urls[j], urls[i]
+			}
+		}
+		for _, curl := range urls {
+			if _, present := nc.urls[curl]; !present {
+				if err := nc.addURLToPool(fmt.Sprintf("nats://%s", curl), true); err != nil {
+					continue
+				}
+				added = true
+			}
+		}
+		if added && !nc.initc && nc.Opts.DiscoveredServersCB != nil {
+			nc.ach <- func() { nc.Opts.DiscoveredServersCB(nc) }
+		}
+	}
+	return nil
+}
+
+// processAsyncInfo does the same as processInfo, but is called
+// from the parser. Calls processInfo under the connection's lock
+// protection.
+func (nc *Conn) processAsyncInfo(info []byte) {
+	nc.mu.Lock()
+	// Ignore errors, we will simply not update the server pool...
+	nc.processInfo(string(info))
+	nc.mu.Unlock()
+}
+
+// LastError reports the last error encountered via the connection.
+// It can be used reliably within ClosedCB, for example, in order to
+// find out the reason why the connection was closed.
+func (nc *Conn) LastError() error {
+	if nc == nil {
+		return ErrInvalidConnection
+	}
+	nc.mu.Lock()
+	err := nc.err
+	nc.mu.Unlock()
+	return err
+}
+
+// processErr processes any error messages from the server and
+// sets the connection's lastError.
+func (nc *Conn) processErr(e string) {
+	// Trim, remove quotes, convert to lower case.
+	e = normalizeErr(e)
+
+	// FIXME(dlc) - process Slow Consumer signals special.
+	if e == STALE_CONNECTION {
+		nc.processOpErr(ErrStaleConnection)
+	} else if strings.HasPrefix(e, PERMISSIONS_ERR) {
+		nc.processPermissionsViolation(e)
+	} else if strings.HasPrefix(e, AUTHORIZATION_ERR) {
+		nc.processAuthorizationViolation(e)
+	} else {
+		nc.mu.Lock()
+		nc.err = errors.New("nats: " + e)
+		nc.mu.Unlock()
+		nc.Close()
+	}
+}
+
+// kickFlusher will send a signal on a channel to kick the
+// flush Go routine to flush data to the server.
+func (nc *Conn) kickFlusher() {
+	if nc.bw != nil {
+		select {
+		case nc.fch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+// Publish publishes the data argument to the given subject. The data
+// argument is left untouched and needs to be correctly interpreted on
+// the receiver.
+func (nc *Conn) Publish(subj string, data []byte) error {
+	return nc.publish(subj, _EMPTY_, data)
+}
+
+// PublishMsg publishes the Msg structure, which includes the
+// Subject, an optional Reply and an optional Data field.
+func (nc *Conn) PublishMsg(m *Msg) error {
+	if m == nil {
+		return ErrInvalidMsg
+	}
+	return nc.publish(m.Subject, m.Reply, m.Data)
+}
+
+// PublishRequest will perform a Publish() expecting a response on the
+// reply subject. Use Request() for automatically waiting for a response
+// inline.
+func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
+	return nc.publish(subj, reply, data)
+}
+
+// Used for handrolled itoa
+const digits = "0123456789"
+
+// publish is the internal function to publish messages to a nats-server.
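+// The framing it produces looks like the following (an illustrative sketch;
+// subject, reply and payload are placeholders, and 11 is the payload length):
+//
+//	PUB foo _INBOX.abc123 11\r\n
+//	hello world\r\n
+//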
+// Sends a protocol data message by queuing into the bufio writer +// and kicking the flush go routine. These writes should be protected. +func (nc *Conn) publish(subj, reply string, data []byte) error { + if nc == nil { + return ErrInvalidConnection + } + if subj == "" { + return ErrBadSubject + } + nc.mu.Lock() + + // Proactively reject payloads over the threshold set by server. + msgSize := int64(len(data)) + if msgSize > nc.info.MaxPayload { + nc.mu.Unlock() + return ErrMaxPayload + } + + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + + // Check if we are reconnecting, and if so check if + // we have exceeded our reconnect outbound buffer limits. + if nc.isReconnecting() { + // Flush to underlying buffer. + nc.bw.Flush() + // Check if we are over + if nc.pending.Len() >= nc.Opts.ReconnectBufSize { + nc.mu.Unlock() + return ErrReconnectBufExceeded + } + } + + msgh := nc.scratch[:len(_PUB_P_)] + msgh = append(msgh, subj...) + msgh = append(msgh, ' ') + if reply != "" { + msgh = append(msgh, reply...) + msgh = append(msgh, ' ') + } + + // We could be smarter here, but simple loop is ok, + // just avoid strconv in fast path + // FIXME(dlc) - Find a better way here. + // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) + + var b [12]byte + var i = len(b) + if len(data) > 0 { + for l := len(data); l > 0; l /= 10 { + i -= 1 + b[i] = digits[l%10] + } + } else { + i -= 1 + b[i] = digits[0] + } + + msgh = append(msgh, b[i:]...) + msgh = append(msgh, _CRLF_...) + + _, err := nc.bw.Write(msgh) + if err == nil { + _, err = nc.bw.Write(data) + } + if err == nil { + _, err = nc.bw.WriteString(_CRLF_) + } + if err != nil { + nc.mu.Unlock() + return err + } + + nc.OutMsgs++ + nc.OutBytes += uint64(len(data)) + + if len(nc.fch) == 0 { + nc.kickFlusher() + } + nc.mu.Unlock() + return nil +} + +// respHandler is the global response handler. It will look up +// the appropriate channel based on the last token and place +// the message on the channel if possible. +func (nc *Conn) respHandler(m *Msg) { + rt := respToken(m.Subject) + + nc.mu.Lock() + // Just return if closed. + if nc.isClosed() { + nc.mu.Unlock() + return + } + + // Grab mch + mch := nc.respMap[rt] + // Delete the key regardless, one response only. + // FIXME(dlc) - should we track responses past 1 + // just statistics wise? + delete(nc.respMap, rt) + nc.mu.Unlock() + + // Don't block, let Request timeout instead, mch is + // buffered and we should delete the key before a + // second response is processed. + select { + case mch <- m: + default: + return + } +} + +// Create the response subscription we will use for all +// new style responses. This will be on an _INBOX with an +// additional terminal token. The subscription will be on +// a wildcard. Caller is responsible for ensuring this is +// only called once. +func (nc *Conn) createRespMux(respSub string) error { + s, err := nc.Subscribe(respSub, nc.respHandler) + if err != nil { + return err + } + nc.mu.Lock() + nc.respMux = s + nc.mu.Unlock() + return nil +} + +// Request will send a request payload and deliver the response message, +// or an error, including a timeout if no message was received properly. +func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + + nc.mu.Lock() + // If user wants the old style. + if nc.Opts.UseOldRequestStyle { + nc.mu.Unlock() + return nc.oldRequest(subj, data, timeout) + } + + // Do setup for the new style. 
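+	// Roughly (a sketch for orientation; the NUID values below are made up),
+	// the new style shares a single wildcard subscription such as
+	//
+	//	_INBOX.AB12CD34EF56GH78IJ90KL.*
+	//
+	// across all requests; each request then publishes with a reply of
+	//
+	//	_INBOX.AB12CD34EF56GH78IJ90KL.<per-request token>
+	//
+	// and the last token is used to look up the response channel in respMap.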
+ if nc.respMap == nil { + // _INBOX wildcard + nc.respSub = fmt.Sprintf("%s.*", NewInbox()) + nc.respMap = make(map[string]chan *Msg) + } + // Create literal Inbox and map to a chan msg. + mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respToken(respInbox) + nc.respMap[token] = mch + createSub := nc.respMux == nil + ginbox := nc.respSub + nc.mu.Unlock() + + if createSub { + // Make sure scoped subscription is setup only once. + var err error + nc.respSetup.Do(func() { err = nc.createRespMux(ginbox) }) + if err != nil { + return nil, err + } + } + + if err := nc.PublishRequest(subj, respInbox, data); err != nil { + return nil, err + } + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-t.C: + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ErrTimeout + } + + return msg, nil +} + +// oldRequest will create an Inbox and perform a Request() call +// with the Inbox reply and return the first reply received. +// This is optimized for the case of multiple responses. +func (nc *Conn) oldRequest(subj string, data []byte, timeout time.Duration) (*Msg, error) { + inbox := NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.PublishRequest(subj, inbox, data) + if err != nil { + return nil, err + } + return s.NextMsg(timeout) +} + +// InboxPrefix is the prefix for all inbox subjects. +const InboxPrefix = "_INBOX." +const inboxPrefixLen = len(InboxPrefix) +const respInboxPrefixLen = inboxPrefixLen + nuidSize + 1 + +// NewInbox will return an inbox string which can be used for directed replies from +// subscribers. These are guaranteed to be unique, but can be shared and subscribed +// to by others. +func NewInbox() string { + var b [inboxPrefixLen + nuidSize]byte + pres := b[:inboxPrefixLen] + copy(pres, InboxPrefix) + ns := b[inboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// Creates a new literal response subject that will trigger +// the global subscription handler. +func (nc *Conn) newRespInbox() string { + var b [inboxPrefixLen + (2 * nuidSize) + 1]byte + pres := b[:respInboxPrefixLen] + copy(pres, nc.respSub) + ns := b[respInboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// respToken will return the last token of a literal response inbox +// which we use for the message channel lookup. +func respToken(respInbox string) string { + return respInbox[respInboxPrefixLen:] +} + +// Subscribe will express interest in the given subject. The subject +// can have wildcards (partial:*, full:>). Messages will be delivered +// to the associated MsgHandler. If no MsgHandler is given, the +// subscription is a synchronous subscription and can be polled via +// Subscription.NextMsg(). +func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, cb, nil) +} + +// ChanSubscribe will place all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, nil, ch) +} + +// ChanQueueSubscribe will place all messages received on the channel. 
+// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, group, nil, ch) +} + +// SubscribeSync is syntactic sugar for Subscribe(subject, nil). +func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, _EMPTY_, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribe creates an asynchronous queue subscriber on the given subject. +// All subscribers with the same queue name will form the queue group and +// only one member of the group will be selected to receive any given +// message asynchronously. +func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, queue, cb, nil) +} + +// QueueSubscribeSync creates a synchronous queue subscriber on the given +// subject. All subscribers with the same queue name will form the queue +// group and only one member of the group will be selected to receive any +// given message synchronously. +func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, queue, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribeSyncWithChan is syntactic sugar for ChanQueueSubscribe(subject, group, ch). +func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, queue, nil, ch) +} + +// subscribe is the internal subscribe function that indicates interest in a subject. +func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + nc.mu.Lock() + // ok here, but defer is generally expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + // Check for some error conditions. + if nc.isClosed() { + return nil, ErrConnectionClosed + } + + if cb == nil && ch == nil { + return nil, ErrBadSubscription + } + + sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc} + // Set pending limits. + sub.pMsgsLimit = DefaultSubPendingMsgsLimit + sub.pBytesLimit = DefaultSubPendingBytesLimit + + // If we have an async callback, start up a sub specific + // Go routine to deliver the messages. + if cb != nil { + sub.typ = AsyncSubscription + sub.pCond = sync.NewCond(&sub.mu) + go nc.waitForMsgs(sub) + } else { + sub.typ = ChanSubscription + sub.mch = ch + } + + nc.subsMu.Lock() + nc.ssid++ + sub.sid = nc.ssid + nc.subs[sub.sid] = sub + nc.subsMu.Unlock() + + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + fmt.Fprintf(nc.bw, subProto, subj, queue, sub.sid) + } + return sub, nil +} + +// Lock for nc should be held here upon entry +func (nc *Conn) removeSub(s *Subscription) { + nc.subsMu.Lock() + delete(nc.subs, s.sid) + nc.subsMu.Unlock() + s.mu.Lock() + defer s.mu.Unlock() + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + + // Mark as invalid + s.conn = nil + s.closed = true + if s.pCond != nil { + s.pCond.Broadcast() + } +} + +// SubscriptionType is the type of the Subscription. 
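+// As a quick map from constructor to type (derived from the subscribe calls
+// above; listed here for orientation):
+//
+//	nc.Subscribe / nc.QueueSubscribe                         -> AsyncSubscription
+//	nc.SubscribeSync / nc.QueueSubscribeSync                 -> SyncSubscription
+//	nc.ChanSubscribe / nc.ChanQueueSubscribe / ...WithChan   -> ChanSubscription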
+type SubscriptionType int + +// The different types of subscription types. +const ( + AsyncSubscription = SubscriptionType(iota) + SyncSubscription + ChanSubscription + NilSubscription +) + +// Type returns the type of Subscription. +func (s *Subscription) Type() SubscriptionType { + if s == nil { + return NilSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + return s.typ +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (s *Subscription) IsValid() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + return s.conn != nil +} + +// Unsubscribe will remove interest in the given subject. +func (s *Subscription) Unsubscribe() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, 0) +} + +// AutoUnsubscribe will issue an automatic Unsubscribe that is +// processed by the server when max messages have been received. +// This can be useful when sending a request to an unknown number +// of subscribers. Request() uses this functionality. +func (s *Subscription) AutoUnsubscribe(max int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, max) +} + +// unsubscribe performs the low level unsubscribe to the server. +// Use Subscription.Unsubscribe() +func (nc *Conn) unsubscribe(sub *Subscription, max int) error { + nc.mu.Lock() + // ok here, but defer is expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + if nc.isClosed() { + return ErrConnectionClosed + } + + nc.subsMu.RLock() + s := nc.subs[sub.sid] + nc.subsMu.RUnlock() + // Already unsubscribed + if s == nil { + return nil + } + + maxStr := _EMPTY_ + if max > 0 { + s.max = uint64(max) + maxStr = strconv.Itoa(max) + } else { + nc.removeSub(s) + } + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr) + } + return nil +} + +// NextMsg will return the next message available to a synchronous subscriber +// or block until one is available. A timeout can be used to return when no +// message has been delivered. +func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState() + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + err := s.processNextMsgDelivered(msg) + if err != nil { + return nil, err + } + case <-t.C: + return nil, ErrTimeout + } + + return msg, nil +} + +// validateNextMsgState checks whether the subscription is in a valid +// state to call NextMsg and be delivered another message synchronously. +// This should be called while holding the lock. 
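+// For context, the typical synchronous consumption loop this guards looks
+// like the following (a sketch; subject and timeout are placeholders):
+//
+//	sub, _ := nc.SubscribeSync("updates")
+//	for {
+//		m, err := sub.NextMsg(2 * time.Second)
+//		if err == nats.ErrTimeout {
+//			continue
+//		}
+//		if err != nil {
+//			break // e.g. ErrConnectionClosed or ErrBadSubscription
+//		}
+//		log.Printf("got %q", m.Data)
+//	}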
+func (s *Subscription) validateNextMsgState() error { + if s.connClosed { + return ErrConnectionClosed + } + if s.mch == nil { + if s.max > 0 && s.delivered >= s.max { + return ErrMaxMessages + } else if s.closed { + return ErrBadSubscription + } + } + if s.mcb != nil { + return ErrSyncSubRequired + } + if s.sc { + s.sc = false + return ErrSlowConsumer + } + + return nil +} + +// processNextMsgDelivered takes a message and applies the needed +// accounting to the stats from the subscription, returning an +// error in case we have the maximum number of messages have been +// delivered already. It should not be called while holding the lock. +func (s *Subscription) processNextMsgDelivered(msg *Msg) error { + s.mu.Lock() + nc := s.conn + max := s.max + + // Update some stats. + s.delivered++ + delivered := s.delivered + if s.typ == SyncSubscription { + s.pMsgs-- + s.pBytes -= len(msg.Data) + } + s.mu.Unlock() + + if max > 0 { + if delivered > max { + return ErrMaxMessages + } + // Remove subscription if we have reached max. + if delivered == max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + } + } + + return nil +} + +// Queued returns the number of queued messages in the client for this subscription. +// DEPRECATED: Use Pending() +func (s *Subscription) QueuedMsgs() (int, error) { + m, _, err := s.Pending() + return int(m), err +} + +// Pending returns the number of queued messages and queued bytes in the client for this subscription. +func (s *Subscription) Pending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgs, s.pBytes, nil +} + +// MaxPending returns the maximum number of queued messages and queued bytes seen so far. +func (s *Subscription) MaxPending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsMax, s.pBytesMax, nil +} + +// ClearMaxPending resets the maximums seen so far. +func (s *Subscription) ClearMaxPending() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + s.pMsgsMax, s.pBytesMax = 0, 0 + return nil +} + +// Pending Limits +const ( + DefaultSubPendingMsgsLimit = 65536 + DefaultSubPendingBytesLimit = 65536 * 1024 +) + +// PendingLimits returns the current limits for this subscription. +// If no error is returned, a negative value indicates that the +// given metric is not limited. +func (s *Subscription) PendingLimits() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsLimit, s.pBytesLimit, nil +} + +// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. +// Zero is not allowed. Any negative value means that the given metric is not limited. 
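+// A brief usage sketch (the numbers are arbitrary and handler is a
+// hypothetical MsgHandler):
+//
+//	sub, _ := nc.Subscribe("updates", handler)
+//	sub.SetPendingLimits(10000, 8*1024*1024) // 10k msgs, 8MB
+//	sub.SetPendingLimits(-1, -1)             // disable client-side limits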
+func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + if msgLimit == 0 || bytesLimit == 0 { + return ErrInvalidArg + } + s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit + return nil +} + +// Delivered returns the number of delivered messages for this subscription. +func (s *Subscription) Delivered() (int64, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return int64(s.delivered), nil +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. If +// the server declares the connection a SlowConsumer, this number may not be +// valid. +func (s *Subscription) Dropped() (int, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return s.dropped, nil +} + +// FIXME: This is a hack +// removeFlushEntry is needed when we need to discard queued up responses +// for our pings as part of a flush call. This happens when we have a flush +// call outstanding and we call close. +func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.pongs == nil { + return false + } + for i, c := range nc.pongs { + if c == ch { + nc.pongs[i] = nil + return true + } + } + return false +} + +// The lock must be held entering this function. +func (nc *Conn) sendPing(ch chan struct{}) { + nc.pongs = append(nc.pongs, ch) + nc.bw.WriteString(pingProto) + // Flush in place. + nc.bw.Flush() +} + +// This will fire periodically and send a client origin +// ping to the server. Will also check that we have received +// responses from the server. +func (nc *Conn) processPingTimer() { + nc.mu.Lock() + + if nc.status != CONNECTED { + nc.mu.Unlock() + return + } + + // Check for violation + nc.pout++ + if nc.pout > nc.Opts.MaxPingsOut { + nc.mu.Unlock() + nc.processOpErr(ErrStaleConnection) + return + } + + nc.sendPing(nil) + nc.ptmr.Reset(nc.Opts.PingInterval) + nc.mu.Unlock() +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { + if nc == nil { + return ErrInvalidConnection + } + if timeout <= 0 { + return ErrBadTimeout + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + // Create a buffered channel to prevent chan send to block + // in processPong() if this code here times out just when + // PONG was received. + ch := make(chan struct{}, 1) + nc.sendPing(ch) + nc.mu.Unlock() + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-t.C: + err = ErrTimeout + } + + if err != nil { + nc.removeFlushEntry(ch) + } + return +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (nc *Conn) Flush() error { + return nc.FlushTimeout(60 * time.Second) +} + +// Buffered will return the number of bytes buffered to be sent to the server. +// FIXME(dlc) take into account disconnected state. 
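+//
+// A hedged example of pairing this with an explicit flush (payload and
+// subject are placeholders):
+//
+//	nc.Publish("updates", payload)
+//	if n, _ := nc.Buffered(); n > 0 {
+//		// Force the round trip instead of waiting for the flusher.
+//		if err := nc.FlushTimeout(2 * time.Second); err != nil {
+//			log.Printf("flush failed: %v", err)
+//		}
+//	}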
+func (nc *Conn) Buffered() (int, error) { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.isClosed() || nc.bw == nil { + return -1, ErrConnectionClosed + } + return nc.bw.Buffered(), nil +} + +// resendSubscriptions will send our subscription state back to the +// server. Used in reconnects +func (nc *Conn) resendSubscriptions() { + // Since we are going to send protocols to the server, we don't want to + // be holding the subsMu lock (which is used in processMsg). So copy + // the subscriptions in a temporary array. + nc.subsMu.RLock() + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + subs = append(subs, s) + } + nc.subsMu.RUnlock() + for _, s := range subs { + adjustedMax := uint64(0) + s.mu.Lock() + if s.max > 0 { + if s.delivered < s.max { + adjustedMax = s.max - s.delivered + } + + // adjustedMax could be 0 here if the number of delivered msgs + // reached the max, if so unsubscribe. + if adjustedMax == 0 { + s.mu.Unlock() + fmt.Fprintf(nc.bw, unsubProto, s.sid, _EMPTY_) + continue + } + } + s.mu.Unlock() + + fmt.Fprintf(nc.bw, subProto, s.Subject, s.Queue, s.sid) + if adjustedMax > 0 { + maxStr := strconv.Itoa(int(adjustedMax)) + fmt.Fprintf(nc.bw, unsubProto, s.sid, maxStr) + } + } +} + +// This will clear any pending flush calls and release pending calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingFlushCalls() { + // Clear any queued pongs, e.g. pending flush calls. + for _, ch := range nc.pongs { + if ch != nil { + close(ch) + } + } + nc.pongs = nil +} + +// This will clear any pending Request calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingRequestCalls() { + if nc.respMap == nil { + return + } + for key, ch := range nc.respMap { + if ch != nil { + close(ch) + delete(nc.respMap, key) + } + } +} + +// Low level close call that will do correct cleanup and set +// desired status. Also controls whether user defined callbacks +// will be triggered. The lock should not be held entering this +// function. This function will handle the locking manually. +func (nc *Conn) close(status Status, doCBs bool) { + nc.mu.Lock() + if nc.isClosed() { + nc.status = status + nc.mu.Unlock() + return + } + nc.status = CLOSED + + // Kick the Go routines so they fall out. + nc.kickFlusher() + nc.mu.Unlock() + + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any queued and blocking Requests. + nc.clearPendingRequestCalls() + + if nc.ptmr != nil { + nc.ptmr.Stop() + } + + // Go ahead and make sure we have flushed the outbound + if nc.conn != nil { + nc.bw.Flush() + defer nc.conn.Close() + } + + // Close sync subscriber channels and release any + // pending NextMsg() calls. + nc.subsMu.Lock() + for _, s := range nc.subs { + s.mu.Lock() + + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + // Mark as invalid, for signaling to deliverMsgs + s.closed = true + // Mark connection closed in subscription + s.connClosed = true + // If we have an async subscription, signals it to exit + if s.typ == AsyncSubscription && s.pCond != nil { + s.pCond.Signal() + } + + s.mu.Unlock() + } + nc.subs = nil + nc.subsMu.Unlock() + + // Perform appropriate callback if needed for a disconnect. 
+ if doCBs { + if nc.Opts.DisconnectedCB != nil && nc.conn != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + if nc.Opts.ClosedCB != nil { + nc.ach <- func() { nc.Opts.ClosedCB(nc) } + } + nc.ach <- nc.closeAsyncFunc() + } + nc.status = status + nc.mu.Unlock() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush() and NextMsg() +func (nc *Conn) Close() { + nc.close(CLOSED, true) +} + +// IsClosed tests if a Conn has been closed. +func (nc *Conn) IsClosed() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isClosed() +} + +// IsReconnecting tests if a Conn is reconnecting. +func (nc *Conn) IsReconnecting() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isReconnecting() +} + +// IsConnected tests if a Conn is connected. +func (nc *Conn) IsConnected() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isConnected() +} + +// caller must lock +func (nc *Conn) getServers(implicitOnly bool) []string { + poolSize := len(nc.srvPool) + var servers = make([]string, 0) + for i := 0; i < poolSize; i++ { + if implicitOnly && !nc.srvPool[i].isImplicit { + continue + } + url := nc.srvPool[i].url + servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + } + return servers +} + +// Servers returns the list of known server urls, including additional +// servers discovered after a connection has been established. If +// authentication is enabled, use UserInfo or Token when connecting with +// these urls. +func (nc *Conn) Servers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(false) +} + +// DiscoveredServers returns only the server urls that have been discovered +// after a connection has been established. If authentication is enabled, +// use UserInfo or Token when connecting with these urls. +func (nc *Conn) DiscoveredServers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(true) +} + +// Status returns the current state of the connection. +func (nc *Conn) Status() Status { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.status +} + +// Test if Conn has been closed Lock is assumed held. +func (nc *Conn) isClosed() bool { + return nc.status == CLOSED +} + +// Test if Conn is in the process of connecting +func (nc *Conn) isConnecting() bool { + return nc.status == CONNECTING +} + +// Test if Conn is being reconnected. +func (nc *Conn) isReconnecting() bool { + return nc.status == RECONNECTING +} + +// Test if Conn is connected or connecting. +func (nc *Conn) isConnected() bool { + return nc.status == CONNECTED +} + +// Stats will return a race safe copy of the Statistics section for the connection. +func (nc *Conn) Stats() Statistics { + // Stats are updated either under connection's mu or subsMu mutexes. + // Lock both to safely get them. + nc.mu.Lock() + nc.subsMu.RLock() + stats := Statistics{ + InMsgs: nc.InMsgs, + InBytes: nc.InBytes, + OutMsgs: nc.OutMsgs, + OutBytes: nc.OutBytes, + Reconnects: nc.Reconnects, + } + nc.subsMu.RUnlock() + nc.mu.Unlock() + return stats +} + +// MaxPayload returns the size limit that a message payload can have. +// This is set by the server configuration and delivered to the client +// upon connect. +func (nc *Conn) MaxPayload() int64 { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.MaxPayload +} + +// AuthRequired will return if the connected server requires authorization. 
+func (nc *Conn) AuthRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.AuthRequired +} + +// TLSRequired will return if the connected server requires TLS connections. +func (nc *Conn) TLSRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.TLSRequired +} diff --git a/vendor/github.com/nats-io/go-nats/nats_test.go b/vendor/github.com/nats-io/go-nats/nats_test.go new file mode 100644 index 00000000..cbd95632 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/nats_test.go @@ -0,0 +1,1177 @@ +package nats + +//////////////////////////////////////////////////////////////////////////////// +// Package scoped specific tests here.. +//////////////////////////////////////////////////////////////////////////////// + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "runtime" + "strings" + "testing" + "time" + + "github.com/nats-io/gnatsd/server" + gnatsd "github.com/nats-io/gnatsd/test" +) + +// Dumb wait program to sync on callbacks, etc... Will timeout +func Wait(ch chan bool) error { + return WaitTime(ch, 5*time.Second) +} + +func WaitTime(ch chan bool, timeout time.Duration) error { + select { + case <-ch: + return nil + case <-time.After(timeout): + } + return errors.New("timeout") +} + +func stackFatalf(t *testing.T, f string, args ...interface{}) { + lines := make([]string, 0, 32) + msg := fmt.Sprintf(f, args...) + lines = append(lines, msg) + + // Generate the Stack of callers: Skip us and verify* frames. + for i := 2; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + msg := fmt.Sprintf("%d - %s:%d", i, file, line) + lines = append(lines, msg) + } + t.Fatalf("%s", strings.Join(lines, "\n")) +} + +//////////////////////////////////////////////////////////////////////////////// +// Reconnect tests +//////////////////////////////////////////////////////////////////////////////// + +const TEST_PORT = 8368 + +var reconnectOpts = Options{ + Url: fmt.Sprintf("nats://localhost:%d", TEST_PORT), + AllowReconnect: true, + MaxReconnect: 10, + ReconnectWait: 100 * time.Millisecond, + Timeout: DefaultTimeout, +} + +func RunServerOnPort(port int) *server.Server { + opts := gnatsd.DefaultTestOptions + opts.Port = port + return RunServerWithOptions(opts) +} + +func RunServerWithOptions(opts server.Options) *server.Server { + return gnatsd.RunServer(&opts) +} + +func TestReconnectServerStats(t *testing.T) { + ts := RunServerOnPort(TEST_PORT) + + opts := reconnectOpts + nc, _ := opts.Connect() + defer nc.Close() + nc.Flush() + + ts.Shutdown() + // server is stopped here... + + ts = RunServerOnPort(TEST_PORT) + defer ts.Shutdown() + + if err := nc.FlushTimeout(5 * time.Second); err != nil { + t.Fatalf("Error on Flush: %v", err) + } + + // Make sure the server who is reconnected has the reconnects stats reset. 
+ nc.mu.Lock() + _, cur := nc.currentServer() + nc.mu.Unlock() + + if cur.reconnects != 0 { + t.Fatalf("Current Server's reconnects should be 0 vs %d\n", cur.reconnects) + } +} + +func TestParseStateReconnectFunctionality(t *testing.T) { + ts := RunServerOnPort(TEST_PORT) + ch := make(chan bool) + + opts := reconnectOpts + dch := make(chan bool) + opts.DisconnectedCB = func(_ *Conn) { + dch <- true + } + + nc, errc := opts.Connect() + if errc != nil { + t.Fatalf("Failed to create a connection: %v\n", errc) + } + ec, errec := NewEncodedConn(nc, DEFAULT_ENCODER) + if errec != nil { + nc.Close() + t.Fatalf("Failed to create an encoded connection: %v\n", errec) + } + defer ec.Close() + + testString := "bar" + ec.Subscribe("foo", func(s string) { + if s != testString { + t.Fatal("String doesn't match") + } + ch <- true + }) + ec.Flush() + + // Got a RACE condition with Travis build. The locking below does not + // really help because the parser running in the readLoop accesses + // nc.ps without the connection lock. Sleeping may help better since + // it would make the memory write in parse.go (when processing the + // pong) further away from the modification below. + time.Sleep(1 * time.Second) + + // Simulate partialState, this needs to be cleared + nc.mu.Lock() + nc.ps.state = OP_PON + nc.mu.Unlock() + + ts.Shutdown() + // server is stopped here... + + if err := Wait(dch); err != nil { + t.Fatal("Did not get the DisconnectedCB") + } + + if err := ec.Publish("foo", testString); err != nil { + t.Fatalf("Failed to publish message: %v\n", err) + } + + ts = RunServerOnPort(TEST_PORT) + defer ts.Shutdown() + + if err := ec.FlushTimeout(5 * time.Second); err != nil { + t.Fatalf("Error on Flush: %v", err) + } + + if err := Wait(ch); err != nil { + t.Fatal("Did not receive our message") + } + + expectedReconnectCount := uint64(1) + reconnectedCount := ec.Conn.Stats().Reconnects + + if reconnectedCount != expectedReconnectCount { + t.Fatalf("Reconnect count incorrect: %d vs %d\n", + reconnectedCount, expectedReconnectCount) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// ServerPool tests +//////////////////////////////////////////////////////////////////////////////// + +var testServers = []string{ + "nats://localhost:1222", + "nats://localhost:1223", + "nats://localhost:1224", + "nats://localhost:1225", + "nats://localhost:1226", + "nats://localhost:1227", + "nats://localhost:1228", +} + +func TestServersRandomize(t *testing.T) { + opts := GetDefaultOptions() + opts.Servers = testServers + nc := &Conn{Opts: opts} + if err := nc.setupServerPool(); err != nil { + t.Fatalf("Problem setting up Server Pool: %v\n", err) + } + // Build []string from srvPool + clientServers := []string{} + for _, s := range nc.srvPool { + clientServers = append(clientServers, s.url.String()) + } + // In theory this could happen.. + if reflect.DeepEqual(testServers, clientServers) { + t.Fatalf("ServerPool list not randomized\n") + } + + // Now test that we do not randomize if proper flag is set. 
+ opts = GetDefaultOptions() + opts.Servers = testServers + opts.NoRandomize = true + nc = &Conn{Opts: opts} + if err := nc.setupServerPool(); err != nil { + t.Fatalf("Problem setting up Server Pool: %v\n", err) + } + // Build []string from srvPool + clientServers = []string{} + for _, s := range nc.srvPool { + clientServers = append(clientServers, s.url.String()) + } + if !reflect.DeepEqual(testServers, clientServers) { + t.Fatalf("ServerPool list should not be randomized\n") + } + + // Although the original intent was that if Opts.Url is + // set, Opts.Servers is not (and vice versa), the behavior + // is that Opts.Url is always first, even when randomization + // is enabled. So make sure that this is still the case. + opts = GetDefaultOptions() + opts.Url = DefaultURL + opts.Servers = testServers + nc = &Conn{Opts: opts} + if err := nc.setupServerPool(); err != nil { + t.Fatalf("Problem setting up Server Pool: %v\n", err) + } + // Build []string from srvPool + clientServers = []string{} + for _, s := range nc.srvPool { + clientServers = append(clientServers, s.url.String()) + } + // In theory this could happen.. + if reflect.DeepEqual(testServers, clientServers) { + t.Fatalf("ServerPool list not randomized\n") + } + if clientServers[0] != DefaultURL { + t.Fatalf("Options.Url should be first in the array, got %v", clientServers[0]) + } +} + +func TestSelectNextServer(t *testing.T) { + opts := GetDefaultOptions() + opts.Servers = testServers + opts.NoRandomize = true + nc := &Conn{Opts: opts} + if err := nc.setupServerPool(); err != nil { + t.Fatalf("Problem setting up Server Pool: %v\n", err) + } + if nc.url != nc.srvPool[0].url { + t.Fatalf("Wrong default selection: %v\n", nc.url) + } + + sel, err := nc.selectNextServer() + if err != nil { + t.Fatalf("Got an err: %v\n", err) + } + // Check that we are now looking at #2, and current is now last. + if len(nc.srvPool) != len(testServers) { + t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)) + } + if nc.url.String() != testServers[1] { + t.Fatalf("Selection incorrect: %v vs %v\n", nc.url, testServers[1]) + } + if nc.srvPool[len(nc.srvPool)-1].url.String() != testServers[0] { + t.Fatalf("Did not push old to last position\n") + } + if sel != nc.srvPool[0] { + t.Fatalf("Did not return correct server: %v vs %v\n", sel.url, nc.srvPool[0].url) + } + + // Test that we do not keep servers where we have tried to reconnect past our limit. + nc.srvPool[0].reconnects = int(opts.MaxReconnect) + if _, err := nc.selectNextServer(); err != nil { + t.Fatalf("Got an err: %v\n", err) + } + // Check that we are now looking at #3, and current is not in the list. + if len(nc.srvPool) != len(testServers)-1 { + t.Fatalf("List is incorrect size: %d vs %d\n", len(nc.srvPool), len(testServers)-1) + } + if nc.url.String() != testServers[2] { + t.Fatalf("Selection incorrect: %v vs %v\n", nc.url, testServers[2]) + } + if nc.srvPool[len(nc.srvPool)-1].url.String() == testServers[1] { + t.Fatalf("Did not throw away the last server correctly\n") + } +} + +// This will test that comma separated url strings work properly for +// the Connect() command. 
+func TestUrlArgument(t *testing.T) { + check := func(url string, expected []string) { + if !reflect.DeepEqual(processUrlString(url), expected) { + t.Fatalf("Got wrong response processing URL: %q, RES: %#v\n", url, processUrlString(url)) + } + } + // This is normal case + oneExpected := []string{"nats://localhost:1222"} + + check("nats://localhost:1222", oneExpected) + check("nats://localhost:1222 ", oneExpected) + check(" nats://localhost:1222", oneExpected) + check(" nats://localhost:1222 ", oneExpected) + + var multiExpected = []string{ + "nats://localhost:1222", + "nats://localhost:1223", + "nats://localhost:1224", + } + + check("nats://localhost:1222,nats://localhost:1223,nats://localhost:1224", multiExpected) + check("nats://localhost:1222, nats://localhost:1223, nats://localhost:1224", multiExpected) + check(" nats://localhost:1222, nats://localhost:1223, nats://localhost:1224 ", multiExpected) + check("nats://localhost:1222, nats://localhost:1223 ,nats://localhost:1224", multiExpected) +} + +func TestParserPing(t *testing.T) { + c := &Conn{} + fake := &bytes.Buffer{} + c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize) + + c.ps = &parseState{} + + if c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + ping := []byte("PING\r\n") + err := c.parse(ping[:1]) + if err != nil || c.ps.state != OP_P { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping[1:2]) + if err != nil || c.ps.state != OP_PI { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping[2:3]) + if err != nil || c.ps.state != OP_PIN { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping[3:4]) + if err != nil || c.ps.state != OP_PING { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping[4:5]) + if err != nil || c.ps.state != OP_PING { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping[5:6]) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(ping) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Should tolerate spaces + ping = []byte("PING \r") + err = c.parse(ping) + if err != nil || c.ps.state != OP_PING { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + c.ps.state = OP_START + ping = []byte("PING \r \n") + err = c.parse(ping) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } +} + +func TestParserErr(t *testing.T) { + c := &Conn{} + c.status = CLOSED + fake := &bytes.Buffer{} + c.bw = bufio.NewWriterSize(fake, c.Opts.ReconnectBufSize) + + c.ps = &parseState{} + + // This test focuses on the parser only, not how the error is + // actually processed by the upper layer. 
+ + if c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + + expectedError := "'Any kind of error'" + errProto := []byte("-ERR " + expectedError + "\r\n") + err := c.parse(errProto[:1]) + if err != nil || c.ps.state != OP_MINUS { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[1:2]) + if err != nil || c.ps.state != OP_MINUS_E { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[2:3]) + if err != nil || c.ps.state != OP_MINUS_ER { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[3:4]) + if err != nil || c.ps.state != OP_MINUS_ERR { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[4:5]) + if err != nil || c.ps.state != OP_MINUS_ERR_SPC { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[5:6]) + if err != nil || c.ps.state != OP_MINUS_ERR_SPC { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + + // Check with split arg buffer + err = c.parse(errProto[6:7]) + if err != nil || c.ps.state != MINUS_ERR_ARG { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[7:10]) + if err != nil || c.ps.state != MINUS_ERR_ARG { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[10 : len(errProto)-2]) + if err != nil || c.ps.state != MINUS_ERR_ARG { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + if c.ps.argBuf == nil { + t.Fatal("ArgBuf should not be nil") + } + s := string(c.ps.argBuf) + if s != expectedError { + t.Fatalf("Expected %v, got %v", expectedError, s) + } + err = c.parse(errProto[len(errProto)-2:]) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + + // Check without split arg buffer + errProto = []byte("-ERR 'Any error'\r\n") + err = c.parse(errProto) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } +} + +func TestParserOK(t *testing.T) { + c := &Conn{} + c.ps = &parseState{} + + if c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + errProto := []byte("+OKay\r\n") + err := c.parse(errProto[:1]) + if err != nil || c.ps.state != OP_PLUS { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[1:2]) + if err != nil || c.ps.state != OP_PLUS_O { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[2:3]) + if err != nil || c.ps.state != OP_PLUS_OK { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(errProto[3:]) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } +} + +func TestParserShouldFail(t *testing.T) { + c := &Conn{} + c.ps = &parseState{} + + if err := c.parse([]byte(" PING")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("POO")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("Px")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("PIx")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("PINx")); err == nil { + t.Fatal("Should have received a parse error") + } + // Stop here because 'PING' protos are tolerant for anything between PING and \n + + c.ps.state = OP_START + if err := 
c.parse([]byte("POx")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("PONx")); err == nil { + t.Fatal("Should have received a parse error") + } + // Stop here because 'PONG' protos are tolerant for anything between PONG and \n + + c.ps.state = OP_START + if err := c.parse([]byte("ZOO")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("Mx\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSx\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSGx\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG foo\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG \r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG foo 1\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG foo bar 1\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG foo bar 1 baz\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("MSG foo 1 bar baz\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("+x\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("+Ox\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("-x\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("-Ex\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("-ERx\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } + c.ps.state = OP_START + if err := c.parse([]byte("-ERRx\r\n")); err == nil { + t.Fatal("Should have received a parse error") + } +} + +func TestParserSplitMsg(t *testing.T) { + + nc := &Conn{} + nc.ps = &parseState{} + + buf := []byte("MSG a\r\n") + err := nc.parse(buf) + if err == nil { + t.Fatal("Expected an error") + } + nc.ps = &parseState{} + + buf = []byte("MSG a b c\r\n") + err = nc.parse(buf) + if err == nil { + t.Fatal("Expected an error") + } + nc.ps = &parseState{} + + expectedCount := uint64(1) + expectedSize := uint64(3) + + buf = []byte("MSG a") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.argBuf == nil { + t.Fatal("Arg buffer should have been created") + } + + buf = []byte(" 1 3\r\nf") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.ma.size != 3 { + t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) + } + if nc.ps.ma.sid != 1 { + t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) + } + if string(nc.ps.ma.subject) != "a" { + t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) + } + if nc.ps.msgBuf == nil { + t.Fatal("Msg buffer should have been created") + } + + buf = []byte("oo\r\n") + err = nc.parse(buf) + if err != nil 
{ + t.Fatalf("Parser error: %v", err) + } + if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { + t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) + } + if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { + t.Fatal("Buffers should be nil now") + } + + buf = []byte("MSG a 1 3\r\nfo") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.ma.size != 3 { + t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) + } + if nc.ps.ma.sid != 1 { + t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) + } + if string(nc.ps.ma.subject) != "a" { + t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) + } + if nc.ps.argBuf == nil { + t.Fatal("Arg buffer should have been created") + } + if nc.ps.msgBuf == nil { + t.Fatal("Msg buffer should have been created") + } + + expectedCount++ + expectedSize += 3 + + buf = []byte("o\r\n") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { + t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) + } + if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { + t.Fatal("Buffers should be nil now") + } + + buf = []byte("MSG a 1 6\r\nfo") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.ma.size != 6 { + t.Fatalf("Wrong msg size: %d instead of 3", nc.ps.ma.size) + } + if nc.ps.ma.sid != 1 { + t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) + } + if string(nc.ps.ma.subject) != "a" { + t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) + } + if nc.ps.argBuf == nil { + t.Fatal("Arg buffer should have been created") + } + if nc.ps.msgBuf == nil { + t.Fatal("Msg buffer should have been created") + } + + buf = []byte("ob") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + + expectedCount++ + expectedSize += 6 + + buf = []byte("ar\r\n") + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { + t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) + } + if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { + t.Fatal("Buffers should be nil now") + } + + // Let's have a msg that is bigger than the parser's scratch size. + // Since we prepopulate the msg with 'foo', adding 3 to the size. 
+ msgSize := cap(nc.ps.scratch) + 100 + 3 + buf = []byte(fmt.Sprintf("MSG a 1 b %d\r\nfoo", msgSize)) + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.ma.size != msgSize { + t.Fatalf("Wrong msg size: %d instead of %d", nc.ps.ma.size, msgSize) + } + if nc.ps.ma.sid != 1 { + t.Fatalf("Wrong sid: %d instead of 1", nc.ps.ma.sid) + } + if string(nc.ps.ma.subject) != "a" { + t.Fatalf("Wrong subject: '%s' instead of 'a'", string(nc.ps.ma.subject)) + } + if string(nc.ps.ma.reply) != "b" { + t.Fatalf("Wrong reply: '%s' instead of 'b'", string(nc.ps.ma.reply)) + } + if nc.ps.argBuf == nil { + t.Fatal("Arg buffer should have been created") + } + if nc.ps.msgBuf == nil { + t.Fatal("Msg buffer should have been created") + } + + expectedCount++ + expectedSize += uint64(msgSize) + + bufSize := msgSize - 3 + + buf = make([]byte, bufSize) + for i := 0; i < bufSize; i++ { + buf[i] = byte('a' + (i % 26)) + } + + err = nc.parse(buf) + if err != nil { + t.Fatalf("Parser error: %v", err) + } + if nc.ps.state != MSG_PAYLOAD { + t.Fatalf("Wrong state: %v instead of %v", nc.ps.state, MSG_PAYLOAD) + } + if nc.ps.ma.size != msgSize { + t.Fatalf("Wrong (ma) msg size: %d instead of %d", nc.ps.ma.size, msgSize) + } + if len(nc.ps.msgBuf) != msgSize { + t.Fatalf("Wrong msg size: %d instead of %d", len(nc.ps.msgBuf), msgSize) + } + // Check content: + if string(nc.ps.msgBuf[0:3]) != "foo" { + t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf)) + } + for k := 3; k < nc.ps.ma.size; k++ { + if nc.ps.msgBuf[k] != byte('a'+((k-3)%26)) { + t.Fatalf("Wrong msg content: %s", string(nc.ps.msgBuf)) + } + } + + buf = []byte("\r\n") + if err := nc.parse(buf); err != nil { + t.Fatalf("Unexpected error during parsing: %v", err) + } + if (nc.Statistics.InMsgs != expectedCount) || (nc.Statistics.InBytes != expectedSize) { + t.Fatalf("Wrong stats: %d - %d instead of %d - %d", nc.Statistics.InMsgs, nc.Statistics.InBytes, expectedCount, expectedSize) + } + if (nc.ps.argBuf != nil) || (nc.ps.msgBuf != nil) { + t.Fatal("Buffers should be nil now") + } + if nc.ps.state != OP_START { + t.Fatalf("Wrong state: %v", nc.ps.state) + } +} + +func TestNormalizeError(t *testing.T) { + received := "Typical Error" + expected := strings.ToLower(received) + if s := normalizeErr("-ERR '" + received + "'"); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } + + received = "Trim Surrounding Spaces" + expected = strings.ToLower(received) + if s := normalizeErr("-ERR '" + received + "' "); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } + + received = "Trim Surrounding Spaces Without Quotes" + expected = strings.ToLower(received) + if s := normalizeErr("-ERR " + received + " "); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } + + received = "Error Without Quotes" + expected = strings.ToLower(received) + if s := normalizeErr("-ERR " + received); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } + + received = "Error With Quote Only On Left" + expected = strings.ToLower(received) + if s := normalizeErr("-ERR '" + received); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } + + received = "Error With Quote Only On Right" + expected = strings.ToLower(received) + if s := normalizeErr("-ERR " + received + "'"); s != expected { + t.Fatalf("Expected '%s', got '%s'", expected, s) + } +} + +func TestAsyncINFO(t *testing.T) { + opts := GetDefaultOptions() + c := &Conn{Opts: opts} + + c.ps = &parseState{} + + if 
c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + + info := []byte("INFO {}\r\n") + if c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + err := c.parse(info[:1]) + if err != nil || c.ps.state != OP_I { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(info[1:2]) + if err != nil || c.ps.state != OP_IN { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(info[2:3]) + if err != nil || c.ps.state != OP_INF { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(info[3:4]) + if err != nil || c.ps.state != OP_INFO { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(info[4:5]) + if err != nil || c.ps.state != OP_INFO_SPC { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + err = c.parse(info[5:]) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + + // All at once + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + + // Server pool needs to be setup + c.setupServerPool() + + // Partials requiring argBuf + expectedServer := serverInfo{ + Id: "test", + Host: "localhost", + Port: 4222, + Version: "1.2.3", + AuthRequired: true, + TLSRequired: true, + MaxPayload: 2 * 1024 * 1024, + ConnectURLs: []string{"localhost:5222", "localhost:6222"}, + } + // Set NoRandomize so that the check with expectedServer info + // matches. + c.Opts.NoRandomize = true + + b, _ := json.Marshal(expectedServer) + info = []byte(fmt.Sprintf("INFO %s\r\n", b)) + if c.ps.state != OP_START { + t.Fatalf("Expected OP_START vs %d\n", c.ps.state) + } + err = c.parse(info[:9]) + if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil { + t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) + } + err = c.parse(info[9:11]) + if err != nil || c.ps.state != INFO_ARG || c.ps.argBuf == nil { + t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) + } + err = c.parse(info[11:]) + if err != nil || c.ps.state != OP_START || c.ps.argBuf != nil { + t.Fatalf("Unexpected: %d err: %v argBuf: %v\n", c.ps.state, err, c.ps.argBuf) + } + if !reflect.DeepEqual(c.info, expectedServer) { + t.Fatalf("Expected server info to be: %v, got: %v", expectedServer, c.info) + } + + // Good INFOs + good := []string{"INFO {}\r\n", "INFO {}\r\n", "INFO {} \r\n", "INFO { \"server_id\": \"test\" } \r\n", "INFO {\"connect_urls\":[]}\r\n"} + for _, gi := range good { + c.ps = &parseState{} + err = c.parse([]byte(gi)) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Protocol %q should be fine. 
Err=%v state=%v", gi, err, c.ps.state) + } + } + + // Wrong INFOs + wrong := []string{"IxNFO {}\r\n", "INxFO {}\r\n", "INFxO {}\r\n", "INFOx {}\r\n", "INFO{}\r\n", "INFO {}"} + for _, wi := range wrong { + c.ps = &parseState{} + err = c.parse([]byte(wi)) + if err == nil && c.ps.state == OP_START { + t.Fatalf("Protocol %q should have failed", wi) + } + } + + checkPool := func(inThatOrder bool, urls ...string) { + // Check both pool and urls map + if len(c.srvPool) != len(urls) { + stackFatalf(t, "Pool should have %d elements, has %d", len(urls), len(c.srvPool)) + } + if len(c.urls) != len(urls) { + stackFatalf(t, "Map should have %d elements, has %d", len(urls), len(c.urls)) + } + for i, url := range urls { + if inThatOrder { + if c.srvPool[i].url.Host != url { + stackFatalf(t, "Pool should have %q at index %q, has %q", url, i, c.srvPool[i].url.Host) + } + } else { + if _, present := c.urls[url]; !present { + stackFatalf(t, "Pool should have %q", url) + } + } + } + } + + // Now test the decoding of "connect_urls" + + // No randomize for now + c.Opts.NoRandomize = true + // Reset the pool + c.setupServerPool() + // Reinitialize the parser + c.ps = &parseState{} + + info = []byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) and localhost:5222 + checkPool(true, "localhost:4222", "localhost:5222") + + // Make sure that if client receives the same, it is not added again. + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool should still contain localhost:4222 (the default URL) and localhost:5222 + checkPool(true, "localhost:4222", "localhost:5222") + + // Receive a new URL + info = []byte("INFO {\"connect_urls\":[\"localhost:6222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) localhost:5222 and localhost:6222 + checkPool(true, "localhost:4222", "localhost:5222", "localhost:6222") + + // Receive more than 1 URL at once + info = []byte("INFO {\"connect_urls\":[\"localhost:7222\", \"localhost:8222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) localhost:5222, localhost:6222 + // localhost:7222 and localhost:8222 + checkPool(true, "localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222") + + // Test with pool randomization now. Note that with randominzation, + // the initial pool is randomize, then each array of urls that the + // client gets from the INFO protocol is randomized, but added to + // the end of the pool. + c.Opts.NoRandomize = false + c.setupServerPool() + + info = []byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) and localhost:5222 + checkPool(true, "localhost:4222", "localhost:5222") + + // Make sure that if client receives the same, it is not added again. 
+ err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool should still contain localhost:4222 (the default URL) and localhost:5222 + checkPool(true, "localhost:4222", "localhost:5222") + + // Receive a new URL + info = []byte("INFO {\"connect_urls\":[\"localhost:6222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) localhost:5222 and localhost:6222 + checkPool(true, "localhost:4222", "localhost:5222", "localhost:6222") + + // Receive more than 1 URL at once. Add more than 2 to increase the chance of + // the array being shuffled. + info = []byte("INFO {\"connect_urls\":[\"localhost:7222\", \"localhost:8222\", " + + "\"localhost:9222\", \"localhost:10222\", \"localhost:11222\"]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Pool now should contain localhost:4222 (the default URL) localhost:5222, localhost:6222 + // localhost:7222, localhost:8222, localhost:9222, localhost:10222 and localhost:11222 + checkPool(false, "localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222", + "localhost:9222", "localhost:10222", "localhost:11222") + + // Finally, check that (part of) the pool should be randomized. + allUrls := []string{"localhost:4222", "localhost:5222", "localhost:6222", "localhost:7222", "localhost:8222", + "localhost:9222", "localhost:10222", "localhost:11222"} + same := 0 + for i, url := range c.srvPool { + if url.url.Host == allUrls[i] { + same++ + } + } + if same == len(allUrls) { + t.Fatal("Pool does not seem to be randomized") + } + + // Check that pool may be randomized on setup, but new URLs are always + // added at end of pool. + c.Opts.NoRandomize = false + c.Opts.Servers = testServers + // Reset the pool + c.setupServerPool() + // Reinitialize the parser + c.ps = &parseState{} + // Capture the pool sequence after randomization + urlsAfterPoolSetup := make([]string, 0, len(c.srvPool)) + for _, srv := range c.srvPool { + urlsAfterPoolSetup = append(urlsAfterPoolSetup, srv.url.Host) + } + checkPoolOrderDidNotChange := func() { + for i := 0; i < len(urlsAfterPoolSetup); i++ { + if c.srvPool[i].url.Host != urlsAfterPoolSetup[i] { + stackFatalf(t, "Pool should have %q at index %q, has %q", urlsAfterPoolSetup[i], i, c.srvPool[i].url.Host) + } + } + } + // Add new urls + newURLs := []string{ + "localhost:6222", + "localhost:7222", + "localhost:8222\", \"localhost:9222", + "localhost:10222\", \"localhost:11222\", \"localhost:12222,", + } + for _, newURL := range newURLs { + info = []byte("INFO {\"connect_urls\":[\"" + newURL + "]}\r\n") + err = c.parse(info) + if err != nil || c.ps.state != OP_START { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Check that pool order does not change up to the new addition(s). 
+ checkPoolOrderDidNotChange() + } +} + +func TestConnServers(t *testing.T) { + opts := GetDefaultOptions() + c := &Conn{Opts: opts} + c.ps = &parseState{} + c.setupServerPool() + + validateURLs := func(serverUrls []string, expectedUrls ...string) { + var found bool + if len(serverUrls) != len(expectedUrls) { + stackFatalf(t, "Array should have %d elements, has %d", len(expectedUrls), len(serverUrls)) + } + + for _, ev := range expectedUrls { + found = false + for _, av := range serverUrls { + if ev == av { + found = true + break + } + } + if !found { + stackFatalf(t, "array is missing %q in %v", ev, serverUrls) + } + } + } + + // check the default url + validateURLs(c.Servers(), "nats://localhost:4222") + if len(c.DiscoveredServers()) != 0 { + t.Fatalf("Expected no discovered servers") + } + + // Add a new URL + err := c.parse([]byte("INFO {\"connect_urls\":[\"localhost:5222\"]}\r\n")) + if err != nil { + t.Fatalf("Unexpected: %d : %v\n", c.ps.state, err) + } + // Server list should now contain both the default and the new url. + validateURLs(c.Servers(), "nats://localhost:4222", "nats://localhost:5222") + // Discovered servers should only contain the new url. + validateURLs(c.DiscoveredServers(), "nats://localhost:5222") + + // verify user credentials are stripped out. + opts.Servers = []string{"nats://user:pass@localhost:4333", "nats://token@localhost:4444"} + c = &Conn{Opts: opts} + c.ps = &parseState{} + c.setupServerPool() + + validateURLs(c.Servers(), "nats://localhost:4333", "nats://localhost:4444") +} + +func TestProcessErrAuthorizationError(t *testing.T) { + ach := make(chan asyncCB, 1) + called := make(chan error, 1) + c := &Conn{ + ach: ach, + Opts: Options{ + AsyncErrorCB: func(nc *Conn, sub *Subscription, err error) { + called <- err + }, + }, + } + c.processErr("Authorization Violation") + select { + case cb := <-ach: + cb() + default: + t.Fatal("Expected callback on channel") + } + + select { + case err := <-called: + if err != ErrAuthorization { + t.Fatalf("Expected ErrAuthorization, got: %v", err) + } + default: + t.Fatal("Expected error on channel") + } +} diff --git a/vendor/github.com/nats-io/go-nats/netchan.go b/vendor/github.com/nats-io/go-nats/netchan.go new file mode 100644 index 00000000..0608fd7a --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/netchan.go @@ -0,0 +1,100 @@ +// Copyright 2013-2017 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "reflect" +) + +// This allows the functionality for network channels by binding send and receive Go chans +// to subjects and optionally queue groups. +// Data will be encoded and decoded via the EncodedConn and its associated encoders. + +// BindSendChan binds a channel for send operations to NATS. +func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return ErrChanArg + } + go chPublish(c, chVal, subject) + return nil +} + +// Publish all values that arrive on the channel until it is closed or we +// encounter an error. +func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { + for { + val, ok := chVal.Recv() + if !ok { + // Channel has most likely been closed. + return + } + if e := c.Publish(subject, val.Interface()); e != nil { + // Do this under lock. + c.Conn.mu.Lock() + defer c.Conn.mu.Unlock() + + if c.Conn.Opts.AsyncErrorCB != nil { + // FIXME(dlc) - Not sure this is the right thing to do. 
+ // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback + if c.Conn.isClosed() { + go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) + } else { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) } + } + } + return + } + } +} + +// BindRecvChan binds a channel for receive operations from NATS. +func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, _EMPTY_, channel) +} + +// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. +func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, queue, channel) +} + +// Internal function to bind receive operations for a channel. +func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return nil, ErrChanArg + } + argType := chVal.Type().Elem() + + cb := func(m *Msg) { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + // This is a bit hacky, but in this instance we may be trying to send to a closed channel. + // and the user does not know when it is safe to close the channel. + defer func() { + // If we have panicked, recover and close the subscription. + if r := recover(); r != nil { + m.Sub.Unsubscribe() + } + }() + // Actually do the send to the channel. + chVal.Send(oPtr) + } + + return c.Conn.subscribe(subject, queue, cb, nil) +} diff --git a/vendor/github.com/nats-io/go-nats/parser.go b/vendor/github.com/nats-io/go-nats/parser.go new file mode 100644 index 00000000..8359b8bc --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/parser.go @@ -0,0 +1,470 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. + +package nats + +import ( + "fmt" +) + +type msgArg struct { + subject []byte + reply []byte + sid int64 + size int +} + +const MAX_CONTROL_LINE_SIZE = 1024 + +type parseState struct { + state int + as int + drop int + ma msgArg + argBuf []byte + msgBuf []byte + scratch [MAX_CONTROL_LINE_SIZE]byte +} + +const ( + OP_START = iota + OP_PLUS + OP_PLUS_O + OP_PLUS_OK + OP_MINUS + OP_MINUS_E + OP_MINUS_ER + OP_MINUS_ERR + OP_MINUS_ERR_SPC + MINUS_ERR_ARG + OP_M + OP_MS + OP_MSG + OP_MSG_SPC + MSG_ARG + MSG_PAYLOAD + MSG_END + OP_P + OP_PI + OP_PIN + OP_PING + OP_PO + OP_PON + OP_PONG + OP_I + OP_IN + OP_INF + OP_INFO + OP_INFO_SPC + INFO_ARG +) + +// parse is the fast protocol parser engine. 
+func (nc *Conn) parse(buf []byte) error { + var i int + var b byte + + // Move to loop instead of range syntax to allow jumping of i + for i = 0; i < len(buf); i++ { + b = buf[i] + + switch nc.ps.state { + case OP_START: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + case 'P', 'p': + nc.ps.state = OP_P + case '+': + nc.ps.state = OP_PLUS + case '-': + nc.ps.state = OP_MINUS + case 'I', 'i': + nc.ps.state = OP_I + default: + goto parseErr + } + case OP_M: + switch b { + case 'S', 's': + nc.ps.state = OP_MS + default: + goto parseErr + } + case OP_MS: + switch b { + case 'G', 'g': + nc.ps.state = OP_MSG + default: + goto parseErr + } + case OP_MSG: + switch b { + case ' ', '\t': + nc.ps.state = OP_MSG_SPC + default: + goto parseErr + } + case OP_MSG_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MSG_ARG + nc.ps.as = i + } + case MSG_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + if err := nc.processMsgArgs(arg); err != nil { + return err + } + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD + + // jump ahead with the index. If this overruns + // what is left we fall out and process split + // buffer. + i = nc.ps.as + nc.ps.ma.size - 1 + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case MSG_PAYLOAD: + if nc.ps.msgBuf != nil { + if len(nc.ps.msgBuf) >= nc.ps.ma.size { + nc.processMsg(nc.ps.msgBuf) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } else { + // copy as much as we can to the buffer and skip ahead. + toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) + avail := len(buf) - i + + if avail < toCopy { + toCopy = avail + } + + if toCopy > 0 { + start := len(nc.ps.msgBuf) + // This is needed for copy to work. 
+ nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] + copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) + // Update our index + i = (i + toCopy) - 1 + } else { + nc.ps.msgBuf = append(nc.ps.msgBuf, b) + } + } + } else if i-nc.ps.as >= nc.ps.ma.size { + nc.processMsg(buf[nc.ps.as:i]) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } + case MSG_END: + switch b { + case '\n': + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + continue + } + case OP_PLUS: + switch b { + case 'O', 'o': + nc.ps.state = OP_PLUS_O + default: + goto parseErr + } + case OP_PLUS_O: + switch b { + case 'K', 'k': + nc.ps.state = OP_PLUS_OK + default: + goto parseErr + } + case OP_PLUS_OK: + switch b { + case '\n': + nc.processOK() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_MINUS: + switch b { + case 'E', 'e': + nc.ps.state = OP_MINUS_E + default: + goto parseErr + } + case OP_MINUS_E: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ER + default: + goto parseErr + } + case OP_MINUS_ER: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ERR + default: + goto parseErr + } + case OP_MINUS_ERR: + switch b { + case ' ', '\t': + nc.ps.state = OP_MINUS_ERR_SPC + default: + goto parseErr + } + case OP_MINUS_ERR_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MINUS_ERR_ARG + nc.ps.as = i + } + case MINUS_ERR_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processErr(string(arg)) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case OP_P: + switch b { + case 'I', 'i': + nc.ps.state = OP_PI + case 'O', 'o': + nc.ps.state = OP_PO + default: + goto parseErr + } + case OP_PO: + switch b { + case 'N', 'n': + nc.ps.state = OP_PON + default: + goto parseErr + } + case OP_PON: + switch b { + case 'G', 'g': + nc.ps.state = OP_PONG + default: + goto parseErr + } + case OP_PONG: + switch b { + case '\n': + nc.processPong() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_PI: + switch b { + case 'N', 'n': + nc.ps.state = OP_PIN + default: + goto parseErr + } + case OP_PIN: + switch b { + case 'G', 'g': + nc.ps.state = OP_PING + default: + goto parseErr + } + case OP_PING: + switch b { + case '\n': + nc.processPing() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_I: + switch b { + case 'N', 'n': + nc.ps.state = OP_IN + default: + goto parseErr + } + case OP_IN: + switch b { + case 'F', 'f': + nc.ps.state = OP_INF + default: + goto parseErr + } + case OP_INF: + switch b { + case 'O', 'o': + nc.ps.state = OP_INFO + default: + goto parseErr + } + case OP_INFO: + switch b { + case ' ', '\t': + nc.ps.state = OP_INFO_SPC + default: + goto parseErr + } + case OP_INFO_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = INFO_ARG + nc.ps.as = i + } + case INFO_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processAsyncInfo(arg) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + default: + goto parseErr + } + } + // Check for split buffer scenarios + if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { + 
nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) + // FIXME, check max len + } + // Check for split msg + if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { + // We need to clone the msgArg if it is still referencing the + // read buffer and we are not able to process the msg. + if nc.ps.argBuf == nil { + nc.cloneMsgArg() + } + + // If we will overflow the scratch buffer, just create a + // new buffer to hold the split message. + if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { + lrem := len(buf[nc.ps.as:]) + + nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) + copy(nc.ps.msgBuf, buf[nc.ps.as:]) + } else { + nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] + nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) + } + } + + return nil + +parseErr: + return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) +} + +// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but +// we need to hold onto it into the next read. +func (nc *Conn) cloneMsgArg() { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) + nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] + if nc.ps.ma.reply != nil { + nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] + } +} + +const argsLenMax = 4 + +func (nc *Conn) processMsgArgs(arg []byte) error { + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 3: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.size = int(parseInt64(args[2])) + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.size = int(parseInt64(args[3])) + default: + return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// Ascii numbers 0-9 +const ( + ascii_0 = 48 + ascii_9 = 57 +) + +// parseInt64 expects decimal positive numbers. 
We +// return -1 to signal error +func parseInt64(d []byte) (n int64) { + if len(d) == 0 { + return -1 + } + for _, dec := range d { + if dec < ascii_0 || dec > ascii_9 { + return -1 + } + n = n*10 + (int64(dec) - ascii_0) + } + return n +} diff --git a/vendor/github.com/nats-io/go-nats/staticcheck.ignore b/vendor/github.com/nats-io/go-nats/staticcheck.ignore new file mode 100644 index 00000000..25bbf020 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/staticcheck.ignore @@ -0,0 +1,4 @@ +github.com/nats-io/go-nats/*_test.go:SA2002 +github.com/nats-io/go-nats/*/*_test.go:SA2002 +github.com/nats-io/go-nats/test/context_test.go:SA1012 +github.com/nats-io/go-nats/nats.go:SA6000 diff --git a/vendor/github.com/nats-io/go-nats/timer.go b/vendor/github.com/nats-io/go-nats/timer.go new file mode 100644 index 00000000..1b96fd52 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/timer.go @@ -0,0 +1,43 @@ +package nats + +import ( + "sync" + "time" +) + +// global pool of *time.Timer's. can be used by multiple goroutines concurrently. +var globalTimerPool timerPool + +// timerPool provides GC-able pooling of *time.Timer's. +// can be used by multiple goroutines concurrently. +type timerPool struct { + p sync.Pool +} + +// Get returns a timer that completes after the given duration. +func (tp *timerPool) Get(d time.Duration) *time.Timer { + if t, _ := tp.p.Get().(*time.Timer); t != nil { + t.Reset(d) + return t + } + + return time.NewTimer(d) +} + +// Put pools the given timer. +// +// There is no need to call t.Stop() before calling Put. +// +// Put will try to stop the timer before pooling. If the +// given timer already expired, Put will read the unreceived +// value if there is one. +func (tp *timerPool) Put(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + + tp.p.Put(t) +} diff --git a/vendor/github.com/nats-io/go-nats/timer_test.go b/vendor/github.com/nats-io/go-nats/timer_test.go new file mode 100644 index 00000000..fb02a769 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/timer_test.go @@ -0,0 +1,29 @@ +package nats + +import ( + "testing" + "time" +) + +func TestTimerPool(t *testing.T) { + var tp timerPool + + for i := 0; i < 10; i++ { + tm := tp.Get(time.Millisecond * 20) + + select { + case <-tm.C: + t.Errorf("Timer already expired") + continue + default: + } + + select { + case <-tm.C: + case <-time.After(time.Millisecond * 100): + t.Errorf("Timer didn't expire in time") + } + + tp.Put(tm) + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 00000000..84f657a3 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,13 @@ +{ + "comment": "", + "ignore": "", + "package": [ + { + "checksumSHA1": "nWIa0L7ux21Cb8kzB4rJHXMblpI=", + "path": "github.com/nats-io/go-nats", + "revision": "f0d9c5988d4c2a17ad466fcdffe010165c46434e", + "revisionTime": "2017-11-14T23:23:38Z" + } + ], + "rootPath": "github.com/tidwall/tile38" +}
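The timerPool added in timer.go above lets hot paths reuse time.Timer values instead of allocating a new one per call (the client presumably relies on it for request/NextMsg style timeouts). Below is a minimal usage sketch, not part of the vendored files: it assumes the code lives inside the nats package, and the helper name waitOrTimeout is hypothetical, shown only to illustrate the Get/Put discipline documented above.

```go
package nats

import "time"

// waitOrTimeout is a hypothetical helper illustrating how the pooled timers
// are meant to be used: Get returns a *time.Timer already reset to the given
// duration, and Put stops it and drains an already-fired channel, so the
// caller never needs to call t.Stop() itself.
func waitOrTimeout(ch chan *Msg, timeout time.Duration) (*Msg, error) {
	t := globalTimerPool.Get(timeout)
	defer globalTimerPool.Put(t)

	select {
	case m, ok := <-ch:
		if !ok {
			return nil, ErrConnectionClosed
		}
		return m, nil
	case <-t.C:
		return nil, ErrTimeout
	}
}
```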