Merge branch 'master' into master

Commit f9ef1703ff by Dennis, 2018-08-05 22:34:28 +02:00 (committed by GitHub)
21 changed files with 549 additions and 121 deletions

View File

@@ -1,9 +1,7 @@
language: go
go:
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - tip
+  - 1.9.x
+  - 1.10.x
env:
  - GOMAXPROCS=4 GORACE=halt_on_error=1
install:

View File

@@ -1,3 +1,8 @@
+# 1.0.5
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
# 1.0.4
* Fix race when adding hooks (#612)

View File

@@ -220,7 +220,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
  log "github.com/sirupsen/logrus"
-  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
  "log/syslog"
)
@@ -241,58 +241,8 @@ func init() {
```
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
+A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)

| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
@@ -370,6 +320,8 @@ The built-in logging formatters are:
    field to `true`. To force no colored output even if there is a TTY set the
    `DisableColors` field to `true`. For Windows, see
    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
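
To illustrate the `DisableLevelTruncation` option added above, here is a minimal usage sketch (not part of this diff; ordinary logrus setup is assumed). Truncation only applies to the colored output path, hence `ForceColors` in the example.

```go
package main

import (
    log "github.com/sirupsen/logrus"
)

func main() {
    // Keep the full level word (e.g. WARNING instead of WARN) in colored output.
    log.SetFormatter(&log.TextFormatter{
        ForceColors:            true,
        DisableLevelTruncation: true,
    })

    log.Warn("level text is no longer truncated to 4 characters")
}
```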
@@ -493,7 +445,7 @@ logrus.RegisterExitHandler(handler)
#### Thread safety
-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
Situation when locking is not needed includes:

View File

@@ -48,7 +48,7 @@ type Entry struct {
func NewEntry(logger *Logger) *Entry {
    return &Entry{
        Logger: logger,
-        // Default is three fields, give a little extra room
+        // Default is five fields, give a little extra room
        Data: make(Fields, 5),
    }
}
@@ -83,43 +83,41 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
    for k, v := range fields {
        data[k] = v
    }
-    return &Entry{Logger: entry.Logger, Data: data}
+    return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
}

+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+    return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
+}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
    var buffer *bytes.Buffer

+    // Default to now, but allow users to override if they want.
+    //
+    // We don't have to worry about polluting future calls to Entry#log()
+    // with this assignment because this function is declared with a
+    // non-pointer receiver.
+    if entry.Time.IsZero() {
        entry.Time = time.Now()
+    }

    entry.Level = level
    entry.Message = msg

-    entry.Logger.mu.Lock()
-    err := entry.Logger.Hooks.Fire(level, &entry)
-    entry.Logger.mu.Unlock()
-    if err != nil {
-        entry.Logger.mu.Lock()
-        fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
-        entry.Logger.mu.Unlock()
-    }
+    entry.fireHooks()

    buffer = bufferPool.Get().(*bytes.Buffer)
    buffer.Reset()
    defer bufferPool.Put(buffer)
    entry.Buffer = buffer
-    serialized, err := entry.Logger.Formatter.Format(&entry)
+    entry.write()
    entry.Buffer = nil
-    if err != nil {
-        entry.Logger.mu.Lock()
-        fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
-        entry.Logger.mu.Unlock()
-    } else {
-        entry.Logger.mu.Lock()
-        _, err = entry.Logger.Out.Write(serialized)
-        if err != nil {
-            fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
-        }
-        entry.Logger.mu.Unlock()
-    }

    // To avoid Entry#log() returning a value that only would make sense for
    // panic() to use in Entry#Panic(), we avoid the allocation by checking
@@ -129,6 +127,29 @@ func (entry Entry) log(level Level, msg string) {
    }
}

+func (entry *Entry) fireHooks() {
+    entry.Logger.mu.Lock()
+    defer entry.Logger.mu.Unlock()
+    err := entry.Logger.Hooks.Fire(entry.Level, entry)
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+    }
+}

+func (entry *Entry) write() {
+    entry.Logger.mu.Lock()
+    defer entry.Logger.mu.Unlock()
+    serialized, err := entry.Logger.Formatter.Format(entry)
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+    } else {
+        _, err = entry.Logger.Out.Write(serialized)
+        if err != nil {
+            fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+        }
+    }
+}

func (entry *Entry) Debug(args ...interface{}) {
    if entry.Logger.level() >= DebugLevel {
        entry.log(DebugLevel, fmt.Sprint(args...))

View File

@@ -75,3 +75,41 @@ func TestEntryPanicf(t *testing.T) {
    entry := NewEntry(logger)
    entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}
const (
badMessage = "this is going to panic"
panicMessage = "this is broken"
)
type panickyHook struct{}
func (p *panickyHook) Levels() []Level {
return []Level{InfoLevel}
}
func (p *panickyHook) Fire(entry *Entry) error {
if entry.Message == badMessage {
panic(panicMessage)
}
return nil
}
func TestEntryHooksPanic(t *testing.T) {
logger := New()
logger.Out = &bytes.Buffer{}
logger.Level = InfoLevel
logger.Hooks.Add(&panickyHook{})
defer func() {
p := recover()
assert.NotNil(t, p)
assert.Equal(t, panicMessage, p)
entry := NewEntry(logger)
entry.Info("another message")
}()
entry := NewEntry(logger)
entry.Info(badMessage)
}

View File

@@ -2,6 +2,7 @@ package logrus
import (
    "io"
+    "time"
)

var (
@@ -15,9 +16,7 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
-    std.mu.Lock()
-    defer std.mu.Unlock()
-    std.Out = out
+    std.SetOutput(out)
}

// SetFormatter sets the standard logger formatter.
@@ -72,6 +71,15 @@ func WithFields(fields Fields) *Entry {
    return std.WithFields(fields)
}

+// WithTime creats an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+    return std.WithTime(t)
+}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
    std.Debug(args...)
@@ -107,7 +115,7 @@ func Panic(args ...interface{}) {
    std.Panic(args...)
}

-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
    std.Fatal(args...)
}
@@ -147,7 +155,7 @@ func Panicf(format string, args ...interface{}) {
    std.Panicf(format, args...)
}

-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
    std.Fatalf(format, args...)
}
@@ -187,7 +195,7 @@ func Panicln(args ...interface{}) {
    std.Panicln(args...)
}

-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
    std.Fatalln(args...)
}
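
As a hedged usage sketch for the package-level `WithTime` added above (backed by `Logger.WithTime` and `Entry.WithTime` elsewhere in this diff), the timestamp and field names below are illustrative only:

```go
package main

import (
    "time"

    log "github.com/sirupsen/logrus"
)

func main() {
    // An event replayed from a queue can carry its original timestamp
    // instead of time.Now().
    occurredAt := time.Date(2018, time.August, 5, 22, 34, 28, 0, time.UTC)

    log.WithTime(occurredAt).
        WithField("source", "replay"). // later WithField calls keep the overridden time
        Info("processed a backlogged event")
}
```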

View File

@@ -30,16 +30,22 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
-    if t, ok := data["time"]; ok {
-        data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap) {
+    timeKey := fieldMap.resolve(FieldKeyTime)
+    if t, ok := data[timeKey]; ok {
+        data["fields."+timeKey] = t
+        delete(data, timeKey)
    }

-    if m, ok := data["msg"]; ok {
-        data["fields.msg"] = m
+    msgKey := fieldMap.resolve(FieldKeyMsg)
+    if m, ok := data[msgKey]; ok {
+        data["fields."+msgKey] = m
+        delete(data, msgKey)
    }

-    if l, ok := data["level"]; ok {
-        data["fields.level"] = l
+    levelKey := fieldMap.resolve(FieldKeyLevel)
+    if l, ok := data[levelKey]; ok {
+        data["fields."+levelKey] = l
+        delete(data, levelKey)
    }
}

View File

@@ -1,10 +1,13 @@
package logrus

import (
+    "bytes"
+    "encoding/json"
    "sync"
    "testing"

    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
)

type TestHook struct {
@@ -85,6 +88,46 @@ func TestCanFireMultipleHooks(t *testing.T) {
    })
}
type SingleLevelModifyHook struct {
ModifyHook
}
func (h *SingleLevelModifyHook) Levels() []Level {
return []Level{InfoLevel}
}
func TestHookEntryIsPristine(t *testing.T) {
l := New()
b := &bytes.Buffer{}
l.Formatter = &JSONFormatter{}
l.Out = b
l.AddHook(&SingleLevelModifyHook{})
l.Error("error message")
data := map[string]string{}
err := json.Unmarshal(b.Bytes(), &data)
require.NoError(t, err)
_, ok := data["wow"]
require.False(t, ok)
b.Reset()
l.Info("error message")
data = map[string]string{}
err = json.Unmarshal(b.Bytes(), &data)
require.NoError(t, err)
_, ok = data["wow"]
require.True(t, ok)
b.Reset()
l.Error("error message")
data = map[string]string{}
err = json.Unmarshal(b.Bytes(), &data)
require.NoError(t, err)
_, ok = data["wow"]
require.False(t, ok)
b.Reset()
}
type ErrorHook struct {
    Fired bool
}

View File

@@ -15,7 +15,7 @@ type Hook struct {
    // Entries is an array of all entries that have been received by this hook.
    // For safe access, use the AllEntries() method, rather than reading this
    // value directly.
-    Entries []*logrus.Entry
+    Entries []logrus.Entry
    mu      sync.RWMutex
}
@@ -52,7 +52,7 @@ func NewNullLogger() (*logrus.Logger, *Hook) {
func (t *Hook) Fire(e *logrus.Entry) error {
    t.mu.Lock()
    defer t.mu.Unlock()
-    t.Entries = append(t.Entries, e)
+    t.Entries = append(t.Entries, *e)
    return nil
}
@@ -68,9 +68,7 @@ func (t *Hook) LastEntry() *logrus.Entry {
    if i < 0 {
        return nil
    }
-    // Make a copy, for safety
-    e := *t.Entries[i]
-    return &e
+    return &t.Entries[i]
}

// AllEntries returns all entries that were logged.
@@ -79,10 +77,9 @@ func (t *Hook) AllEntries() []*logrus.Entry {
    defer t.mu.RUnlock()
    // Make a copy so the returned value won't race with future log requests
    entries := make([]*logrus.Entry, len(t.Entries))
-    for i, entry := range t.Entries {
+    for i := 0; i < len(t.Entries); i++ {
        // Make a copy, for safety
-        e := *entry
-        entries[i] = &e
+        entries[i] = &t.Entries[i]
    }
    return entries
}
@@ -91,5 +88,5 @@ func (t *Hook) AllEntries() []*logrus.Entry {
func (t *Hook) Reset() {
    t.mu.Lock()
    defer t.mu.Unlock()
-    t.Entries = make([]*logrus.Entry, 0)
+    t.Entries = make([]logrus.Entry, 0)
}

View File

@@ -1,14 +1,16 @@
package test

import (
+    "math/rand"
+    "sync"
    "testing"
+    "time"

    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
)

func TestAllHooks(t *testing.T) {
    assert := assert.New(t)
    logger, hook := NewNullLogger()
@@ -35,5 +37,37 @@ func TestAllHooks(t *testing.T) {
    assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
    assert.Equal("Hello error", hook.LastEntry().Message)
    assert.Equal(1, len(hook.Entries))
}
func TestLoggingWithHooksRace(t *testing.T) {
rand.Seed(time.Now().Unix())
unlocker := rand.Int() % 100
assert := assert.New(t)
logger, hook := NewNullLogger()
var wgOne, wgAll sync.WaitGroup
wgOne.Add(1)
wgAll.Add(100)
for i := 0; i < 100; i++ {
go func(i int) {
logger.Info("info")
wgAll.Done()
if i == unlocker {
wgOne.Done()
}
}(i)
}
wgOne.Wait()
assert.Equal(logrus.InfoLevel, hook.LastEntry().Level)
assert.Equal("info", hook.LastEntry().Message)
wgAll.Wait()
entries := hook.AllEntries()
assert.Equal(100, len(entries))
}

View File

@@ -1,6 +1,7 @@
package logrus

import (
+    "bytes"
    "encoding/json"
    "fmt"
)
@@ -33,6 +34,9 @@ type JSONFormatter struct {
    // DisableTimestamp allows disabling automatic timestamps in output
    DisableTimestamp bool

+    // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+    DataKey string

    // FieldMap allows users to customize the names of keys for default fields.
    // As an example:
    // formatter := &JSONFormatter{
@@ -58,7 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
            data[k] = v
        }
    }
-    prefixFieldClashes(data)
+
+    if f.DataKey != "" {
+        newData := make(Fields, 4)
+        newData[f.DataKey] = data
+        data = newData
+    }
+
+    prefixFieldClashes(data, f.FieldMap)

    timestampFormat := f.TimestampFormat
    if timestampFormat == "" {
@@ -71,9 +82,15 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
    data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
    data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()

-    serialized, err := json.Marshal(data)
+    var b *bytes.Buffer
+    if entry.Buffer != nil {
+        b = entry.Buffer
+    } else {
+        b = &bytes.Buffer{}
+    }
+    err := json.NewEncoder(b).Encode(data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
-    return append(serialized, '\n'), nil
+    return b.Bytes(), nil
}
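
A short sketch of the new `DataKey` option: when set, user-supplied fields are nested under that key instead of sitting at the top level of the JSON document, so they cannot clash with `time`, `level`, or `msg`. The key and field names below are illustrative.

```go
package main

import (
    "os"

    log "github.com/sirupsen/logrus"
)

func main() {
    logger := log.New()
    logger.Out = os.Stdout
    // Nest all WithField/WithFields data under "args".
    logger.Formatter = &log.JSONFormatter{DataKey: "args"}

    logger.WithField("user_id", 42).Info("login")
    // Expected shape (timestamp elided):
    // {"args":{"user_id":42},"level":"info","msg":"login","time":"..."}
}
```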

View File

@@ -3,6 +3,7 @@ package logrus
import (
    "encoding/json"
    "errors"
+    "fmt"
    "strings"
    "testing"
)
@@ -106,6 +107,102 @@ func TestFieldClashWithLevel(t *testing.T) {
    }
}
func TestFieldClashWithRemappedFields(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyTime: "@timestamp",
FieldKeyLevel: "@level",
FieldKeyMsg: "@message",
},
}
b, err := formatter.Format(WithFields(Fields{
"@timestamp": "@timestamp",
"@level": "@level",
"@message": "@message",
"timestamp": "timestamp",
"level": "level",
"msg": "msg",
}))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
for _, field := range []string{"timestamp", "level", "msg"} {
if entry[field] != field {
t.Errorf("Expected field %v to be untouched; got %v", field, entry[field])
}
remappedKey := fmt.Sprintf("fields.%s", field)
if remapped, ok := entry[remappedKey]; ok {
t.Errorf("Expected %s to be empty; got %v", remappedKey, remapped)
}
}
for _, field := range []string{"@timestamp", "@level", "@message"} {
if entry[field] == field {
t.Errorf("Expected field %v to be mapped to an Entry value", field)
}
remappedKey := fmt.Sprintf("fields.%s", field)
if remapped, ok := entry[remappedKey]; ok {
if remapped != field {
t.Errorf("Expected field %v to be copied to %s; got %v", field, remappedKey, remapped)
}
} else {
t.Errorf("Expected field %v to be copied to %s; was absent", field, remappedKey)
}
}
}
func TestFieldsInNestedDictionary(t *testing.T) {
formatter := &JSONFormatter{
DataKey: "args",
}
logEntry := WithFields(Fields{
"level": "level",
"test": "test",
})
logEntry.Level = InfoLevel
b, err := formatter.Format(logEntry)
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
args := entry["args"].(map[string]interface{})
for _, field := range []string{"test", "level"} {
if value, present := args[field]; !present || value != field {
t.Errorf("Expected field %v to be present under 'args'; untouched", field)
}
}
for _, field := range []string{"test", "fields.level"} {
if _, present := entry[field]; present {
t.Errorf("Expected field %v not to be present at top level", field)
}
}
// with nested object, "level" shouldn't clash
if entry["level"] != "info" {
t.Errorf("Expected 'level' field to contain 'info'")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
    formatter := &JSONFormatter{}

View File

@@ -5,6 +5,7 @@ import (
    "os"
    "sync"
    "sync/atomic"
+    "time"
)

type Logger struct {
@@ -84,11 +85,12 @@ func (logger *Logger) newEntry() *Entry {
}

func (logger *Logger) releaseEntry(entry *Entry) {
+    entry.Data = map[string]interface{}{}
    logger.entryPool.Put(entry)
}

// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
    entry := logger.newEntry()
@@ -112,6 +114,13 @@ func (logger *Logger) WithError(err error) *Entry {
    return entry.WithError(err)
}

+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+    entry := logger.newEntry()
+    defer logger.releaseEntry(entry)
+    return entry.WithTime(t)
+}

func (logger *Logger) Debugf(format string, args ...interface{}) {
    if logger.level() >= DebugLevel {
        entry := logger.newEntry()
@@ -316,6 +325,12 @@ func (logger *Logger) SetLevel(level Level) {
    atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}

+func (logger *Logger) SetOutput(out io.Writer) {
+    logger.mu.Lock()
+    defer logger.mu.Unlock()
+    logger.Out = out
+}

func (logger *Logger) AddHook(hook Hook) {
    logger.mu.Lock()
    defer logger.mu.Unlock()
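
With `Logger.SetOutput` added above (the package-level `SetOutput` now delegates to it earlier in this diff), redirecting a logger while other goroutines may be logging could look like this sketch; the file path and error handling are only examples.

```go
package main

import (
    "os"

    log "github.com/sirupsen/logrus"
)

func main() {
    logger := log.New()

    f, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
    if err != nil {
        logger.WithError(err).Warn("falling back to stderr")
    } else {
        defer f.Close()
        // SetOutput takes the logger mutex, so it is safe to call while
        // other goroutines are writing log entries.
        logger.SetOutput(f)
    }

    logger.Info("logging to the configured output")
}
```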

View File

@@ -1,6 +1,7 @@
package logrus

import (
+    "io/ioutil"
    "os"
    "testing"
)
@@ -59,3 +60,26 @@ func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fi
        }
    })
}
func BenchmarkLoggerJSONFormatter(b *testing.B) {
doLoggerBenchmarkWithFormatter(b, &JSONFormatter{})
}
func BenchmarkLoggerTextFormatter(b *testing.B) {
doLoggerBenchmarkWithFormatter(b, &TextFormatter{})
}
func doLoggerBenchmarkWithFormatter(b *testing.B, f Formatter) {
b.SetParallelism(100)
log := New()
log.Formatter = f
log.Out = ioutil.Discard
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
log.
WithField("foo1", "bar1").
WithField("foo2", "bar2").
Info("this is a dummy log")
}
})
}

View File

@@ -7,6 +7,7 @@ import (
    "strings"
    "sync"
    "testing"
+    "time"

    "github.com/stretchr/testify/assert"
)
@@ -209,6 +210,65 @@ func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
    })
}
func TestWithTimeShouldOverrideTime(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithTime(now).Info("foobar")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
})
}
func TestWithTimeShouldNotOverrideFields(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("herp", "derp").WithTime(now).Info("blah")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
assert.Equal(t, fields["herp"], "derp")
})
}
func TestWithFieldShouldNotOverrideTime(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithTime(now).WithField("herp", "derp").Info("blah")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
assert.Equal(t, fields["herp"], "derp")
})
}
func TestTimeOverrideMultipleLogs(t *testing.T) {
var buffer bytes.Buffer
var firstFields, secondFields Fields
logger := New()
logger.Out = &buffer
formatter := new(JSONFormatter)
formatter.TimestampFormat = time.StampMilli
logger.Formatter = formatter
llog := logger.WithField("herp", "derp")
llog.Info("foo")
err := json.Unmarshal(buffer.Bytes(), &firstFields)
assert.NoError(t, err, "should have decoded first message")
buffer.Reset()
time.Sleep(10 * time.Millisecond)
llog.Info("bar")
err = json.Unmarshal(buffer.Bytes(), &secondFields)
assert.NoError(t, err, "should have decoded second message")
assert.NotEqual(t, firstFields["time"], secondFields["time"], "timestamps should not be equal")
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
    var buffer bytes.Buffer
@@ -343,6 +403,24 @@ func TestLoggingRace(t *testing.T) {
    wg.Wait()
}
func TestLoggingRaceWithHooksOnEntry(t *testing.T) {
logger := New()
hook := new(ModifyHook)
logger.AddHook(hook)
entry := logger.WithField("context", "clue")
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
entry.Info("info")
wg.Done()
}()
}
wg.Wait()
}
// Compile test
func TestLogrusInterface(t *testing.T) {
    var buffer bytes.Buffer

View File

@@ -1,5 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
+// +build !appengine,!gopherjs

package logrus

View File

@@ -1,4 +1,4 @@
-// +build appengine
+// +build appengine gopherjs

package logrus

View File

@@ -1,4 +1,4 @@
-// +build !appengine
+// +build !appengine,!gopherjs

package logrus

View File

@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-// +build !appengine
+// +build !appengine,!gopherjs

package logrus

View File

@@ -20,6 +20,7 @@ const (
var (
    baseTimestamp time.Time
+    emptyFieldMap FieldMap
)

func init() {
@@ -50,12 +51,24 @@ type TextFormatter struct {
    // be desired.
    DisableSorting bool

+    // Disables the truncation of the level text to 4 characters.
+    DisableLevelTruncation bool

    // QuoteEmptyFields will wrap empty fields in quotes if true
    QuoteEmptyFields bool

    // Whether the logger's out is to a terminal
    isTerminal bool

+    // FieldMap allows users to customize the names of keys for default fields.
+    // As an example:
+    // formatter := &TextFormatter{
+    //     FieldMap: FieldMap{
+    //         FieldKeyTime:  "@timestamp",
+    //         FieldKeyLevel: "@level",
+    //         FieldKeyMsg:   "@message"}}
+    FieldMap FieldMap

    sync.Once
}
@@ -67,7 +80,8 @@ func (f *TextFormatter) init(entry *Entry) {
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-    var b *bytes.Buffer
+    prefixFieldClashes(entry.Data, f.FieldMap)

    keys := make([]string, 0, len(entry.Data))
    for k := range entry.Data {
        keys = append(keys, k)
@@ -76,14 +90,14 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
    if !f.DisableSorting {
        sort.Strings(keys)
    }

+    var b *bytes.Buffer
    if entry.Buffer != nil {
        b = entry.Buffer
    } else {
        b = &bytes.Buffer{}
    }

-    prefixFieldClashes(entry.Data)

    f.Do(func() { f.init(entry) })

    isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
@@ -96,11 +110,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
        f.printColored(b, entry, keys, timestampFormat)
    } else {
        if !f.DisableTimestamp {
-            f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+            f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat))
        }
-        f.appendKeyValue(b, "level", entry.Level.String())
+        f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String())
        if entry.Message != "" {
-            f.appendKeyValue(b, "msg", entry.Message)
+            f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message)
        }
        for _, key := range keys {
            f.appendKeyValue(b, key, entry.Data[key])
@@ -124,7 +138,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
        levelColor = blue
    }

-    levelText := strings.ToUpper(entry.Level.String())[0:4]
+    levelText := strings.ToUpper(entry.Level.String())
+    if !f.DisableLevelTruncation {
+        levelText = levelText[0:4]
+    }

    // Remove a single newline if it already exists in the message to keep
    // the behavior of logrus text_formatter the same as the stdlib log package
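
TextFormatter now honors the same `FieldMap` mechanism JSONFormatter already had, as shown above. A minimal sketch of renaming the default keys (the key names are just examples; the test file below exercises the same feature):

```go
package main

import (
    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetFormatter(&log.TextFormatter{
        DisableColors: true,
        FieldMap: log.FieldMap{
            log.FieldKeyTime:  "@timestamp",
            log.FieldKeyLevel: "@level",
            log.FieldKeyMsg:   "@message",
        },
    })

    // Emits: @timestamp="..." @level=info @message="renamed default keys" component=example
    log.WithField("component", "example").Info("renamed default keys")
}
```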

View File

@@ -7,6 +7,8 @@ import (
    "strings"
    "testing"
    "time"

+    "github.com/stretchr/testify/assert"
)

func TestFormatting(t *testing.T) {
@@ -128,6 +130,44 @@ func TestTimestampFormat(t *testing.T) {
    checkTimeStr("")
}
func TestDisableLevelTruncation(t *testing.T) {
entry := &Entry{
Time: time.Now(),
Message: "testing",
}
keys := []string{}
timestampFormat := "Mon Jan 2 15:04:05 -0700 MST 2006"
checkDisableTruncation := func(disabled bool, level Level) {
tf := &TextFormatter{DisableLevelTruncation: disabled}
var b bytes.Buffer
entry.Level = level
tf.printColored(&b, entry, keys, timestampFormat)
logLine := (&b).String()
if disabled {
expected := strings.ToUpper(level.String())
if !strings.Contains(logLine, expected) {
t.Errorf("level string expected to be %s when truncation disabled", expected)
}
} else {
expected := strings.ToUpper(level.String())
if len(level.String()) > 4 {
if strings.Contains(logLine, expected) {
t.Errorf("level string %s expected to be truncated to %s when truncation is enabled", expected, expected[0:4])
}
} else {
if !strings.Contains(logLine, expected) {
t.Errorf("level string expected to be %s when truncation is enabled and level string is below truncation threshold", expected)
}
}
}
}
checkDisableTruncation(true, DebugLevel)
checkDisableTruncation(true, InfoLevel)
checkDisableTruncation(false, ErrorLevel)
checkDisableTruncation(false, InfoLevel)
}
func TestDisableTimestampWithColoredOutput(t *testing.T) {
    tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
@@ -158,6 +198,44 @@ func TestNewlineBehavior(t *testing.T) {
    if !bytes.Contains(b, []byte("test message\n")) {
        t.Error("Double newline at end of Entry.Message did not result in a single newline after formatting")
    }
func TestTextFormatterFieldMap(t *testing.T) {
formatter := &TextFormatter{
DisableColors: true,
FieldMap: FieldMap{
FieldKeyMsg: "message",
FieldKeyLevel: "somelevel",
FieldKeyTime: "timeywimey",
},
}
entry := &Entry{
Message: "oh hi",
Level: WarnLevel,
Time: time.Date(1981, time.February, 24, 4, 28, 3, 100, time.UTC),
Data: Fields{
"field1": "f1",
"message": "messagefield",
"somelevel": "levelfield",
"timeywimey": "timeywimeyfield",
},
}
b, err := formatter.Format(entry)
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
assert.Equal(t,
`timeywimey="1981-02-24T04:28:03Z" `+
`somelevel=warning `+
`message="oh hi" `+
`field1=f1 `+
`fields.message=messagefield `+
`fields.somelevel=levelfield `+
`fields.timeywimey=timeywimeyfield`+"\n",
string(b),
"Formatted output doesn't respect FieldMap")
}

// TODO add tests for sorting etc., this requires a parser for the text