Remove the text, model, and extraction packages

They have all been moved to common/expfmt. All uses have been migrated.
beorn7 2015-09-17 14:22:05 +02:00
parent 90ddfa1c1e
commit cf0a294118
68 changed files with 0 additions and 7145 deletions


@@ -1,74 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"errors"
"fmt"
"mime"
"net/http"
)
// ProcessorForRequestHeader interprets an HTTP request header to determine
// what Processor should be used for the given input. If no acceptable
// Processor can be found, an error is returned.
func ProcessorForRequestHeader(header http.Header) (Processor, error) {
if header == nil {
return nil, errors.New("received illegal and nil header")
}
mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type"))
if err != nil {
return nil, fmt.Errorf("invalid Content-Type header %q: %s", header.Get("Content-Type"), err)
}
switch mediatype {
case "application/vnd.google.protobuf":
if params["proto"] != "io.prometheus.client.MetricFamily" {
return nil, fmt.Errorf("unrecognized protocol message %s", params["proto"])
}
if params["encoding"] != "delimited" {
return nil, fmt.Errorf("unsupported encoding %s", params["encoding"])
}
return MetricFamilyProcessor, nil
case "text/plain":
switch params["version"] {
case "0.0.4":
return Processor004, nil
case "":
// Fallback: most recent version.
return Processor004, nil
default:
return nil, fmt.Errorf("unrecognized API version %s", params["version"])
}
case "application/json":
var prometheusAPIVersion string
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
prometheusAPIVersion = params["version"]
} else {
prometheusAPIVersion = header.Get("X-Prometheus-API-Version")
}
switch prometheusAPIVersion {
case "0.0.2":
return Processor002, nil
case "0.0.1":
return Processor001, nil
default:
return nil, fmt.Errorf("unrecognized API version %s", prometheusAPIVersion)
}
default:
return nil, fmt.Errorf("unsupported media type %q, expected %q", mediatype, "application/json")
}
}
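
A minimal sketch of how a scraper drove this API before the move to common/expfmt: the processor is chosen from the Content-Type of the response and then fed the body. The target URL and the logIngester type are illustrative assumptions, and the import paths are the pre-removal ones.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/extraction"
	"github.com/prometheus/client_golang/model"
)

// logIngester is a throwaway Ingester that only reports batch sizes.
type logIngester struct{}

func (logIngester) Ingest(s model.Samples) error {
	log.Printf("ingested %d samples", len(s))
	return nil
}

func main() {
	resp, err := http.Get("http://localhost:9090/metrics") // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Select the processor matching the response's Content-Type header.
	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		log.Fatal(err)
	}

	// Decode the body; samples without an explicit timestamp get this one.
	opts := &extraction.ProcessOptions{Timestamp: model.Now()}
	if err := processor.ProcessSingle(resp.Body, logIngester{}, opts); err != nil {
		log.Fatal(err)
	}
}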


@@ -1,126 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"errors"
"net/http"
"testing"
)
func testDiscriminatorHTTPHeader(t testing.TB) {
var scenarios = []struct {
input map[string]string
output Processor
err error
}{
{
output: nil,
err: errors.New("received illegal and nil header"),
},
{
input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.0"},
output: nil,
err: errors.New("unrecognized API version 0.0.0"),
},
{
input: map[string]string{"Content-Type": "application/json", "X-Prometheus-API-Version": "0.0.1"},
output: Processor001,
err: nil,
},
{
input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.0`},
output: nil,
err: errors.New("unrecognized API version 0.0.0"),
},
{
input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.1`},
output: Processor001,
err: nil,
},
{
input: map[string]string{"Content-Type": `application/json; schema="prometheus/telemetry"; version=0.0.2`},
output: Processor002,
err: nil,
},
{
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
output: MetricFamilyProcessor,
err: nil,
},
{
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
output: nil,
err: errors.New("unrecognized protocol message illegal"),
},
{
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
output: nil,
err: errors.New("unsupported encoding illegal"),
},
{
input: map[string]string{"Content-Type": `text/plain; version=0.0.4`},
output: Processor004,
err: nil,
},
{
input: map[string]string{"Content-Type": `text/plain`},
output: Processor004,
err: nil,
},
{
input: map[string]string{"Content-Type": `text/plain; version=0.0.3`},
output: nil,
err: errors.New("unrecognized API version 0.0.3"),
},
}
for i, scenario := range scenarios {
var header http.Header
if len(scenario.input) > 0 {
header = http.Header{}
}
for key, value := range scenario.input {
header.Add(key, value)
}
actual, err := ProcessorForRequestHeader(header)
if scenario.err != err {
if scenario.err != nil && err != nil {
if scenario.err.Error() != err.Error() {
t.Errorf("%d. expected %s, got %s", i, scenario.err, err)
}
} else if scenario.err != nil || err != nil {
t.Errorf("%d. expected %s, got %s", i, scenario.err, err)
}
}
if scenario.output != actual {
t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
}
}
}
func TestDiscriminatorHTTPHeader(t *testing.T) {
testDiscriminatorHTTPHeader(t)
}
func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
for i := 0; i < b.N; i++ {
testDiscriminatorHTTPHeader(b)
}
}


@@ -1,15 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package extraction decodes Prometheus clients' data streams for consumers.
package extraction

File diff suppressed because it is too large


@@ -1,79 +0,0 @@
[
{
"baseLabels": {
"__name__": "rpc_calls_total",
"job": "batch_job"
},
"docstring": "RPC calls.",
"metric": {
"type": "counter",
"value": [
{
"labels": {
"service": "zed"
},
"value": 25
},
{
"labels": {
"service": "bar"
},
"value": 25
},
{
"labels": {
"service": "foo"
},
"value": 25
}
]
}
},
{
"baseLabels": {
"__name__": "rpc_latency_microseconds"
},
"docstring": "RPC latency.",
"metric": {
"type": "histogram",
"value": [
{
"labels": {
"service": "foo"
},
"value": {
"0.010000": 15.890724674774395,
"0.050000": 15.890724674774395,
"0.500000": 84.63044031436561,
"0.900000": 160.21100853053224,
"0.990000": 172.49828748957728
}
},
{
"labels": {
"service": "zed"
},
"value": {
"0.010000": 0.0459814091918713,
"0.050000": 0.0459814091918713,
"0.500000": 0.6120456642749681,
"0.900000": 1.355915069887731,
"0.990000": 1.772733213161236
}
},
{
"labels": {
"service": "bar"
},
"value": {
"0.010000": 78.48563317257356,
"0.050000": 78.48563317257356,
"0.500000": 97.31798360385088,
"0.900000": 109.89202084295582,
"0.990000": 109.99626121011262
}
}
]
}
}
]


@@ -1,318 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"fmt"
"io"
"math"
dto "github.com/prometheus/client_model/go"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/client_golang/model"
)
type metricFamilyProcessor struct{}
// MetricFamilyProcessor decodes streams of io.prometheus.client.MetricFamily
// messages, each record prefixed with its varint-encoded length.
//
// See http://godoc.org/github.com/matttproud/golang_protobuf_extensions/ext for
// more details.
var MetricFamilyProcessor = &metricFamilyProcessor{}
func (m *metricFamilyProcessor) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error {
family := &dto.MetricFamily{}
for {
family.Reset()
if _, err := pbutil.ReadDelimited(i, family); err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := extractMetricFamily(out, o, family); err != nil {
return err
}
}
}
func extractMetricFamily(out Ingester, o *ProcessOptions, family *dto.MetricFamily) error {
switch family.GetType() {
case dto.MetricType_COUNTER:
if err := extractCounter(out, o, family); err != nil {
return err
}
case dto.MetricType_GAUGE:
if err := extractGauge(out, o, family); err != nil {
return err
}
case dto.MetricType_SUMMARY:
if err := extractSummary(out, o, family); err != nil {
return err
}
case dto.MetricType_UNTYPED:
if err := extractUntyped(out, o, family); err != nil {
return err
}
case dto.MetricType_HISTOGRAM:
if err := extractHistogram(out, o, family); err != nil {
return err
}
}
return nil
}
func extractCounter(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
samples := make(model.Samples, 0, len(f.Metric))
for _, m := range f.Metric {
if m.Counter == nil {
continue
}
sample := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Counter.GetValue()),
}
samples = append(samples, sample)
if m.TimestampMs != nil {
sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
} else {
sample.Timestamp = o.Timestamp
}
metric := sample.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
}
return out.Ingest(samples)
}
func extractGauge(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
samples := make(model.Samples, 0, len(f.Metric))
for _, m := range f.Metric {
if m.Gauge == nil {
continue
}
sample := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Gauge.GetValue()),
}
samples = append(samples, sample)
if m.TimestampMs != nil {
sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
} else {
sample.Timestamp = o.Timestamp
}
metric := sample.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
}
return out.Ingest(samples)
}
func extractSummary(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
samples := make(model.Samples, 0, len(f.Metric))
for _, m := range f.Metric {
if m.Summary == nil {
continue
}
timestamp := o.Timestamp
if m.TimestampMs != nil {
timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
}
for _, q := range m.Summary.Quantile {
sample := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(q.GetValue()),
Timestamp: timestamp,
}
samples = append(samples, sample)
metric := sample.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
// BUG(matt): Update other names to "quantile".
metric[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
}
if m.Summary.SampleSum != nil {
sum := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Summary.GetSampleSum()),
Timestamp: timestamp,
}
samples = append(samples, sum)
metric := sum.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
}
if m.Summary.SampleCount != nil {
count := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Summary.GetSampleCount()),
Timestamp: timestamp,
}
samples = append(samples, count)
metric := count.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
}
}
return out.Ingest(samples)
}
func extractUntyped(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
samples := make(model.Samples, 0, len(f.Metric))
for _, m := range f.Metric {
if m.Untyped == nil {
continue
}
sample := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Untyped.GetValue()),
}
samples = append(samples, sample)
if m.TimestampMs != nil {
sample.Timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
} else {
sample.Timestamp = o.Timestamp
}
metric := sample.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName())
}
return out.Ingest(samples)
}
func extractHistogram(out Ingester, o *ProcessOptions, f *dto.MetricFamily) error {
samples := make(model.Samples, 0, len(f.Metric))
for _, m := range f.Metric {
if m.Histogram == nil {
continue
}
timestamp := o.Timestamp
if m.TimestampMs != nil {
timestamp = model.TimestampFromUnixNano(*m.TimestampMs * 1000000)
}
infSeen := false
for _, q := range m.Histogram.Bucket {
sample := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(q.GetCumulativeCount()),
Timestamp: timestamp,
}
samples = append(samples, sample)
metric := sample.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
if math.IsInf(q.GetUpperBound(), +1) {
infSeen = true
}
}
if m.Histogram.SampleSum != nil {
sum := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Histogram.GetSampleSum()),
Timestamp: timestamp,
}
samples = append(samples, sum)
metric := sum.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
}
if m.Histogram.SampleCount != nil {
count := &model.Sample{
Metric: model.Metric{},
Value: model.SampleValue(m.Histogram.GetSampleCount()),
Timestamp: timestamp,
}
samples = append(samples, count)
metric := count.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
if !infSeen {
infBucket := &model.Sample{
Metric: model.Metric{},
Value: count.Value,
Timestamp: timestamp,
}
samples = append(samples, infBucket)
metric := infBucket.Metric
for _, p := range m.Label {
metric[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
}
metric[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
metric[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
}
}
}
return out.Ingest(samples)
}
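
A sketch of the wire framing MetricFamilyProcessor expects: one counter family is written with the varint length-delimited encoding and decoded again. The family contents are made up, and the proto helper import path is an assumption about the vintage of the dependencies.

package main

import (
	"bytes"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/extraction"
	"github.com/prometheus/client_golang/model"
)

// printIngester dumps every decoded sample.
type printIngester struct{}

func (printIngester) Ingest(s model.Samples) error {
	for _, sample := range s {
		log.Printf("%s = %v", sample.Metric, sample.Value)
	}
	return nil
}

func main() {
	// One counter family, framed the way a protobuf /metrics endpoint frames it.
	family := &dto.MetricFamily{
		Name: proto.String("rpc_calls_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	buf := &bytes.Buffer{}
	if _, err := pbutil.WriteDelimited(buf, family); err != nil {
		log.Fatal(err)
	}

	// Decode it again; the sample is stamped with the ProcessOptions timestamp
	// because the metric carries none of its own.
	opts := &extraction.ProcessOptions{Timestamp: model.Now()}
	if err := extraction.MetricFamilyProcessor.ProcessSingle(buf, printIngester{}, opts); err != nil {
		log.Fatal(err)
	}
}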


@@ -1,153 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"sort"
"strings"
"testing"
"github.com/prometheus/client_golang/model"
)
var testTime = model.Now()
type metricFamilyProcessorScenario struct {
in string
expected, actual []model.Samples
}
func (s *metricFamilyProcessorScenario) Ingest(samples model.Samples) error {
s.actual = append(s.actual, samples)
return nil
}
func (s *metricFamilyProcessorScenario) test(t *testing.T, set int) {
i := strings.NewReader(s.in)
o := &ProcessOptions{
Timestamp: testTime,
}
err := MetricFamilyProcessor.ProcessSingle(i, s, o)
if err != nil {
t.Fatalf("%d. got error: %s", set, err)
}
if len(s.expected) != len(s.actual) {
t.Fatalf("%d. expected length %d, got %d", set, len(s.expected), len(s.actual))
}
for i, expected := range s.expected {
sort.Sort(s.actual[i])
sort.Sort(expected)
if !expected.Equal(s.actual[i]) {
t.Errorf("%d.%d. expected %s, got %s", set, i, expected, s.actual[i])
}
}
}
func TestMetricFamilyProcessor(t *testing.T) {
scenarios := []metricFamilyProcessorScenario{
{
in: "",
},
{
in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
expected: []model.Samples{
model.Samples{
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value"},
Value: -42,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value"},
Value: 84,
Timestamp: testTime,
},
},
},
},
{
in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
expected: []model.Samples{
model.Samples{
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.99"},
Value: -42,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_count", "some_label_name": "some_label_value", "quantile": "0.999"},
Value: -84,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_count", "another_label_name": "another_label_value", "quantile": "0.5"},
Value: 10,
Timestamp: testTime,
},
},
},
},
{
in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
expected: []model.Samples{
model.Samples{
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "100"},
Value: 123,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "120"},
Value: 412,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "144"},
Value: 592,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "172.8"},
Value: 1524,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_bucket", "le": "+Inf"},
Value: 2693,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_sum"},
Value: 1756047.3,
Timestamp: testTime,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "request_duration_microseconds_count"},
Value: 2693,
Timestamp: testTime,
},
},
},
},
}
for i, scenario := range scenarios {
scenario.test(t, i)
}
}


@@ -1,84 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"io"
"time"
"github.com/prometheus/client_golang/model"
)
// ProcessOptions dictates how the interpreted stream should be rendered for
// consumption.
type ProcessOptions struct {
// Timestamp is added to each value from the stream that has no explicit
// timestamp set.
Timestamp model.Timestamp
}
// Ingester consumes result streams in whatever way is desired by the user.
type Ingester interface {
Ingest(model.Samples) error
}
// Processor is responsible for decoding the actual message responses from a
// stream into a format that an Ingester can consume.
type Processor interface {
// ProcessSingle treats the input as a single self-contained message body and
// transforms it accordingly. It has no support for streaming.
ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error
}
// Helper function to convert map[string]string into LabelSet.
//
// NOTE: This should be deleted when support for go 1.0.3 is removed; 1.1 is
// smart enough to unmarshal JSON objects into LabelSet directly.
func labelSet(labels map[string]string) model.LabelSet {
labelset := make(model.LabelSet, len(labels))
for k, v := range labels {
labelset[model.LabelName(k)] = model.LabelValue(v)
}
return labelset
}
// A basic interface only useful in testing contexts for dispensing the time
// in a controlled manner.
type instantProvider interface {
// The current instant.
Now() time.Time
}
// Clock is a simple means for fluently wrapping around standard Go timekeeping
// mechanisms to enhance testability without compromising code readability.
//
// It is sufficient for use on bare initialization. A provider should be
// set only for test contexts. When not provided, it emits the current
// system time.
type clock struct {
// The underlying means through which time is provided, if supplied.
Provider instantProvider
}
// Emit the current instant.
func (t *clock) Now() time.Time {
if t.Provider == nil {
return time.Now()
}
return t.Provider.Now()
}
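
The Ingester interface above is the only piece a consumer has to supply. A minimal collecting implementation, in the spirit of the test scenarios in this package (the package and type names are illustrative):

package consumer

import "github.com/prometheus/client_golang/model"

// collectingIngester appends every ingested batch to an in-memory slice so it
// can be inspected after ProcessSingle returns.
type collectingIngester struct {
	batches []model.Samples
}

func (c *collectingIngester) Ingest(s model.Samples) error {
	c.batches = append(c.batches, s)
	return nil
}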


@@ -1,127 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"github.com/prometheus/client_golang/model"
)
const (
baseLabels001 = "baseLabels"
counter001 = "counter"
docstring001 = "docstring"
gauge001 = "gauge"
histogram001 = "histogram"
labels001 = "labels"
metric001 = "metric"
type001 = "type"
value001 = "value"
percentile001 = "percentile"
)
// Processor001 is responsible for decoding payloads from protocol version
// 0.0.1.
var Processor001 = &processor001{}
// processor001 is responsible for handling API version 0.0.1.
type processor001 struct{}
// entity001 represents the JSON structure that protocol version 0.0.1 uses.
type entity001 []struct {
BaseLabels map[string]string `json:"baseLabels"`
Docstring string `json:"docstring"`
Metric struct {
MetricType string `json:"type"`
Value []struct {
Labels map[string]string `json:"labels"`
Value interface{} `json:"value"`
} `json:"value"`
} `json:"metric"`
}
func (p *processor001) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error {
// TODO(matt): Replace with plain-jane JSON unmarshalling.
buffer, err := ioutil.ReadAll(in)
if err != nil {
return err
}
entities := entity001{}
if err = json.Unmarshal(buffer, &entities); err != nil {
return err
}
// TODO(matt): This outer loop is a great basis for parallelization.
pendingSamples := model.Samples{}
for _, entity := range entities {
for _, value := range entity.Metric.Value {
labels := labelSet(entity.BaseLabels).Merge(labelSet(value.Labels))
switch entity.Metric.MetricType {
case gauge001, counter001:
sampleValue, ok := value.Value.(float64)
if !ok {
return fmt.Errorf("could not convert value from %s %s to float64", entity, value)
}
pendingSamples = append(pendingSamples, &model.Sample{
Metric: model.Metric(labels),
Timestamp: o.Timestamp,
Value: model.SampleValue(sampleValue),
})
break
case histogram001:
sampleValue, ok := value.Value.(map[string]interface{})
if !ok {
return fmt.Errorf("could not convert value from %q to a map[string]interface{}", value.Value)
}
for percentile, percentileValue := range sampleValue {
individualValue, ok := percentileValue.(float64)
if !ok {
return fmt.Errorf("could not convert value from %q to a float64", percentileValue)
}
childMetric := make(map[model.LabelName]model.LabelValue, len(labels)+1)
for k, v := range labels {
childMetric[k] = v
}
childMetric[model.LabelName(percentile001)] = model.LabelValue(percentile)
pendingSamples = append(pendingSamples, &model.Sample{
Metric: model.Metric(childMetric),
Timestamp: o.Timestamp,
Value: model.SampleValue(individualValue),
})
}
break
}
}
}
if len(pendingSamples) > 0 {
return out.Ingest(pendingSamples)
}
return nil
}


@@ -1,185 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"errors"
"os"
"path"
"sort"
"testing"
"github.com/prometheus/client_golang/model"
)
var test001Time = model.Now()
type testProcessor001ProcessScenario struct {
in string
expected, actual []model.Samples
err error
}
func (s *testProcessor001ProcessScenario) Ingest(samples model.Samples) error {
s.actual = append(s.actual, samples)
return nil
}
func (s *testProcessor001ProcessScenario) test(t testing.TB, set int) {
reader, err := os.Open(path.Join("fixtures", s.in))
if err != nil {
t.Fatalf("%d. couldn't open scenario input file %s: %s", set, s.in, err)
}
options := &ProcessOptions{
Timestamp: test001Time,
}
err = Processor001.ProcessSingle(reader, s, options)
if s.err != err && (s.err == nil || err == nil || err.Error() != s.err.Error()) {
t.Fatalf("%d. expected err of %s, got %s", set, s.err, err)
}
if len(s.actual) != len(s.expected) {
t.Fatalf("%d. expected output length of %d, got %d", set, len(s.expected), len(s.actual))
}
for i, expected := range s.expected {
sort.Sort(s.actual[i])
sort.Sort(expected)
if !expected.Equal(s.actual[i]) {
t.Errorf("%d.%d. expected %s, got %s", set, i, expected, s.actual[i])
}
}
}
func testProcessor001Process(t testing.TB) {
var scenarios = []testProcessor001ProcessScenario{
{
in: "empty.json",
err: errors.New("unexpected end of JSON input"),
},
{
in: "test0_0_1-0_0_2.json",
expected: []model.Samples{
model.Samples{
&model.Sample{
Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.0459814091918713,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 78.48563317257356,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 15.890724674774395,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.0459814091918713,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 78.48563317257356,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 15.890724674774395,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.6120456642749681,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 97.31798360385088,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 84.63044031436561,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 1.355915069887731,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 109.89202084295582,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 160.21100853053224,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 1.772733213161236,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 109.99626121011262,
Timestamp: test001Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 172.49828748957728,
Timestamp: test001Time,
},
},
},
},
}
for i, scenario := range scenarios {
scenario.test(t, i)
}
}
func TestProcessor001Process(t *testing.T) {
testProcessor001Process(t)
}
func BenchmarkProcessor001Process(b *testing.B) {
for i := 0; i < b.N; i++ {
testProcessor001Process(b)
}
}


@@ -1,106 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"encoding/json"
"fmt"
"io"
"github.com/prometheus/client_golang/model"
)
// Processor002 is responsible for decoding payloads from protocol version
// 0.0.2.
var Processor002 = &processor002{}
type histogram002 struct {
Labels map[string]string `json:"labels"`
Values map[string]model.SampleValue `json:"value"`
}
type counter002 struct {
Labels map[string]string `json:"labels"`
Value model.SampleValue `json:"value"`
}
type processor002 struct{}
func (p *processor002) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error {
// Processor for telemetry schema version 0.0.2.
// entities is the container for the decoded telemetry data.
var entities []struct {
BaseLabels map[string]string `json:"baseLabels"`
Docstring string `json:"docstring"`
Metric struct {
Type string `json:"type"`
Values json.RawMessage `json:"value"`
} `json:"metric"`
}
if err := json.NewDecoder(in).Decode(&entities); err != nil {
return err
}
pendingSamples := model.Samples{}
for _, entity := range entities {
switch entity.Metric.Type {
case "counter", "gauge":
var values []counter002
if err := json.Unmarshal(entity.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err)
}
for _, counter := range values {
labels := labelSet(entity.BaseLabels).Merge(labelSet(counter.Labels))
pendingSamples = append(pendingSamples, &model.Sample{
Metric: model.Metric(labels),
Timestamp: o.Timestamp,
Value: counter.Value,
})
}
case "histogram":
var values []histogram002
if err := json.Unmarshal(entity.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err)
}
for _, histogram := range values {
for percentile, value := range histogram.Values {
labels := labelSet(entity.BaseLabels).Merge(labelSet(histogram.Labels))
labels[model.LabelName("percentile")] = model.LabelValue(percentile)
pendingSamples = append(pendingSamples, &model.Sample{
Metric: model.Metric(labels),
Timestamp: o.Timestamp,
Value: value,
})
}
}
default:
return fmt.Errorf("unknown metric type %q", entity.Metric.Type)
}
}
if len(pendingSamples) > 0 {
return out.Ingest(pendingSamples)
}
return nil
}


@@ -1,225 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"bytes"
"errors"
"io/ioutil"
"os"
"path"
"runtime"
"sort"
"testing"
"github.com/prometheus/client_golang/model"
)
var test002Time = model.Now()
type testProcessor002ProcessScenario struct {
in string
expected, actual []model.Samples
err error
}
func (s *testProcessor002ProcessScenario) Ingest(samples model.Samples) error {
s.actual = append(s.actual, samples)
return nil
}
func (s *testProcessor002ProcessScenario) test(t testing.TB, set int) {
reader, err := os.Open(path.Join("fixtures", s.in))
if err != nil {
t.Fatalf("%d. couldn't open scenario input file %s: %s", set, s.in, err)
}
options := &ProcessOptions{
Timestamp: test002Time,
}
err = Processor002.ProcessSingle(reader, s, options)
if s.err != err && (s.err == nil || err == nil || err.Error() != s.err.Error()) {
t.Fatalf("%d. expected err of %s, got %s", set, s.err, err)
}
if len(s.actual) != len(s.expected) {
t.Fatalf("%d. expected output length of %d, got %d", set, len(s.expected), len(s.actual))
}
for i, expected := range s.expected {
sort.Sort(s.actual[i])
sort.Sort(expected)
if !expected.Equal(s.actual[i]) {
t.Fatalf("%d.%d. expected %s, got %s", set, i, expected, s.actual[i])
}
}
}
func testProcessor002Process(t testing.TB) {
var scenarios = []testProcessor002ProcessScenario{
{
in: "empty.json",
err: errors.New("EOF"),
},
{
in: "test0_0_1-0_0_2.json",
expected: []model.Samples{
model.Samples{
&model.Sample{
Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total", "job": "batch_job"},
Value: 25,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.0459814091918713,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 78.48563317257356,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 15.890724674774395,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.0459814091918713,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 78.48563317257356,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 15.890724674774395,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 0.6120456642749681,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 97.31798360385088,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 84.63044031436561,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 1.355915069887731,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 109.89202084295582,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 160.21100853053224,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"},
Value: 1.772733213161236,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"},
Value: 109.99626121011262,
Timestamp: test002Time,
},
&model.Sample{
Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"},
Value: 172.49828748957728,
Timestamp: test002Time,
},
},
},
},
}
for i, scenario := range scenarios {
scenario.test(t, i)
}
}
func TestProcessor002Process(t *testing.T) {
testProcessor002Process(t)
}
func BenchmarkProcessor002Process(b *testing.B) {
b.StopTimer()
pre := runtime.MemStats{}
runtime.ReadMemStats(&pre)
b.StartTimer()
for i := 0; i < b.N; i++ {
testProcessor002Process(b)
}
post := runtime.MemStats{}
runtime.ReadMemStats(&post)
allocated := post.TotalAlloc - pre.TotalAlloc
b.Logf("Allocated %d at %f per cycle with %d cycles.", allocated, float64(allocated)/float64(b.N), b.N)
}
func BenchmarkProcessor002ParseOnly(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("fixtures/test0_0_1-0_0_2-large.json")
if err != nil {
b.Fatal(err)
}
ing := fakeIngester{}
b.StartTimer()
for i := 0; i < b.N; i++ {
if err := Processor002.ProcessSingle(bytes.NewReader(data), ing, &ProcessOptions{}); err != nil {
b.Fatal(err)
}
}
}
type fakeIngester struct{}
func (i fakeIngester) Ingest(model.Samples) error {
return nil
}


@@ -1,40 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"io"
"github.com/prometheus/client_golang/text"
)
type processor004 struct{}
// Processor004 is responsible for decoding payloads from the text-based
// variety of protocol version 0.0.4.
var Processor004 = &processor004{}
func (t *processor004) ProcessSingle(i io.Reader, out Ingester, o *ProcessOptions) error {
var parser text.Parser
metricFamilies, err := parser.TextToMetricFamilies(i)
if err != nil {
return err
}
for _, metricFamily := range metricFamilies {
if err := extractMetricFamily(out, o, metricFamily); err != nil {
return err
}
}
return nil
}
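
A short sketch of running the text-format processor over an in-memory 0.0.4 exposition; the metric name, value, and ingester type are made up, and the import paths are the pre-removal ones.

package main

import (
	"log"
	"strings"

	"github.com/prometheus/client_golang/extraction"
	"github.com/prometheus/client_golang/model"
)

// countIngester tallies how many samples were decoded.
type countIngester struct{ n int }

func (c *countIngester) Ingest(s model.Samples) error {
	c.n += len(s)
	return nil
}

func main() {
	exposition := "# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 1027\n"
	ing := &countIngester{}
	opts := &extraction.ProcessOptions{Timestamp: model.Now()}
	if err := extraction.Processor004.ProcessSingle(strings.NewReader(exposition), ing, opts); err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded %d samples", ing.n)
}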


@@ -1,100 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extraction
import (
"sort"
"strings"
"testing"
"github.com/prometheus/client_golang/model"
)
var (
ts = model.Now()
in = `
# Only a quite simple scenario with two metric families.
# More complicated tests of the parser itself can be found in the text package.
# TYPE mf2 counter
mf2 3
mf1{label="value1"} -3.14 123456
mf1{label="value2"} 42
mf2 4
`
out = map[model.LabelValue]model.Samples{
"mf1": model.Samples{
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value1"},
Value: -3.14,
Timestamp: 123456,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "mf1", "label": "value2"},
Value: 42,
Timestamp: ts,
},
},
"mf2": model.Samples{
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "mf2"},
Value: 3,
Timestamp: ts,
},
&model.Sample{
Metric: model.Metric{model.MetricNameLabel: "mf2"},
Value: 4,
Timestamp: ts,
},
},
}
)
type testIngester struct {
results []model.Samples
}
func (i *testIngester) Ingest(s model.Samples) error {
i.results = append(i.results, s)
return nil
}
func TestTextProcessor(t *testing.T) {
var ingester testIngester
i := strings.NewReader(in)
o := &ProcessOptions{
Timestamp: ts,
}
err := Processor004.ProcessSingle(i, &ingester, o)
if err != nil {
t.Fatal(err)
}
if expected, got := len(out), len(ingester.results); expected != got {
t.Fatalf("Expected length %d, got %d", expected, got)
}
for _, r := range ingester.results {
expected, ok := out[r[0].Metric[model.MetricNameLabel]]
if !ok {
t.Fatalf(
"Unexpected metric name %q",
r[0].Metric[model.MetricNameLabel],
)
}
sort.Sort(expected)
sort.Sort(r)
if !expected.Equal(r) {
t.Errorf("expected %s, got %s", expected, r)
}
}
}


@@ -1,110 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strconv"
)
// Fingerprint provides a hash-capable representation of a Metric.
// For our purposes, FNV-1A 64-bit is used.
type Fingerprint uint64
func (f Fingerprint) String() string {
return fmt.Sprintf("%016x", uint64(f))
}
// Less reports whether f sorts before o.
func (f Fingerprint) Less(o Fingerprint) bool {
return f < o
}
// Equal reports whether f and o are equal.
func (f Fingerprint) Equal(o Fingerprint) bool {
return f == o
}
// LoadFromString transforms a string representation into a Fingerprint.
func (f *Fingerprint) LoadFromString(s string) error {
num, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return err
}
*f = Fingerprint(num)
return nil
}
// Fingerprints represents a collection of Fingerprint subject to a given
// natural sorting scheme. It implements sort.Interface.
type Fingerprints []Fingerprint
// Len implements sort.Interface.
func (f Fingerprints) Len() int {
return len(f)
}
// Less implements sort.Interface.
func (f Fingerprints) Less(i, j int) bool {
return f[i] < f[j]
}
// Swap implements sort.Interface.
func (f Fingerprints) Swap(i, j int) {
f[i], f[j] = f[j], f[i]
}
// FingerprintSet is a set of Fingerprints.
type FingerprintSet map[Fingerprint]struct{}
// Equal returns true if both sets contain the same elements (and not more).
func (s FingerprintSet) Equal(o FingerprintSet) bool {
if len(s) != len(o) {
return false
}
for k := range s {
if _, ok := o[k]; !ok {
return false
}
}
return true
}
// Intersection returns the elements contained in both sets.
func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
myLength, otherLength := len(s), len(o)
if myLength == 0 || otherLength == 0 {
return FingerprintSet{}
}
subSet := s
superSet := o
if otherLength < myLength {
subSet = o
superSet = s
}
out := FingerprintSet{}
for k := range subSet {
if _, ok := superSet[k]; ok {
out[k] = struct{}{}
}
}
return out
}
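
A small sketch of the fingerprint set operations above; the fingerprint values are arbitrary and the import path is the pre-removal one.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	a := model.FingerprintSet{1: {}, 2: {}, 3: {}}
	b := model.FingerprintSet{2: {}, 3: {}, 4: {}}

	// Intersection keeps only the fingerprints present in both sets.
	common := a.Intersection(b)
	fmt.Println(common.Equal(model.FingerprintSet{2: {}, 3: {}})) // true

	// Fingerprints render as zero-padded hexadecimal.
	fmt.Println(model.Fingerprint(255)) // 00000000000000ff
}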


@@ -1,133 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"regexp"
"strings"
)
const (
// ExportedLabelPrefix is the prefix to prepend to the label names present in
// exported metrics if a label of the same name is added by the server.
ExportedLabelPrefix LabelName = "exported_"
// MetricNameLabel is the label name indicating the metric name of a
// timeseries.
MetricNameLabel LabelName = "__name__"
// SchemeLabel is the name of the label that holds the scheme on which to
// scrape a target.
SchemeLabel LabelName = "__scheme__"
// AddressLabel is the name of the label that holds the address of
// a scrape target.
AddressLabel LabelName = "__address__"
// MetricsPathLabel is the name of the label that holds the path on which to
// scrape a target.
MetricsPathLabel LabelName = "__metrics_path__"
// ReservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
ReservedLabelPrefix = "__"
// MetaLabelPrefix is a prefix for labels that provide meta information.
// Labels with this prefix are used for intermediate label processing and
// will not be attached to time series.
MetaLabelPrefix = "__meta_"
// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
// Labels with this prefix are used for intermediate label processing and
// will not be attached to time series. This is reserved for use in
// Prometheus configuration files by users.
TmpLabelPrefix = "__tmp_"
// ParamLabelPrefix is a prefix for labels that provide URL parameters
// used to scrape a target.
ParamLabelPrefix = "__param_"
// JobLabel is the label name indicating the job from which a timeseries
// was scraped.
JobLabel LabelName = "job"
// InstanceLabel is the label name used for the instance label.
InstanceLabel LabelName = "instance"
// BucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
BucketLabel = "le"
// QuantileLabel is used for the label that defines the quantile in a
// summary.
QuantileLabel = "quantile"
)
// LabelNameRE is a regular expression matching valid label names.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
return nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (ln *LabelName) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
return nil
}
// LabelNames is a sortable LabelName slice. It implements sort.Interface.
type LabelNames []LabelName
func (l LabelNames) Len() int {
return len(l)
}
func (l LabelNames) Less(i, j int) bool {
return l[i] < l[j]
}
func (l LabelNames) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
func (l LabelNames) String() string {
labelStrings := make([]string, 0, len(l))
for _, label := range l {
labelStrings = append(labelStrings, string(label))
}
return strings.Join(labelStrings, ", ")
}


@@ -1,55 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sort"
"testing"
)
func testLabelNames(t testing.TB) {
var scenarios = []struct {
in LabelNames
out LabelNames
}{
{
in: LabelNames{"ZZZ", "zzz"},
out: LabelNames{"ZZZ", "zzz"},
},
{
in: LabelNames{"aaa", "AAA"},
out: LabelNames{"AAA", "aaa"},
},
}
for i, scenario := range scenarios {
sort.Sort(scenario.in)
for j, expected := range scenario.out {
if expected != scenario.in[j] {
t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
}
}
}
}
func TestLabelNames(t *testing.T) {
testLabelNames(t)
}
func BenchmarkLabelNames(b *testing.B) {
for i := 0; i < b.N; i++ {
testLabelNames(b)
}
}


@@ -1,83 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"sort"
"strings"
)
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
// may be fully-qualified down to the point where it may resolve to a single
// Metric in the data store or not. All operations that occur within the realm
// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
// match.
type LabelSet map[LabelName]LabelValue
// Merge is a helper function to non-destructively merge two label sets.
func (l LabelSet) Merge(other LabelSet) LabelSet {
result := make(LabelSet, len(l))
for k, v := range l {
result[k] = v
}
for k, v := range other {
result[k] = v
}
return result
}
func (l LabelSet) String() string {
labelStrings := make([]string, 0, len(l))
for label, value := range l {
labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
}
switch len(labelStrings) {
case 0:
return ""
default:
sort.Strings(labelStrings)
return fmt.Sprintf("{%s}", strings.Join(labelStrings, ", "))
}
}
// MergeFromMetric merges Metric into this LabelSet.
func (l LabelSet) MergeFromMetric(m Metric) {
for k, v := range m {
l[k] = v
}
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (l *LabelSet) UnmarshalJSON(b []byte) error {
var m map[LabelName]LabelValue
if err := json.Unmarshal(b, &m); err != nil {
return err
}
// encoding/json only unmarshals maps of the form map[string]T. It treats
// LabelName as a string and does not call its UnmarshalJSON method.
// Thus, we have to replicate the behavior here.
for ln := range m {
if !LabelNameRE.MatchString(string(ln)) {
return fmt.Errorf("%q is not a valid label name", ln)
}
}
*l = LabelSet(m)
return nil
}
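
A quick sketch of the non-destructive merge; the label names and values are arbitrary and the import path is the pre-removal one.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)

func main() {
	base := model.LabelSet{"job": "api", "instance": "a:9090"}
	extra := model.LabelSet{"instance": "b:9090", "zone": "eu"}

	// Merge copies base and overlays extra; neither input is modified.
	merged := base.Merge(extra)
	fmt.Println(merged) // {instance="b:9090", job="api", zone="eu"}
	fmt.Println(base)   // {instance="a:9090", job="api"}
}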


@@ -1,36 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sort"
)
// A LabelValue is an associated value for a LabelName.
type LabelValue string
// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue
func (l LabelValues) Len() int {
return len(l)
}
func (l LabelValues) Less(i, j int) bool {
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
}
func (l LabelValues) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}


@@ -1,55 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sort"
"testing"
)
func testLabelValues(t testing.TB) {
var scenarios = []struct {
in LabelValues
out LabelValues
}{
{
in: LabelValues{"ZZZ", "zzz"},
out: LabelValues{"ZZZ", "zzz"},
},
{
in: LabelValues{"aaa", "AAA"},
out: LabelValues{"AAA", "aaa"},
},
}
for i, scenario := range scenarios {
sort.Sort(scenario.in)
for j, expected := range scenario.out {
if expected != scenario.in[j] {
t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
}
}
}
}
func TestLabelValues(t *testing.T) {
testLabelValues(t)
}
func BenchmarkLabelValues(b *testing.B) {
for i := 0; i < b.N; i++ {
testLabelValues(b)
}
}


@@ -1,192 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"fmt"
"sort"
"strings"
)
var separator = []byte{0}
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric map[LabelName]LabelValue
// Equal compares the metrics.
func (m Metric) Equal(o Metric) bool {
if len(m) != len(o) {
return false
}
for ln, lv := range m {
olv, ok := o[ln]
if !ok {
return false
}
if olv != lv {
return false
}
}
return true
}
// Before compares the metrics, using the following criteria:
//
// If m has fewer labels than o, it is before o. If it has more, it is not.
//
// If the number of labels is the same, the superset of all label names is
// sorted alphanumerically. The first differing label pair found in that order
// determines the outcome: If the label does not exist at all in m, then m is
// before o, and vice versa. Otherwise the label value is compared
// alphanumerically.
//
// If m and o are equal, the method returns false.
func (m Metric) Before(o Metric) bool {
if len(m) < len(o) {
return true
}
if len(m) > len(o) {
return false
}
lns := make(LabelNames, 0, len(m)+len(o))
for ln := range m {
lns = append(lns, ln)
}
for ln := range o {
lns = append(lns, ln)
}
// It's probably not worth it to de-dup lns.
sort.Sort(lns)
for _, ln := range lns {
mlv, ok := m[ln]
if !ok {
return true
}
olv, ok := o[ln]
if !ok {
return false
}
if mlv < olv {
return true
}
if mlv > olv {
return false
}
}
return false
}
// String implements Stringer.
func (m Metric) String() string {
metricName, hasName := m[MetricNameLabel]
numLabels := len(m) - 1
if !hasName {
numLabels = len(m)
}
labelStrings := make([]string, 0, numLabels)
for label, value := range m {
if label != MetricNameLabel {
labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
}
}
switch numLabels {
case 0:
if hasName {
return string(metricName)
}
return "{}"
default:
sort.Strings(labelStrings)
return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
}
}
// Fingerprint returns a Metric's Fingerprint.
func (m Metric) Fingerprint() Fingerprint {
return metricToFingerprint(m)
}
// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions.
func (m Metric) FastFingerprint() Fingerprint {
return metricToFastFingerprint(m)
}
// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
clone := Metric{}
for k, v := range m {
clone[k] = v
}
return clone
}
// MergeFromLabelSet merges a label set into this Metric. If a non-empty collision
// prefix is given, it is prepended (repeatedly, if necessary) to any merged label
// name that already exists in the Metric.
func (m Metric) MergeFromLabelSet(labels LabelSet, collisionPrefix LabelName) {
for k, v := range labels {
if collisionPrefix != "" {
for {
if _, exists := m[k]; !exists {
break
}
k = collisionPrefix + k
}
}
m[k] = v
}
}
// COWMetric wraps a Metric to enable copy-on-write access patterns.
type COWMetric struct {
Copied bool
Metric Metric
}
// Set sets a label name in the wrapped Metric to a given value and copies the
// Metric initially, if it is not already a copy.
func (m *COWMetric) Set(ln LabelName, lv LabelValue) {
m.doCOW()
m.Metric[ln] = lv
}
// Delete deletes a given label name from the wrapped Metric and copies the
// Metric initially, if it is not already a copy.
func (m *COWMetric) Delete(ln LabelName) {
m.doCOW()
delete(m.Metric, ln)
}
// doCOW copies the underlying Metric if it is not already a copy.
func (m *COWMetric) doCOW() {
if !m.Copied {
m.Metric = m.Metric.Clone()
m.Copied = true
}
}
// String implements fmt.Stringer.
func (m COWMetric) String() string {
return m.Metric.String()
}
// MarshalJSON implements json.Marshaler.
func (m COWMetric) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Metric)
}
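The Before ordering and the copy-on-write behaviour documented above can be illustrated with a short, hypothetical sketch (again assuming the pre-removal model import path; not part of the diff):
package main
import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)
func main() {
	// Before orders first by label count, then by the sorted superset of
	// label names, then by label value.
	a := model.Metric{model.MetricNameLabel: "requests_total", "code": "200"}
	b := model.Metric{model.MetricNameLabel: "requests_total", "code": "500"}
	fmt.Println(a.Before(b)) // true: "200" < "500"

	// COWMetric defers copying until the first mutation.
	cow := model.COWMetric{Metric: a}
	cow.Set("handler", "/api")
	fmt.Println(a)          // unchanged: requests_total{code="200"}
	fmt.Println(cow.Metric) // requests_total{code="200", handler="/api"}
}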

View File

@ -1,130 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import "testing"
func testMetric(t testing.TB) {
var scenarios = []struct {
input Metric
fingerprint Fingerprint
fastFingerprint Fingerprint
}{
{
input: Metric{},
fingerprint: 14695981039346656037,
fastFingerprint: 14695981039346656037,
},
{
input: Metric{
"first_name": "electro",
"occupation": "robot",
"manufacturer": "westinghouse",
},
fingerprint: 5911716720268894962,
fastFingerprint: 11310079640881077873,
},
{
input: Metric{
"x": "y",
},
fingerprint: 8241431561484471700,
fastFingerprint: 13948396922932177635,
},
{
input: Metric{
"a": "bb",
"b": "c",
},
fingerprint: 3016285359649981711,
fastFingerprint: 3198632812309449502,
},
{
input: Metric{
"a": "b",
"bb": "c",
},
fingerprint: 7122421792099404749,
fastFingerprint: 5774953389407657638,
},
}
for i, scenario := range scenarios {
if scenario.fingerprint != scenario.input.Fingerprint() {
t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, scenario.input.Fingerprint())
}
if scenario.fastFingerprint != scenario.input.FastFingerprint() {
t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, scenario.input.FastFingerprint())
}
}
}
func TestMetric(t *testing.T) {
testMetric(t)
}
func BenchmarkMetric(b *testing.B) {
for i := 0; i < b.N; i++ {
testMetric(b)
}
}
func TestCOWMetric(t *testing.T) {
testMetric := Metric{
"to_delete": "test1",
"to_change": "test2",
}
scenarios := []struct {
fn func(*COWMetric)
out Metric
}{
{
fn: func(cm *COWMetric) {
cm.Delete("to_delete")
},
out: Metric{
"to_change": "test2",
},
},
{
fn: func(cm *COWMetric) {
cm.Set("to_change", "changed")
},
out: Metric{
"to_delete": "test1",
"to_change": "changed",
},
},
}
for i, s := range scenarios {
orig := testMetric.Clone()
cm := &COWMetric{
Metric: orig,
}
s.fn(cm)
// Test that the original metric was not modified.
if !orig.Equal(testMetric) {
t.Fatalf("%d. original metric changed; expected %v, got %v", i, testMetric, orig)
}
// Test that the new metric has the right changes.
if !cm.Metric.Equal(s.out) {
t.Fatalf("%d. copied metric doesn't contain expected changes; expected %v, got %v", i, s.out, cm.Metric)
}
}
}

View File

@ -1,15 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package model contains the core representation of Prometheus client primitives.
package model

View File

@ -1,79 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
// Sample is a sample value with a timestamp and a metric.
type Sample struct {
Metric Metric
Value SampleValue
Timestamp Timestamp
}
// Equal compares first the metrics, then the timestamp, then the value.
func (s *Sample) Equal(o *Sample) bool {
if s == o {
return true
}
if !s.Metric.Equal(o.Metric) {
return false
}
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
if !s.Value.Equal(o.Value) {
return false
}
return true
}
// Samples is a sortable Sample slice. It implements sort.Interface.
type Samples []*Sample
func (s Samples) Len() int {
return len(s)
}
// Less compares first the metrics, then the timestamp.
func (s Samples) Less(i, j int) bool {
switch {
case s[i].Metric.Before(s[j].Metric):
return true
case s[j].Metric.Before(s[i].Metric):
return false
case s[i].Timestamp.Before(s[j].Timestamp):
return true
default:
return false
}
}
func (s Samples) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Equal compares two sets of samples and returns true if they are equal.
func (s Samples) Equal(o Samples) bool {
if len(s) != len(o) {
return false
}
for i, sample := range s {
if !sample.Equal(o[i]) {
return false
}
}
return true
}
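A brief, hypothetical sketch of the equality semantics above (not part of the commit; same assumed import path): Sample.Equal requires metric, timestamp, and value to all match.
package main
import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)
func main() {
	m := model.Metric{model.MetricNameLabel: "up", "job": "api"}
	s1 := &model.Sample{Metric: m, Value: 1, Timestamp: 1000}
	s2 := &model.Sample{Metric: m.Clone(), Value: 1, Timestamp: 1000}
	s3 := &model.Sample{Metric: m, Value: 0, Timestamp: 1000}

	fmt.Println(s1.Equal(s2)) // true: equality is by content, not identity
	fmt.Println(s1.Equal(s3)) // false: the values differ
}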

View File

@ -1,114 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sort"
"testing"
)
func TestSamplesSort(t *testing.T) {
input := Samples{
&Sample{
Metric: Metric{
MetricNameLabel: "A",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "A",
},
Timestamp: 2,
},
&Sample{
Metric: Metric{
MetricNameLabel: "C",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "C",
},
Timestamp: 2,
},
&Sample{
Metric: Metric{
MetricNameLabel: "B",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "B",
},
Timestamp: 2,
},
}
expected := Samples{
&Sample{
Metric: Metric{
MetricNameLabel: "A",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "A",
},
Timestamp: 2,
},
&Sample{
Metric: Metric{
MetricNameLabel: "B",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "B",
},
Timestamp: 2,
},
&Sample{
Metric: Metric{
MetricNameLabel: "C",
},
Timestamp: 1,
},
&Sample{
Metric: Metric{
MetricNameLabel: "C",
},
Timestamp: 2,
},
}
sort.Sort(input)
for i, actual := range input {
actualFp := actual.Metric.Fingerprint()
expectedFp := expected[i].Metric.Fingerprint()
if !actualFp.Equal(expectedFp) {
t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
}
if actual.Timestamp != expected[i].Timestamp {
t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
}
}
}

View File

@ -1,37 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strconv"
)
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
// Equal does a straight v==o.
func (v SampleValue) Equal(o SampleValue) bool {
return v == o
}
// MarshalJSON implements json.Marshaler.
func (v SampleValue) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, v)), nil
}
func (v SampleValue) String() string {
return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
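Because MarshalJSON wraps the String() output in double quotes, a SampleValue is rendered as a JSON string rather than a JSON number. A tiny, hypothetical sketch (not part of the commit):
package main
import (
	"encoding/json"
	"fmt"
	"math"

	"github.com/prometheus/client_golang/model"
)
func main() {
	b, _ := json.Marshal(model.SampleValue(3.14))
	fmt.Println(string(b)) // "3.14"

	// Quoting also keeps non-finite values representable in JSON.
	nan, _ := json.Marshal(model.SampleValue(math.NaN()))
	fmt.Println(string(nan)) // "NaN"
}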

View File

@ -1,190 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"bytes"
"hash"
"hash/fnv"
"sort"
"sync"
)
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
// used to separate label names, label values, and other strings from each other
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
var (
// cache the signature of an empty label set.
emptyLabelSignature = fnv.New64a().Sum64()
hashAndBufPool sync.Pool
)
type hashAndBuf struct {
h hash.Hash64
b bytes.Buffer
}
func getHashAndBuf() *hashAndBuf {
hb := hashAndBufPool.Get()
if hb == nil {
return &hashAndBuf{h: fnv.New64a()}
}
return hb.(*hashAndBuf)
}
func putHashAndBuf(hb *hashAndBuf) {
hb.h.Reset()
hb.b.Reset()
hashAndBufPool.Put(hb)
}
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
// sets the function is applied to is small.)
func LabelsToSignature(labels map[string]string) uint64 {
if len(labels) == 0 {
return emptyLabelSignature
}
labelNames := make([]string, 0, len(labels))
for labelName := range labels {
labelNames = append(labelNames, labelName)
}
sort.Strings(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for _, labelName := range labelNames {
hb.b.WriteString(labelName)
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(labels[labelName])
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
}
return hb.h.Sum64()
}
// metricToFingerprint works exactly like LabelsToSignature but takes a Metric as
// a parameter (rather than a label map) and returns a Fingerprint.
func metricToFingerprint(m Metric) Fingerprint {
if len(m) == 0 {
return Fingerprint(emptyLabelSignature)
}
labelNames := make(LabelNames, 0, len(m))
for labelName := range m {
labelNames = append(labelNames, labelName)
}
sort.Sort(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
}
return Fingerprint(hb.h.Sum64())
}
// metricToFastFingerprint works similarly to metricToFingerprint but uses a
// faster, less allocation-heavy hash function, which is more susceptible to
// hash collisions. Therefore, collision detection should be applied.
func metricToFastFingerprint(m Metric) Fingerprint {
if len(m) == 0 {
return Fingerprint(emptyLabelSignature)
}
var result uint64
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for labelName, labelValue := range m {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(labelValue))
hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64()
hb.h.Reset()
hb.b.Reset()
}
return Fingerprint(result)
}
// SignatureForLabels works like LabelsToSignature but takes a Metric as a
// parameter (rather than a label map) and only includes the labels with the
// specified LabelNames in the signature calculation. The LabelNames passed in
// will be sorted by this function.
func SignatureForLabels(m Metric, labels LabelNames) uint64 {
if len(m) == 0 || len(labels) == 0 {
return emptyLabelSignature
}
sort.Sort(labels)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for _, label := range labels {
hb.b.WriteString(string(label))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[label]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
}
return hb.h.Sum64()
}
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as a
// parameter (rather than a label map) and excludes the labels with any of the
// specified LabelNames from the signature calculation.
func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
if len(m) == 0 {
return emptyLabelSignature
}
labelNames := make(LabelNames, 0, len(m))
for labelName := range m {
if _, exclude := labels[labelName]; !exclude {
labelNames = append(labelNames, labelName)
}
}
if len(labelNames) == 0 {
return emptyLabelSignature
}
sort.Sort(labelNames)
hb := getHashAndBuf()
defer putHashAndBuf(hb)
for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
}
return hb.h.Sum64()
}
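A small, hypothetical sketch of the signature helpers above (not part of the commit; the expected value for the empty set is taken from the tests in the next file):
package main
import (
	"fmt"

	"github.com/prometheus/client_golang/model"
)
func main() {
	// The empty label set always hashes to the FNV-1a 64-bit offset basis.
	fmt.Println(model.LabelsToSignature(nil)) // 14695981039346656037

	m := model.Metric{"name": "garland, briggs", "fear": "love is not enough"}

	// Restrict the signature to a subset of label names ...
	fmt.Println(model.SignatureForLabels(m, model.LabelNames{"fear", "name"}))

	// ... or exclude specific label names instead.
	fmt.Println(model.SignatureWithoutLabels(m, map[model.LabelName]struct{}{"fear": {}}))
}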

View File

@ -1,305 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"runtime"
"sync"
"testing"
)
func TestLabelsToSignature(t *testing.T) {
var scenarios = []struct {
in map[string]string
out uint64
}{
{
in: map[string]string{},
out: 14695981039346656037,
},
{
in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
out: 5799056148416392346,
},
}
for i, scenario := range scenarios {
actual := LabelsToSignature(scenario.in)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
func TestMetricToFingerprint(t *testing.T) {
var scenarios = []struct {
in Metric
out Fingerprint
}{
{
in: Metric{},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
out: 5799056148416392346,
},
}
for i, scenario := range scenarios {
actual := metricToFingerprint(scenario.in)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
func TestMetricToFastFingerprint(t *testing.T) {
var scenarios = []struct {
in Metric
out Fingerprint
}{
{
in: Metric{},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
out: 12952432476264840823,
},
}
for i, scenario := range scenarios {
actual := metricToFastFingerprint(scenario.in)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
func TestSignatureForLabels(t *testing.T) {
var scenarios = []struct {
in Metric
labels LabelNames
out uint64
}{
{
in: Metric{},
labels: nil,
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: LabelNames{"fear", "name"},
out: 5799056148416392346,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
labels: LabelNames{"fear", "name"},
out: 5799056148416392346,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: LabelNames{},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: nil,
out: 14695981039346656037,
},
}
for i, scenario := range scenarios {
actual := SignatureForLabels(scenario.in, scenario.labels)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
func TestSignatureWithoutLabels(t *testing.T) {
var scenarios = []struct {
in Metric
labels map[LabelName]struct{}
out uint64
}{
{
in: Metric{},
labels: nil,
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
out: 14695981039346656037,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
labels: map[LabelName]struct{}{"foo": struct{}{}},
out: 5799056148416392346,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: map[LabelName]struct{}{},
out: 5799056148416392346,
},
{
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
labels: nil,
out: 5799056148416392346,
},
}
for i, scenario := range scenarios {
actual := SignatureWithoutLabels(scenario.in, scenario.labels)
if actual != scenario.out {
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
}
}
}
func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
for i := 0; i < b.N; i++ {
if a := LabelsToSignature(l); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
}
}
}
func BenchmarkLabelToSignatureScalar(b *testing.B) {
benchmarkLabelToSignature(b, nil, 14695981039346656037)
}
func BenchmarkLabelToSignatureSingle(b *testing.B) {
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
}
func BenchmarkLabelToSignatureDouble(b *testing.B) {
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
}
func BenchmarkLabelToSignatureTriple(b *testing.B) {
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
}
func benchmarkMetricToFingerprint(b *testing.B, m Metric, e Fingerprint) {
for i := 0; i < b.N; i++ {
if a := metricToFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
}
func BenchmarkMetricToFingerprintScalar(b *testing.B) {
benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
}
func BenchmarkMetricToFingerprintSingle(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value"}, 5146282821936882169)
}
func BenchmarkMetricToFingerprintDouble(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
}
func BenchmarkMetricToFingerprintTriple(b *testing.B) {
benchmarkMetricToFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
}
func benchmarkMetricToFastFingerprint(b *testing.B, m Metric, e Fingerprint) {
for i := 0; i < b.N; i++ {
if a := metricToFastFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
}
func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
}
func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value"}, 5147259542624943964)
}
func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
}
func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
benchmarkMetricToFastFingerprint(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
}
func BenchmarkEmptyLabelSignature(b *testing.B) {
b.StopTimer()
input := []map[string]string{nil, {}}
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
alloc := ms.Alloc
for _, labels := range input {
LabelsToSignature(labels)
}
runtime.ReadMemStats(&ms)
if got := ms.Alloc; alloc != got {
b.Error("expected LabelsToSignature with empty labels not to perform allocations")
}
}
func benchmarkMetricToFastFingerprintConc(b *testing.B, m Metric, e Fingerprint, concLevel int) {
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
for i := 0; i < concLevel; i++ {
go func() {
start.Wait()
for j := b.N / concLevel; j >= 0; j-- {
if a := metricToFastFingerprint(m); a != e {
b.Fatalf("expected signature of %d for %s, got %d", e, m, a)
}
}
end.Done()
}()
}
b.ResetTimer()
start.Done()
end.Wait()
}
func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
}
func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
}
func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
}
func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
benchmarkMetricToFastFingerprintConc(b, Metric{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
}

View File

@ -1,116 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"math"
"strconv"
native_time "time"
)
// Timestamp is the number of milliseconds since the epoch
// (1970-01-01 00:00 UTC) excluding leap seconds.
type Timestamp int64
const (
// MinimumTick is the minimum supported time resolution. It must be no
// coarser than native_time.Second in order for the code below to work.
MinimumTick = native_time.Millisecond
// second is the timestamp duration equivalent to one second.
second = int64(native_time.Second / MinimumTick)
// The number of nanoseconds per minimum tick.
nanosPerTick = int64(MinimumTick / native_time.Nanosecond)
// Earliest is the earliest timestamp representable. Handy for
// initializing a high watermark.
Earliest = Timestamp(math.MinInt64)
// Latest is the latest timestamp representable. Handy for initializing
// a low watermark.
Latest = Timestamp(math.MaxInt64)
)
// Equal reports whether two timestamps represent the same instant.
func (t Timestamp) Equal(o Timestamp) bool {
return t == o
}
// Before reports whether the timestamp t is before o.
func (t Timestamp) Before(o Timestamp) bool {
return t < o
}
// After reports whether the timestamp t is after o.
func (t Timestamp) After(o Timestamp) bool {
return t > o
}
// Add returns the Timestamp t + d.
func (t Timestamp) Add(d native_time.Duration) Timestamp {
return t + Timestamp(d/MinimumTick)
}
// Sub returns the Duration t - o.
func (t Timestamp) Sub(o Timestamp) native_time.Duration {
return native_time.Duration(t-o) * MinimumTick
}
// Time returns the time.Time representation of t.
func (t Timestamp) Time() native_time.Time {
return native_time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
}
// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
func (t Timestamp) Unix() int64 {
return int64(t) / second
}
// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC.
func (t Timestamp) UnixNano() int64 {
return int64(t) * nanosPerTick
}
// String returns a string representation of the timestamp.
func (t Timestamp) String() string {
return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
}
// MarshalJSON implements the json.Marshaler interface.
func (t Timestamp) MarshalJSON() ([]byte, error) {
return []byte(t.String()), nil
}
// Now returns the current time as a Timestamp.
func Now() Timestamp {
return TimestampFromTime(native_time.Now())
}
// TimestampFromTime returns the Timestamp equivalent to the time.Time t.
func TimestampFromTime(t native_time.Time) Timestamp {
return TimestampFromUnixNano(t.UnixNano())
}
// TimestampFromUnix returns the Timestamp equivalent to the Unix timestamp t
// provided in seconds.
func TimestampFromUnix(t int64) Timestamp {
return Timestamp(t * second)
}
// TimestampFromUnixNano returns the Timestamp equivalent to the Unix timestamp
// t provided in nanoseconds.
func TimestampFromUnixNano(t int64) Timestamp {
return Timestamp(t / nanosPerTick)
}
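A hypothetical sketch of the millisecond resolution described above (not part of the commit; same assumed model import path):
package main
import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/model"
)
func main() {
	// A Timestamp counts milliseconds, so sub-millisecond precision of a
	// time.Time is dropped during conversion.
	t := time.Unix(1136239445, 123456789)
	ts := model.TimestampFromTime(t)
	fmt.Println(ts)                            // 1136239445.123
	fmt.Println(ts.UnixNano() == t.UnixNano()) // false: nanoseconds were truncated

	later := ts.Add(90 * time.Second)
	fmt.Println(later.Sub(ts)) // 1m30s
}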

View File

@ -1,86 +0,0 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"testing"
native_time "time"
)
func TestComparators(t *testing.T) {
t1a := TimestampFromUnix(0)
t1b := TimestampFromUnix(0)
t2 := TimestampFromUnix(2*second - 1)
if !t1a.Equal(t1b) {
t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
}
if t1a.Equal(t2) {
t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
}
if !t1a.Before(t2) {
t.Fatalf("Expected %s to be before %s", t1a, t2)
}
if t1a.Before(t1b) {
t.Fatalf("Expected %s to not be before %s", t1a, t1b)
}
if !t2.After(t1a) {
t.Fatalf("Expected %s to be after %s", t2, t1a)
}
if t1b.After(t1a) {
t.Fatalf("Expected %s to not be after %s", t1b, t1a)
}
}
func TestTimestampConversions(t *testing.T) {
unixSecs := int64(1136239445)
unixNsecs := int64(123456789)
unixNano := unixSecs*1000000000 + unixNsecs
t1 := native_time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
t2 := native_time.Unix(unixSecs, unixNsecs)
ts := TimestampFromUnixNano(unixNano)
if !ts.Time().Equal(t1) {
t.Fatalf("Expected %s, got %s", t1, ts.Time())
}
// Test available precision.
ts = TimestampFromTime(t2)
if !ts.Time().Equal(t1) {
t.Fatalf("Expected %s, got %s", t1, ts.Time())
}
if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
}
}
func TestDuration(t *testing.T) {
duration := native_time.Second + native_time.Minute + native_time.Hour
goTime := native_time.Unix(1136239445, 0)
ts := TimestampFromTime(goTime)
if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
}
earlier := ts.Add(-duration)
delta := ts.Sub(earlier)
if delta != duration {
t.Fatalf("Expected %s to be equal to %s", delta, duration)
}
}

View File

@ -1,169 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"testing"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
)
// Benchmarks to show how much penalty text format parsing actually inflicts.
//
// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
//
// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
//
// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
// Without compression, it takes ~7x longer to parse, but with compression (the more relevant
// scenario), the gap shrinks to only ~4x.
//
// The test data contains 248 samples.
//
// BenchmarkProcessor002ParseOnly in the extraction package is not quite
// comparable to the benchmarks here, but it gives an idea: JSON parsing is even
// slower than text parsing and needs a comparable amount of allocs.
// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
// family DTOs.
func BenchmarkParseText(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("testdata/text")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
b.Fatal(err)
}
}
}
// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
// into metric family DTOs.
func BenchmarkParseTextGzip(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("testdata/text.gz")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
in, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
b.Fatal(err)
}
if _, err := parser.TextToMetricFamilies(in); err != nil {
b.Fatal(err)
}
}
}
// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
// metric family DTOs. Note that this does not build a map of metric families
// (as the text version does), because it is not required for Prometheus
// ingestion either. (However, it is required for the text-format parsing, as
// the metric family might be sprinkled all over the text, while the
// protobuf-format guarantees bundling at one place.)
func BenchmarkParseProto(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("testdata/protobuf")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
family := &dto.MetricFamily{}
in := bytes.NewReader(data)
for {
family.Reset()
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}
b.Fatal(err)
}
}
}
}
// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
// protobuf format.
func BenchmarkParseProtoGzip(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("testdata/protobuf.gz")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
family := &dto.MetricFamily{}
in, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
b.Fatal(err)
}
for {
family.Reset()
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}
b.Fatal(err)
}
}
}
}
// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
// metric family DTOs into a map. This is not happening during Prometheus
// ingestion. It is just here to measure the overhead of that map creation and
// separate it from the overhead of the text format parsing.
func BenchmarkParseProtoMap(b *testing.B) {
b.StopTimer()
data, err := ioutil.ReadFile("testdata/protobuf")
if err != nil {
b.Fatal(err)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
families := map[string]*dto.MetricFamily{}
in := bytes.NewReader(data)
for {
family := &dto.MetricFamily{}
if _, err := pbutil.ReadDelimited(in, family); err != nil {
if err == io.EOF {
break
}
b.Fatal(err)
}
families[family.GetName()] = family
}
}
}

View File

@ -1,315 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package text contains helper functions to parse and create text-based
// exchange formats. The package currently supports (only) version 0.0.4 of the
// exchange format. Should other versions be supported in the future, some
// versioning scheme has to be applied. Possibilities include separate packages
// or separate functions. The best way depends on the nature of future changes,
// which is the reason why no versioning scheme has been applied prematurely
// here.
package text
import (
"bytes"
"fmt"
"io"
"math"
"strings"
"github.com/prometheus/client_golang/model"
dto "github.com/prometheus/client_model/go"
)
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. This function does not perform checks on the
// content of the metric and label names, i.e. invalid metric or label names
// will result in invalid text format output.
// This function satisfies the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
var written int
// Fail-fast checks.
if len(in.Metric) == 0 {
return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
}
name := in.GetName()
if name == "" {
return written, fmt.Errorf("MetricFamily has no name: %s", in)
}
if in.Type == nil {
return written, fmt.Errorf("MetricFamily has no type: %s", in)
}
// Comments, first HELP, then TYPE.
if in.Help != nil {
n, err := fmt.Fprintf(
out, "# HELP %s %s\n",
name, escapeString(*in.Help, false),
)
written += n
if err != nil {
return written, err
}
}
metricType := in.GetType()
n, err := fmt.Fprintf(
out, "# TYPE %s %s\n",
name, strings.ToLower(metricType.String()),
)
written += n
if err != nil {
return written, err
}
// Finally the samples, one line for each.
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
"expected counter in metric %s %s", name, metric,
)
}
n, err = writeSample(
name, metric, "", "",
metric.Counter.GetValue(),
out,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
"expected gauge in metric %s %s", name, metric,
)
}
n, err = writeSample(
name, metric, "", "",
metric.Gauge.GetValue(),
out,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
"expected untyped in metric %s %s", name, metric,
)
}
n, err = writeSample(
name, metric, "", "",
metric.Untyped.GetValue(),
out,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
"expected summary in metric %s %s", name, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeSample(
name, metric,
model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
q.GetValue(),
out,
)
written += n
if err != nil {
return written, err
}
}
n, err = writeSample(
name+"_sum", metric, "", "",
metric.Summary.GetSampleSum(),
out,
)
if err != nil {
return written, err
}
written += n
n, err = writeSample(
name+"_count", metric, "", "",
float64(metric.Summary.GetSampleCount()),
out,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
"expected histogram in metric %s %s", name, metric,
)
}
infSeen := false
for _, q := range metric.Histogram.Bucket {
n, err = writeSample(
name+"_bucket", metric,
model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
float64(q.GetCumulativeCount()),
out,
)
written += n
if err != nil {
return written, err
}
if math.IsInf(q.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeSample(
name+"_bucket", metric,
model.BucketLabel, "+Inf",
float64(metric.Histogram.GetSampleCount()),
out,
)
if err != nil {
return written, err
}
written += n
}
n, err = writeSample(
name+"_sum", metric, "", "",
metric.Histogram.GetSampleSum(),
out,
)
if err != nil {
return written, err
}
written += n
n, err = writeSample(
name+"_count", metric, "", "",
float64(metric.Histogram.GetSampleCount()),
out,
)
default:
return written, fmt.Errorf(
"unexpected type in metric %s %s", name, metric,
)
}
written += n
if err != nil {
return written, err
}
}
return written, nil
}
// writeSample writes a single sample in text format to out, given the metric
// name, the metric proto message itself, optionally an additional label name
// and value (use empty strings if not required), and the value. The function
// returns the number of bytes written and any error encountered.
func writeSample(
name string,
metric *dto.Metric,
additionalLabelName, additionalLabelValue string,
value float64,
out io.Writer,
) (int, error) {
var written int
n, err := fmt.Fprint(out, name)
written += n
if err != nil {
return written, err
}
n, err = labelPairsToText(
metric.Label,
additionalLabelName, additionalLabelValue,
out,
)
written += n
if err != nil {
return written, err
}
n, err = fmt.Fprintf(out, " %v", value)
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
written += n
if err != nil {
return written, err
}
}
n, err = out.Write([]byte{'\n'})
written += n
if err != nil {
return written, err
}
return written, nil
}
// labelPairsToText converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into the label portion of a text-format
// sample line and writes it to 'out'. An empty slice in combination with an
// empty string 'additionalLabelName' results in nothing being
// written. Otherwise, the label pairs are written, escaped as required by the
// text format, and enclosed in '{...}'. The function returns the number of
// bytes written and any error encountered.
func labelPairsToText(
in []*dto.LabelPair,
additionalLabelName, additionalLabelValue string,
out io.Writer,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var written int
separator := '{'
for _, lp := range in {
n, err := fmt.Fprintf(
out, `%c%s="%s"`,
separator, lp.GetName(), escapeString(lp.GetValue(), true),
)
written += n
if err != nil {
return written, err
}
separator = ','
}
if additionalLabelName != "" {
n, err := fmt.Fprintf(
out, `%c%s="%s"`,
separator, additionalLabelName,
escapeString(additionalLabelValue, true),
)
written += n
if err != nil {
return written, err
}
}
n, err := out.Write([]byte{'}'})
written += n
if err != nil {
return written, err
}
return written, nil
}
// escapeString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
result := bytes.NewBuffer(make([]byte, 0, len(v)))
for _, c := range v {
switch {
case c == '\\':
result.WriteString(`\\`)
case includeDoubleQuote && c == '"':
result.WriteString(`\"`)
case c == '\n':
result.WriteString(`\n`)
default:
result.WriteRune(c)
}
}
return result.String()
}
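For orientation, here is a hypothetical caller of MetricFamilyToText (not part of the commit). It assumes the pre-removal import path github.com/prometheus/client_golang/text that the fuzzing instructions further down in this diff refer to.
package main
import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/text"
)
func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("queue_length"),
		Help: proto.String("Current length of the queue."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			{
				Label: []*dto.LabelPair{
					{Name: proto.String("shard"), Value: proto.String("a")},
				},
				Gauge: &dto.Gauge{Value: proto.Float64(42)},
			},
		},
	}
	// Writes:
	// # HELP queue_length Current length of the queue.
	// # TYPE queue_length gauge
	// queue_length{shard="a"} 42
	if _, err := text.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}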

View File

@ -1,439 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"bytes"
"math"
"strings"
"testing"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
func testCreate(t testing.TB) {
var scenarios = []struct {
in *dto.MetricFamily
out string
}{
// 0: Counter, NaN as value, timestamp given.
{
in: &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("two-line\n doc str\\ing"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val1"),
},
&dto.LabelPair{
Name: proto.String("basename"),
Value: proto.String("basevalue"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(math.NaN()),
},
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val2"),
},
&dto.LabelPair{
Name: proto.String("basename"),
Value: proto.String("basevalue"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(.23),
},
TimestampMs: proto.Int64(1234567890),
},
},
},
out: `# HELP name two-line\n doc str\\ing
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name{labelname="val2",basename="basevalue"} 0.23 1234567890
`,
},
// 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
{
in: &dto.MetricFamily{
Name: proto.String("gauge_name"),
Help: proto.String("gauge\ndoc\nstr\"ing"),
Type: dto.MetricType_GAUGE.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("name_1"),
Value: proto.String("val with\nnew line"),
},
&dto.LabelPair{
Name: proto.String("name_2"),
Value: proto.String("val with \\backslash and \"quotes\""),
},
},
Gauge: &dto.Gauge{
Value: proto.Float64(math.Inf(+1)),
},
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("name_1"),
Value: proto.String("Björn"),
},
&dto.LabelPair{
Name: proto.String("name_2"),
Value: proto.String("佖佥"),
},
},
Gauge: &dto.Gauge{
Value: proto.Float64(3.14E42),
},
},
},
},
out: `# HELP gauge_name gauge\ndoc\nstr"ing
# TYPE gauge_name gauge
gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
`,
},
// 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
{
in: &dto.MetricFamily{
Name: proto.String("untyped_name"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(math.Inf(-1)),
},
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("name_1"),
Value: proto.String("value 1"),
},
},
Untyped: &dto.Untyped{
Value: proto.Float64(-1.23e-45),
},
},
},
},
out: `# TYPE untyped_name untyped
untyped_name -Inf
untyped_name{name_1="value 1"} -1.23e-45
`,
},
// 3: Summary.
{
in: &dto.MetricFamily{
Name: proto.String("summary_name"),
Help: proto.String("summary docstring"),
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Summary: &dto.Summary{
SampleCount: proto.Uint64(42),
SampleSum: proto.Float64(-3.4567),
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(0.5),
Value: proto.Float64(-1.23),
},
&dto.Quantile{
Quantile: proto.Float64(0.9),
Value: proto.Float64(.2342354),
},
&dto.Quantile{
Quantile: proto.Float64(0.99),
Value: proto.Float64(0),
},
},
},
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("name_1"),
Value: proto.String("value 1"),
},
&dto.LabelPair{
Name: proto.String("name_2"),
Value: proto.String("value 2"),
},
},
Summary: &dto.Summary{
SampleCount: proto.Uint64(4711),
SampleSum: proto.Float64(2010.1971),
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(0.5),
Value: proto.Float64(1),
},
&dto.Quantile{
Quantile: proto.Float64(0.9),
Value: proto.Float64(2),
},
&dto.Quantile{
Quantile: proto.Float64(0.99),
Value: proto.Float64(3),
},
},
},
},
},
},
out: `# HELP summary_name summary docstring
# TYPE summary_name summary
summary_name{quantile="0.5"} -1.23
summary_name{quantile="0.9"} 0.2342354
summary_name{quantile="0.99"} 0
summary_name_sum -3.4567
summary_name_count 42
summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
summary_name_count{name_1="value 1",name_2="value 2"} 4711
`,
},
// 4: Histogram
{
in: &dto.MetricFamily{
Name: proto.String("request_duration_microseconds"),
Help: proto.String("The response latency."),
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(2693),
SampleSum: proto.Float64(1756047.3),
Bucket: []*dto.Bucket{
&dto.Bucket{
UpperBound: proto.Float64(100),
CumulativeCount: proto.Uint64(123),
},
&dto.Bucket{
UpperBound: proto.Float64(120),
CumulativeCount: proto.Uint64(412),
},
&dto.Bucket{
UpperBound: proto.Float64(144),
CumulativeCount: proto.Uint64(592),
},
&dto.Bucket{
UpperBound: proto.Float64(172.8),
CumulativeCount: proto.Uint64(1524),
},
&dto.Bucket{
UpperBound: proto.Float64(math.Inf(+1)),
CumulativeCount: proto.Uint64(2693),
},
},
},
},
},
},
out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
},
// 5: Histogram with missing +Inf bucket.
{
in: &dto.MetricFamily{
Name: proto.String("request_duration_microseconds"),
Help: proto.String("The response latency."),
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(2693),
SampleSum: proto.Float64(1756047.3),
Bucket: []*dto.Bucket{
&dto.Bucket{
UpperBound: proto.Float64(100),
CumulativeCount: proto.Uint64(123),
},
&dto.Bucket{
UpperBound: proto.Float64(120),
CumulativeCount: proto.Uint64(412),
},
&dto.Bucket{
UpperBound: proto.Float64(144),
CumulativeCount: proto.Uint64(592),
},
&dto.Bucket{
UpperBound: proto.Float64(172.8),
CumulativeCount: proto.Uint64(1524),
},
},
},
},
},
},
out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
},
}
for i, scenario := range scenarios {
out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
n, err := MetricFamilyToText(out, scenario.in)
if err != nil {
t.Errorf("%d. error: %s", i, err)
continue
}
if expected, got := len(scenario.out), n; expected != got {
t.Errorf(
"%d. expected %d bytes written, got %d",
i, expected, got,
)
}
if expected, got := scenario.out, out.String(); expected != got {
t.Errorf(
"%d. expected out=%q, got %q",
i, expected, got,
)
}
}
}
func TestCreate(t *testing.T) {
testCreate(t)
}
func BenchmarkCreate(b *testing.B) {
for i := 0; i < b.N; i++ {
testCreate(b)
}
}
func testCreateError(t testing.TB) {
var scenarios = []struct {
in *dto.MetricFamily
err string
}{
// 0: No metric.
{
in: &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("doc string"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{},
},
err: "MetricFamily has no metrics",
},
// 1: No metric name.
{
in: &dto.MetricFamily{
Help: proto.String("doc string"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(math.Inf(-1)),
},
},
},
},
err: "MetricFamily has no name",
},
// 2: No metric type.
{
in: &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("doc string"),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(math.Inf(-1)),
},
},
},
},
err: "MetricFamily has no type",
},
// 3: Wrong type.
{
in: &dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("doc string"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(math.Inf(-1)),
},
},
},
},
err: "expected counter in metric",
},
}
for i, scenario := range scenarios {
var out bytes.Buffer
_, err := MetricFamilyToText(&out, scenario.in)
if err == nil {
t.Errorf("%d. expected error, got nil", i)
continue
}
if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
t.Errorf(
"%d. expected error starting with %q, got %q",
i, expected, got,
)
}
}
}
func TestCreateError(t *testing.T) {
testCreateError(t)
}
func BenchmarkCreateError(b *testing.B) {
for i := 0; i < b.N; i++ {
testCreateError(b)
}
}

View File

@ -1,36 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Build only when actually fuzzing
// +build gofuzz
package text
import "bytes"
// Fuzz the text metric parser with github.com/dvyukov/go-fuzz:
//
// go-fuzz-build github.com/prometheus/client_golang/text
// go-fuzz -bin text-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
parser := Parser{}
_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
if err != nil {
return 0
}
return 1
}

View File

@ -1,2 +0,0 @@

View File

@ -1,6 +0,0 @@
minimal_metric 1.234
another_metric -3e3 103948
# Even that:
no_labels{} 3
# HELP line for non-existing metric will be ignored.

View File

@ -1,12 +0,0 @@
# A normal comment.
#
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
# HELP name two-line\n doc str\\ing
# HELP name2 doc str"ing 2
# TYPE name2 gauge
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
name2{ labelname = "val1" , }-Inf

View File

@ -1,22 +0,0 @@
# TYPE my_summary summary
my_summary{n1="val1",quantile="0.5"} 110
decoy -1 -2
my_summary{n1="val1",quantile="0.9"} 140 1
my_summary_count{n1="val1"} 42
# Latest timestamp wins in case of a summary.
my_summary_sum{n1="val1"} 4711 2
fake_sum{n1="val1"} 2001
# TYPE another_summary summary
another_summary_count{n2="val2",n1="val1"} 20
my_summary_count{n2="val2",n1="val1"} 5 5
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
my_summary_sum{n1="val2"} 08 15
my_summary{n1="val3", quantile="0.2"} 4711
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
# some
# funny comments
# HELP
# HELP
# HELP my_summary
# HELP my_summary

View File

@ -1,10 +0,0 @@
# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693

View File

@ -1 +0,0 @@
bla 3.14

View File

@ -1 +0,0 @@
metric{label="\t"} 3.14

View File

@ -1 +0,0 @@
metric{label="bla"} 3.14 2 3

View File

@ -1 +0,0 @@
metric{label="bla"} blubb

View File

@ -1,3 +0,0 @@
# HELP metric one
# HELP metric two

View File

@ -1,3 +0,0 @@
# TYPE metric counter
# TYPE metric untyped

View File

@ -1,3 +0,0 @@
metric 4.12
# TYPE metric counter

View File

@ -1,2 +0,0 @@
# TYPE metric bla

View File

@ -1,2 +0,0 @@
# TYPE met-ric

View File

@ -1 +0,0 @@
@invalidmetric{label="bla"} 3.14 2

View File

@ -1 +0,0 @@
{label="bla"} 3.14 2

View File

@ -1,3 +0,0 @@
# TYPE metric histogram
metric_bucket{le="bla"} 3.14

View File

@ -1,3 +0,0 @@
metric{label="new
line"} 3.14

View File

@ -1 +0,0 @@
metric{@="bla"} 3.14

View File

@ -1 +0,0 @@
metric{__name__="bla"} 3.14

View File

@ -1 +0,0 @@
metric{label+="bla"} 3.14

View File

@ -1 +0,0 @@
metric{label=bla} 3.14

View File

@ -1,3 +0,0 @@
# TYPE metric summary
metric{quantile="bla"} 3.14

View File

@ -1 +0,0 @@
metric{label="bla"+} 3.14

View File

@ -1 +0,0 @@
metric{label="bla"} 3.14 2.72

View File

@ -1 +0,0 @@
m{} 0

View File

@ -1,746 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"bufio"
"bytes"
"fmt"
"io"
"math"
"strconv"
"strings"
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto"
"github.com/prometheus/client_golang/model"
)
// A stateFn is a function that represents a state in a state machine.
// Executing it advances the machine: it returns the stateFn for the next
// state, and a nil return marks the end state.
type stateFn func() stateFn
// ParseError signals errors while parsing the simple and flat text-based
// exchange format.
type ParseError struct {
Line int
Msg string
}
// Error implements the error interface.
func (e ParseError) Error() string {
return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
}
// Parser is used to parse the simple and flat text-based exchange format. Its
// nil value is ready to use.
type Parser struct {
metricFamiliesByName map[string]*dto.MetricFamily
buf *bufio.Reader // Where the parsed input is read through.
err error // Most recent error.
lineCount int // Tracks the line count for error messages.
currentByte byte // The most recent byte read.
currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
// Summary specific.
summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
currentQuantile float64
// Histogram specific.
histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
currentBucket float64
// These tell us if the currently processed line ends in '_count' or
// '_sum' respectively and belongs to a summary/histogram, representing the
// sample count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
// format and creates MetricFamily proto messages. It returns the MetricFamily
// proto messages in a map where the metric names are the keys, along with any
// error encountered.
//
// If the input contains duplicate metrics (i.e. lines with the same metric name
// and exactly the same label set), the resulting MetricFamily will contain
// duplicate Metric proto messages. The same is true for duplicate label
// names. Checks for duplicates have to be performed separately, if required.
// Also note that neither the metrics within each MetricFamily nor the label
// pairs within each Metric are sorted. Sorting is not required for the most
// frequent use of this method, which is sample ingestion in the Prometheus
// server. However, for presentation purposes, you might want to sort the
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
// the metric family injection hook of the Prometheus registry.
//
// Summaries and histograms are rather special beasts. You would probably not
// use them in the simple text format anyway. This method can deal with
// summaries and histograms if they are presented in exactly the way the
// text.Create function creates them.
//
// This method must not be called concurrently. If you want to parse different
// input concurrently, instantiate a separate Parser for each goroutine.
func (p *Parser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
p.reset(in)
for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
// Magic happens here...
}
// Get rid of empty metric families.
for k, mf := range p.metricFamiliesByName {
if len(mf.GetMetric()) == 0 {
delete(p.metricFamiliesByName, k)
}
}
return p.metricFamiliesByName, p.err
}
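// exampleTextToMetricFamilies is an illustrative sketch, not part of the
// original file: it shows the intended call pattern for TextToMetricFamilies,
// namely handing it an io.Reader of text-format metrics and ranging over the
// returned map, which is keyed by metric family name.
func exampleTextToMetricFamilies() {
	var p Parser
	families, err := p.TextToMetricFamilies(strings.NewReader("example_total 42\n"))
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for name, mf := range families {
		// With the input above this prints: example_total: 1 metric(s), type UNTYPED
		fmt.Printf("%s: %d metric(s), type %s\n", name, len(mf.GetMetric()), mf.GetType())
	}
}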
func (p *Parser) reset(in io.Reader) {
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
if p.buf == nil {
p.buf = bufio.NewReader(in)
} else {
p.buf.Reset(in)
}
p.err = nil
p.lineCount = 0
if p.summaries == nil || len(p.summaries) > 0 {
p.summaries = map[uint64]*dto.Metric{}
}
if p.histograms == nil || len(p.histograms) > 0 {
p.histograms = map[uint64]*dto.Metric{}
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
}
// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *Parser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
// End of input reached. This is the only case where
// that is not an error but a signal that we are done.
p.err = nil
return nil
}
switch p.currentByte {
case '#':
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
}
return p.readingMetricName
}
// startComment represents the state where the next byte read from p.buf is the
// start of a comment (or whitespace leading up to it).
func (p *Parser) startComment() stateFn {
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte == '\n' {
return p.startOfLine
}
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
// If we have hit the end of line already, there is nothing left
// to do. This is not considered a syntax error.
if p.currentByte == '\n' {
return p.startOfLine
}
keyword := p.currentToken.String()
if keyword != "HELP" && keyword != "TYPE" {
// Generic comment, ignore by fast forwarding to end of line.
for p.currentByte != '\n' {
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
return nil // Unexpected end of input.
}
}
return p.startOfLine
}
// There is something. Next has to be a metric name.
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.readTokenAsMetricName(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte == '\n' {
// At the end of the line already.
// Again, this is not considered a syntax error.
return p.startOfLine
}
if !isBlankOrTab(p.currentByte) {
p.parseError("invalid metric name in comment")
return nil
}
p.setOrCreateCurrentMF()
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte == '\n' {
// At the end of the line already.
// Again, this is not considered a syntax error.
return p.startOfLine
}
switch keyword {
case "HELP":
return p.readingHelp
case "TYPE":
return p.readingType
}
panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
}
// readingMetricName represents the state where the last byte read (now in
// p.currentByte) is the first byte of a metric name.
func (p *Parser) readingMetricName() stateFn {
if p.readTokenAsMetricName(); p.err != nil {
return nil
}
if p.currentToken.Len() == 0 {
p.parseError("invalid metric name")
return nil
}
p.setOrCreateCurrentMF()
// Now is the time to fix the type if it hasn't happened yet.
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
p.currentMetric = &dto.Metric{}
// Do not append the newly created currentMetric to
// currentMF.Metric right now. First check whether this is a summary or
// histogram for which the metric already exists, which we can only know
// after having read all the labels.
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingLabels
}
// readingLabels represents the state where the last byte read (now in
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
// first byte of the value (otherwise).
func (p *Parser) readingLabels() stateFn {
// Summaries/histograms are special. We have to reset the
// currentLabels map, currentQuantile and currentBucket before starting to
// read labels.
if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
p.currentLabels = map[string]string{}
p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
}
if p.currentByte != '{' {
return p.readingValue
}
return p.startLabelName
}
// startLabelName represents the state where the next byte read from p.buf is
// the start of a label name (or whitespace leading up to it).
func (p *Parser) startLabelName() stateFn {
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
}
if p.readTokenAsLabelName(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentToken.Len() == 0 {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
return nil
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
}
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
return nil
}
return p.startLabelValue
}
// startLabelValue represents the state where the next byte read from p.buf is
// the start of a (quoted) label value (or whitespace leading up to it).
func (p *Parser) startLabelValue() stateFn {
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '"' {
p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
return nil
}
if p.readTokenAsLabelValue(); p.err != nil {
return nil
}
p.currentLabelPair.Value = proto.String(p.currentToken.String())
// Special treatment of summaries:
// - Quantile labels are special; they will result in dto.Quantile later.
// - Other labels have to be added to currentLabels for signature calculation.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if p.currentLabelPair.GetName() == model.QuantileLabel {
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
return nil
}
} else {
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
}
}
// Similar special treatment of histograms.
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if p.currentLabelPair.GetName() == model.BucketLabel {
if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
return nil
}
} else {
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
}
}
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
switch p.currentByte {
case ',':
return p.startLabelName
case '}':
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
return nil
}
}
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *Parser) readingValue() stateFn {
// When we are here, we have read all the labels, so for the
// special case of a summary/histogram, we can finally find out
// if the metric already exists.
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
signature := model.LabelsToSignature(p.currentLabels)
if summary := p.summaries[signature]; summary != nil {
p.currentMetric = summary
} else {
p.summaries[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
signature := model.LabelsToSignature(p.currentLabels)
if histogram := p.histograms[signature]; histogram != nil {
p.currentMetric = histogram
} else {
p.histograms[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
} else {
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
value, err := strconv.ParseFloat(p.currentToken.String(), 64)
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
return nil
}
switch p.currentMF.GetType() {
case dto.MetricType_COUNTER:
p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
case dto.MetricType_GAUGE:
p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
case dto.MetricType_UNTYPED:
p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
case dto.MetricType_SUMMARY:
// *sigh*
if p.currentMetric.Summary == nil {
p.currentMetric.Summary = &dto.Summary{}
}
switch {
case p.currentIsSummaryCount:
p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
case p.currentIsSummarySum:
p.currentMetric.Summary.SampleSum = proto.Float64(value)
case !math.IsNaN(p.currentQuantile):
p.currentMetric.Summary.Quantile = append(
p.currentMetric.Summary.Quantile,
&dto.Quantile{
Quantile: proto.Float64(p.currentQuantile),
Value: proto.Float64(value),
},
)
}
case dto.MetricType_HISTOGRAM:
// *sigh*
if p.currentMetric.Histogram == nil {
p.currentMetric.Histogram = &dto.Histogram{}
}
switch {
case p.currentIsHistogramCount:
p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
case p.currentIsHistogramSum:
p.currentMetric.Histogram.SampleSum = proto.Float64(value)
case !math.IsNaN(p.currentBucket):
p.currentMetric.Histogram.Bucket = append(
p.currentMetric.Histogram.Bucket,
&dto.Bucket{
UpperBound: proto.Float64(p.currentBucket),
CumulativeCount: proto.Uint64(uint64(value)),
},
)
}
default:
p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
}
if p.currentByte == '\n' {
return p.startOfLine
}
return p.startTimestamp
}
// startTimestamp represents the state where the next byte read from p.buf is
// the start of the timestamp (or whitespace leading up to it).
func (p *Parser) startTimestamp() stateFn {
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.readTokenUntilWhitespace(); p.err != nil {
return nil // Unexpected end of input.
}
timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
if err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
return nil
}
p.currentMetric.TimestampMs = proto.Int64(timestamp)
if p.readTokenUntilNewline(false); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentToken.Len() > 0 {
p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
return nil
}
return p.startOfLine
}
// readingHelp represents the state where the last byte read (now in
// p.currentByte) is the first byte of the docstring after 'HELP'.
func (p *Parser) readingHelp() stateFn {
if p.currentMF.Help != nil {
p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
return nil
}
// Rest of line is the docstring.
if p.readTokenUntilNewline(true); p.err != nil {
return nil // Unexpected end of input.
}
p.currentMF.Help = proto.String(p.currentToken.String())
return p.startOfLine
}
// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'HELP'.
func (p *Parser) readingType() stateFn {
if p.currentMF.Type != nil {
p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
return nil
}
// Rest of line is the type.
if p.readTokenUntilNewline(false); p.err != nil {
return nil // Unexpected end of input.
}
metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
if !ok {
p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
return nil
}
p.currentMF.Type = dto.MetricType(metricType).Enum()
return p.startOfLine
}
// parseError sets p.err to a ParseError at the current line with the given
// message.
func (p *Parser) parseError(msg string) {
p.err = ParseError{
Line: p.lineCount,
Msg: msg,
}
}
// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
func (p *Parser) skipBlankTab() {
for {
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
return
}
}
}
// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
// anything if p.currentByte is neither ' ' nor '\t'.
func (p *Parser) skipBlankTabIfCurrentBlankTab() {
if isBlankOrTab(p.currentByte) {
p.skipBlankTab()
}
}
// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
// first byte considered is the byte already read (now in p.currentByte). The
// first whitespace byte encountered is still copied into p.currentByte, but not
// into p.currentToken.
func (p *Parser) readTokenUntilWhitespace() {
p.currentToken.Reset()
for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
p.currentToken.WriteByte(p.currentByte)
p.currentByte, p.err = p.buf.ReadByte()
}
}
// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
// other escape sequences are invalid and cause an error.
func (p *Parser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.Reset()
escaped := false
for p.err == nil {
if recognizeEscapeSequence && escaped {
switch p.currentByte {
case '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
} else {
switch p.currentByte {
case '\n':
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
p.currentByte, p.err = p.buf.ReadByte()
}
}
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a metric name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *Parser) readTokenAsMetricName() {
p.currentToken.Reset()
if !isValidMetricNameStart(p.currentByte) {
return
}
for {
p.currentToken.WriteByte(p.currentByte)
p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
return
}
}
}
// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a label name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *Parser) readTokenAsLabelName() {
p.currentToken.Reset()
if !isValidLabelNameStart(p.currentByte) {
return
}
for {
p.currentToken.WriteByte(p.currentByte)
p.currentByte, p.err = p.buf.ReadByte()
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
return
}
}
}
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
// In contrast to the other 'readTokenAs...' functions, which start with the
// last read byte in p.currentByte, this method ignores p.currentByte and starts
// with reading a new byte from p.buf. The first byte not part of a label value
// is still copied into p.currentByte, but not into p.currentToken.
func (p *Parser) readTokenAsLabelValue() {
p.currentToken.Reset()
escaped := false
for {
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
return
}
if escaped {
switch p.currentByte {
case '"', '\\':
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
}
escaped = false
continue
}
switch p.currentByte {
case '"':
return
case '\n':
p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
return
case '\\':
escaped = true
default:
p.currentToken.WriteByte(p.currentByte)
}
}
}
func (p *Parser) setOrCreateCurrentMF() {
p.currentIsSummaryCount = false
p.currentIsSummarySum = false
p.currentIsHistogramCount = false
p.currentIsHistogramSum = false
name := p.currentToken.String()
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
return
}
// Check whether this is a _sum or _count for a summary/histogram.
summaryName := summaryMetricName(name)
if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
if isCount(name) {
p.currentIsSummaryCount = true
}
if isSum(name) {
p.currentIsSummarySum = true
}
return
}
}
histogramName := histogramMetricName(name)
if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
if isCount(name) {
p.currentIsHistogramCount = true
}
if isSum(name) {
p.currentIsHistogramSum = true
}
return
}
}
p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
p.metricFamiliesByName[name] = p.currentMF
}
func isValidLabelNameStart(b byte) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}
func isValidLabelNameContinuation(b byte) bool {
return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
}
func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':'
}
func isValidMetricNameContinuation(b byte) bool {
return isValidLabelNameContinuation(b) || b == ':'
}
func isBlankOrTab(b byte) bool {
return b == ' ' || b == '\t'
}
func isCount(name string) bool {
return len(name) > 6 && name[len(name)-6:] == "_count"
}
func isSum(name string) bool {
return len(name) > 4 && name[len(name)-4:] == "_sum"
}
func isBucket(name string) bool {
return len(name) > 7 && name[len(name)-7:] == "_bucket"
}
func summaryMetricName(name string) string {
switch {
case isCount(name):
return name[:len(name)-6]
case isSum(name):
return name[:len(name)-4]
default:
return name
}
}
func histogramMetricName(name string) string {
switch {
case isCount(name):
return name[:len(name)-6]
case isSum(name):
return name[:len(name)-4]
case isBucket(name):
return name[:len(name)-7]
default:
return name
}
}
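To make the summary handling above concrete (setOrCreateCurrentMF flagging _count and _sum lines, readingValue merging metrics by label signature), here is a rough worked example; the metric name is illustrative and the result is paraphrased rather than exact proto text:

# TYPE rpc_duration_seconds summary
rpc_duration_seconds{quantile="0.5"} 0.05
rpc_duration_seconds_sum 0.6
rpc_duration_seconds_count 10

All three sample lines share an empty label set (the quantile label is excluded from the signature), so the parser folds them into a single Metric of the "rpc_duration_seconds" SUMMARY family, carrying sample_count 10, sample_sum 0.6, and one quantile entry (quantile 0.5, value 0.05).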

View File

@ -1,588 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"math"
"strings"
"testing"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
var parser Parser
func testParse(t testing.TB) {
var scenarios = []struct {
in string
out []*dto.MetricFamily
}{
// 0: Empty lines as input.
{
in: `
`,
out: []*dto.MetricFamily{},
},
// 1: Minimal case.
{
in: `
minimal_metric 1.234
another_metric -3e3 103948
# Even that:
no_labels{} 3
# HELP line for non-existing metric will be ignored.
`,
out: []*dto.MetricFamily{
&dto.MetricFamily{
Name: proto.String("minimal_metric"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(1.234),
},
},
},
},
&dto.MetricFamily{
Name: proto.String("another_metric"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(-3e3),
},
TimestampMs: proto.Int64(103948),
},
},
},
&dto.MetricFamily{
Name: proto.String("no_labels"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(3),
},
},
},
},
},
},
// 2: Counters & gauges, docstrings, various whitespace, escape sequences.
{
in: `
# A normal comment.
#
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
# HELP name two-line\n doc str\\ing
# HELP name2 doc str"ing 2
# TYPE name2 gauge
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
name2{ labelname = "val1" , }-Inf
`,
out: []*dto.MetricFamily{
&dto.MetricFamily{
Name: proto.String("name"),
Help: proto.String("two-line\n doc str\\ing"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val1"),
},
&dto.LabelPair{
Name: proto.String("basename"),
Value: proto.String("basevalue"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(math.NaN()),
},
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val2"),
},
&dto.LabelPair{
Name: proto.String("basename"),
Value: proto.String("base\"v\\al\nue"),
},
},
Counter: &dto.Counter{
Value: proto.Float64(.23),
},
TimestampMs: proto.Int64(1234567890),
},
},
},
&dto.MetricFamily{
Name: proto.String("name2"),
Help: proto.String("doc str\"ing 2"),
Type: dto.MetricType_GAUGE.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val2"),
},
&dto.LabelPair{
Name: proto.String("basename"),
Value: proto.String("basevalue2"),
},
},
Gauge: &dto.Gauge{
Value: proto.Float64(math.Inf(+1)),
},
TimestampMs: proto.Int64(54321),
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("labelname"),
Value: proto.String("val1"),
},
},
Gauge: &dto.Gauge{
Value: proto.Float64(math.Inf(-1)),
},
},
},
},
},
},
// 3: The evil summary, mixed with other types and funny comments.
{
in: `
# TYPE my_summary summary
my_summary{n1="val1",quantile="0.5"} 110
decoy -1 -2
my_summary{n1="val1",quantile="0.9"} 140 1
my_summary_count{n1="val1"} 42
# Latest timestamp wins in case of a summary.
my_summary_sum{n1="val1"} 4711 2
fake_sum{n1="val1"} 2001
# TYPE another_summary summary
another_summary_count{n2="val2",n1="val1"} 20
my_summary_count{n2="val2",n1="val1"} 5 5
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
my_summary_sum{n1="val2"} 08 15
my_summary{n1="val3", quantile="0.2"} 4711
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
# some
# funny comments
# HELP
# HELP
# HELP my_summary
# HELP my_summary
`,
out: []*dto.MetricFamily{
&dto.MetricFamily{
Name: proto.String("fake_sum"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val1"),
},
},
Untyped: &dto.Untyped{
Value: proto.Float64(2001),
},
},
},
},
&dto.MetricFamily{
Name: proto.String("decoy"),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Untyped: &dto.Untyped{
Value: proto.Float64(-1),
},
TimestampMs: proto.Int64(-2),
},
},
},
&dto.MetricFamily{
Name: proto.String("my_summary"),
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val1"),
},
},
Summary: &dto.Summary{
SampleCount: proto.Uint64(42),
SampleSum: proto.Float64(4711),
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(0.5),
Value: proto.Float64(110),
},
&dto.Quantile{
Quantile: proto.Float64(0.9),
Value: proto.Float64(140),
},
},
},
TimestampMs: proto.Int64(2),
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n2"),
Value: proto.String("val2"),
},
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val1"),
},
},
Summary: &dto.Summary{
SampleCount: proto.Uint64(5),
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(-12.34),
Value: proto.Float64(math.NaN()),
},
},
},
TimestampMs: proto.Int64(5),
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val2"),
},
},
Summary: &dto.Summary{
SampleSum: proto.Float64(8),
},
TimestampMs: proto.Int64(15),
},
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val3"),
},
},
Summary: &dto.Summary{
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(0.2),
Value: proto.Float64(4711),
},
},
},
},
},
},
&dto.MetricFamily{
Name: proto.String("another_summary"),
Type: dto.MetricType_SUMMARY.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Label: []*dto.LabelPair{
&dto.LabelPair{
Name: proto.String("n2"),
Value: proto.String("val2"),
},
&dto.LabelPair{
Name: proto.String("n1"),
Value: proto.String("val1"),
},
},
Summary: &dto.Summary{
SampleCount: proto.Uint64(20),
Quantile: []*dto.Quantile{
&dto.Quantile{
Quantile: proto.Float64(0.3),
Value: proto.Float64(-1.2),
},
},
},
},
},
},
},
},
// 4: The histogram.
{
in: `
# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
out: []*dto.MetricFamily{
{
Name: proto.String("request_duration_microseconds"),
Help: proto.String("The response latency."),
Type: dto.MetricType_HISTOGRAM.Enum(),
Metric: []*dto.Metric{
&dto.Metric{
Histogram: &dto.Histogram{
SampleCount: proto.Uint64(2693),
SampleSum: proto.Float64(1756047.3),
Bucket: []*dto.Bucket{
&dto.Bucket{
UpperBound: proto.Float64(100),
CumulativeCount: proto.Uint64(123),
},
&dto.Bucket{
UpperBound: proto.Float64(120),
CumulativeCount: proto.Uint64(412),
},
&dto.Bucket{
UpperBound: proto.Float64(144),
CumulativeCount: proto.Uint64(592),
},
&dto.Bucket{
UpperBound: proto.Float64(172.8),
CumulativeCount: proto.Uint64(1524),
},
&dto.Bucket{
UpperBound: proto.Float64(math.Inf(+1)),
CumulativeCount: proto.Uint64(2693),
},
},
},
},
},
},
},
},
}
for i, scenario := range scenarios {
out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
if err != nil {
t.Errorf("%d. error: %s", i, err)
continue
}
if expected, got := len(scenario.out), len(out); expected != got {
t.Errorf(
"%d. expected %d MetricFamilies, got %d",
i, expected, got,
)
}
for _, expected := range scenario.out {
got, ok := out[expected.GetName()]
if !ok {
t.Errorf(
"%d. expected MetricFamily %q, found none",
i, expected.GetName(),
)
continue
}
if expected.String() != got.String() {
t.Errorf(
"%d. expected MetricFamily %s, got %s",
i, expected, got,
)
}
}
}
}
func TestParse(t *testing.T) {
testParse(t)
}
func BenchmarkParse(b *testing.B) {
for i := 0; i < b.N; i++ {
testParse(b)
}
}
func testParseError(t testing.TB) {
var scenarios = []struct {
in string
err string
}{
// 0: No new-line at end of input.
{
in: `bla 3.14`,
err: "EOF",
},
// 1: Invalid escape sequence in label value.
{
in: `metric{label="\t"} 3.14`,
err: "text format parsing error in line 1: invalid escape sequence",
},
// 2: Newline in label value.
{
in: `
metric{label="new
line"} 3.14
`,
err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
},
// 3:
{
in: `metric{@="bla"} 3.14`,
err: "text format parsing error in line 1: invalid label name for metric",
},
// 4:
{
in: `metric{__name__="bla"} 3.14`,
err: `text format parsing error in line 1: label name "__name__" is reserved`,
},
// 5:
{
in: `metric{label+="bla"} 3.14`,
err: "text format parsing error in line 1: expected '=' after label name",
},
// 6:
{
in: `metric{label=bla} 3.14`,
err: "text format parsing error in line 1: expected '\"' at start of label value",
},
// 7:
{
in: `
# TYPE metric summary
metric{quantile="bla"} 3.14
`,
err: "text format parsing error in line 3: expected float as value for 'quantile' label",
},
// 8:
{
in: `metric{label="bla"+} 3.14`,
err: "text format parsing error in line 1: unexpected end of label value",
},
// 9:
{
in: `metric{label="bla"} 3.14 2.72
`,
err: "text format parsing error in line 1: expected integer as timestamp",
},
// 10:
{
in: `metric{label="bla"} 3.14 2 3
`,
err: "text format parsing error in line 1: spurious string after timestamp",
},
// 11:
{
in: `metric{label="bla"} blubb
`,
err: "text format parsing error in line 1: expected float as value",
},
// 12:
{
in: `
# HELP metric one
# HELP metric two
`,
err: "text format parsing error in line 3: second HELP line for metric name",
},
// 13:
{
in: `
# TYPE metric counter
# TYPE metric untyped
`,
err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
},
// 14:
{
in: `
metric 4.12
# TYPE metric counter
`,
err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
},
// 15:
{
in: `
# TYPE metric bla
`,
err: "text format parsing error in line 2: unknown metric type",
},
// 16:
{
in: `
# TYPE met-ric
`,
err: "text format parsing error in line 2: invalid metric name in comment",
},
// 17:
{
in: `@invalidmetric{label="bla"} 3.14 2`,
err: "text format parsing error in line 1: invalid metric name",
},
// 18:
{
in: `{label="bla"} 3.14 2`,
err: "text format parsing error in line 1: invalid metric name",
},
// 19:
{
in: `
# TYPE metric histogram
metric_bucket{le="bla"} 3.14
`,
err: "text format parsing error in line 3: expected float as value for 'le' label",
},
}
for i, scenario := range scenarios {
_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
if err == nil {
t.Errorf("%d. expected error, got nil", i)
continue
}
if expected, got := scenario.err, err.Error(); !strings.HasPrefix(got, expected) {
t.Errorf(
"%d. expected error starting with %q, got %q",
i, expected, got,
)
}
}
}
func TestParseError(t *testing.T) {
testParseError(t)
}
func BenchmarkParseError(b *testing.B) {
for i := 0; i < b.N; i++ {
testParseError(b)
}
}
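All the error strings asserted above come from ParseError values, so callers are not limited to matching message prefixes; they can branch on the error type instead. A sketch (the helper name is illustrative):

// classifyParseError distinguishes text-format syntax errors, which carry a
// line number, from I/O and other errors returned by TextToMetricFamilies.
func classifyParseError(err error) string {
	if parseErr, ok := err.(ParseError); ok {
		return "syntax error: " + parseErr.Error() // Error() already includes the line number.
	}
	return "other error: " + err.Error()
}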

View File

@ -1,43 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package text
import (
"fmt"
"io"
"github.com/golang/protobuf/proto"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
)
// WriteProtoDelimited writes the MetricFamily to the writer in delimited
// protobuf format and returns the number of bytes written and any error
// encountered.
func WriteProtoDelimited(w io.Writer, p *dto.MetricFamily) (int, error) {
return pbutil.WriteDelimited(w, p)
}
// WriteProtoText writes the MetricFamily to the writer in text format and
// returns the number of bytes written and any error encountered.
func WriteProtoText(w io.Writer, p *dto.MetricFamily) (int, error) {
return fmt.Fprintf(w, "%s\n", proto.MarshalTextString(p))
}
// WriteProtoCompactText writes the MetricFamily to the writer in compact text
// format and returns the number of bytes written and any error encountered.
func WriteProtoCompactText(w io.Writer, p *dto.MetricFamily) (int, error) {
return fmt.Fprintf(w, "%s\n", p)
}
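The three writers above differ only in the wire representation they produce for the same MetricFamily. A brief sketch of using two of them together, assuming the package's own io and dto imports (the helper name is illustrative):

// encodeDelimitedAndText writes mf twice: first as a length-delimited binary
// protobuf message, then as compact proto text.
func encodeDelimitedAndText(w io.Writer, mf *dto.MetricFamily) error {
	if _, err := WriteProtoDelimited(w, mf); err != nil {
		return err
	}
	_, err := WriteProtoCompactText(w, mf)
	return err
}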

BIN text/testdata/protobuf vendored
Binary file not shown.
Binary file not shown.

322 text/testdata/text vendored
View File

@ -1,322 +0,0 @@
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
# TYPE http_request_duration_microseconds summary
http_request_duration_microseconds{handler="/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/"} 0
http_request_duration_microseconds_count{handler="/"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/alerts"} 0
http_request_duration_microseconds_count{handler="/alerts"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/metrics"} 0
http_request_duration_microseconds_count{handler="/api/metrics"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/query"} 0
http_request_duration_microseconds_count{handler="/api/query"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/query_range"} 0
http_request_duration_microseconds_count{handler="/api/query_range"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/api/targets"} 0
http_request_duration_microseconds_count{handler="/api/targets"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/consoles/"} 0
http_request_duration_microseconds_count{handler="/consoles/"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/graph"} 0
http_request_duration_microseconds_count{handler="/graph"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/heap"} 0
http_request_duration_microseconds_count{handler="/heap"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
http_request_duration_microseconds_sum{handler="/static/"} 0
http_request_duration_microseconds_count{handler="/static/"} 0
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
http_request_duration_microseconds_count{handler="prometheus"} 119
# HELP http_request_size_bytes The HTTP request sizes in bytes.
# TYPE http_request_size_bytes summary
http_request_size_bytes{handler="/",quantile="0.5"} 0
http_request_size_bytes{handler="/",quantile="0.9"} 0
http_request_size_bytes{handler="/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/"} 0
http_request_size_bytes_count{handler="/"} 0
http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/alerts"} 0
http_request_size_bytes_count{handler="/alerts"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/metrics"} 0
http_request_size_bytes_count{handler="/api/metrics"} 0
http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/query"} 0
http_request_size_bytes_count{handler="/api/query"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/query_range"} 0
http_request_size_bytes_count{handler="/api/query_range"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/api/targets"} 0
http_request_size_bytes_count{handler="/api/targets"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/consoles/"} 0
http_request_size_bytes_count{handler="/consoles/"} 0
http_request_size_bytes{handler="/graph",quantile="0.5"} 0
http_request_size_bytes{handler="/graph",quantile="0.9"} 0
http_request_size_bytes{handler="/graph",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/graph"} 0
http_request_size_bytes_count{handler="/graph"} 0
http_request_size_bytes{handler="/heap",quantile="0.5"} 0
http_request_size_bytes{handler="/heap",quantile="0.9"} 0
http_request_size_bytes{handler="/heap",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/heap"} 0
http_request_size_bytes_count{handler="/heap"} 0
http_request_size_bytes{handler="/static/",quantile="0.5"} 0
http_request_size_bytes{handler="/static/",quantile="0.9"} 0
http_request_size_bytes{handler="/static/",quantile="0.99"} 0
http_request_size_bytes_sum{handler="/static/"} 0
http_request_size_bytes_count{handler="/static/"} 0
http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
http_request_size_bytes_sum{handler="prometheus"} 34488
http_request_size_bytes_count{handler="prometheus"} 119
# HELP http_requests_total Total number of HTTP requests made.
# TYPE http_requests_total counter
http_requests_total{code="200",handler="prometheus",method="get"} 119
# HELP http_response_size_bytes The HTTP response sizes in bytes.
# TYPE http_response_size_bytes summary
http_response_size_bytes{handler="/",quantile="0.5"} 0
http_response_size_bytes{handler="/",quantile="0.9"} 0
http_response_size_bytes{handler="/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/"} 0
http_response_size_bytes_count{handler="/"} 0
http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/alerts"} 0
http_response_size_bytes_count{handler="/alerts"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/metrics"} 0
http_response_size_bytes_count{handler="/api/metrics"} 0
http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/query"} 0
http_response_size_bytes_count{handler="/api/query"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/query_range"} 0
http_response_size_bytes_count{handler="/api/query_range"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/api/targets"} 0
http_response_size_bytes_count{handler="/api/targets"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/consoles/"} 0
http_response_size_bytes_count{handler="/consoles/"} 0
http_response_size_bytes{handler="/graph",quantile="0.5"} 0
http_response_size_bytes{handler="/graph",quantile="0.9"} 0
http_response_size_bytes{handler="/graph",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/graph"} 0
http_response_size_bytes_count{handler="/graph"} 0
http_response_size_bytes{handler="/heap",quantile="0.5"} 0
http_response_size_bytes{handler="/heap",quantile="0.9"} 0
http_response_size_bytes{handler="/heap",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/heap"} 0
http_response_size_bytes_count{handler="/heap"} 0
http_response_size_bytes{handler="/static/",quantile="0.5"} 0
http_response_size_bytes{handler="/static/",quantile="0.9"} 0
http_response_size_bytes{handler="/static/",quantile="0.99"} 0
http_response_size_bytes_sum{handler="/static/"} 0
http_response_size_bytes_count{handler="/static/"} 0
http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
http_response_size_bytes_sum{handler="prometheus"} 247001
http_response_size_bytes_count{handler="prometheus"} 119
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.55
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 70
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 8192
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 29
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 5.3870592e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.42236894836e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 5.41478912e+08
# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
# TYPE prometheus_dns_sd_lookup_failures_total counter
prometheus_dns_sd_lookup_failures_total 0
# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
# TYPE prometheus_dns_sd_lookups_total counter
prometheus_dns_sd_lookups_total 7
# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
# TYPE prometheus_evaluator_duration_milliseconds summary
prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
prometheus_evaluator_duration_milliseconds_sum 12
prometheus_evaluator_duration_milliseconds_count 23
# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
prometheus_local_storage_checkpoint_duration_milliseconds 0
# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
# TYPE prometheus_local_storage_chunk_ops_total counter
prometheus_local_storage_chunk_ops_total{type="create"} 598
prometheus_local_storage_chunk_ops_total{type="persist"} 174
prometheus_local_storage_chunk_ops_total{type="pin"} 920
prometheus_local_storage_chunk_ops_total{type="transcode"} 415
prometheus_local_storage_chunk_ops_total{type="unpin"} 920
# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
# TYPE prometheus_local_storage_indexing_batch_sizes summary
prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
prometheus_local_storage_indexing_batch_sizes_sum 2
prometheus_local_storage_indexing_batch_sizes_count 1
# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
# TYPE prometheus_local_storage_indexing_queue_capacity gauge
prometheus_local_storage_indexing_queue_capacity 16384
# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
# TYPE prometheus_local_storage_indexing_queue_length gauge
prometheus_local_storage_indexing_queue_length 0
# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
# TYPE prometheus_local_storage_ingested_samples_total counter
prometheus_local_storage_ingested_samples_total 30473
# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
# TYPE prometheus_local_storage_invalid_preload_requests_total counter
prometheus_local_storage_invalid_preload_requests_total 0
# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
# TYPE prometheus_local_storage_memory_chunkdescs gauge
prometheus_local_storage_memory_chunkdescs 1059
# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
# TYPE prometheus_local_storage_memory_chunks gauge
prometheus_local_storage_memory_chunks 1020
# HELP prometheus_local_storage_memory_series The current number of series in memory.
# TYPE prometheus_local_storage_memory_series gauge
prometheus_local_storage_memory_series 424
# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
# TYPE prometheus_local_storage_persist_latency_microseconds summary
prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
prometheus_local_storage_persist_latency_microseconds_sum 20424.415
prometheus_local_storage_persist_latency_microseconds_count 174
# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
# TYPE prometheus_local_storage_persist_queue_capacity gauge
prometheus_local_storage_persist_queue_capacity 1024
# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
# TYPE prometheus_local_storage_persist_queue_length gauge
prometheus_local_storage_persist_queue_length 0
# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
# TYPE prometheus_local_storage_series_ops_total counter
prometheus_local_storage_series_ops_total{type="create"} 2
prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
# TYPE prometheus_notifications_latency_milliseconds summary
prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
prometheus_notifications_latency_milliseconds_sum 0
prometheus_notifications_latency_milliseconds_count 0
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
# TYPE prometheus_notifications_queue_capacity gauge
prometheus_notifications_queue_capacity 100
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
# TYPE prometheus_notifications_queue_length gauge
prometheus_notifications_queue_length 0
# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
# TYPE prometheus_rule_evaluation_duration_milliseconds summary
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
# TYPE prometheus_rule_evaluation_failures_total counter
prometheus_rule_evaluation_failures_total 0
# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
# TYPE prometheus_samples_queue_capacity gauge
prometheus_samples_queue_capacity 4096
# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
# TYPE prometheus_samples_queue_length gauge
prometheus_samples_queue_length 0
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
# TYPE prometheus_target_interval_length_seconds summary
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
prometheus_target_interval_length_seconds_sum{interval="15s"} 175
prometheus_target_interval_length_seconds_count{interval="15s"} 12
prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
prometheus_target_interval_length_seconds_sum{interval="1s"} 55
prometheus_target_interval_length_seconds_count{interval="1s"} 117

BIN text/testdata/text.gz vendored
Binary file not shown.