2017-04-20 00:24:14 +03:00
|
|
|
// Copyright 2017 The Prometheus Authors
|
2015-08-19 16:33:59 +03:00
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-04-20 00:24:14 +03:00
|
|
|
// Package v1 provides bindings to the Prometheus HTTP API v1:
|
2015-08-19 16:33:59 +03:00
|
|
|
// http://prometheus.io/docs/querying/api/
|
2017-04-20 00:24:14 +03:00
|
|
|
package v1
|
2015-08-19 16:33:59 +03:00
|
|
|
|
|
|
|
import (
|
2017-04-20 15:57:46 +03:00
|
|
|
"context"
|
2019-01-16 00:34:51 +03:00
|
|
|
"errors"
|
2015-08-19 16:33:59 +03:00
|
|
|
"fmt"
|
2019-05-28 14:45:06 +03:00
|
|
|
"math"
|
2015-08-19 16:33:59 +03:00
|
|
|
"net/http"
|
2019-12-10 23:16:44 +03:00
|
|
|
"net/url"
|
2015-08-19 16:33:59 +03:00
|
|
|
"strconv"
|
2019-12-10 23:16:44 +03:00
|
|
|
"strings"
|
2015-08-19 16:33:59 +03:00
|
|
|
"time"
|
2019-05-28 14:45:06 +03:00
|
|
|
"unsafe"
|
|
|
|
|
|
|
|
json "github.com/json-iterator/go"
|
2015-08-19 16:33:59 +03:00
|
|
|
|
2017-04-20 00:24:14 +03:00
|
|
|
"github.com/prometheus/common/model"
|
2019-05-28 14:45:06 +03:00
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/api"
|
2015-08-19 16:33:59 +03:00
|
|
|
)
|
|
|
|
|
2019-05-28 14:45:06 +03:00
|
|
|
// init registers custom jsoniter encoders/decoders for the model types that
// the Prometheus API renders in a non-standard JSON shape (e.g. a SamplePair
// is the array [timestamp, "value"], not an object).
func init() {
	json.RegisterTypeEncoderFunc("model.SamplePair", marshalSamplePairJSON, marshalJSONIsEmpty)
	json.RegisterTypeDecoderFunc("model.SamplePair", unmarshalSamplePairJSON)
	json.RegisterTypeEncoderFunc("model.SampleHistogramPair", marshalSampleHistogramPairJSON, marshalJSONIsEmpty)
	json.RegisterTypeDecoderFunc("model.SampleHistogramPair", unmarshalSampleHistogramPairJSON)
	json.RegisterTypeEncoderFunc("model.SampleStream", marshalSampleStreamJSON, marshalJSONIsEmpty) // Only needed for benchmark.
	json.RegisterTypeDecoderFunc("model.SampleStream", unmarshalSampleStreamJSON) // Only needed for benchmark.
}
|
|
|
|
|
2023-02-24 00:34:07 +03:00
|
|
|
// unmarshalSamplePairJSON decodes a model.SamplePair from its API wire
// format: a two-element JSON array [timestamp, "value"], where the value is
// encoded as a string. Decoding errors are reported through the iterator.
func unmarshalSamplePairJSON(ptr unsafe.Pointer, iter *json.Iterator) {
	p := (*model.SamplePair)(ptr)
	// 1st array element: the timestamp.
	if !iter.ReadArray() {
		iter.ReportError("unmarshal model.SamplePair", "SamplePair must be [timestamp, value]")
		return
	}
	t := iter.ReadNumber()
	if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil {
		iter.ReportError("unmarshal model.SamplePair", err.Error())
		return
	}
	// 2nd array element: the sample value, serialized as a string so that
	// NaN/Inf can be represented.
	if !iter.ReadArray() {
		iter.ReportError("unmarshal model.SamplePair", "SamplePair missing value")
		return
	}

	f, err := strconv.ParseFloat(iter.ReadString(), 64)
	if err != nil {
		iter.ReportError("unmarshal model.SamplePair", err.Error())
		return
	}
	p.Value = model.SampleValue(f)

	// The array must contain exactly two elements.
	if iter.ReadArray() {
		iter.ReportError("unmarshal model.SamplePair", "SamplePair has too many values, must be [timestamp, value]")
		return
	}
}
|
|
|
|
|
2023-02-24 00:34:07 +03:00
|
|
|
func marshalSamplePairJSON(ptr unsafe.Pointer, stream *json.Stream) {
|
2019-05-28 14:45:06 +03:00
|
|
|
p := *((*model.SamplePair)(ptr))
|
|
|
|
stream.WriteArrayStart()
|
2023-02-24 00:34:07 +03:00
|
|
|
marshalTimestamp(p.Timestamp, stream)
|
|
|
|
stream.WriteMore()
|
|
|
|
marshalFloat(float64(p.Value), stream)
|
|
|
|
stream.WriteArrayEnd()
|
|
|
|
}
|
|
|
|
|
|
|
|
func unmarshalSampleHistogramPairJSON(ptr unsafe.Pointer, iter *json.Iterator) {
|
|
|
|
p := (*model.SampleHistogramPair)(ptr)
|
|
|
|
if !iter.ReadArray() {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", "SampleHistogramPair must be [timestamp, {histogram}]")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
t := iter.ReadNumber()
|
|
|
|
if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", err.Error())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !iter.ReadArray() {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", "SamplePair missing histogram")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
h := &model.SampleHistogram{}
|
|
|
|
p.Histogram = h
|
|
|
|
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
|
|
|
|
switch key {
|
|
|
|
case "count":
|
|
|
|
f, err := strconv.ParseFloat(iter.ReadString(), 64)
|
|
|
|
if err != nil {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", "count of histogram is not a float")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
h.Count = model.FloatString(f)
|
|
|
|
case "sum":
|
|
|
|
f, err := strconv.ParseFloat(iter.ReadString(), 64)
|
|
|
|
if err != nil {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", "sum of histogram is not a float")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
h.Sum = model.FloatString(f)
|
|
|
|
case "buckets":
|
|
|
|
for iter.ReadArray() {
|
|
|
|
b, err := unmarshalHistogramBucket(iter)
|
|
|
|
if err != nil {
|
|
|
|
iter.ReportError("unmarshal model.HistogramBucket", err.Error())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
h.Buckets = append(h.Buckets, b)
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", fmt.Sprint("unexpected key in histogram:", key))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if iter.ReadArray() {
|
|
|
|
iter.ReportError("unmarshal model.SampleHistogramPair", "SampleHistogramPair has too many values, must be [timestamp, {histogram}]")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func marshalSampleHistogramPairJSON(ptr unsafe.Pointer, stream *json.Stream) {
|
|
|
|
p := *((*model.SampleHistogramPair)(ptr))
|
|
|
|
stream.WriteArrayStart()
|
|
|
|
marshalTimestamp(p.Timestamp, stream)
|
|
|
|
stream.WriteMore()
|
|
|
|
marshalHistogram(*p.Histogram, stream)
|
|
|
|
stream.WriteArrayEnd()
|
|
|
|
}
|
|
|
|
|
|
|
|
// unmarshalSampleStreamJSON decodes a model.SampleStream from its API wire
// format: an object with a "metric" label set plus optional "values" (float
// samples) and "histograms" (histogram samples) arrays.
func unmarshalSampleStreamJSON(ptr unsafe.Pointer, iter *json.Iterator) {
	ss := (*model.SampleStream)(ptr)
	for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
		switch key {
		case "metric":
			// Re-serialize the raw object and hand it to the regular
			// model.Metric unmarshaler.
			metricString := iter.ReadAny().ToString()
			if err := json.UnmarshalFromString(metricString, &ss.Metric); err != nil {
				iter.ReportError("unmarshal model.SampleStream", err.Error())
				return
			}
		case "values":
			// Delegate each element to the registered SamplePair decoder.
			for iter.ReadArray() {
				v := model.SamplePair{}
				unmarshalSamplePairJSON(unsafe.Pointer(&v), iter)
				ss.Values = append(ss.Values, v)
			}
		case "histograms":
			for iter.ReadArray() {
				h := model.SampleHistogramPair{}
				unmarshalSampleHistogramPairJSON(unsafe.Pointer(&h), iter)
				ss.Histograms = append(ss.Histograms, h)
			}
		default:
			iter.ReportError("unmarshal model.SampleStream", fmt.Sprint("unexpected key:", key))
			return
		}
	}
}
|
|
|
|
|
|
|
|
// marshalSampleStreamJSON encodes a model.SampleStream as an object with the
// "metric" label set and, when non-empty, "values" and "histograms" arrays.
// Registered only for the benchmark (see init).
func marshalSampleStreamJSON(ptr unsafe.Pointer, stream *json.Stream) {
	ss := *((*model.SampleStream)(ptr))
	stream.WriteObjectStart()
	stream.WriteObjectField(`metric`)
	// Marshal the label set with the stdlib-compatible config and splice the
	// raw bytes directly into the stream's buffer.
	m, err := json.ConfigCompatibleWithStandardLibrary.Marshal(ss.Metric)
	if err != nil {
		stream.Error = err
		return
	}
	stream.SetBuffer(append(stream.Buffer(), m...))
	if len(ss.Values) > 0 {
		stream.WriteMore()
		stream.WriteObjectField(`values`)
		stream.WriteArrayStart()
		for i, v := range ss.Values {
			if i > 0 {
				stream.WriteMore()
			}
			// Taking the loop variable's address is safe: the pointer is
			// consumed synchronously within this iteration.
			marshalSamplePairJSON(unsafe.Pointer(&v), stream)
		}
		stream.WriteArrayEnd()
	}
	if len(ss.Histograms) > 0 {
		stream.WriteMore()
		stream.WriteObjectField(`histograms`)
		stream.WriteArrayStart()
		for i, h := range ss.Histograms {
			if i > 0 {
				stream.WriteMore()
			}
			marshalSampleHistogramPairJSON(unsafe.Pointer(&h), stream)
		}
		stream.WriteArrayEnd()
	}
	stream.WriteObjectEnd()
}
|
|
|
|
|
|
|
|
// marshalFloat writes a float64 as a quoted JSON string, e.g. "1.5".
// Quoting lets "NaN", "+Inf", and "-Inf" be represented, which plain JSON
// numbers cannot.
func marshalFloat(v float64, stream *json.Stream) {
	stream.WriteRaw(`"`)
	// Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround
	// to https://github.com/json-iterator/go/issues/365 (json-iterator, to follow json standard, doesn't allow inf/nan).
	buf := stream.Buffer()
	abs := math.Abs(v)
	fmt := byte('f')
	// Use scientific notation only for very small or very large magnitudes;
	// these cutoffs match encoding/json's formatting of float64 values.
	// (The upstream snippet's note about float32 comparisons does not apply
	// here — v is a float64.)
	if abs != 0 {
		if abs < 1e-6 || abs >= 1e21 {
			fmt = 'e'
		}
	}
	buf = strconv.AppendFloat(buf, v, fmt, -1, 64)
	stream.SetBuffer(buf)
	stream.WriteRaw(`"`)
}
|
|
|
|
|
|
|
|
// marshalTimestamp writes a model.Time (milliseconds since the epoch) as a
// JSON number of seconds with up to millisecond precision, e.g. 1234.567.
func marshalTimestamp(timestamp model.Time, stream *json.Stream) {
	t := int64(timestamp)
	// Write out the timestamp as a float divided by 1000.
	// This is ~3x faster than converting to a float.
	if t < 0 {
		// Emit the sign separately so the integer/fraction split below
		// operates on a non-negative value.
		stream.WriteRaw(`-`)
		t = -t
	}
	stream.WriteInt64(t / 1000)
	fraction := t % 1000
	if fraction != 0 {
		stream.WriteRaw(`.`)
		// Zero-pad the millisecond fraction to three digits.
		if fraction < 100 {
			stream.WriteRaw(`0`)
		}
		if fraction < 10 {
			stream.WriteRaw(`0`)
		}
		stream.WriteInt64(fraction)
	}
}
|
2019-05-28 14:45:06 +03:00
|
|
|
|
2023-02-24 00:34:07 +03:00
|
|
|
// unmarshalHistogramBucket decodes one histogram bucket from its API wire
// format, the four-element array [boundaries, lower, upper, count].
// Unlike the iterator-callback decoders above, it returns an error rather
// than reporting it, leaving that to the caller.
func unmarshalHistogramBucket(iter *json.Iterator) (*model.HistogramBucket, error) {
	b := model.HistogramBucket{}
	// 1st element: the boundary-inclusivity code (see marshalHistogram).
	if !iter.ReadArray() {
		return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]")
	}
	boundaries, err := iter.ReadNumber().Int64()
	if err != nil {
		return nil, err
	}
	b.Boundaries = int32(boundaries)
	// 2nd element: the lower boundary, encoded as a string.
	if !iter.ReadArray() {
		return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]")
	}
	f, err := strconv.ParseFloat(iter.ReadString(), 64)
	if err != nil {
		return nil, err
	}
	b.Lower = model.FloatString(f)
	// 3rd element: the upper boundary.
	if !iter.ReadArray() {
		return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]")
	}
	f, err = strconv.ParseFloat(iter.ReadString(), 64)
	if err != nil {
		return nil, err
	}
	b.Upper = model.FloatString(f)
	// 4th element: the bucket count.
	if !iter.ReadArray() {
		return nil, errors.New("HistogramBucket must be [boundaries, lower, upper, count]")
	}
	f, err = strconv.ParseFloat(iter.ReadString(), 64)
	if err != nil {
		return nil, err
	}
	b.Count = model.FloatString(f)
	// The array must contain exactly four elements.
	if iter.ReadArray() {
		return nil, errors.New("HistogramBucket has too many values, must be [boundaries, lower, upper, count]")
	}
	return &b, nil
}
|
2019-05-28 14:45:06 +03:00
|
|
|
|
2023-02-24 00:34:07 +03:00
|
|
|
// marshalHistogramBucket writes something like: [ 3, "-0.25", "0.25", "3"]
|
|
|
|
// See marshalHistogram to understand what the numbers mean
|
|
|
|
func marshalHistogramBucket(b model.HistogramBucket, stream *json.Stream) {
|
|
|
|
stream.WriteArrayStart()
|
|
|
|
stream.WriteInt32(b.Boundaries)
|
|
|
|
stream.WriteMore()
|
|
|
|
marshalFloat(float64(b.Lower), stream)
|
|
|
|
stream.WriteMore()
|
|
|
|
marshalFloat(float64(b.Upper), stream)
|
|
|
|
stream.WriteMore()
|
|
|
|
marshalFloat(float64(b.Count), stream)
|
2019-05-28 14:45:06 +03:00
|
|
|
stream.WriteArrayEnd()
|
|
|
|
}
|
|
|
|
|
2023-02-24 00:34:07 +03:00
|
|
|
// marshalHistogram writes something like:
//
//	{
//	    "count": "42",
//	    "sum": "34593.34",
//	    "buckets": [
//	      [ 3, "-0.25", "0.25", "3"],
//	      [ 0, "0.25", "0.5", "12"],
//	      [ 0, "0.5", "1", "21"],
//	      [ 0, "2", "4", "6"]
//	    ]
//	}
//
// The 1st element in each bucket array determines if the boundaries are
// inclusive (AKA closed) or exclusive (AKA open):
//
//	0: lower exclusive, upper inclusive
//	1: lower inclusive, upper exclusive
//	2: both exclusive
//	3: both inclusive
//
// The 2nd and 3rd elements are the lower and upper boundary. The 4th element is
// the bucket count.
func marshalHistogram(h model.SampleHistogram, stream *json.Stream) {
	stream.WriteObjectStart()
	stream.WriteObjectField(`count`)
	marshalFloat(float64(h.Count), stream)
	stream.WriteMore()
	stream.WriteObjectField(`sum`)
	marshalFloat(float64(h.Sum), stream)

	// Open the "buckets" array lazily so the key is omitted entirely when
	// every bucket is empty.
	bucketFound := false
	for _, bucket := range h.Buckets {
		if bucket.Count == 0 {
			continue // No need to expose empty buckets in JSON.
		}
		stream.WriteMore()
		if !bucketFound {
			stream.WriteObjectField(`buckets`)
			stream.WriteArrayStart()
		}
		bucketFound = true
		marshalHistogramBucket(*bucket, stream)
	}
	if bucketFound {
		stream.WriteArrayEnd()
	}
	stream.WriteObjectEnd()
}
|
|
|
|
|
|
|
|
// marshalJSONIsEmpty is the IsEmpty callback passed to
// json.RegisterTypeEncoderFunc. The registered types are always encoded, so
// it unconditionally returns false.
func marshalJSONIsEmpty(ptr unsafe.Pointer) bool {
	return false
}
|
|
|
|
|
2015-08-19 16:33:59 +03:00
|
|
|
// Endpoint paths of the Prometheus HTTP API v1, appended to the client's
// base URL. ":name" in epLabelValues is substituted with the label name.
const (
	apiPrefix = "/api/v1"

	epAlerts          = apiPrefix + "/alerts"
	epAlertManagers   = apiPrefix + "/alertmanagers"
	epQuery           = apiPrefix + "/query"
	epQueryRange      = apiPrefix + "/query_range"
	epQueryExemplars  = apiPrefix + "/query_exemplars"
	epLabels          = apiPrefix + "/labels"
	epLabelValues     = apiPrefix + "/label/:name/values"
	epSeries          = apiPrefix + "/series"
	epTargets         = apiPrefix + "/targets"
	epTargetsMetadata = apiPrefix + "/targets/metadata"
	epMetadata        = apiPrefix + "/metadata"
	epRules           = apiPrefix + "/rules"
	epSnapshot        = apiPrefix + "/admin/tsdb/snapshot"
	epDeleteSeries    = apiPrefix + "/admin/tsdb/delete_series"
	epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
	epConfig          = apiPrefix + "/status/config"
	epFlags           = apiPrefix + "/status/flags"
	epBuildinfo       = apiPrefix + "/status/buildinfo"
	epRuntimeinfo     = apiPrefix + "/status/runtimeinfo"
	epTSDB            = apiPrefix + "/status/tsdb"
	epWalReplay       = apiPrefix + "/status/walreplay"
)
|
|
|
|
|
2019-01-16 00:34:51 +03:00
|
|
|
// AlertState models the state of an alert.
|
|
|
|
type AlertState string
|
|
|
|
|
2016-09-16 20:59:04 +03:00
|
|
|
// ErrorType models the different API error types.
|
2015-08-19 16:33:59 +03:00
|
|
|
type ErrorType string
|
|
|
|
|
2018-04-11 17:38:10 +03:00
|
|
|
// HealthStatus models the health status of a scrape target.
|
|
|
|
type HealthStatus string
|
|
|
|
|
2019-01-16 00:34:51 +03:00
|
|
|
// RuleType models the type of a rule.
|
|
|
|
type RuleType string
|
|
|
|
|
|
|
|
// RuleHealth models the health status of a rule.
|
|
|
|
type RuleHealth string
|
|
|
|
|
2019-05-28 15:27:09 +03:00
|
|
|
// MetricType models the type of a metric.
|
|
|
|
type MetricType string
|
|
|
|
|
2015-08-19 16:33:59 +03:00
|
|
|
const (
	// Possible values for AlertState.
	AlertStateFiring   AlertState = "firing"
	AlertStateInactive AlertState = "inactive"
	AlertStatePending  AlertState = "pending"

	// Possible values for ErrorType.
	ErrBadData     ErrorType = "bad_data"
	ErrTimeout     ErrorType = "timeout"
	ErrCanceled    ErrorType = "canceled"
	ErrExec        ErrorType = "execution"
	ErrBadResponse ErrorType = "bad_response"
	ErrServer      ErrorType = "server_error"
	ErrClient      ErrorType = "client_error"

	// Possible values for HealthStatus.
	HealthGood    HealthStatus = "up"
	HealthUnknown HealthStatus = "unknown"
	HealthBad     HealthStatus = "down"

	// Possible values for RuleType.
	RuleTypeRecording RuleType = "recording"
	RuleTypeAlerting  RuleType = "alerting"

	// Possible values for RuleHealth.
	// NOTE(review): unlike the other groups these are untyped string
	// constants, not RuleHealth — confirm whether typing them would break
	// callers before changing.
	RuleHealthGood    = "ok"
	RuleHealthUnknown = "unknown"
	RuleHealthBad     = "err"

	// Possible values for MetricType.
	MetricTypeCounter        MetricType = "counter"
	MetricTypeGauge          MetricType = "gauge"
	MetricTypeHistogram      MetricType = "histogram"
	MetricTypeGaugeHistogram MetricType = "gaugehistogram"
	MetricTypeSummary        MetricType = "summary"
	MetricTypeInfo           MetricType = "info"
	MetricTypeStateset       MetricType = "stateset"
	MetricTypeUnknown        MetricType = "unknown"
)
|
|
|
|
|
|
|
|
// Error is an error returned by the API.
type Error struct {
	Type   ErrorType // Machine-readable error category.
	Msg    string    // Human-readable error message.
	Detail string    // Additional context; not included in Error() output.
}
|
|
|
|
|
|
|
|
func (e *Error) Error() string {
|
2019-06-14 02:40:59 +03:00
|
|
|
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Range represents a sliced time range.
|
|
|
|
type Range struct {
|
|
|
|
// The boundaries of the time range.
|
|
|
|
Start, End time.Time
|
|
|
|
// The maximum time between two slices within the boundaries.
|
|
|
|
Step time.Duration
|
|
|
|
}
|
|
|
|
|
2017-04-26 00:15:57 +03:00
|
|
|
// API provides bindings for Prometheus's v1 API.
|
2017-04-20 00:24:14 +03:00
|
|
|
type API interface {
|
2019-03-25 11:23:28 +03:00
|
|
|
// Alerts returns a list of all active alerts.
|
2019-06-14 02:40:59 +03:00
|
|
|
Alerts(ctx context.Context) (AlertsResult, error)
|
2018-04-10 17:21:25 +03:00
|
|
|
// AlertManagers returns an overview of the current state of the Prometheus alert manager discovery.
|
2019-06-14 02:40:59 +03:00
|
|
|
AlertManagers(ctx context.Context) (AlertManagersResult, error)
|
2018-04-10 17:21:25 +03:00
|
|
|
// CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.
|
2019-06-14 02:40:59 +03:00
|
|
|
CleanTombstones(ctx context.Context) error
|
2018-04-10 17:21:25 +03:00
|
|
|
// Config returns the current Prometheus configuration.
|
2019-06-14 02:40:59 +03:00
|
|
|
Config(ctx context.Context) (ConfigResult, error)
|
2018-04-10 17:21:25 +03:00
|
|
|
// DeleteSeries deletes data for a selection of series in a time range.
|
2022-06-17 10:04:06 +03:00
|
|
|
DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error
|
2018-04-10 17:21:25 +03:00
|
|
|
// Flags returns the flag values that Prometheus was launched with.
|
2019-06-14 02:40:59 +03:00
|
|
|
Flags(ctx context.Context) (FlagsResult, error)
|
2021-02-04 03:44:12 +03:00
|
|
|
// LabelNames returns the unique label names present in the block in sorted order by given time range and matchers.
|
2022-06-17 10:04:06 +03:00
|
|
|
LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error)
|
2021-02-04 03:44:12 +03:00
|
|
|
// LabelValues performs a query for the values of the given label, time range and matchers.
|
2022-06-17 10:04:06 +03:00
|
|
|
LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error)
|
2017-04-20 00:24:14 +03:00
|
|
|
// Query performs a query for the given time.
|
2022-04-21 08:23:16 +03:00
|
|
|
Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error)
|
2017-04-20 00:24:14 +03:00
|
|
|
// QueryRange performs a query for the given range.
|
2022-04-21 08:23:16 +03:00
|
|
|
QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error)
|
2021-04-24 07:32:14 +03:00
|
|
|
// QueryExemplars performs a query for exemplars by the given query and time range.
|
2022-06-17 10:04:06 +03:00
|
|
|
QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error)
|
2021-03-06 10:46:30 +03:00
|
|
|
// Buildinfo returns various build information properties about the Prometheus server
|
|
|
|
Buildinfo(ctx context.Context) (BuildinfoResult, error)
|
2020-05-18 17:52:37 +03:00
|
|
|
// Runtimeinfo returns the various runtime information properties about the Prometheus server.
|
|
|
|
Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error)
|
2017-11-24 15:12:53 +03:00
|
|
|
// Series finds series by label matchers.
|
2022-06-17 10:04:06 +03:00
|
|
|
Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error)
|
2018-04-06 02:47:48 +03:00
|
|
|
// Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
|
|
|
|
// under the TSDB's data directory and returns the directory as response.
|
2019-06-14 02:40:59 +03:00
|
|
|
Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
|
2019-01-16 00:34:51 +03:00
|
|
|
// Rules returns a list of alerting and recording rules that are currently loaded.
|
2019-06-14 02:40:59 +03:00
|
|
|
Rules(ctx context.Context) (RulesResult, error)
|
2018-04-10 17:21:25 +03:00
|
|
|
// Targets returns an overview of the current state of the Prometheus target discovery.
|
2019-06-14 02:40:59 +03:00
|
|
|
Targets(ctx context.Context) (TargetsResult, error)
|
2019-05-28 15:27:09 +03:00
|
|
|
// TargetsMetadata returns metadata about metrics currently scraped by the target.
|
2022-06-17 10:04:06 +03:00
|
|
|
TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error)
|
2020-02-27 20:45:23 +03:00
|
|
|
// Metadata returns metadata about metrics currently scraped by the metric name.
|
2022-06-17 10:04:06 +03:00
|
|
|
Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error)
|
2020-06-24 14:35:25 +03:00
|
|
|
// TSDB returns the cardinality statistics.
|
|
|
|
TSDB(ctx context.Context) (TSDBResult, error)
|
2021-12-05 09:19:58 +03:00
|
|
|
// WalReplay returns the current replay status of the wal.
|
|
|
|
WalReplay(ctx context.Context) (WalReplayStatus, error)
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-03-25 11:23:28 +03:00
|
|
|
// AlertsResult contains the result from querying the alerts endpoint.
|
|
|
|
type AlertsResult struct {
|
|
|
|
Alerts []Alert `json:"alerts"`
|
|
|
|
}
|
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
// AlertManagersResult contains the result from querying the alertmanagers endpoint.
|
|
|
|
type AlertManagersResult struct {
|
|
|
|
Active []AlertManager `json:"activeAlertManagers"`
|
|
|
|
Dropped []AlertManager `json:"droppedAlertManagers"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// AlertManager models a configured Alert Manager.
|
|
|
|
type AlertManager struct {
|
2018-04-11 17:38:10 +03:00
|
|
|
URL string `json:"url"`
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// ConfigResult contains the result from querying the config endpoint.
|
|
|
|
type ConfigResult struct {
|
|
|
|
YAML string `json:"yaml"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// FlagsResult contains the result from querying the flag endpoint.
|
2018-04-11 17:38:10 +03:00
|
|
|
type FlagsResult map[string]string
|
2018-04-10 17:21:25 +03:00
|
|
|
|
2021-03-06 10:46:30 +03:00
|
|
|
// BuildinfoResult contains the results from querying the buildinfo endpoint.
|
|
|
|
type BuildinfoResult struct {
|
|
|
|
Version string `json:"version"`
|
|
|
|
Revision string `json:"revision"`
|
|
|
|
Branch string `json:"branch"`
|
|
|
|
BuildUser string `json:"buildUser"`
|
|
|
|
BuildDate string `json:"buildDate"`
|
|
|
|
GoVersion string `json:"goVersion"`
|
|
|
|
}
|
|
|
|
|
2020-05-18 17:52:37 +03:00
|
|
|
// RuntimeinfoResult contains the result from querying the runtimeinfo endpoint.
|
|
|
|
type RuntimeinfoResult struct {
|
2020-06-24 18:55:36 +03:00
|
|
|
StartTime time.Time `json:"startTime"`
|
|
|
|
CWD string `json:"CWD"`
|
|
|
|
ReloadConfigSuccess bool `json:"reloadConfigSuccess"`
|
|
|
|
LastConfigTime time.Time `json:"lastConfigTime"`
|
|
|
|
CorruptionCount int `json:"corruptionCount"`
|
|
|
|
GoroutineCount int `json:"goroutineCount"`
|
|
|
|
GOMAXPROCS int `json:"GOMAXPROCS"`
|
|
|
|
GOGC string `json:"GOGC"`
|
|
|
|
GODEBUG string `json:"GODEBUG"`
|
|
|
|
StorageRetention string `json:"storageRetention"`
|
2020-05-18 17:52:37 +03:00
|
|
|
}
|
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
// SnapshotResult contains the result from querying the snapshot endpoint.
|
|
|
|
type SnapshotResult struct {
|
|
|
|
Name string `json:"name"`
|
|
|
|
}
|
|
|
|
|
2019-01-16 00:34:51 +03:00
|
|
|
// RulesResult contains the result from querying the rules endpoint.
|
|
|
|
type RulesResult struct {
|
|
|
|
Groups []RuleGroup `json:"groups"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// RuleGroup models a rule group that contains a set of recording and alerting rules.
|
|
|
|
type RuleGroup struct {
|
|
|
|
Name string `json:"name"`
|
|
|
|
File string `json:"file"`
|
|
|
|
Interval float64 `json:"interval"`
|
|
|
|
Rules Rules `json:"rules"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recording and alerting rules are stored in the same slice to preserve the order
|
|
|
|
// that rules are returned in by the API.
|
|
|
|
//
|
|
|
|
// Rule types can be determined using a type switch:
|
2022-11-08 02:14:19 +03:00
|
|
|
//
|
|
|
|
// switch v := rule.(type) {
|
|
|
|
// case RecordingRule:
|
|
|
|
// fmt.Print("got a recording rule")
|
|
|
|
// case AlertingRule:
|
|
|
|
// fmt.Print("got a alerting rule")
|
|
|
|
// default:
|
|
|
|
// fmt.Printf("unknown rule type %s", v)
|
|
|
|
// }
|
2019-01-16 00:34:51 +03:00
|
|
|
type Rules []interface{}
|
|
|
|
|
|
|
|
// AlertingRule models a alerting rule.
|
|
|
|
type AlertingRule struct {
|
2021-04-11 13:17:26 +03:00
|
|
|
Name string `json:"name"`
|
|
|
|
Query string `json:"query"`
|
|
|
|
Duration float64 `json:"duration"`
|
|
|
|
Labels model.LabelSet `json:"labels"`
|
|
|
|
Annotations model.LabelSet `json:"annotations"`
|
|
|
|
Alerts []*Alert `json:"alerts"`
|
|
|
|
Health RuleHealth `json:"health"`
|
|
|
|
LastError string `json:"lastError,omitempty"`
|
|
|
|
EvaluationTime float64 `json:"evaluationTime"`
|
|
|
|
LastEvaluation time.Time `json:"lastEvaluation"`
|
|
|
|
State string `json:"state"`
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// RecordingRule models a recording rule.
|
|
|
|
type RecordingRule struct {
|
2021-04-11 13:17:26 +03:00
|
|
|
Name string `json:"name"`
|
|
|
|
Query string `json:"query"`
|
|
|
|
Labels model.LabelSet `json:"labels,omitempty"`
|
|
|
|
Health RuleHealth `json:"health"`
|
|
|
|
LastError string `json:"lastError,omitempty"`
|
|
|
|
EvaluationTime float64 `json:"evaluationTime"`
|
|
|
|
LastEvaluation time.Time `json:"lastEvaluation"`
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Alert models an active alert.
|
|
|
|
type Alert struct {
|
|
|
|
ActiveAt time.Time `json:"activeAt"`
|
|
|
|
Annotations model.LabelSet
|
|
|
|
Labels model.LabelSet
|
|
|
|
State AlertState
|
2019-05-28 14:57:09 +03:00
|
|
|
Value string
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
// TargetsResult contains the result from querying the targets endpoint.
|
|
|
|
type TargetsResult struct {
|
|
|
|
Active []ActiveTarget `json:"activeTargets"`
|
|
|
|
Dropped []DroppedTarget `json:"droppedTargets"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// ActiveTarget models an active Prometheus scrape target.
|
|
|
|
type ActiveTarget struct {
|
2021-04-11 18:50:23 +03:00
|
|
|
DiscoveredLabels map[string]string `json:"discoveredLabels"`
|
|
|
|
Labels model.LabelSet `json:"labels"`
|
|
|
|
ScrapePool string `json:"scrapePool"`
|
|
|
|
ScrapeURL string `json:"scrapeUrl"`
|
|
|
|
GlobalURL string `json:"globalUrl"`
|
|
|
|
LastError string `json:"lastError"`
|
|
|
|
LastScrape time.Time `json:"lastScrape"`
|
|
|
|
LastScrapeDuration float64 `json:"lastScrapeDuration"`
|
|
|
|
Health HealthStatus `json:"health"`
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// DroppedTarget models a dropped Prometheus scrape target.
|
|
|
|
type DroppedTarget struct {
|
2019-01-23 18:39:45 +03:00
|
|
|
DiscoveredLabels map[string]string `json:"discoveredLabels"`
|
2017-04-20 00:24:14 +03:00
|
|
|
}
|
|
|
|
|
2020-02-26 20:44:09 +03:00
|
|
|
// MetricMetadata models the metadata of a metric with its scrape target and name.
|
2019-05-28 15:27:09 +03:00
|
|
|
type MetricMetadata struct {
|
|
|
|
Target map[string]string `json:"target"`
|
|
|
|
Metric string `json:"metric,omitempty"`
|
|
|
|
Type MetricType `json:"type"`
|
|
|
|
Help string `json:"help"`
|
|
|
|
Unit string `json:"unit"`
|
|
|
|
}
|
|
|
|
|
2020-02-26 20:44:09 +03:00
|
|
|
// Metadata models the metadata of a metric.
|
|
|
|
type Metadata struct {
|
|
|
|
Type MetricType `json:"type"`
|
|
|
|
Help string `json:"help"`
|
|
|
|
Unit string `json:"unit"`
|
|
|
|
}
|
|
|
|
|
2015-08-19 16:33:59 +03:00
|
|
|
// queryResult contains result data for a query.
|
|
|
|
type queryResult struct {
|
|
|
|
Type model.ValueType `json:"resultType"`
|
|
|
|
Result interface{} `json:"result"`
|
|
|
|
|
|
|
|
// The decoded value.
|
|
|
|
v model.Value
|
|
|
|
}
|
|
|
|
|
2020-06-24 14:35:25 +03:00
|
|
|
// TSDBResult contains the result from querying the tsdb endpoint.
|
|
|
|
type TSDBResult struct {
|
2022-01-05 12:09:29 +03:00
|
|
|
HeadStats TSDBHeadStats `json:"headStats"`
|
|
|
|
SeriesCountByMetricName []Stat `json:"seriesCountByMetricName"`
|
|
|
|
LabelValueCountByLabelName []Stat `json:"labelValueCountByLabelName"`
|
|
|
|
MemoryInBytesByLabelName []Stat `json:"memoryInBytesByLabelName"`
|
|
|
|
SeriesCountByLabelValuePair []Stat `json:"seriesCountByLabelValuePair"`
|
|
|
|
}
|
|
|
|
|
|
|
|
// TSDBHeadStats contains TSDB stats
|
|
|
|
type TSDBHeadStats struct {
|
|
|
|
NumSeries int `json:"numSeries"`
|
|
|
|
NumLabelPairs int `json:"numLabelPairs"`
|
|
|
|
ChunkCount int `json:"chunkCount"`
|
|
|
|
MinTime int `json:"minTime"`
|
|
|
|
MaxTime int `json:"maxTime"`
|
2020-06-24 14:35:25 +03:00
|
|
|
}
|
|
|
|
|
2021-12-05 09:19:58 +03:00
|
|
|
// WalReplayStatus represents the wal replay status.
|
|
|
|
type WalReplayStatus struct {
|
|
|
|
Min int `json:"min"`
|
|
|
|
Max int `json:"max"`
|
|
|
|
Current int `json:"current"`
|
|
|
|
}
|
|
|
|
|
2020-06-24 14:35:25 +03:00
|
|
|
// Stat models information about statistic value.
|
|
|
|
type Stat struct {
|
|
|
|
Name string `json:"name"`
|
|
|
|
Value uint64 `json:"value"`
|
|
|
|
}
|
|
|
|
|
2019-01-16 00:34:51 +03:00
|
|
|
func (rg *RuleGroup) UnmarshalJSON(b []byte) error {
|
|
|
|
v := struct {
|
|
|
|
Name string `json:"name"`
|
|
|
|
File string `json:"file"`
|
|
|
|
Interval float64 `json:"interval"`
|
|
|
|
Rules []json.RawMessage `json:"rules"`
|
|
|
|
}{}
|
|
|
|
|
|
|
|
if err := json.Unmarshal(b, &v); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
rg.Name = v.Name
|
|
|
|
rg.File = v.File
|
|
|
|
rg.Interval = v.Interval
|
|
|
|
|
|
|
|
for _, rule := range v.Rules {
|
|
|
|
alertingRule := AlertingRule{}
|
|
|
|
if err := json.Unmarshal(rule, &alertingRule); err == nil {
|
|
|
|
rg.Rules = append(rg.Rules, alertingRule)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
recordingRule := RecordingRule{}
|
|
|
|
if err := json.Unmarshal(rule, &recordingRule); err == nil {
|
|
|
|
rg.Rules = append(rg.Rules, recordingRule)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return errors.New("failed to decode JSON into an alerting or recording rule")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *AlertingRule) UnmarshalJSON(b []byte) error {
|
|
|
|
v := struct {
|
|
|
|
Type string `json:"type"`
|
|
|
|
}{}
|
|
|
|
if err := json.Unmarshal(b, &v); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if v.Type == "" {
|
|
|
|
return errors.New("type field not present in rule")
|
|
|
|
}
|
|
|
|
if v.Type != string(RuleTypeAlerting) {
|
|
|
|
return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeAlerting), v.Type)
|
|
|
|
}
|
|
|
|
|
|
|
|
rule := struct {
|
2021-04-11 13:17:26 +03:00
|
|
|
Name string `json:"name"`
|
|
|
|
Query string `json:"query"`
|
|
|
|
Duration float64 `json:"duration"`
|
|
|
|
Labels model.LabelSet `json:"labels"`
|
|
|
|
Annotations model.LabelSet `json:"annotations"`
|
|
|
|
Alerts []*Alert `json:"alerts"`
|
|
|
|
Health RuleHealth `json:"health"`
|
|
|
|
LastError string `json:"lastError,omitempty"`
|
|
|
|
EvaluationTime float64 `json:"evaluationTime"`
|
|
|
|
LastEvaluation time.Time `json:"lastEvaluation"`
|
|
|
|
State string `json:"state"`
|
2019-01-16 00:34:51 +03:00
|
|
|
}{}
|
|
|
|
if err := json.Unmarshal(b, &rule); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
r.Health = rule.Health
|
|
|
|
r.Annotations = rule.Annotations
|
|
|
|
r.Name = rule.Name
|
|
|
|
r.Query = rule.Query
|
|
|
|
r.Alerts = rule.Alerts
|
|
|
|
r.Duration = rule.Duration
|
|
|
|
r.Labels = rule.Labels
|
|
|
|
r.LastError = rule.LastError
|
2021-04-11 13:17:26 +03:00
|
|
|
r.EvaluationTime = rule.EvaluationTime
|
|
|
|
r.LastEvaluation = rule.LastEvaluation
|
|
|
|
r.State = rule.State
|
2019-01-16 00:34:51 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *RecordingRule) UnmarshalJSON(b []byte) error {
|
|
|
|
v := struct {
|
|
|
|
Type string `json:"type"`
|
|
|
|
}{}
|
|
|
|
if err := json.Unmarshal(b, &v); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if v.Type == "" {
|
|
|
|
return errors.New("type field not present in rule")
|
|
|
|
}
|
|
|
|
if v.Type != string(RuleTypeRecording) {
|
|
|
|
return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeRecording), v.Type)
|
|
|
|
}
|
|
|
|
|
|
|
|
rule := struct {
|
2021-04-11 13:17:26 +03:00
|
|
|
Name string `json:"name"`
|
|
|
|
Query string `json:"query"`
|
|
|
|
Labels model.LabelSet `json:"labels,omitempty"`
|
|
|
|
Health RuleHealth `json:"health"`
|
|
|
|
LastError string `json:"lastError,omitempty"`
|
|
|
|
EvaluationTime float64 `json:"evaluationTime"`
|
|
|
|
LastEvaluation time.Time `json:"lastEvaluation"`
|
2019-01-16 00:34:51 +03:00
|
|
|
}{}
|
|
|
|
if err := json.Unmarshal(b, &rule); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
r.Health = rule.Health
|
|
|
|
r.Labels = rule.Labels
|
|
|
|
r.Name = rule.Name
|
|
|
|
r.LastError = rule.LastError
|
|
|
|
r.Query = rule.Query
|
2021-04-11 13:17:26 +03:00
|
|
|
r.EvaluationTime = rule.EvaluationTime
|
|
|
|
r.LastEvaluation = rule.LastEvaluation
|
2019-01-16 00:34:51 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-08-19 16:33:59 +03:00
|
|
|
func (qr *queryResult) UnmarshalJSON(b []byte) error {
|
|
|
|
v := struct {
|
|
|
|
Type model.ValueType `json:"resultType"`
|
|
|
|
Result json.RawMessage `json:"result"`
|
|
|
|
}{}
|
|
|
|
|
|
|
|
err := json.Unmarshal(b, &v)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch v.Type {
|
|
|
|
case model.ValScalar:
|
|
|
|
var sv model.Scalar
|
|
|
|
err = json.Unmarshal(v.Result, &sv)
|
|
|
|
qr.v = &sv
|
|
|
|
|
|
|
|
case model.ValVector:
|
|
|
|
var vv model.Vector
|
|
|
|
err = json.Unmarshal(v.Result, &vv)
|
|
|
|
qr.v = vv
|
|
|
|
|
|
|
|
case model.ValMatrix:
|
|
|
|
var mv model.Matrix
|
|
|
|
err = json.Unmarshal(v.Result, &mv)
|
|
|
|
qr.v = mv
|
|
|
|
|
|
|
|
default:
|
|
|
|
err = fmt.Errorf("unexpected value type %q", v.Type)
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-04-24 07:32:14 +03:00
|
|
|
// Exemplar is additional information associated with a time series.
type Exemplar struct {
	// Labels attached to the exemplar (e.g. a trace ID).
	Labels model.LabelSet `json:"labels"`
	// Value of the exemplar.
	Value model.SampleValue `json:"value"`
	// Timestamp at which the exemplar was recorded.
	Timestamp model.Time `json:"timestamp"`
}
|
|
|
|
|
|
|
|
// ExemplarQueryResult groups the exemplars found for one series.
type ExemplarQueryResult struct {
	// SeriesLabels identify the series the exemplars belong to.
	SeriesLabels model.LabelSet `json:"seriesLabels"`
	// Exemplars recorded for that series.
	Exemplars []Exemplar `json:"exemplars"`
}
|
|
|
|
|
2017-04-20 00:24:14 +03:00
|
|
|
// NewAPI returns a new API for the client.
|
2016-08-01 01:11:57 +03:00
|
|
|
//
|
2017-04-20 00:24:14 +03:00
|
|
|
// It is safe to use the returned API from multiple goroutines.
|
|
|
|
func NewAPI(c api.Client) API {
|
2019-12-11 00:53:38 +03:00
|
|
|
return &httpAPI{
|
|
|
|
client: &apiClientImpl{
|
|
|
|
client: c,
|
|
|
|
},
|
|
|
|
}
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
|
|
|
|
2017-04-20 00:24:14 +03:00
|
|
|
// httpAPI implements API on top of an apiClient.
type httpAPI struct {
	// client performs the HTTP requests and envelope handling.
	client apiClient
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) {
|
2019-03-25 11:23:28 +03:00
|
|
|
u := h.client.URL(epAlerts, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return AlertsResult{}, err
|
2019-03-25 11:23:28 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return AlertsResult{}, err
|
2019-03-25 11:23:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res AlertsResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2019-03-25 11:23:28 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epAlertManagers, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return AlertManagersResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return AlertManagersResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res AlertManagersResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) CleanTombstones(ctx context.Context) error {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epCleanTombstones, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, _, _, err = h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
return err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epConfig, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return ConfigResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return ConfigResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res ConfigResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime, endTime time.Time) error {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epDeleteSeries, nil)
|
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
for _, m := range matches {
|
|
|
|
q.Add("match[]", m)
|
|
|
|
}
|
|
|
|
|
2023-03-21 21:58:57 +03:00
|
|
|
if !startTime.IsZero() {
|
|
|
|
q.Set("start", formatTime(startTime))
|
|
|
|
}
|
|
|
|
if !endTime.IsZero() {
|
|
|
|
q.Set("end", formatTime(endTime))
|
|
|
|
}
|
2018-04-10 17:21:25 +03:00
|
|
|
|
|
|
|
u.RawQuery = q.Encode()
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, _, _, err = h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
return err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epFlags, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return FlagsResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return FlagsResult{}, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res FlagsResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2021-03-06 10:46:30 +03:00
|
|
|
func (h *httpAPI) Buildinfo(ctx context.Context) (BuildinfoResult, error) {
|
|
|
|
u := h.client.URL(epBuildinfo, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return BuildinfoResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return BuildinfoResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res BuildinfoResult
|
|
|
|
return res, json.Unmarshal(body, &res)
|
|
|
|
}
|
|
|
|
|
2020-05-18 17:52:37 +03:00
|
|
|
func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) {
|
|
|
|
u := h.client.URL(epRuntimeinfo, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return RuntimeinfoResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return RuntimeinfoResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res RuntimeinfoResult
|
|
|
|
return res, json.Unmarshal(body, &res)
|
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime, endTime time.Time) ([]string, Warnings, error) {
|
2019-06-14 17:49:58 +03:00
|
|
|
u := h.client.URL(epLabels, nil)
|
2020-06-11 14:02:32 +03:00
|
|
|
q := u.Query()
|
2023-03-21 21:58:57 +03:00
|
|
|
if !startTime.IsZero() {
|
|
|
|
q.Set("start", formatTime(startTime))
|
|
|
|
}
|
|
|
|
if !endTime.IsZero() {
|
|
|
|
q.Set("end", formatTime(endTime))
|
|
|
|
}
|
2020-12-29 21:49:42 +03:00
|
|
|
for _, m := range matches {
|
|
|
|
q.Add("match[]", m)
|
|
|
|
}
|
2020-06-11 14:02:32 +03:00
|
|
|
|
2023-04-16 15:41:34 +03:00
|
|
|
_, body, w, err := h.client.DoGetFallback(ctx, u, q)
|
2019-06-14 17:49:58 +03:00
|
|
|
if err != nil {
|
2019-06-17 21:27:57 +03:00
|
|
|
return nil, w, err
|
2019-06-14 17:49:58 +03:00
|
|
|
}
|
|
|
|
var labelNames []string
|
2019-06-17 21:27:57 +03:00
|
|
|
return labelNames, w, json.Unmarshal(body, &labelNames)
|
2019-06-14 17:49:58 +03:00
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime, endTime time.Time) (model.LabelValues, Warnings, error) {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epLabelValues, map[string]string{"name": label})
|
2020-06-11 16:45:46 +03:00
|
|
|
q := u.Query()
|
2023-03-21 21:58:57 +03:00
|
|
|
if !startTime.IsZero() {
|
|
|
|
q.Set("start", formatTime(startTime))
|
|
|
|
}
|
|
|
|
if !endTime.IsZero() {
|
|
|
|
q.Set("end", formatTime(endTime))
|
|
|
|
}
|
2020-12-29 21:49:42 +03:00
|
|
|
for _, m := range matches {
|
|
|
|
q.Add("match[]", m)
|
|
|
|
}
|
2020-06-11 16:45:46 +03:00
|
|
|
|
2020-06-20 01:57:25 +03:00
|
|
|
u.RawQuery = q.Encode()
|
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-17 21:27:57 +03:00
|
|
|
return nil, nil, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, w, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
2019-06-17 21:27:57 +03:00
|
|
|
return nil, w, err
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
var labelValues model.LabelValues
|
2019-06-17 21:27:57 +03:00
|
|
|
return labelValues, w, json.Unmarshal(body, &labelValues)
|
2018-04-10 17:21:25 +03:00
|
|
|
}
|
|
|
|
|
2022-04-21 08:23:16 +03:00
|
|
|
// apiOptions collects optional per-call settings for Query and QueryRange.
type apiOptions struct {
	// timeout is the server-side query evaluation timeout; zero means unset.
	timeout time.Duration
}

// Option mutates apiOptions; build one with helpers such as WithTimeout.
type Option func(c *apiOptions)
|
|
|
|
|
2022-04-29 08:34:50 +03:00
|
|
|
// WithTimeout can be used to provide an optional query evaluation timeout for Query and QueryRange.
|
|
|
|
// https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
|
2022-04-21 08:23:16 +03:00
|
|
|
func WithTimeout(timeout time.Duration) Option {
|
|
|
|
return func(o *apiOptions) {
|
|
|
|
o.timeout = timeout
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time, opts ...Option) (model.Value, Warnings, error) {
|
2017-04-20 00:24:14 +03:00
|
|
|
u := h.client.URL(epQuery, nil)
|
2015-08-19 16:33:59 +03:00
|
|
|
q := u.Query()
|
|
|
|
|
2022-04-21 08:23:16 +03:00
|
|
|
opt := &apiOptions{}
|
|
|
|
for _, o := range opts {
|
|
|
|
o(opt)
|
|
|
|
}
|
|
|
|
|
|
|
|
d := opt.timeout
|
|
|
|
if d > 0 {
|
|
|
|
q.Set("timeout", d.String())
|
|
|
|
}
|
|
|
|
|
2015-08-19 16:33:59 +03:00
|
|
|
q.Set("query", query)
|
2017-11-17 00:39:52 +03:00
|
|
|
if !ts.IsZero() {
|
2019-07-07 22:40:02 +03:00
|
|
|
q.Set("time", formatTime(ts))
|
2017-11-17 00:39:52 +03:00
|
|
|
}
|
2015-08-19 16:33:59 +03:00
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, warnings, err := h.client.DoGetFallback(ctx, u, q)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, warnings, err
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var qres queryResult
|
2022-08-03 07:30:51 +03:00
|
|
|
return qres.v, warnings, json.Unmarshal(body, &qres)
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
|
|
|
|
2022-04-21 08:23:16 +03:00
|
|
|
func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range, opts ...Option) (model.Value, Warnings, error) {
|
2017-04-20 00:24:14 +03:00
|
|
|
u := h.client.URL(epQueryRange, nil)
|
2015-08-19 16:33:59 +03:00
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
q.Set("query", query)
|
2019-07-07 22:40:02 +03:00
|
|
|
q.Set("start", formatTime(r.Start))
|
|
|
|
q.Set("end", formatTime(r.End))
|
2019-07-09 17:31:01 +03:00
|
|
|
q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64))
|
2015-08-19 16:33:59 +03:00
|
|
|
|
2022-04-21 08:23:16 +03:00
|
|
|
opt := &apiOptions{}
|
|
|
|
for _, o := range opts {
|
|
|
|
o(opt)
|
|
|
|
}
|
|
|
|
|
|
|
|
d := opt.timeout
|
|
|
|
if d > 0 {
|
|
|
|
q.Set("timeout", d.String())
|
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, warnings, err := h.client.DoGetFallback(ctx, u, q)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, warnings, err
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var qres queryResult
|
|
|
|
|
2022-08-03 07:30:51 +03:00
|
|
|
return qres.v, warnings, json.Unmarshal(body, &qres)
|
2015-08-19 16:33:59 +03:00
|
|
|
}
|
2017-04-01 01:42:19 +03:00
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) Series(ctx context.Context, matches []string, startTime, endTime time.Time) ([]model.LabelSet, Warnings, error) {
|
2017-11-24 15:12:53 +03:00
|
|
|
u := h.client.URL(epSeries, nil)
|
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
for _, m := range matches {
|
|
|
|
q.Add("match[]", m)
|
|
|
|
}
|
|
|
|
|
2023-03-21 21:58:57 +03:00
|
|
|
if !startTime.IsZero() {
|
|
|
|
q.Set("start", formatTime(startTime))
|
|
|
|
}
|
|
|
|
if !endTime.IsZero() {
|
|
|
|
q.Set("end", formatTime(endTime))
|
|
|
|
}
|
2017-11-24 15:12:53 +03:00
|
|
|
|
2023-04-16 15:41:34 +03:00
|
|
|
_, body, warnings, err := h.client.DoGetFallback(ctx, u, q)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
2019-06-15 00:28:28 +03:00
|
|
|
return nil, warnings, err
|
2017-11-24 15:12:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var mset []model.LabelSet
|
2019-06-15 00:28:28 +03:00
|
|
|
return mset, warnings, json.Unmarshal(body, &mset)
|
2017-11-24 15:12:53 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) {
|
2018-04-06 02:47:48 +03:00
|
|
|
u := h.client.URL(epSnapshot, nil)
|
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
q.Set("skip_head", strconv.FormatBool(skipHead))
|
|
|
|
|
|
|
|
u.RawQuery = q.Encode()
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return SnapshotResult{}, err
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return SnapshotResult{}, err
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res SnapshotResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) {
|
2019-01-16 00:34:51 +03:00
|
|
|
u := h.client.URL(epRules, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return RulesResult{}, err
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return RulesResult{}, err
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res RulesResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2019-01-16 00:34:51 +03:00
|
|
|
}
|
|
|
|
|
2019-06-14 02:40:59 +03:00
|
|
|
func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
|
2018-04-10 17:21:25 +03:00
|
|
|
u := h.client.URL(epTargets, nil)
|
2018-04-06 02:47:48 +03:00
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
2018-04-06 02:47:48 +03:00
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return TargetsResult{}, err
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return TargetsResult{}, err
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2018-04-10 17:21:25 +03:00
|
|
|
var res TargetsResult
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2019-05-28 15:27:09 +03:00
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget, metric, limit string) ([]MetricMetadata, error) {
|
2019-05-28 15:27:09 +03:00
|
|
|
u := h.client.URL(epTargetsMetadata, nil)
|
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
q.Set("match_target", matchTarget)
|
|
|
|
q.Set("metric", metric)
|
|
|
|
q.Set("limit", limit)
|
|
|
|
|
|
|
|
u.RawQuery = q.Encode()
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
2019-06-14 02:40:59 +03:00
|
|
|
return nil, err
|
2019-05-28 15:27:09 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
2019-06-14 02:40:59 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2019-05-28 15:27:09 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var res []MetricMetadata
|
2019-06-14 02:40:59 +03:00
|
|
|
return res, json.Unmarshal(body, &res)
|
2018-04-06 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) Metadata(ctx context.Context, metric, limit string) (map[string][]Metadata, error) {
|
2020-02-27 20:45:23 +03:00
|
|
|
u := h.client.URL(epMetadata, nil)
|
2020-02-26 20:44:09 +03:00
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
q.Set("metric", metric)
|
|
|
|
q.Set("limit", limit)
|
|
|
|
|
|
|
|
u.RawQuery = q.Encode()
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res map[string][]Metadata
|
|
|
|
return res, json.Unmarshal(body, &res)
|
|
|
|
}
|
|
|
|
|
2020-06-24 14:35:25 +03:00
|
|
|
func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) {
|
|
|
|
u := h.client.URL(epTSDB, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return TSDBResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return TSDBResult{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res TSDBResult
|
|
|
|
return res, json.Unmarshal(body, &res)
|
2021-04-24 07:32:14 +03:00
|
|
|
}
|
|
|
|
|
2021-12-05 09:19:58 +03:00
|
|
|
func (h *httpAPI) WalReplay(ctx context.Context) (WalReplayStatus, error) {
|
|
|
|
u := h.client.URL(epWalReplay, nil)
|
|
|
|
|
|
|
|
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return WalReplayStatus{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
_, body, _, err := h.client.Do(ctx, req)
|
|
|
|
if err != nil {
|
|
|
|
return WalReplayStatus{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res WalReplayStatus
|
|
|
|
return res, json.Unmarshal(body, &res)
|
|
|
|
}
|
|
|
|
|
2022-06-17 10:04:06 +03:00
|
|
|
func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime, endTime time.Time) ([]ExemplarQueryResult, error) {
|
2021-04-24 07:32:14 +03:00
|
|
|
u := h.client.URL(epQueryExemplars, nil)
|
|
|
|
q := u.Query()
|
|
|
|
|
|
|
|
q.Set("query", query)
|
2023-03-21 21:58:57 +03:00
|
|
|
if !startTime.IsZero() {
|
|
|
|
q.Set("start", formatTime(startTime))
|
|
|
|
}
|
|
|
|
if !endTime.IsZero() {
|
|
|
|
q.Set("end", formatTime(endTime))
|
|
|
|
}
|
2020-06-24 14:35:25 +03:00
|
|
|
|
2023-04-16 15:41:34 +03:00
|
|
|
_, body, _, err := h.client.DoGetFallback(ctx, u, q)
|
2021-04-24 07:32:14 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var res []ExemplarQueryResult
|
|
|
|
return res, json.Unmarshal(body, &res)
|
2020-06-24 14:35:25 +03:00
|
|
|
}
|
|
|
|
|
2019-12-11 18:04:00 +03:00
|
|
|
// Warnings is an array of non critical errors
type Warnings []string
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
// apiClient wraps a regular client and processes successful API responses.
// Successful also includes responses that errored at the API level.
type apiClient interface {
	// URL constructs the URL for an endpoint with path arguments.
	URL(ep string, args map[string]string) *url.URL
	// Do executes the request and unwraps the API response envelope.
	Do(context.Context, *http.Request) (*http.Response, []byte, Warnings, error)
	// DoGetFallback POSTs form-encoded args, retrying as GET on 405/501.
	DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error)
}
|
|
|
|
|
|
|
|
// apiClientImpl is the default apiClient backed by an api.Client.
type apiClientImpl struct {
	client api.Client
}
|
|
|
|
|
2017-04-20 00:24:14 +03:00
|
|
|
// apiResponse is the JSON envelope every v1 API endpoint wraps its payload
// in: a status, the raw data, and optional error/warning details.
type apiResponse struct {
	// Status is "success" or "error".
	Status string `json:"status"`
	// Data is the endpoint-specific payload, left raw for the caller.
	Data json.RawMessage `json:"data"`
	// ErrorType and Error describe the failure when Status is "error".
	ErrorType ErrorType `json:"errorType"`
	Error     string    `json:"error"`
	// Warnings are non-fatal notices attached to an otherwise usable result.
	Warnings []string `json:"warnings,omitempty"`
}
|
|
|
|
|
2018-05-31 17:15:36 +03:00
|
|
|
func apiError(code int) bool {
|
|
|
|
// These are the codes that Prometheus sends when it returns an error.
|
2020-08-22 14:32:48 +03:00
|
|
|
return code == http.StatusUnprocessableEntity || code == http.StatusBadRequest
|
2018-05-31 17:15:36 +03:00
|
|
|
}
|
|
|
|
|
2018-10-25 20:44:21 +03:00
|
|
|
func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) {
|
|
|
|
switch resp.StatusCode / 100 {
|
|
|
|
case 4:
|
|
|
|
return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode)
|
|
|
|
case 5:
|
|
|
|
return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode)
|
|
|
|
}
|
|
|
|
return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode)
|
|
|
|
}
|
|
|
|
|
2019-12-11 00:53:38 +03:00
|
|
|
// URL delegates endpoint URL construction to the wrapped client.
func (h *apiClientImpl) URL(ep string, args map[string]string) *url.URL {
	return h.client.URL(ep, args)
}
|
|
|
|
|
2019-12-11 18:04:00 +03:00
|
|
|
// Do executes the request via the wrapped client and unwraps the v1 API
// response envelope. It returns the inner Data payload, any warnings, and
// an *Error when either the HTTP status or the envelope indicates failure.
func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) {
	resp, body, err := h.client.Do(ctx, req)
	if err != nil {
		return resp, body, nil, err
	}

	code := resp.StatusCode

	// Non-2xx codes that are not known API-error codes carry no JSON
	// envelope; classify by status and return the raw body as detail.
	if code/100 != 2 && !apiError(code) {
		errorType, errorMsg := errorTypeAndMsgFor(resp)
		return resp, body, nil, &Error{
			Type:   errorType,
			Msg:    errorMsg,
			Detail: string(body),
		}
	}

	var result apiResponse

	// 204 has no body by definition; anything else must decode as the
	// standard envelope.
	if http.StatusNoContent != code {
		if jsonErr := json.Unmarshal(body, &result); jsonErr != nil {
			return resp, body, nil, &Error{
				Type: ErrBadResponse,
				Msg:  jsonErr.Error(),
			}
		}
	}

	// An error status code paired with a "success" envelope is a
	// contradiction the caller should know about.
	if apiError(code) && result.Status == "success" {
		err = &Error{
			Type: ErrBadResponse,
			Msg:  "inconsistent body for response code",
		}
	}

	// Envelope-level errors are surfaced with the server-supplied type/msg.
	if result.Status == "error" {
		err = &Error{
			Type: result.ErrorType,
			Msg:  result.Error,
		}
	}

	// Data and Warnings are returned even alongside an error so callers can
	// inspect partial results.
	return resp, []byte(result.Data), result.Warnings, err
}
|
2019-07-07 22:40:02 +03:00
|
|
|
|
2020-09-10 14:14:07 +03:00
|
|
|
// DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it
// will fallback to a GET request.
func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) {
	// First attempt: POST with the args as a form-encoded body.
	encodedArgs := args.Encode()
	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(encodedArgs))
	if err != nil {
		return nil, nil, nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	// Following comment originates from https://pkg.go.dev/net/http#Transport
	// Transport only retries a request upon encountering a network error if the request is
	// idempotent and either has no body or has its Request.GetBody defined. HTTP requests
	// are considered idempotent if they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or
	// if their Header map contains an "Idempotency-Key" or "X-Idempotency-Key" entry. If the
	// idempotency key value is a zero-length slice, the request is treated as idempotent but
	// the header is not sent on the wire.
	req.Header["Idempotency-Key"] = nil

	resp, body, warnings, err := h.Do(ctx, req)
	// Servers that don't accept POST on this endpoint answer 405/501;
	// retry once as a GET with the args moved into the query string.
	if resp != nil && (resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotImplemented) {
		u.RawQuery = encodedArgs
		req, err = http.NewRequest(http.MethodGet, u.String(), nil)
		if err != nil {
			return nil, nil, warnings, err
		}
		return h.Do(ctx, req)
	}
	return resp, body, warnings, err
}
|
|
|
|
|
2019-07-07 22:40:02 +03:00
|
|
|
func formatTime(t time.Time) string {
|
2019-07-09 19:23:52 +03:00
|
|
|
return strconv.FormatFloat(float64(t.Unix())+float64(t.Nanosecond())/1e9, 'f', -1, 64)
|
2019-07-07 22:40:02 +03:00
|
|
|
}
|