diff --git a/.golangci.yml b/.golangci.yml index 8548774b762..d379b4db70d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -40,10 +40,6 @@ issues: exclude-files: # Skip autogenerated files. - ^.*\.(pb|y)\.go$ - exclude-dirs: - # Copied it from a different source. - - storage/remote/otlptranslator/prometheusremotewrite - - storage/remote/otlptranslator/prometheus exclude-rules: - linters: - errcheck diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ac0351d426..a9072422a2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## unreleased * [CHANGE] Make setting out-of-order native histograms feature (`--enable-feature=ooo-native-histograms`) a no-op. Out-of-order native histograms are now always enabled when `out_of_order_time_window` is greater than zero and `--enable-feature=native-histograms` is set. #16207 +* [FEATURE] OTLP translate: Add feature flag for optionally translating OTel explicit bucket histograms into native histograms with custom buckets. #15850 * [ENHANCEMENT] TSDB: add `prometheus_tsdb_wal_replay_unknown_refs_total` and `prometheus_tsdb_wbl_replay_unknown_refs_total` metrics to track unknown series references during WAL/WBL replay. #16166 * [BUGFIX] TSDB: fix unknown series errors and possible lost data during WAL replay when series are removed from the head due to inactivity and reappear before the next WAL checkpoint. #16060 diff --git a/config/config.go b/config/config.go index a38080f22a7..780b49a64e4 100644 --- a/config/config.go +++ b/config/config.go @@ -1443,6 +1443,7 @@ type OTLPConfig struct { PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` + ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. diff --git a/config/config_test.go b/config/config_test.go index 295e835f1ec..be21a62c070 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1563,6 +1563,20 @@ func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) { }) } +func TestOTLPConvertHistogramsToNHCB(t *testing.T) { + t.Run("good config", func(t *testing.T) { + want, err := LoadFile(filepath.Join("testdata", "otlp_convert_histograms_to_nhcb.good.yml"), false, promslog.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + require.NoError(t, err) + var got Config + require.NoError(t, yaml.UnmarshalStrict(out, &got)) + + require.True(t, got.OTLPConfig.ConvertHistogramsToNHCB) + }) +} + func TestOTLPAllowUTF8(t *testing.T) { t.Run("good config", func(t *testing.T) { fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml") diff --git a/config/testdata/otlp_convert_histograms_to_nhcb.good.yml b/config/testdata/otlp_convert_histograms_to_nhcb.good.yml new file mode 100644 index 00000000000..1462cafe9b3 --- /dev/null +++ b/config/testdata/otlp_convert_histograms_to_nhcb.good.yml @@ -0,0 +1,2 @@ +otlp: + convert_histograms_to_nhcb: true diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 3550094ff27..b6cdbe51bd4 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -187,6 +187,8 @@ otlp: # resource attributes to the "target_info" metric, on top of converting # them into the "instance" and "job" labels. 
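For readers wiring this up, a minimal sketch of reading the new option after loading a configuration — only `config.LoadFile` (with the same signature the new test uses) and the `ConvertHistogramsToNHCB` field come from this change; the file path and the surrounding `main` wiring are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Load a config that sets the new flag (path assumed relative to the repo root).
	cfg, err := config.LoadFile("config/testdata/otlp_convert_histograms_to_nhcb.good.yml", false, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// The OTLP write path would consult this flag to decide whether explicit bucket
	// histograms get translated into native histograms with custom buckets (NHCB).
	fmt.Println("convert_histograms_to_nhcb:", cfg.OTLPConfig.ConvertHistogramsToNHCB) // true
}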
[ keep_identifying_resource_attributes: | default = false] + # Configures optional translation of OTLP explicit bucket histograms into native histograms with custom buckets. + [ convert_histograms_to_nhcb: | default = false] # Settings related to the remote read feature. remote_read: diff --git a/go.mod b/go.mod index 2d2c4ffa12c..6731251e7f2 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( github.com/vultr/govultr/v2 v2.17.2 go.opentelemetry.io/collector/component v1.27.0 go.opentelemetry.io/collector/consumer v1.27.0 - go.opentelemetry.io/collector/pdata v1.27.0 + go.opentelemetry.io/collector/pdata v1.28.1 go.opentelemetry.io/collector/processor v0.121.0 go.opentelemetry.io/collector/semconv v0.121.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 @@ -80,7 +80,7 @@ require ( go.uber.org/multierr v1.11.0 golang.org/x/oauth2 v0.27.0 golang.org/x/sync v0.12.0 - golang.org/x/sys v0.30.0 + golang.org/x/sys v0.31.0 golang.org/x/text v0.23.0 google.golang.org/api v0.224.0 google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb @@ -181,6 +181,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -193,11 +194,11 @@ require ( go.opentelemetry.io/collector/pipeline v0.121.0 // indirect go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.35.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/mod v0.23.0 // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/term v0.29.0 // indirect + golang.org/x/net v0.37.0 // indirect + golang.org/x/term v0.30.0 // indirect golang.org/x/time v0.10.0 // indirect golang.org/x/tools v0.30.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect diff --git a/go.sum b/go.sum index 5eb7b8c81d8..28e6b109c1d 100644 --- a/go.sum +++ b/go.sum @@ -424,6 +424,8 @@ github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/ github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= +github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07 h1:YaJ1JqyKGIUFIMUpMeT22yewZMXiTt5sLgWG1D/m4Yc= +github.com/prometheus/otlptranslator v0.0.0-20250320144820-d800c8b0eb07/go.mod h1:ZO/4EUanXL7wbvfMHcS+rq9sCBxICdaU8RBFkVg5wv0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -496,8 +498,8 @@ go.opentelemetry.io/collector/consumer/consumertest v0.121.0 h1:EIJPAXQY0w9j1k/e go.opentelemetry.io/collector/consumer/consumertest v0.121.0/go.mod h1:Hmj+TizzsLU0EmS2n/rJYScOybNmm3mrAjis6ed7qTw= go.opentelemetry.io/collector/consumer/xconsumer v0.121.0 
h1:/FJ7L6+G++FvktXc/aBnnYDIKLoYsWLh0pKbvzFFwF8= go.opentelemetry.io/collector/consumer/xconsumer v0.121.0/go.mod h1:KKy8Qg/vOnyseoi7A9/x1a1oEqSmf0WBHkJFlnQH0Ow= -go.opentelemetry.io/collector/pdata v1.27.0 h1:66yI7FYkUDia74h48Fd2/KG2Vk8DxZnGw54wRXykCEU= -go.opentelemetry.io/collector/pdata v1.27.0/go.mod h1:18e8/xDZsqyj00h/5HM5GLdJgBzzG9Ei8g9SpNoiMtI= +go.opentelemetry.io/collector/pdata v1.28.1 h1:ORl5WLpQJvjzBVpHu12lqKMdcf/qDBwRXMcUubhybiQ= +go.opentelemetry.io/collector/pdata v1.28.1/go.mod h1:asKE8MD/4SOKz1mCrGdAz4VO2U2HUNg8A6094uK7pq0= go.opentelemetry.io/collector/pdata/pprofile v0.121.0 h1:DFBelDRsZYxEaSoxSRtseAazsHJfqfC/Yl64uPicl2g= go.opentelemetry.io/collector/pdata/pprofile v0.121.0/go.mod h1:j/fjrd7ybJp/PXkba92QLzx7hykUVmU8x/WJvI2JWSg= go.opentelemetry.io/collector/pdata/testdata v0.121.0 h1:FFz+rdb7o6JRZ82Zmp6WKEdKnEMaoF3jLb7F1F21ijg= @@ -549,8 +551,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -567,8 +569,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -604,11 +606,11 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/prompb/codec.go b/prompb/codec.go index ad30cd5e7b5..b2574fd9e1f 100644 --- a/prompb/codec.go +++ b/prompb/codec.go @@ -90,6 +90,7 @@ func (h Histogram) ToIntHistogram() *histogram.Histogram { PositiveBuckets: h.GetPositiveDeltas(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeDeltas(), + CustomValues: h.CustomValues, } } @@ -109,6 +110,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { PositiveBuckets: h.GetPositiveCounts(), NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), NegativeBuckets: h.GetNegativeCounts(), + CustomValues: h.CustomValues, } } // Conversion from integer histogram. diff --git a/prompb/types.pb.go b/prompb/types.pb.go index 93883daa133..2f5dc773502 100644 --- a/prompb/types.pb.go +++ b/prompb/types.pb.go @@ -402,10 +402,13 @@ type Histogram struct { ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Histogram) Reset() { *m = Histogram{} } @@ -588,6 +591,13 @@ func (m *Histogram) GetTimestamp() int64 { return 0 } +func (m *Histogram) GetCustomValues() []float64 { + if m != nil { + return m.CustomValues + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
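To illustrate what the two `CustomValues: h.CustomValues` lines added to `prompb/codec.go` above enable, here is a hedged sketch (not part of the diff; the bounds and counts are made up) of a custom-bucket histogram crossing from the internal protobuf type to the model type:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// An NHCB with explicit bucket bounds 0.1 and 0.5; the +Inf bucket is implicit.
	h := prompb.Histogram{
		Count:          &prompb.Histogram_CountInt{CountInt: 11},
		Sum:            3.5,
		Schema:         histogram.CustomBucketsSchema, // the custom-bucket schema (-53)
		PositiveSpans:  []prompb.BucketSpan{{Offset: 0, Length: 3}},
		PositiveDeltas: []int64{2, 3, -1}, // absolute bucket counts 2, 5, 4
		CustomValues:   []float64{0.1, 0.5},
	}
	mh := h.ToIntHistogram()
	// Without the codec change above, the custom bucket bounds would be dropped here.
	fmt.Println(mh.Schema, mh.CustomValues) // -53 [0.1 0.5]
}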
func (*Histogram) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1146,76 +1156,77 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 1092 bytes of a gzipped FileDescriptorProto + // 1114 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, - 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0xd9, 0xa0, 0x71, 0x54, 0x16, 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, - 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, - 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, - 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, - 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, - 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, - 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, - 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, - 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, - 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, - 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, - 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, - 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, - 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, - 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, - 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, - 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, - 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, - 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, - 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, - 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, - 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, - 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, - 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, - 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, - 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, - 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, - 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, - 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, - 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 
0x28, 0x9e, 0xf0, - 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, - 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, - 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, - 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, - 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, - 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, - 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, - 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, - 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, - 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 0x27, 0xa5, - 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, - 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, - 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, - 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, - 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, - 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, - 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, - 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, - 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, - 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, - 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, - 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, - 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, - 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, - 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, - 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, - 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, - 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, - 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, - 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, - 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, - 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, - 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, - 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, - 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, - 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, - 0x13, 
0x09, 0x00, 0x00, + 0xb2, 0x92, 0xdb, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0xc8, 0x65, 0xfb, 0x02, 0x45, 0xe1, 0xab, 0x5e, 0xf6, 0x11, + 0x8a, 0x1d, 0x92, 0x22, 0x15, 0xa7, 0x40, 0xd3, 0xbb, 0x9d, 0x6f, 0xbe, 0x99, 0xf9, 0xb8, 0x3b, + 0x3b, 0x4b, 0xa8, 0xc9, 0x55, 0xc4, 0x45, 0x27, 0x8a, 0x43, 0x19, 0x12, 0x88, 0xe2, 0xd0, 0xe7, + 0x72, 0xce, 0x97, 0xe2, 0xfe, 0xce, 0x2c, 0x9c, 0x85, 0x08, 0xef, 0xa9, 0x55, 0xc2, 0x70, 0x7f, + 0xd6, 0xa1, 0xd9, 0xe3, 0x32, 0xf6, 0x26, 0x3d, 0x2e, 0xd9, 0x94, 0x49, 0x46, 0x9e, 0x40, 0x49, + 0xe5, 0x70, 0xb4, 0x96, 0xd6, 0x6e, 0xee, 0x3f, 0xea, 0xe4, 0x39, 0x3a, 0x9b, 0xcc, 0xd4, 0x1c, + 0xad, 0x22, 0x4e, 0x31, 0x84, 0x7c, 0x0a, 0xc4, 0x47, 0x6c, 0x7c, 0xc5, 0x7c, 0x6f, 0xb1, 0x1a, + 0x07, 0xcc, 0xe7, 0x8e, 0xde, 0xd2, 0xda, 0x16, 0xb5, 0x13, 0xcf, 0x31, 0x3a, 0xfa, 0xcc, 0xe7, + 0x84, 0x40, 0x69, 0xce, 0x17, 0x91, 0x53, 0x42, 0x3f, 0xae, 0x15, 0xb6, 0x0c, 0x3c, 0xe9, 0x94, + 0x13, 0x4c, 0xad, 0xdd, 0x15, 0x40, 0x5e, 0x89, 0xd4, 0xa0, 0x72, 0xd1, 0xff, 0xba, 0x3f, 0xf8, + 0xb6, 0x6f, 0x6f, 0x29, 0xe3, 0x68, 0x70, 0xd1, 0x1f, 0x75, 0xa9, 0xad, 0x11, 0x0b, 0xca, 0x27, + 0x07, 0x17, 0x27, 0x5d, 0x5b, 0x27, 0x0d, 0xb0, 0x4e, 0xcf, 0x86, 0xa3, 0xc1, 0x09, 0x3d, 0xe8, + 0xd9, 0x06, 0x21, 0xd0, 0x44, 0x4f, 0x8e, 0x95, 0x54, 0xe8, 0xf0, 0xa2, 0xd7, 0x3b, 0xa0, 0x2f, + 0xec, 0x32, 0xa9, 0x42, 0xe9, 0xac, 0x7f, 0x3c, 0xb0, 0x4d, 0x52, 0x87, 0xea, 0x70, 0x74, 0x30, + 0xea, 0x0e, 0xbb, 0x23, 0xbb, 0xe2, 0x3e, 0x05, 0x73, 0xc8, 0xfc, 0x68, 0xc1, 0xc9, 0x0e, 0x94, + 0x5f, 0xb1, 0xc5, 0x32, 0xd9, 0x16, 0x8d, 0x26, 0x06, 0x79, 0x1f, 0x2c, 0xe9, 0xf9, 0x5c, 0x48, + 0xe6, 0x47, 0xf8, 0x9d, 0x06, 0xcd, 0x01, 0x37, 0x84, 0x6a, 0xf7, 0x9a, 0xfb, 0xd1, 0x82, 0xc5, + 0x64, 0x0f, 0xcc, 0x05, 0xbb, 0xe4, 0x0b, 0xe1, 0x68, 0x2d, 0xa3, 0x5d, 0xdb, 0xdf, 0x2e, 0xee, + 0xeb, 0xb9, 0xf2, 0x1c, 0x96, 0x5e, 0xff, 0xfe, 0x70, 0x8b, 0xa6, 0xb4, 0xbc, 0xa0, 0xfe, 0x8f, + 0x05, 0x8d, 0x37, 0x0b, 0xfe, 0x55, 0x06, 0xeb, 0xd4, 0x13, 0x32, 0x9c, 0xc5, 0xcc, 0x27, 0x0f, + 0xc0, 0x9a, 0x84, 0xcb, 0x40, 0x8e, 0xbd, 0x40, 0xa2, 0xec, 0xd2, 0xe9, 0x16, 0xad, 0x22, 0x74, + 0x16, 0x48, 0xf2, 0x01, 0xd4, 0x12, 0xf7, 0xd5, 0x22, 0x64, 0x32, 0x29, 0x73, 0xba, 0x45, 0x01, + 0xc1, 0x63, 0x85, 0x11, 0x1b, 0x0c, 0xb1, 0xf4, 0xb1, 0x8e, 0x46, 0xd5, 0x92, 0xdc, 0x03, 0x53, + 0x4c, 0xe6, 0xdc, 0x67, 0x78, 0x6a, 0xdb, 0x34, 0xb5, 0xc8, 0x23, 0x68, 0xfe, 0xc8, 0xe3, 0x70, + 0x2c, 0xe7, 0x31, 0x17, 0xf3, 0x70, 0x31, 0xc5, 0x13, 0xd4, 0x68, 0x43, 0xa1, 0xa3, 0x0c, 0x24, + 0x1f, 0xa5, 0xb4, 0x5c, 0x97, 0x89, 0xba, 0x34, 0x5a, 0x57, 0xf8, 0x51, 0xa6, 0xed, 0x13, 0xb0, + 0x0b, 0xbc, 0x44, 0x60, 0x05, 0x05, 0x6a, 0xb4, 0xb9, 0x66, 0x26, 0x22, 0x8f, 0xa0, 0x19, 0xf0, + 0x19, 0x93, 0xde, 0x2b, 0x3e, 0x16, 0x11, 0x0b, 0x84, 0x53, 0xc5, 0x1d, 0xbe, 0x57, 0xdc, 0xe1, + 0xc3, 0xe5, 0xe4, 0x25, 0x97, 0xc3, 0x88, 0x05, 0xe9, 0x36, 0x37, 0xb2, 0x18, 0x85, 0x09, 0xf2, + 0x31, 0xdc, 0x59, 0x27, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0x75, 0xee, + 0x67, 0x88, 0x6e, 0x10, 0x51, 0x9d, 0x70, 0xa0, 0x65, 0xb4, 0xb5, 0x9c, 0x88, 0xd2, 0x84, 0x92, + 0x15, 0x85, 0xc2, 0x2b, 0xc8, 0xaa, 0xfd, 0x1b, 0x59, 0x59, 0xcc, 0x5a, 0xd6, 0x3a, 0x49, 0x2a, + 0xab, 0x9e, 0xc8, 0xca, 0xe0, 0x5c, 0xd6, 0x9a, 0x98, 0xca, 0x6a, 0x24, 0xb2, 0x32, 0x38, 0x95, + 0xf5, 0x15, 0x40, 0xcc, 0x05, 0x97, 0xe3, 0xb9, 0xda, 0xfd, 0x26, 0xde, 0xf1, 0x87, 0x45, 0x49, + 0xeb, 0xfe, 0xe9, 0x50, 0xc5, 0x3b, 0xf5, 0x02, 0x49, 0xad, 0x38, 0x5b, 0x6e, 0x36, 0xe0, 0x9d, + 0x37, 
0x1a, 0x90, 0x7c, 0x08, 0x8d, 0xc9, 0x52, 0xc8, 0xd0, 0x1f, 0x63, 0xbb, 0x0a, 0xc7, 0x46, + 0x11, 0xf5, 0x04, 0xfc, 0x06, 0x31, 0xf7, 0x73, 0xb0, 0xd6, 0xa9, 0x37, 0xaf, 0x73, 0x05, 0x8c, + 0x17, 0xdd, 0xa1, 0xad, 0x11, 0x13, 0xf4, 0xfe, 0xc0, 0xd6, 0xf3, 0x2b, 0x6d, 0x1c, 0x56, 0xa0, + 0x8c, 0x1f, 0x76, 0x58, 0x07, 0xc8, 0x7b, 0xc3, 0x7d, 0x0a, 0x90, 0x6f, 0xa2, 0x6a, 0xcf, 0xf0, + 0xea, 0x4a, 0xf0, 0xa4, 0xdf, 0xb7, 0x69, 0x6a, 0x29, 0x7c, 0xc1, 0x83, 0x99, 0x9c, 0x63, 0x9b, + 0x37, 0x68, 0x6a, 0xb9, 0x7f, 0x6a, 0x00, 0x23, 0xcf, 0xe7, 0x43, 0x1e, 0x7b, 0x5c, 0xbc, 0xfb, + 0x25, 0xdd, 0x87, 0x8a, 0xc0, 0xf9, 0x20, 0x1c, 0x1d, 0x23, 0x48, 0x31, 0x22, 0x19, 0x1d, 0x69, + 0x48, 0x46, 0x24, 0x5f, 0x80, 0xc5, 0xd3, 0xa9, 0x20, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, + 0xc8, 0x48, 0xe3, 0x72, 0x32, 0xf9, 0x12, 0x60, 0x9e, 0x9d, 0x8e, 0x70, 0x4a, 0x18, 0x7a, 0xf7, + 0xad, 0x67, 0x97, 0xc6, 0x16, 0xe8, 0xee, 0x63, 0x28, 0xe3, 0x17, 0xa8, 0x11, 0x8b, 0x63, 0x59, + 0x4b, 0x46, 0xac, 0x5a, 0x6f, 0x0e, 0x1b, 0x2b, 0x1d, 0x36, 0xee, 0x13, 0x30, 0xcf, 0x93, 0xef, + 0x7c, 0xd7, 0x8d, 0x71, 0x7f, 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xf1, + 0xc6, 0xab, 0xf2, 0xe0, 0x56, 0x7c, 0xca, 0xeb, 0x14, 0x5e, 0x93, 0x4c, 0xa8, 0xfe, 0x36, 0xa1, + 0x46, 0x51, 0x68, 0x1b, 0x4a, 0xf8, 0x36, 0x98, 0xa0, 0x77, 0x9f, 0x27, 0x7d, 0xd4, 0xef, 0x3e, + 0x4f, 0xfa, 0x88, 0xaa, 0xf7, 0x40, 0x01, 0xb4, 0x6b, 0x1b, 0xee, 0x2f, 0x9a, 0x6a, 0x3e, 0x36, + 0x55, 0xbd, 0x27, 0xc8, 0xff, 0xa1, 0x22, 0x24, 0x8f, 0xc6, 0xbe, 0x40, 0x5d, 0x06, 0x35, 0x95, + 0xd9, 0x13, 0xaa, 0xf4, 0xd5, 0x32, 0x98, 0x64, 0xa5, 0xd5, 0x9a, 0xbc, 0x07, 0x55, 0x21, 0x59, + 0x2c, 0x15, 0x3b, 0x99, 0xbc, 0x15, 0xb4, 0x7b, 0x82, 0xdc, 0x05, 0x93, 0x07, 0xd3, 0x31, 0x1e, + 0x8a, 0x72, 0x94, 0x79, 0x30, 0xed, 0x09, 0x72, 0x1f, 0xaa, 0xb3, 0x38, 0x5c, 0x46, 0x5e, 0x30, + 0x73, 0xca, 0x2d, 0xa3, 0x6d, 0xd1, 0xb5, 0x4d, 0x9a, 0xa0, 0x5f, 0xae, 0x70, 0xfa, 0x55, 0xa9, + 0x7e, 0xb9, 0x52, 0xd9, 0x63, 0x16, 0xcc, 0xb8, 0x4a, 0x52, 0x49, 0xb2, 0xa3, 0xdd, 0x13, 0xee, + 0x6f, 0x1a, 0x94, 0x8f, 0xe6, 0xcb, 0xe0, 0x25, 0xd9, 0x85, 0x9a, 0xef, 0x05, 0x63, 0x75, 0xdf, + 0x72, 0xcd, 0x96, 0xef, 0x05, 0xaa, 0x87, 0x7b, 0x02, 0xfd, 0xec, 0x7a, 0xed, 0x4f, 0x1f, 0x24, + 0x9f, 0x5d, 0xa7, 0xfe, 0x4e, 0x7a, 0x08, 0x06, 0x1e, 0xc2, 0xfd, 0xe2, 0x21, 0x60, 0x81, 0x4e, + 0x37, 0x98, 0x84, 0x53, 0x2f, 0x98, 0xe5, 0x27, 0xa0, 0x1e, 0x7a, 0xfc, 0xaa, 0x3a, 0xc5, 0xb5, + 0xfb, 0x0c, 0xaa, 0x19, 0xeb, 0xd6, 0xe5, 0xfd, 0x6e, 0xa0, 0xde, 0xe1, 0x8d, 0xc7, 0x57, 0x27, + 0xff, 0x83, 0x3b, 0xc7, 0xe7, 0x83, 0x83, 0xd1, 0xb8, 0xf0, 0x22, 0xbb, 0x3f, 0x40, 0x03, 0x2b, + 0xf2, 0xe9, 0x7f, 0xbd, 0x7a, 0x7b, 0x60, 0x4e, 0x54, 0x86, 0xec, 0xe6, 0x6d, 0xdf, 0xfa, 0x9a, + 0x2c, 0x20, 0xa1, 0x1d, 0xee, 0xbc, 0xbe, 0xd9, 0xd5, 0x7e, 0xbd, 0xd9, 0xd5, 0xfe, 0xb8, 0xd9, + 0xd5, 0xbe, 0x37, 0x15, 0x3b, 0xba, 0xbc, 0x34, 0xf1, 0x3f, 0xe8, 0xb3, 0xbf, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x8b, 0x63, 0xd6, 0x2e, 0x38, 0x09, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -1385,6 +1396,18 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } 
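The two tag bytes written at the end of the block above are the protobuf key for the new field: `custom_values` is field 16 with the length-delimited wire type, so the key is 16<<3 | 2 = 130, whose varint encoding is 0x82 0x01 (emitted in reverse because `MarshalToSizedBuffer` fills the buffer back to front). The same 2-byte key is why the `Size()` change further down adds 2 rather than 1 for this field. A throwaway check of that arithmetic:

package main

import "fmt"

func main() {
	const fieldNumber, wireType = 16, 2 // custom_values, length-delimited (packed doubles)
	key := fieldNumber<<3 | wireType    // 130
	// Varint encoding: low 7 bits first with the continuation bit set, then the remaining bits.
	fmt.Printf("% x\n", []byte{byte(key&0x7f | 0x80), byte(key >> 7)}) // 82 01
}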
if m.Timestamp != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) i-- @@ -1397,30 +1420,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.PositiveCounts) > 0 { for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) i-- dAtA[i] = 0x6a } if len(m.PositiveDeltas) > 0 { - var j2 int - dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + var j3 int + dAtA5 := make([]byte, len(m.PositiveDeltas)*10) for _, num := range m.PositiveDeltas { - x3 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x3 >= 1<<7 { - dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) - j2++ - x3 >>= 7 - } - dAtA4[j2] = uint8(x3) - j2++ + x4 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x4 >= 1<<7 { + dAtA5[j3] = uint8(uint64(x4)&0x7f | 0x80) + j3++ + x4 >>= 7 + } + dAtA5[j3] = uint8(x4) + j3++ } - i -= j2 - copy(dAtA[i:], dAtA4[:j2]) - i = encodeVarintTypes(dAtA, i, uint64(j2)) + i -= j3 + copy(dAtA[i:], dAtA5[:j3]) + i = encodeVarintTypes(dAtA, i, uint64(j3)) i-- dAtA[i] = 0x62 } @@ -1440,30 +1463,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCounts) > 0 { for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) } i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) i-- dAtA[i] = 0x52 } if len(m.NegativeDeltas) > 0 { - var j6 int - dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + var j7 int + dAtA9 := make([]byte, len(m.NegativeDeltas)*10) for _, num := range m.NegativeDeltas { - x7 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x7 >= 1<<7 { - dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) - j6++ - x7 >>= 7 - } - dAtA8[j6] = uint8(x7) - j6++ + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ } - i -= j6 - copy(dAtA[i:], dAtA8[:j6]) - i = encodeVarintTypes(dAtA, i, uint64(j6)) + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintTypes(dAtA, i, uint64(j7)) i-- dAtA[i] = 0x4a } @@ -2133,6 +2156,9 @@ func (m *Histogram) Size() (n int) { if m.Timestamp != 0 { n += 1 + sovTypes(uint64(m.Timestamp)) } + if len(m.CustomValues) > 0 { + n += 2 + sovTypes(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3248,6 +3274,60 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { break } } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + 
postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/prompb/types.proto b/prompb/types.proto index 61fc1e0143e..8bc69d5b106 100644 --- a/prompb/types.proto +++ b/prompb/types.proto @@ -107,6 +107,10 @@ message Histogram { // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. int64 timestamp = 15; + + // custom_values are not part of the specification, DO NOT use in remote write clients. + // Used only for converting from OpenTelemetry to Prometheus internally. + repeated double custom_values = 16; } // A BucketSpan defines a number of consecutive buckets with their diff --git a/storage/remote/otlptranslator/prometheus/metric_name_builder.go b/storage/remote/otlptranslator/prometheus/metric_name_builder.go deleted file mode 100644 index 8b5ea2a0464..00000000000 --- a/storage/remote/otlptranslator/prometheus/metric_name_builder.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
- -package prometheus - -import ( - "regexp" - "slices" - "strings" - "unicode" - - "go.opentelemetry.io/collector/pdata/pmetric" -) - -// The map to translate OTLP units to Prometheus units -// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html -// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units) -// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units -// OpenMetrics specification for units: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#units-and-base-units -var unitMap = map[string]string{ - // Time - "d": "days", - "h": "hours", - "min": "minutes", - "s": "seconds", - "ms": "milliseconds", - "us": "microseconds", - "ns": "nanoseconds", - - // Bytes - "By": "bytes", - "KiBy": "kibibytes", - "MiBy": "mebibytes", - "GiBy": "gibibytes", - "TiBy": "tibibytes", - "KBy": "kilobytes", - "MBy": "megabytes", - "GBy": "gigabytes", - "TBy": "terabytes", - - // SI - "m": "meters", - "V": "volts", - "A": "amperes", - "J": "joules", - "W": "watts", - "g": "grams", - - // Misc - "Cel": "celsius", - "Hz": "hertz", - "1": "", - "%": "percent", -} - -// The map that translates the "per" unit -// Example: s => per second (singular) -var perUnitMap = map[string]string{ - "s": "second", - "m": "minute", - "h": "hour", - "d": "day", - "w": "week", - "mo": "month", - "y": "year", -} - -// BuildCompliantMetricName builds a Prometheus-compliant metric name for the specified metric. -// -// Metric name is prefixed with specified namespace and underscore (if any). -// Namespace is not cleaned up. Make sure specified namespace follows Prometheus -// naming convention. -// -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, -// https://prometheus.io/docs/practices/naming/#metric-and-label-naming -// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { - // Full normalization following standard Prometheus naming conventions - if addMetricSuffixes { - return normalizeName(metric, namespace) - } - - // Simple case (no full normalization, no units, etc.). - metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") - - // Namespace? - if namespace != "" { - return namespace + "_" + metricName - } - - // Metric name starts with a digit? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) { - metricName = "_" + metricName - } - - return metricName -} - -var ( - nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) - multipleUnderscoresRE = regexp.MustCompile(`__+`) -) - -// Build a normalized name for the specified metric. -func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. 
- // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens := strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, - ) - - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) - nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix)) - - // Append _total for Counters - if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { - nameTokens = append(removeItem(nameTokens, "total"), "total") - } - - // Append _ratio for metrics with unit "1" - // Some OTel receivers improperly use unit "1" for counters of objects - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions - // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY - // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) - if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { - nameTokens = append(removeItem(nameTokens, "ratio"), "ratio") - } - - // Namespace? - if namespace != "" { - nameTokens = append([]string{namespace}, nameTokens...) - } - - // Build the string from the tokens, separated with underscores - normalizedName := strings.Join(nameTokens, "_") - - // Metric name cannot start with a digit, so prefix it with "_" in this case - if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { - normalizedName = "_" + normalizedName - } - - return normalizedName -} - -// addUnitTokens will add the suffixes to the nameTokens if they are not already present. -// It will also remove trailing underscores from the main suffix to avoid double underscores -// when joining the tokens. -// -// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just -// 'per_', it will be entirely removed. -func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string { - if slices.Contains(nameTokens, mainUnitSuffix) { - mainUnitSuffix = "" - } - - if perUnitSuffix == "per_" { - perUnitSuffix = "" - } else { - perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_") - if slices.Contains(nameTokens, perUnitSuffix) { - perUnitSuffix = "" - } - } - - if perUnitSuffix != "" { - mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_") - } - - if mainUnitSuffix != "" { - nameTokens = append(nameTokens, mainUnitSuffix) - } - if perUnitSuffix != "" { - nameTokens = append(nameTokens, perUnitSuffix) - } - return nameTokens -} - -// cleanUpUnit cleans up unit so it matches model.LabelNameRE. -func cleanUpUnit(unit string) string { - // Multiple consecutive underscores are replaced with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. 
- return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( - nonMetricNameCharRE.ReplaceAllString(unit, "_"), - "_", - ), "_") -} - -// Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit -// Returns the specified unit if not found in unitMap -func unitMapGetOrDefault(unit string) string { - if promUnit, ok := unitMap[unit]; ok { - return promUnit - } - return unit -} - -// Retrieve the Prometheus "per" unit corresponding to the specified "per" unit -// Returns the specified unit if not found in perUnitMap -func perUnitMapGetOrDefault(perUnit string) string { - if promPerUnit, ok := perUnitMap[perUnit]; ok { - return promPerUnit - } - return perUnit -} - -// Remove the specified value from the slice -func removeItem(slice []string, value string) []string { - newSlice := make([]string, 0, len(slice)) - for _, sliceEntry := range slice { - if sliceEntry != value { - newSlice = append(newSlice, sliceEntry) - } - } - return newSlice -} - -// BuildMetricName builds a valid metric name but without following Prometheus naming conventions. -// It doesn't do any character transformation, it only prefixes the metric name with the namespace, if any, -// and adds metric type suffixes, e.g. "_total" for counters and unit suffixes. -// -// Differently from BuildCompliantMetricName, it doesn't check for the presence of unit and type suffixes. -// If "addMetricSuffixes" is true, it will add them anyway. -// -// Please use BuildCompliantMetricName for a metric name that follows Prometheus naming conventions. -func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { - metricName := metric.Name() - - if namespace != "" { - metricName = namespace + "_" + metricName - } - - if addMetricSuffixes { - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) - if mainUnitSuffix != "" { - metricName = metricName + "_" + mainUnitSuffix - } - if perUnitSuffix != "" { - metricName = metricName + "_" + perUnitSuffix - } - - // Append _total for Counters - if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { - metricName = metricName + "_total" - } - - // Append _ratio for metrics with unit "1" - // Some OTel receivers improperly use unit "1" for counters of objects - // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions - // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY - // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) - if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { - metricName = metricName + "_ratio" - } - } - return metricName -} - -// buildUnitSuffixes builds the main and per unit suffixes for the specified unit -// but doesn't do any special character transformation to accommodate Prometheus naming conventions. -// Removing trailing underscores or appending suffixes is done in the caller. 
-func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) { - // Split unit at the '/' if any - unitTokens := strings.SplitN(unit, "/", 2) - - if len(unitTokens) > 0 { - // Main unit - // Update if not blank and doesn't contain '{}' - mainUnitOTel := strings.TrimSpace(unitTokens[0]) - if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel) - } - - // Per unit - // Update if not blank and doesn't contain '{}' - if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOTel := strings.TrimSpace(unitTokens[1]) - if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel) - } - if perUnitSuffix != "" { - perUnitSuffix = "per_" + perUnitSuffix - } - } - } - - return mainUnitSuffix, perUnitSuffix -} diff --git a/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go b/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go deleted file mode 100644 index 1c4a6124c44..00000000000 --- a/storage/remote/otlptranslator/prometheus/metric_name_builder_test.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) -} - -func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) - require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) -} - -func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) -} - -func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) - // The normal metric name character set is allowed in non-standard units. 
- require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "")) -} - -func TestNonStandardUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) -} - -func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) -} - -func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) - require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) -} - -func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) - require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) - require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) -} - -func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) -} - -func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) - require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) - // The normal metric name character set is allowed in non-standard units. 
- require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "")) - - t.Run("invalid per unit", func(t *testing.T) { - require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "")) - }) -} - -func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) - require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) -} - -func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) - require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) -} - -func TestOTelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) - require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) - require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) - require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) - require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) - require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) - require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) - require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) - require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) - require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) - require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) - require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) - require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) - require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) - require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) - require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) - require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) - require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) - require.Equal(t, "nginx_requests", 
normalizeName(createGauge("nginx.requests", "requests"), "")) - require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) - require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) - require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) -} - -func TestNamespace(t *testing.T) { - require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) - require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) -} - -func TestCleanUpUnit(t *testing.T) { - require.Equal(t, "", cleanUpUnit("")) - require.Equal(t, "a_b", cleanUpUnit("a b")) - require.Equal(t, "hello_world", cleanUpUnit("hello, world")) - require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2")) - require.Equal(t, "1000", cleanUpUnit("$1000")) - require.Equal(t, "", cleanUpUnit("*+$^=)")) -} - -func TestUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", unitMapGetOrDefault("")) - require.Equal(t, "seconds", unitMapGetOrDefault("s")) - require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) -} - -func TestPerUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", perUnitMapGetOrDefault("")) - require.Equal(t, "second", perUnitMapGetOrDefault("s")) - require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) -} - -func TestBuildUnitSuffixes(t *testing.T) { - tests := []struct { - unit string - expectedMain string - expectedPer string - }{ - {"", "", ""}, - {"s", "seconds", ""}, - {"By/s", "bytes", "per_second"}, - {"requests/m", "requests", "per_minute"}, - {"{invalid}/second", "", "per_second"}, - {"bytes/{invalid}", "bytes", ""}, - } - - for _, test := range tests { - mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(test.unit) - require.Equal(t, test.expectedMain, mainUnitSuffix) - require.Equal(t, test.expectedPer, perUnitSuffix) - } -} - -func TestAddUnitTokens(t *testing.T) { - tests := []struct { - nameTokens []string - mainUnitSuffix string - perUnitSuffix string - expected []string - }{ - {[]string{}, "", "", []string{}}, - {[]string{"token1"}, "main", "", []string{"token1", "main"}}, - {[]string{"token1"}, "", "per", []string{"token1", "per"}}, - {[]string{"token1"}, "main", "per", []string{"token1", "main", "per"}}, - {[]string{"token1", "per"}, "main", "per", []string{"token1", "per", "main"}}, - {[]string{"token1", "main"}, "main", "per", []string{"token1", "main", "per"}}, - {[]string{"token1"}, "main_", "per", []string{"token1", "main", "per"}}, - {[]string{"token1"}, "main_unit", "per_seconds_", []string{"token1", "main_unit", "per_seconds"}}, // trailing underscores are removed - {[]string{"token1"}, "main_unit", "per_", []string{"token1", "main_unit"}}, // 'per_' is removed entirely - } - - for _, test := range tests { - result := addUnitTokens(test.nameTokens, test.mainUnitSuffix, test.perUnitSuffix) - require.Equal(t, test.expected, result) - } -} - -func TestRemoveItem(t *testing.T) { - require.Equal(t, []string{}, removeItem([]string{}, "test")) - require.Equal(t, []string{}, removeItem([]string{}, "")) - require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) - require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) - require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) - require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) - require.Equal(t, []string{"b", "c"}, 
removeItem([]string{"a", "b", "c"}, "a")) -} - -func TestBuildCompliantMetricNameWithSuffixes(t *testing.T) { - require.Equal(t, "system_io_bytes_total", BuildCompliantMetricName(createCounter("system.io", "By"), "", true)) - require.Equal(t, "system_network_io_bytes_total", BuildCompliantMetricName(createCounter("network.io", "By"), "system", true)) - require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", ""), "", true)) - require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, ":foo::bar_total", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", true)) - // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo_bar_ratio", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", true)) - // Slashes in units are converted. - require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", true)) - require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) - // Removes non aplhanumerical characters from units, but leaves colons. - require.Equal(t, "temperature_:C", BuildCompliantMetricName(createGauge("temperature", "%*()°:C"), "", true)) -} - -func TestBuildCompliantMetricNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "system_network_io", BuildCompliantMetricName(createCounter("network.io", "By"), "system", false)) - require.Equal(t, "system_network_I_O", BuildCompliantMetricName(createCounter("network (I/O)", "By"), "system", false)) - require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", "By"), "", false)) - require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", false)) - require.Equal(t, ":foo::bar", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", false)) - require.Equal(t, "foo_bar", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", false)) - require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", false)) - require.Equal(t, "metric_with___foreign_characters", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) -} - -func TestBuildMetricNameWithSuffixes(t *testing.T) { - require.Equal(t, "system.io_bytes_total", BuildMetricName(createCounter("system.io", "By"), "", true)) - require.Equal(t, "system_network.io_bytes_total", BuildMetricName(createCounter("network.io", "By"), "system", true)) - require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", true)) - require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) - require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", true)) - require.Equal(t, ":foo::bar_total", BuildMetricName(createCounter(":foo::bar", ""), "", true)) - // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo.bar_ratio", BuildMetricName(createGauge("foo.bar", "1"), "", true)) - // Slashes in units are converted. 
- require.Equal(t, "system.io_foo_per_bar_total", BuildMetricName(createCounter("system.io", "foo/bar"), "", true)) - require.Equal(t, "metric_with_字符_foreign_characters_total", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true)) - require.Equal(t, "temperature_%*()°C", BuildMetricName(createGauge("temperature", "%*()°C"), "", true)) // Keeps the all characters in unit - // Tests below show weird interactions that users can have with the metric names. - // With BuildMetricName we don't check if units/type suffixes are already present in the metric name, we always add them. - require.Equal(t, "system_io_seconds_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", true)) - require.Equal(t, "system_io_total_total", BuildMetricName(createCounter("system_io_total", ""), "", true)) -} - -func TestBuildMetricNameWithoutSuffixes(t *testing.T) { - require.Equal(t, "system.io", BuildMetricName(createCounter("system.io", "By"), "", false)) - require.Equal(t, "system_network.io", BuildMetricName(createCounter("network.io", "By"), "system", false)) - require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", false)) - require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) - require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", false)) - require.Equal(t, ":foo::bar", BuildMetricName(createCounter(":foo::bar", ""), "", false)) - // Gauges with unit 1 are considered ratios. - require.Equal(t, "foo.bar", BuildMetricName(createGauge("foo.bar", "1"), "", false)) - require.Equal(t, "metric_with_字符_foreign_characters", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false)) - require.Equal(t, "system_io_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", false)) - require.Equal(t, "system_io_total", BuildMetricName(createCounter("system_io_total", ""), "", false)) -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go deleted file mode 100644 index b51b5e945a3..00000000000 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import ( - "strings" - "unicode" - - "github.com/prometheus/prometheus/util/strutil" -) - -// Normalizes the specified label to follow Prometheus label names standard. -// -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. 
-// -// Labels that start with non-letter rune will be prefixed with "key_". -// An exception is made for double-underscores which are allowed. -func NormalizeLabel(label string) string { - // Trivial case. - if len(label) == 0 { - return label - } - - label = strutil.SanitizeLabelName(label) - - // If label starts with a number, prepend with "key_". - if unicode.IsDigit(rune(label[0])) { - label = "key_" + label - } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") { - label = "key" + label - } - - return label -} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label_test.go b/storage/remote/otlptranslator/prometheus/normalize_label_test.go deleted file mode 100644 index 77538ce8e60..00000000000 --- a/storage/remote/otlptranslator/prometheus/normalize_label_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNormalizeLabel(t *testing.T) { - tests := []struct { - label string - expected string - }{ - {"", ""}, - {"label:with:colons", "label_with_colons"}, - {"LabelWithCapitalLetters", "LabelWithCapitalLetters"}, - {"label!with&special$chars)", "label_with_special_chars_"}, - {"label_with_foreign_characters_字符", "label_with_foreign_characters___"}, - {"label.with.dots", "label_with_dots"}, - {"123label", "key_123label"}, - {"_label_starting_with_underscore", "key_label_starting_with_underscore"}, - {"__label_starting_with_2underscores", "__label_starting_with_2underscores"}, - } - - for i, test := range tests { - t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { - result := NormalizeLabel(test.label) - require.Equal(t, test.expected, result) - }) - } -} diff --git a/storage/remote/otlptranslator/prometheus/testutils_test.go b/storage/remote/otlptranslator/prometheus/testutils_test.go deleted file mode 100644 index 363328c571f..00000000000 --- a/storage/remote/otlptranslator/prometheus/testutils_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
- -package prometheus - -import ( - "go.opentelemetry.io/collector/pdata/pmetric" -) - -var ilm pmetric.ScopeMetrics - -func init() { - - metrics := pmetric.NewMetrics() - resourceMetrics := metrics.ResourceMetrics().AppendEmpty() - ilm = resourceMetrics.ScopeMetrics().AppendEmpty() - -} - -// Returns a new Metric of type "Gauge" with specified name and unit -func createGauge(name string, unit string) pmetric.Metric { - gauge := ilm.Metrics().AppendEmpty() - gauge.SetName(name) - gauge.SetUnit(unit) - gauge.SetEmptyGauge() - return gauge -} - -// Returns a new Metric of type Monotonic Sum with specified name and unit -func createCounter(name string, unit string) pmetric.Metric { - counter := ilm.Metrics().AppendEmpty() - counter.SetEmptySum().SetIsMonotonic(true) - counter.SetName(name) - counter.SetUnit(unit) - return counter -} diff --git a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/storage/remote/otlptranslator/prometheus/unit_to_ucum.go deleted file mode 100644 index 39a42734d76..00000000000 --- a/storage/remote/otlptranslator/prometheus/unit_to_ucum.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/unit_to_ucum.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. - -package prometheus - -import "strings" - -var wordToUCUM = map[string]string{ - // Time - "days": "d", - "hours": "h", - "minutes": "min", - "seconds": "s", - "milliseconds": "ms", - "microseconds": "us", - "nanoseconds": "ns", - - // Bytes - "bytes": "By", - "kibibytes": "KiBy", - "mebibytes": "MiBy", - "gibibytes": "GiBy", - "tibibytes": "TiBy", - "kilobytes": "KBy", - "megabytes": "MBy", - "gigabytes": "GBy", - "terabytes": "TBy", - - // SI - "meters": "m", - "volts": "V", - "amperes": "A", - "joules": "J", - "watts": "W", - "grams": "g", - - // Misc - "celsius": "Cel", - "hertz": "Hz", - "ratio": "1", - "percent": "%", -} - -// The map that translates the "per" unit -// Example: per_second (singular) => /s -var perWordToUCUM = map[string]string{ - "second": "s", - "minute": "m", - "hour": "h", - "day": "d", - "week": "w", - "month": "mo", - "year": "y", -} - -// UnitWordToUCUM converts english unit words to UCUM units: -// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol -// It also handles rates, such as meters_per_second, by translating the first -// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between. 
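// Worked example (editorial, not part of the applied patch): the removed UnitWordToUCUM combines
// the two maps above, so e.g. UnitWordToUCUM("meters_per_second") yields "m/s" and
// UnitWordToUCUM("ratio") yields "1"; words not found in either map are passed through unchanged.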
-func UnitWordToUCUM(unit string) string { - unitTokens := strings.SplitN(unit, "_per_", 2) - if len(unitTokens) == 0 { - return "" - } - ucumUnit := wordToUCUMOrDefault(unitTokens[0]) - if len(unitTokens) > 1 && unitTokens[1] != "" { - ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1]) - } - return ucumUnit -} - -// wordToUCUMOrDefault retrieves the Prometheus "basic" unit corresponding to -// the specified "basic" unit. Returns the specified unit if not found in -// wordToUCUM. -func wordToUCUMOrDefault(unit string) string { - if promUnit, ok := wordToUCUM[unit]; ok { - return promUnit - } - return unit -} - -// perWordToUCUMOrDefault retrieve the Prometheus "per" unit corresponding to -// the specified "per" unit. Returns the specified unit if not found in perWordToUCUM. -func perWordToUCUMOrDefault(perUnit string) string { - if promPerUnit, ok := perWordToUCUM[perUnit]; ok { - return promPerUnit - } - return perUnit -} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 2b2d32f2f7d..0660f8ee5f4 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -29,6 +29,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" conventions "go.opentelemetry.io/collector/semconv/v1.6.1" @@ -36,8 +37,6 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" - - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) const ( @@ -117,7 +116,8 @@ var seps = []byte{'\xff'} // if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized. // If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels. 
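// Editorial note: label sanitization now comes from the external github.com/prometheus/otlptranslator
// module instead of the deleted in-tree package. Judging by the removed normalize_label_test.go above,
// otlptranslator.NormalizeLabel is expected to behave the same way, for example:
//   NormalizeLabel("label:with:colons")                  -> "label_with_colons"
//   NormalizeLabel("123label")                           -> "key_123label"
//   NormalizeLabel("_label_starting_with_underscore")    -> "key_label_starting_with_underscore"
//   NormalizeLabel("__label_starting_with_2underscores") -> "__label_starting_with_2underscores"
// createAttributes below applies it to attribute-derived label names unless settings.AllowUTF8 is set.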
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings, - ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label { + ignoreAttrs []string, logOnOverwrite bool, extras ...string, +) []prompb.Label { resourceAttrs := resource.Attributes() serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName) instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID) @@ -159,7 +159,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting for _, label := range labels { finalKey := label.Name if !settings.AllowUTF8 { - finalKey = prometheustranslator.NormalizeLabel(finalKey) + finalKey = otlptranslator.NormalizeLabel(finalKey) } if existingValue, alreadyExists := l[finalKey]; alreadyExists { l[finalKey] = existingValue + ";" + label.Value @@ -171,7 +171,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting for _, lbl := range promotedAttrs { normalized := lbl.Name if !settings.AllowUTF8 { - normalized = prometheustranslator.NormalizeLabel(normalized) + normalized = otlptranslator.NormalizeLabel(normalized) } if _, exists := l[normalized]; !exists { l[normalized] = lbl.Value @@ -211,7 +211,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting } // internal labels should be maintained if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { - name = prometheustranslator.NormalizeLabel(name) + name = otlptranslator.NormalizeLabel(name) } l[name] = extras[i+1] } @@ -249,7 +249,8 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string) error { + resource pcommon.Resource, settings Settings, baseName string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -273,7 +274,6 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo sumlabels := createLabels(baseName+sumStr, baseLabels) c.addSample(sum, sumlabels) - } // treat count as a sample in an individual TimeSeries @@ -413,7 +413,7 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, return promExemplars, nil } -// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics +// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics. func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { var ts pcommon.Timestamp // handle individual metric based on type @@ -449,7 +449,8 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string) error { + settings Settings, baseName string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -563,7 +564,7 @@ func (c *PrometheusConverter) getOrCreateTimeSeries(lbls []prompb.Label) (*promp // addTimeSeriesIfNeeded adds a corresponding time series if it doesn't already exist. 
// If the time series doesn't already exist, it gets added with startTimestamp for its value and timestamp for its timestamp, // both converted to milliseconds. -func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp pcommon.Timestamp, timestamp pcommon.Timestamp) { +func (c *PrometheusConverter) addTimeSeriesIfNeeded(lbls []prompb.Label, startTimestamp, timestamp pcommon.Timestamp) { ts, created := c.getOrCreateTimeSeries(lbls) if created { ts.Samples = []prompb.Sample{ @@ -632,7 +633,7 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta converter.addSample(sample, labels) } -// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms +// convertTimeStamp converts OTLP timestamp in ns to timestamp in ms. func convertTimeStamp(timestamp pcommon.Timestamp) int64 { return int64(timestamp) / 1_000_000 } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index b4bc704d4e0..ea5a3fce571 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -21,11 +21,11 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/prompb" ) @@ -204,7 +204,7 @@ func TestCreateAttributes(t *testing.T) { } lbls := createAttributes(resource, attrs, settings, tc.ignoreAttrs, false, model.MetricNameLabel, "test_metric") - assert.ElementsMatch(t, lbls, tc.expectedLabels) + require.ElementsMatch(t, lbls, tc.expectedLabels) }) } } @@ -222,7 +222,7 @@ func Test_convertTimeStamp(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := convertTimeStamp(tt.arg) - assert.Equal(t, tt.want, got) + require.Equal(t, tt.want, got) }) } } @@ -330,8 +330,8 @@ func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { metric.Name(), ) - assert.Equal(t, tt.want(), converter.unique) - assert.Empty(t, converter.conflicts) + require.Equal(t, tt.want(), converter.unique) + require.Empty(t, converter.conflicts) }) } } @@ -441,8 +441,8 @@ func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { metric.Name(), ) - assert.Equal(t, tt.want(), converter.unique) - assert.Empty(t, converter.conflicts) + require.Equal(t, tt.want(), converter.unique) + require.Empty(t, converter.conflicts) }) } } @@ -457,9 +457,9 @@ func TestGetPromExemplars(t *testing.T) { exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetIntValue(42) exemplars, err := getPromExemplars(ctx, everyN, pt) - assert.NoError(t, err) - assert.Len(t, exemplars, 1) - assert.Equal(t, float64(42), exemplars[0].Value) + require.NoError(t, err) + require.Len(t, exemplars, 1) + require.Equal(t, float64(42), exemplars[0].Value) }) t.Run("Exemplars with double value", func(t *testing.T) { @@ -468,9 +468,9 @@ func TestGetPromExemplars(t *testing.T) { exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) exemplar.SetDoubleValue(69.420) exemplars, err := getPromExemplars(ctx, everyN, pt) - assert.NoError(t, err) - assert.Len(t, exemplars, 1) - assert.Equal(t, 69.420, exemplars[0].Value) + require.NoError(t, err) + require.Len(t, exemplars, 1) + require.Equal(t, 69.420, exemplars[0].Value) }) t.Run("Exemplars with unsupported value type", func(t 
*testing.T) { @@ -478,6 +478,6 @@ func TestGetPromExemplars(t *testing.T) { exemplar := pt.Exemplars().AppendEmpty() exemplar.SetTimestamp(pcommon.Timestamp(time.Now().UnixNano())) _, err := getPromExemplars(ctx, everyN, pt) - assert.Error(t, err) + require.Error(t, err) }) } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 8349d4f9070..db26b629258 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -25,6 +25,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/util/annotations" @@ -35,7 +36,8 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Context, dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, promName string) (annotations.Annotations, error) { + resource pcommon.Resource, settings Settings, promName string, +) (annotations.Annotations, error) { var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { @@ -89,8 +91,8 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom scale = 8 } - pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) - nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) + pSpans, pDeltas := convertBucketsLayout(p.Positive().BucketCounts().AsRaw(), p.Positive().Offset(), scaleDown, true) + nSpans, nDeltas := convertBucketsLayout(p.Negative().BucketCounts().AsRaw(), p.Negative().Offset(), scaleDown, true) h := prompb.Histogram{ // The counter reset detection must be compatible with Prometheus to @@ -133,19 +135,25 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom return h, annots, nil } -// convertBucketsLayout translates OTel Exponential Histogram dense buckets -// representation to Prometheus Native Histogram sparse bucket representation. +// convertBucketsLayout translates OTel Explicit or Exponential Histogram dense buckets +// representation to Prometheus Native Histogram sparse bucket representation. This is used +// for translating Exponential Histograms into Native Histograms, and Explicit Histograms +// into Native Histograms with Custom Buckets. // // The translation logic is taken from the client_golang `histogram.go#makeBuckets` // function, see `makeBuckets` https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go -// The bucket indexes conversion was adjusted, since OTel exp. histogram bucket +// +// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. +// +// When converting from OTel Exponential Histograms to Native Histograms, the +// bucket indexes conversion is adjusted, since OTel exp. histogram bucket // index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // to the range (base 1]. // -// scaleDown is the factor by which the buckets are scaled down. 
In other words 2^scaleDown buckets will be merged into one. -func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) { - bucketCounts := buckets.BucketCounts() - if bucketCounts.Len() == 0 { +// When converting from OTel Explicit Histograms to Native Histograms with Custom Buckets, +// the bucket indexes are not scaled, and the indices are not adjusted by 1. +func convertBucketsLayout(bucketCounts []uint64, offset, scaleDown int32, adjustOffset bool) ([]prompb.BucketSpan, []int64) { + if len(bucketCounts) == 0 { return nil, nil } @@ -164,24 +172,28 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, // Let the compiler figure out that this is const during this function by // moving it into a local variable. - numBuckets := bucketCounts.Len() + numBuckets := len(bucketCounts) + + bucketIdx := offset>>scaleDown + 1 + + initialOffset := offset + if adjustOffset { + initialOffset = initialOffset>>scaleDown + 1 + } - // The offset is scaled and adjusted by 1 as described above. - bucketIdx := buckets.Offset()>>scaleDown + 1 spans = append(spans, prompb.BucketSpan{ - Offset: bucketIdx, + Offset: initialOffset, Length: 0, }) for i := 0; i < numBuckets; i++ { - // The offset is scaled and adjusted by 1 as described above. - nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + nextBucketIdx := (int32(i)+offset)>>scaleDown + 1 if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. - count += int64(bucketCounts.At(i)) + count += int64(bucketCounts[i]) continue } if count == 0 { - count = int64(bucketCounts.At(i)) + count = int64(bucketCounts[i]) continue } @@ -202,11 +214,12 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, } } appendDelta(count) - count = int64(bucketCounts.At(i)) + count = int64(bucketCounts[i]) bucketIdx = nextBucketIdx } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. - gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + gap := (int32(numBuckets)+offset-1)>>scaleDown + 1 - bucketIdx if gap > 2 { // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in @@ -226,3 +239,101 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, return spans, deltas } + +func (c *PrometheusConverter) addCustomBucketsHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, + resource pcommon.Resource, settings Settings, promName string, +) (annotations.Annotations, error) { + var annots annotations.Annotations + + for x := 0; x < dataPoints.Len(); x++ { + if err := c.everyN.checkContext(ctx); err != nil { + return annots, err + } + + pt := dataPoints.At(x) + + histogram, ws, err := explicitHistogramToCustomBucketsHistogram(pt) + annots.Merge(ws) + if err != nil { + return annots, err + } + + lbls := createAttributes( + resource, + pt.Attributes(), + settings, + nil, + true, + model.MetricNameLabel, + promName, + ) + + ts, _ := c.getOrCreateTimeSeries(lbls) + ts.Histograms = append(ts.Histograms, histogram) + + exemplars, err := getPromExemplars[pmetric.HistogramDataPoint](ctx, &c.everyN, pt) + if err != nil { + return annots, err + } + ts.Exemplars = append(ts.Exemplars, exemplars...) 
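// Worked example (editorial, not part of the applied patch) of the conversion performed in this loop,
// using the same values as the explicitHistogramToCustomBucketsHistogram test further below.
// Given an OTel explicit-bucket data point with
//   BucketCounts   = [1, 1]
//   ExplicitBounds = [0, 1]
//   Count = 2, Sum = 10.1
// the resulting prompb.Histogram is a native histogram with custom buckets
// (Schema: histogram.CustomBucketsSchema, i.e. -53) along the lines of:
//   PositiveSpans:  [{Offset: 0, Length: 2}]
//   PositiveDeltas: [1, 0]   // delta-encoded counts: first bucket 1, second 1-1=0
//   CustomValues:   [0, 1]   // the explicit bounds copied verbatim; the +Inf bucket stays implicit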
+ } + + return annots, nil +} + +func explicitHistogramToCustomBucketsHistogram(p pmetric.HistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { + var annots annotations.Annotations + + buckets := p.BucketCounts().AsRaw() + offset := getBucketOffset(buckets) + bucketCounts := buckets[offset:] + positiveSpans, positiveDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) + + // TODO(carrieedwards): Add setting to limit maximum bucket count + h := prompb.Histogram{ + // The counter reset detection must be compatible with Prometheus to + // safely set ResetHint to NO. This is not ensured currently. + // Sending a sample that triggers counter reset but with ResetHint==NO + // would lead to Prometheus panic as it does not double check the hint. + // Thus we're explicitly saying UNKNOWN here, which is always safe. + // TODO: using created time stamp should be accurate, but we + // need to know here if it was used for the detection. + // Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28663#issuecomment-1810577303 + // Counter reset detection in Prometheus: https://github.com/prometheus/prometheus/blob/f997c72f294c0f18ca13fa06d51889af04135195/tsdb/chunkenc/histogram.go#L232 + ResetHint: prompb.Histogram_UNKNOWN, + Schema: histogram.CustomBucketsSchema, + + PositiveSpans: positiveSpans, + PositiveDeltas: positiveDeltas, + // Note: OTel explicit histograms have an implicit +Inf bucket, which has a lower bound + // of the last element in the explicit_bounds array. + // This is similar to the custom_values array in native histograms with custom buckets. + // Because of this shared property, the OTel explicit histogram's explicit_bounds array + // can be mapped directly to the custom_values array. + // See: https://github.com/open-telemetry/opentelemetry-proto/blob/d7770822d70c7bd47a6891fc9faacc66fc4af3d3/opentelemetry/proto/metrics/v1/metrics.proto#L469 + CustomValues: p.ExplicitBounds().AsRaw(), + + Timestamp: convertTimeStamp(p.Timestamp()), + } + + if p.Flags().NoRecordedValue() { + h.Sum = math.Float64frombits(value.StaleNaN) + h.Count = &prompb.Histogram_CountInt{CountInt: value.StaleNaN} + } else { + if p.HasSum() { + h.Sum = p.Sum() + } + h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + if p.Count() == 0 && h.Sum != 0 { + annots.Add(fmt.Errorf("histogram data point has zero count, but non-zero sum: %f", h.Sum)) + } + } + return h, annots, nil +} + +func getBucketOffset(buckets []uint64) (offset int) { + for offset < len(buckets) && buckets[offset] == 0 { + offset++ + } + return offset +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go index 520d571b653..63e453a5358 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -23,14 +23,12 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/stretchr/testify/assert" + "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "github.com/prometheus/prometheus/prompb" - - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) type expectedBucketLayout struct { @@ -380,9 +378,9 @@ func TestConvertBucketsLayout(t *testing.T) { for _, tt := range tests { for scaleDown, wantLayout := range 
tt.wantLayout { t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) { - gotSpans, gotDeltas := convertBucketsLayout(tt.buckets(), scaleDown) - assert.Equal(t, wantLayout.wantSpans, gotSpans) - assert.Equal(t, wantLayout.wantDeltas, gotDeltas) + gotSpans, gotDeltas := convertBucketsLayout(tt.buckets().BucketCounts().AsRaw(), tt.buckets().Offset(), scaleDown, true) + require.Equal(t, wantLayout.wantSpans, gotSpans) + require.Equal(t, wantLayout.wantDeltas, gotDeltas) }) } } @@ -410,7 +408,7 @@ func BenchmarkConvertBucketLayout(b *testing.B) { } b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { for i := 0; i < b.N; i++ { - convertBucketsLayout(buckets, 0) + convertBucketsLayout(buckets.BucketCounts().AsRaw(), buckets.Offset(), 0, true) } }) } @@ -570,18 +568,26 @@ func TestExponentialToNativeHistogram(t *testing.T) { validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check. got, annots, err := exponentialToNativeHistogram(tt.exponentialHist()) if tt.wantErrMessage != "" { - assert.ErrorContains(t, err, tt.wantErrMessage) + require.ErrorContains(t, err, tt.wantErrMessage) return } require.NoError(t, err) require.Empty(t, annots) - assert.Equal(t, tt.wantNativeHist(), got) + require.Equal(t, tt.wantNativeHist(), got) validateNativeHistogramCount(t, got) }) } } +func validateHistogramCount(t *testing.T, h pmetric.HistogramDataPoint) { + actualCount := uint64(0) + for _, bucket := range h.BucketCounts().AsRaw() { + actualCount += bucket + } + require.Equal(t, h.Count(), actualCount, "histogram count mismatch") +} + func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistogramDataPoint) { actualCount := uint64(0) for _, bucket := range h.Positive().BucketCounts().AsRaw() { @@ -610,7 +616,7 @@ func validateNativeHistogramCount(t *testing.T, h prompb.Histogram) { prevBucket += delta actualCount += uint64(prevBucket) } - assert.Equal(t, want, actualCount, "native histogram count mismatch") + require.Equal(t, want, actualCount, "native histogram count mismatch") } func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { @@ -762,13 +768,382 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { Settings{ ExportCreatedMetric: true, }, - prometheustranslator.BuildCompliantMetricName(metric, "", true), + otlptranslator.BuildCompliantMetricName(metric, "", true), ) require.NoError(t, err) require.Empty(t, annots) - assert.Equal(t, tt.wantSeries(), converter.unique) - assert.Empty(t, converter.conflicts) + require.Equal(t, tt.wantSeries(), converter.unique) + require.Empty(t, converter.conflicts) + }) + } +} + +func TestConvertExplicitHistogramBucketsToNHCBLayout(t *testing.T) { + tests := []struct { + name string + buckets []uint64 + wantLayout expectedBucketLayout + }{ + { + name: "zero offset", + buckets: []uint64{4, 3, 2, 1}, + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 4, + }, + }, + wantDeltas: []int64{4, -1, -1, -1}, + }, + }, + { + name: "leading empty buckets", + buckets: []uint64{0, 0, 1, 1, 2, 3}, + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 4, + }, + }, + wantDeltas: []int64{1, 0, 1, 1}, + }, + }, + { + name: "trailing empty buckets", + buckets: []uint64{0, 0, 1, 1, 2, 3, 0, 0}, // TODO: add tests for 3 trailing buckets + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 6, + }, + }, + wantDeltas: []int64{1, 0, 1, 1, -3, 0}, 
+ }, + }, + { + name: "bucket gap of 2", + buckets: []uint64{1, 2, 0, 0, 2}, + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 5, + }, + }, + wantDeltas: []int64{1, 1, -2, 0, 2}, + }, + }, + { + name: "bucket gap > 2", + buckets: []uint64{1, 2, 0, 0, 0, 2, 4, 4}, + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 2, + }, + { + Offset: 3, + Length: 3, + }, + }, + wantDeltas: []int64{1, 1, 0, 2, 0}, + }, + }, + { + name: "multiple bucket gaps", + buckets: []uint64{0, 0, 1, 2, 0, 0, 0, 2, 4, 4, 0, 0}, + wantLayout: expectedBucketLayout{ + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 2, + }, + { + Offset: 3, + Length: 5, + }, + }, + wantDeltas: []int64{1, 1, 0, 2, 0, -4, 0}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buckets := tt.buckets + offset := getBucketOffset(buckets) + bucketCounts := buckets[offset:] + + gotSpans, gotDeltas := convertBucketsLayout(bucketCounts, int32(offset), 0, false) + require.Equal(t, tt.wantLayout.wantSpans, gotSpans) + require.Equal(t, tt.wantLayout.wantDeltas, gotDeltas) + }) + } +} + +func BenchmarkConvertHistogramBucketsToNHCBLayout(b *testing.B) { + scenarios := []struct { + gap int + }{ + {gap: 0}, + {gap: 1}, + {gap: 2}, + {gap: 3}, + } + + for _, scenario := range scenarios { + var buckets []uint64 + for i := 0; i < 1000; i++ { + if i%(scenario.gap+1) == 0 { + buckets = append(buckets, uint64(10)) + } else { + buckets = append(buckets, uint64(0)) + } + } + b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + offset := getBucketOffset(buckets) + convertBucketsLayout(buckets, int32(offset), 0, false) + } + }) + } +} + +func TestHistogramToCustomBucketsHistogram(t *testing.T) { + tests := []struct { + name string + hist func() pmetric.HistogramDataPoint + wantNativeHist func() prompb.Histogram + wantErrMessage string + }{ + { + name: "convert hist to custom buckets hist", + hist: func() pmetric.HistogramDataPoint { + pt := pmetric.NewHistogramDataPoint() + pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + pt.SetCount(2) + pt.SetSum(10.1) + + pt.BucketCounts().FromRaw([]uint64{1, 1}) + pt.ExplicitBounds().FromRaw([]float64{0, 1}) + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 2}, + Sum: 10.1, + Schema: -53, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{1, 0}, + CustomValues: []float64{0, 1}, + Timestamp: 500, + } + }, + }, + { + name: "convert hist to custom buckets hist with no sum", + hist: func() pmetric.HistogramDataPoint { + pt := pmetric.NewHistogramDataPoint() + pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + pt.SetCount(4) + + pt.BucketCounts().FromRaw([]uint64{2, 2}) + pt.ExplicitBounds().FromRaw([]float64{0, 1}) + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 4}, + Schema: -53, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{2, 0}, + CustomValues: []float64{0, 1}, + Timestamp: 500, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + validateHistogramCount(t, tt.hist()) + got, annots, err := 
explicitHistogramToCustomBucketsHistogram(tt.hist()) + if tt.wantErrMessage != "" { + require.ErrorContains(t, err, tt.wantErrMessage) + return + } + + require.NoError(t, err) + require.Empty(t, annots) + require.Equal(t, tt.wantNativeHist(), got) + validateNativeHistogramCount(t, got) + }) + } +} + +func TestPrometheusConverter_addCustomBucketsHistogramDataPoints(t *testing.T) { + tests := []struct { + name string + metric func() pmetric.Metric + wantSeries func() map[uint64]*prompb.TimeSeries + }{ + { + name: "histogram data points with same labels", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist_to_nhcb") + metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := metric.Histogram().DataPoints().AppendEmpty() + pt.SetCount(3) + pt.SetSum(3) + pt.BucketCounts().FromRaw([]uint64{2, 0, 1}) + pt.ExplicitBounds().FromRaw([]float64{5, 10}) + pt.Exemplars().AppendEmpty().SetDoubleValue(1) + pt.Attributes().PutStr("attr", "test_attr") + + pt = metric.Histogram().DataPoints().AppendEmpty() + pt.SetCount(11) + pt.SetSum(5) + pt.BucketCounts().FromRaw([]uint64{3, 8, 0}) + pt.ExplicitBounds().FromRaw([]float64{0, 1}) + pt.Exemplars().AppendEmpty().SetDoubleValue(2) + pt.Attributes().PutStr("attr", "test_attr") + + return metric + }, + wantSeries: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, + {Name: "attr", Value: "test_attr"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 3}, + Sum: 3, + Schema: -53, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, + PositiveDeltas: []int64{2, -2, 1}, + CustomValues: []float64{5, 10}, + }, + { + Count: &prompb.Histogram_CountInt{CountInt: 11}, + Sum: 5, + Schema: -53, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, + PositiveDeltas: []int64{3, 5, -8}, + CustomValues: []float64{0, 1}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 1}, + {Value: 2}, + }, + }, + } + }, + }, + { + name: "histogram data points with different labels", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist_to_nhcb") + metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := metric.Histogram().DataPoints().AppendEmpty() + pt.SetCount(6) + pt.SetSum(3) + pt.BucketCounts().FromRaw([]uint64{4, 2}) + pt.ExplicitBounds().FromRaw([]float64{0, 1}) + pt.Exemplars().AppendEmpty().SetDoubleValue(1) + pt.Attributes().PutStr("attr", "test_attr") + + pt = metric.Histogram().DataPoints().AppendEmpty() + pt.SetCount(11) + pt.SetSum(5) + pt.BucketCounts().FromRaw([]uint64{3, 8}) + pt.ExplicitBounds().FromRaw([]float64{0, 1}) + pt.Exemplars().AppendEmpty().SetDoubleValue(2) + pt.Attributes().PutStr("attr", "test_attr_two") + + return metric + }, + wantSeries: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, + {Name: "attr", Value: "test_attr"}, + } + labelsAnother := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist_to_nhcb"}, + {Name: "attr", Value: "test_attr_two"}, + } + + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 6}, + Sum: 3, + Schema: -53, + PositiveSpans: 
[]prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{4, -2}, + CustomValues: []float64{0, 1}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 1}, + }, + }, + timeSeriesSignature(labelsAnother): { + Labels: labelsAnother, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 11}, + Sum: 5, + Schema: -53, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{3, 5}, + CustomValues: []float64{0, 1}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 2}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + + converter := NewPrometheusConverter() + annots, err := converter.addCustomBucketsHistogramDataPoints( + context.Background(), + metric.Histogram().DataPoints(), + pcommon.NewResource(), + Settings{ + ExportCreatedMetric: true, + ConvertHistogramsToNHCB: true, + }, + otlptranslator.BuildCompliantMetricName(metric, "", true), + ) + + require.NoError(t, err) + require.Empty(t, annots) + + require.Equal(t, tt.wantSeries(), converter.unique) + require.Empty(t, converter.conflicts) }) } } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 1545accf2fb..d2e79e4b6ff 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -22,12 +22,12 @@ import ( "fmt" "sort" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "github.com/prometheus/prometheus/prompb" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" "github.com/prometheus/prometheus/util/annotations" ) @@ -40,6 +40,7 @@ type Settings struct { AllowUTF8 bool PromoteResourceAttributes []string KeepIdentifyingResourceAttributes bool + ConvertHistogramsToNHCB bool } // PrometheusConverter converts from OTel write format to Prometheus remote write format. @@ -98,9 +99,9 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric var promName string if settings.AllowUTF8 { - promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName = otlptranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) } else { - promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + promName = otlptranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) } c.metadata = append(c.metadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), @@ -142,10 +143,21 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) break } - if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { - errs = multierr.Append(errs, err) - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return + if settings.ConvertHistogramsToNHCB { + ws, err := c.addCustomBucketsHistogramDataPoints(ctx, dataPoints, resource, settings, promName) + annots.Merge(ws) + if err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + } + } else { + if err := c.addHistogramDataPoints(ctx, dataPoints, resource, settings, promName); err != nil { + errs = multierr.Append(errs, err) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } } } case pmetric.MetricTypeExponentialHistogram: diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index a3b4b08df49..d9f433d7132 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -30,7 +31,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" ) func TestFromMetrics(t *testing.T) { @@ -46,7 +46,7 @@ func TestFromMetrics(t *testing.T) { metricSlice := scopeMetricsSlice.At(j).Metrics() for k := 0; k < metricSlice.Len(); k++ { metric := metricSlice.At(k) - promName := prometheustranslator.BuildCompliantMetricName(metric, "", false) + promName := otlptranslator.BuildCompliantMetricName(metric, "", false) expMetadata = append(expMetadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, @@ -72,12 +72,12 @@ func TestFromMetrics(t *testing.T) { ts := converter.TimeSeries() require.Len(t, ts, 1408+1) // +1 for the target_info. 
- target_info_count := 0 + tgtInfoCount := 0 for _, s := range ts { b := labels.NewScratchBuilder(2) lbls := s.ToLabels(&b, nil) if lbls.Get(labels.MetricName) == "target_info" { - target_info_count++ + tgtInfoCount++ require.Equal(t, "test-namespace/test-service", lbls.Get("job")) require.Equal(t, "id1234", lbls.Get("instance")) if keepIdentifyingResourceAttributes { @@ -91,7 +91,52 @@ func TestFromMetrics(t *testing.T) { } } } - require.Equal(t, 1, target_info_count) + require.Equal(t, 1, tgtInfoCount) + }) + } + + for _, convertHistogramsToNHCB := range []bool{false, true} { + t.Run(fmt.Sprintf("successful/convertHistogramsToNHCB=%v", convertHistogramsToNHCB), func(t *testing.T) { + request := pmetricotlp.NewExportRequest() + rm := request.Metrics().ResourceMetrics().AppendEmpty() + generateAttributes(rm.Resource().Attributes(), "resource", 10) + + metrics := rm.ScopeMetrics().AppendEmpty().Metrics() + ts := pcommon.NewTimestampFromTime(time.Now()) + + m := metrics.AppendEmpty() + m.SetEmptyHistogram() + m.SetName("histogram-1") + m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + h := m.Histogram().DataPoints().AppendEmpty() + h.SetTimestamp(ts) + + h.SetCount(15) + h.SetSum(155) + + generateAttributes(h.Attributes(), "series", 1) + + converter := NewPrometheusConverter() + annots, err := converter.FromMetrics( + context.Background(), + request.Metrics(), + Settings{ConvertHistogramsToNHCB: convertHistogramsToNHCB}, + ) + require.NoError(t, err) + require.Empty(t, annots) + + series := converter.TimeSeries() + + if convertHistogramsToNHCB { + require.Len(t, series[0].Histograms, 1) + require.Empty(t, series[0].Samples) + } else { + require.Len(t, series, 3) + for i := range series { + require.Len(t, series[i].Samples, 1) + require.Nil(t, series[i].Histograms) + } + } }) } @@ -151,6 +196,43 @@ func TestFromMetrics(t *testing.T) { "exponential histogram data point has zero count, but non-zero sum: 155.000000", }, ws) }) + + t.Run("explicit histogram to NHCB warnings for zero count and non-zero sum", func(t *testing.T) { + request := pmetricotlp.NewExportRequest() + rm := request.Metrics().ResourceMetrics().AppendEmpty() + generateAttributes(rm.Resource().Attributes(), "resource", 10) + + metrics := rm.ScopeMetrics().AppendEmpty().Metrics() + ts := pcommon.NewTimestampFromTime(time.Now()) + + for i := 1; i <= 10; i++ { + m := metrics.AppendEmpty() + m.SetEmptyHistogram() + m.SetName(fmt.Sprintf("histogram-%d", i)) + m.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + h := m.Histogram().DataPoints().AppendEmpty() + h.SetTimestamp(ts) + + h.SetCount(0) + h.SetSum(155) + + generateAttributes(h.Attributes(), "series", 10) + } + + converter := NewPrometheusConverter() + annots, err := converter.FromMetrics( + context.Background(), + request.Metrics(), + Settings{ConvertHistogramsToNHCB: true}, + ) + require.NoError(t, err) + require.NotEmpty(t, annots) + ws, infos := annots.AsStrings("", 0, 0) + require.Empty(t, infos) + require.Equal(t, []string{ + "histogram data point has zero count, but non-zero sum: 155.000000", + }, ws) + }) } func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 6cdab450e1a..e89dfd98155 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ 
b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -29,7 +29,8 @@ import ( ) func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, settings Settings, name string) error { + resource pcommon.Resource, settings Settings, name string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -65,7 +66,8 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, +) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go index b01d2cb1fe3..ca01a162eca 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -22,7 +22,7 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -56,7 +56,8 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { { Value: 1, Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), - }}, + }, + }, }, } }, @@ -77,8 +78,8 @@ func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { metric.Name(), ) - assert.Equal(t, tt.want(), converter.unique) - assert.Empty(t, converter.conflicts) + require.Equal(t, tt.want(), converter.unique) + require.Empty(t, converter.conflicts) }) } } @@ -111,7 +112,8 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { { Value: 1, Timestamp: convertTimeStamp(ts), - }}, + }, + }, }, } }, @@ -255,8 +257,8 @@ func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { metric.Name(), ) - assert.Equal(t, tt.want(), converter.unique) - assert.Empty(t, converter.conflicts) + require.Equal(t, tt.want(), converter.unique) + require.Empty(t, converter.conflicts) }) } } diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index bc860244328..a682b95c310 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -585,6 +585,7 @@ func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) er AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes, + ConvertHistogramsToNHCB: otlpCfg.ConvertHistogramsToNHCB, }) if err != nil { rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index e97f06a2ce8..51a2cb5ee8f 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -142,10 +142,6 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch missingSeries[e.Ref] = struct{}{} continue } - - if e.T < h.minValidTime.Load() { - continue - } // At the moment the only possible error here 
is out of order exemplars, which we shouldn't see when // replaying the WAL, so lets just log the error if it's not that type. err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) @@ -334,6 +330,9 @@ Outer: h.wlReplaytStonesPool.Put(v) case []record.RefExemplar: for _, e := range v { + if e.T < h.minValidTime.Load() { + continue + } if r, ok := multiRef[e.Ref]; ok { e.Ref = r }