opentelemetry_proto/proto/tonic/opentelemetry.proto.metrics.v1.rs
1// This file is @generated by prost-build.
/// MetricsData represents the metrics data that can be stored in a persistent
/// storage, OR can be embedded by other protocols that transfer OTLP metrics
/// data but do not implement the OTLP protocol.
///
/// MetricsData
/// └─── ResourceMetrics
///      ├── Resource
///      ├── SchemaURL
///      └── ScopeMetrics
///           ├── Scope
///           ├── SchemaURL
///           └── Metric
///                ├── Name
///                ├── Description
///                ├── Unit
///                └── data
///                     ├── Gauge
///                     ├── Sum
///                     ├── Histogram
///                     ├── ExponentialHistogram
///                     └── Summary
///
/// The main difference between this message and collector protocol is that
/// in this message there will not be any "control" or "metadata" specific to
/// OTLP protocol.
///
/// When new fields are added into this message, the OTLP request MUST be updated
/// as well.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MetricsData {
    /// An array of ResourceMetrics.
    /// For data coming from a single resource this array will typically contain
    /// one element. Intermediary nodes that receive data from multiple origins
    /// typically batch the data before forwarding further and in that case this
    /// array will contain multiple elements.
    #[prost(message, repeated, tag = "1")]
    pub resource_metrics: ::prost::alloc::vec::Vec<ResourceMetrics>,
}
/// A collection of ScopeMetrics from a Resource.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceMetrics {
    /// The resource for the metrics in this message.
    /// If this field is not set then no resource info is known.
    #[prost(message, optional, tag = "1")]
    pub resource: ::core::option::Option<super::super::resource::v1::Resource>,
    /// A list of metrics that originate from a resource.
    #[prost(message, repeated, tag = "2")]
    pub scope_metrics: ::prost::alloc::vec::Vec<ScopeMetrics>,
    /// The Schema URL, if known. This is the identifier of the Schema that the resource data
    /// is recorded in. Notably, the last part of the URL path is the version number of the
    /// schema: http\[s\]://server\[:port\]/path/<version>. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to the data in the "resource" field. It does not apply
    /// to the data in the "scope_metrics" field which have their own schema_url field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// A collection of Metrics produced by a Scope.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ScopeMetrics {
    /// The instrumentation scope information for the metrics in this message.
    /// Semantically when InstrumentationScope isn't set, it is equivalent with
    /// an empty instrumentation scope name (unknown).
    #[prost(message, optional, tag = "1")]
    pub scope: ::core::option::Option<super::super::common::v1::InstrumentationScope>,
    /// A list of metrics that originate from an instrumentation library.
    #[prost(message, repeated, tag = "2")]
    pub metrics: ::prost::alloc::vec::Vec<Metric>,
    /// The Schema URL, if known. This is the identifier of the Schema that the metric data
    /// is recorded in. Notably, the last part of the URL path is the version number of the
    /// schema: http\[s\]://server\[:port\]/path/<version>. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to all metrics in the "metrics" field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// Defines a Metric which has one or more timeseries. The following is a
/// brief summary of the Metric data model. For more details, see:
///
/// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md>
///
/// The data model and relation between entities is shown in the
/// diagram below. Here, "DataPoint" is the term used to refer to any
/// one of the specific data point value types, and "points" is the term used
/// to refer to any one of the lists of points contained in the Metric.
///
/// - Metric is composed of a metadata and data.
/// - Metadata part contains a name, description, unit.
/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
/// - DataPoint contains timestamps, attributes, and one of the possible value type
///   fields.
///
///     Metric
///     +------------+
///     |name        |
///     |description |
///     |unit        |     +------------------------------------+
///     |data        |---> |Gauge, Sum, Histogram, Summary, ... |
///     +------------+     +------------------------------------+
///
///     Data \[One of Gauge, Sum, Histogram, Summary, ...\]
///     +-----------+
///     |...        |  // Metadata about the Data.
///     |points     |--+
///     +-----------+  |
///                    |      +---------------------------+
///                    |      |DataPoint 1                |
///                    v      |+------+------+   +------+ |
///                 +-----+   ||label |label |...|label | |
///                 |  1  |-->||value1|value2|...|valueN| |
///                 +-----+   |+------+------+   +------+ |
///                 |  .  |   |+-----+                    |
///                 |  .  |   ||value|                    |
///                 |  .  |   |+-----+                    |
///                 |  .  |   +---------------------------+
///                 |  .  |                   .
///                 |  .  |                   .
///                 |  .  |                   .
///                 |  .  |   +---------------------------+
///                 |  .  |   |DataPoint M                |
///                 +-----+   |+------+------+   +------+ |
///                 |  M  |-->||label |label |...|label | |
///                 +-----+   ||value1|value2|...|valueN| |
///                           |+------+------+   +------+ |
///                           |+-----+                    |
///                           ||value|                    |
///                           |+-----+                    |
///                           +---------------------------+
///
/// Each distinct type of DataPoint represents the output of a specific
/// aggregation function, the result of applying the DataPoint's
/// associated function of to one or more measurements.
///
/// All DataPoint types have three common fields:
/// - Attributes includes key-value pairs associated with the data point
/// - TimeUnixNano is required, set to the end time of the aggregation
/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
///   having an AggregationTemporality field, as discussed below.
///
/// Both TimeUnixNano and StartTimeUnixNano values are expressed as
/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
///
/// # TimeUnixNano
///
/// This field is required, having consistent interpretation across
/// DataPoint types. TimeUnixNano is the moment corresponding to when
/// the data point's aggregate value was captured.
///
/// Data points with the 0 value for TimeUnixNano SHOULD be rejected
/// by consumers.
///
/// # StartTimeUnixNano
///
/// StartTimeUnixNano in general allows detecting when a sequence of
/// observations is unbroken. This field indicates to consumers the
/// start time for points with cumulative and delta
/// AggregationTemporality, and it should be included whenever possible
/// to support correct rate calculation. Although it may be omitted
/// when the start time is truly unknown, setting StartTimeUnixNano is
/// strongly encouraged.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metric {
    /// name of the metric.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// description of the metric, which can be used in documentation.
    #[prost(string, tag = "2")]
    pub description: ::prost::alloc::string::String,
    /// unit in which the metric value is reported. Follows the format
    /// described by <http://unitsofmeasure.org/ucum.html.>
    #[prost(string, tag = "3")]
    pub unit: ::prost::alloc::string::String,
    /// Additional metadata attributes that describe the metric. \[Optional\].
    /// Attributes are non-identifying.
    /// Consumers SHOULD NOT need to be aware of these attributes.
    /// These attributes MAY be used to encode information allowing
    /// for lossless roundtrip translation to / from another data model.
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "12")]
    pub metadata: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// Data determines the aggregation type (if any) of the metric, what is the
    /// reported value type for the data points, as well as the relationship to
    /// the time interval over which they are reported.
    #[prost(oneof = "metric::Data", tags = "5, 7, 9, 10, 11")]
    #[cfg_attr(feature = "with-serde", serde(flatten))]
    pub data: ::core::option::Option<metric::Data>,
}
/// Nested message and enum types in `Metric`.
pub mod metric {
    /// Data determines the aggregation type (if any) of the metric, what is the
    /// reported value type for the data points, as well as the relationship to
    /// the time interval over which they are reported.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Data {
        /// Scalar "current value" points with no aggregation temporality.
        #[prost(message, tag = "5")]
        Gauge(super::Gauge),
        /// Scalar points aggregated as a sum over a time interval.
        #[prost(message, tag = "7")]
        Sum(super::Sum),
        /// Points aggregated into explicit-bounds histogram buckets.
        #[prost(message, tag = "9")]
        Histogram(super::Histogram),
        /// Points aggregated into base-2 exponential histogram buckets.
        #[prost(message, tag = "10")]
        ExponentialHistogram(super::ExponentialHistogram),
        /// Quantile-summary points (Prometheus/OpenMetrics summary type).
        #[prost(message, tag = "11")]
        Summary(super::Summary),
    }
}
/// Gauge represents the type of a scalar metric that always exports the
/// "current value" for every data point. It should be used for an "unknown"
/// aggregation.
///
/// A Gauge does not support different aggregation temporalities. Given the
/// aggregation is unknown, points cannot be combined using the same
/// aggregation, regardless of aggregation temporalities. Therefore,
/// AggregationTemporality is not included. Consequently, this also means
/// "StartTimeUnixNano" is ignored for all data points.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Gauge {
    /// The individual data points of this gauge.
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
}
/// Sum represents the type of a scalar metric that is calculated as a sum of all
/// reported measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Sum {
    /// The individual data points of this sum.
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
    /// If "true" means that the sum is monotonic.
    #[prost(bool, tag = "3")]
    pub is_monotonic: bool,
}
/// Histogram represents the type of a metric that is calculated by aggregating
/// as a Histogram of all reported measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Histogram {
    /// The individual data points of this histogram.
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<HistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// ExponentialHistogram represents the type of a metric that is calculated by aggregating
/// as an ExponentialHistogram of all reported double measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogram {
    /// The individual data points of this exponential histogram.
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<ExponentialHistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// Summary metric data are used to convey quantile summaries,
/// a Prometheus (see: <https://prometheus.io/docs/concepts/metric_types/#summary>)
/// and OpenMetrics (see: <https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45>)
/// data type. These data points cannot always be merged in a meaningful way.
/// While they can be useful in some applications, histogram data points are
/// recommended for new applications.
/// Summary metrics do not have an aggregation temporality field. This is
/// because the count and sum fields of a SummaryDataPoint are assumed to be
/// cumulative values.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Summary {
    /// The individual data points of this summary.
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<SummaryDataPoint>,
}
/// NumberDataPoint is a single data point in a timeseries that describes the
/// time-varying scalar value of a metric.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NumberDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    // Serialized as a decimal string in JSON to avoid u64 precision loss.
    #[prost(fixed64, tag = "2")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    // Serialized as a decimal string in JSON to avoid u64 precision loss.
    #[prost(fixed64, tag = "3")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub time_unix_nano: u64,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point
    #[prost(message, repeated, tag = "5")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
    /// The value itself. A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
    #[prost(oneof = "number_data_point::Value", tags = "4, 6")]
    #[cfg_attr(feature = "with-serde", serde(flatten))]
    pub value: ::core::option::Option<number_data_point::Value>,
}
/// Nested message and enum types in `NumberDataPoint`.
pub mod number_data_point {
    /// The value itself. A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        /// The value recorded as a double.
        #[prost(double, tag = "4")]
        AsDouble(f64),
        /// The value recorded as a signed 64-bit integer.
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// HistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Histogram. A Histogram contains summary statistics
/// for a population of values, it may optionally contain the distribution of
/// those values across a set of buckets.
///
/// If the histogram contains the distribution of values, then both
/// "explicit_bounds" and "bucket counts" fields must be defined.
/// If the histogram does not contain the distribution of values, then both
/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
/// "sum" are known.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "9")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    // Serialized as a decimal string in JSON to avoid u64 precision loss.
    #[prost(fixed64, tag = "2")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    // Serialized as a decimal string in JSON to avoid u64 precision loss.
    #[prost(fixed64, tag = "3")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative. This
    /// value must be equal to the sum of the "count" fields in buckets if a
    /// histogram is provided.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// bucket_counts is an optional field that contains the count values of the
    /// histogram for each bucket.
    ///
    /// The sum of the bucket_counts must equal the value in the count field.
    ///
    /// The number of elements in bucket_counts array must be by one greater than
    /// the number of elements in explicit_bounds array.
    #[prost(fixed64, repeated, tag = "6")]
    pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    /// explicit_bounds specifies buckets with explicitly defined bounds for values.
    ///
    /// The boundaries for bucket at index i are:
    ///
    /// (-infinity, explicit_bounds\[i]\] for i == 0
    /// (explicit_bounds\[i-1\], explicit_bounds\[i]\] for 0 < i < size(explicit_bounds)
    /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds)
    ///
    /// The values in the explicit_bounds array must be strictly increasing.
    ///
    /// Histogram buckets are inclusive of their upper boundary, except the last
    /// bucket where the boundary is at infinity. This format is intentionally
    /// compatible with the OpenMetrics histogram definition.
    #[prost(double, repeated, tag = "7")]
    pub explicit_bounds: ::prost::alloc::vec::Vec<f64>,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point
    #[prost(message, repeated, tag = "8")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "11")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub max: ::core::option::Option<f64>,
}
/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains
/// summary statistics for a population of values, it may optionally contain the
/// distribution of those values across a set of buckets.
///
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "1")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be
    /// non-negative. This value must be equal to the sum of the "bucket_counts"
    /// values in the positive and negative Buckets plus the "zero_count" field.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// scale describes the resolution of the histogram. Boundaries are
    /// located at powers of the base, where:
    ///
    /// base = (2^(2^-scale))
    ///
    /// The histogram bucket identified by `index`, a signed integer,
    /// contains values that are greater than (base^index) and
    /// less than or equal to (base^(index+1)).
    ///
    /// The positive and negative ranges of the histogram are expressed
    /// separately. Negative values are mapped by their absolute value
    /// into the negative range using the same scale as the positive range.
    ///
    /// scale is not restricted by the protocol, as the permissible
    /// values depend on the range of the data.
    #[prost(sint32, tag = "6")]
    pub scale: i32,
    /// zero_count is the count of values that are either exactly zero or
    /// within the region considered zero by the instrumentation at the
    /// tolerated degree of precision. This bucket stores values that
    /// cannot be expressed using the standard exponential formula as
    /// well as values that have been rounded to zero.
    ///
    /// Implementations MAY consider the zero bucket to have probability
    /// mass equal to (zero_count / count).
    #[prost(fixed64, tag = "7")]
    pub zero_count: u64,
    /// positive carries the positive range of exponential bucket counts.
    #[prost(message, optional, tag = "8")]
    pub positive: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// negative carries the negative range of exponential bucket counts.
    #[prost(message, optional, tag = "9")]
    pub negative: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point
    #[prost(message, repeated, tag = "11")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "13")]
    pub max: ::core::option::Option<f64>,
    /// ZeroThreshold may be optionally set to convey the width of the zero
    /// region. Where the zero region is defined as the closed interval
    /// \[-ZeroThreshold, ZeroThreshold\].
    /// When ZeroThreshold is 0, zero count bucket stores values that cannot be
    /// expressed using the standard exponential formula as well as values that
    /// have been rounded to zero.
    #[prost(double, tag = "14")]
    pub zero_threshold: f64,
}
/// Nested message and enum types in `ExponentialHistogramDataPoint`.
pub mod exponential_histogram_data_point {
    /// Buckets are a set of bucket counts, encoded in a contiguous array
    /// of counts.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Buckets {
        /// Offset is the bucket index of the first entry in the bucket_counts array.
        ///
        /// Note: This uses a varint encoding as a simple form of compression.
        #[prost(sint32, tag = "1")]
        pub offset: i32,
        /// bucket_counts is an array of count values, where bucket_counts\[i\] carries
        /// the count of the bucket at index (offset+i). bucket_counts\[i\] is the count
        /// of values greater than base^(offset+i) and less than or equal to
        /// base^(offset+i+1).
        ///
        /// Note: By contrast, the explicit HistogramDataPoint uses
        /// fixed64. This field is expected to have many buckets,
        /// especially zeros, so uint64 has been selected to ensure
        /// varint encoding.
        #[prost(uint64, repeated, tag = "2")]
        pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    }
}
/// SummaryDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Summary metric. The count and sum fields represent
/// cumulative values.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SummaryDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary>
    #[prost(double, tag = "5")]
    pub sum: f64,
    /// (Optional) list of values at different quantiles of the distribution calculated
    /// from the current snapshot. The quantiles must be strictly increasing.
    #[prost(message, repeated, tag = "6")]
    pub quantile_values: ::prost::alloc::vec::Vec<summary_data_point::ValueAtQuantile>,
    /// Flags that apply to this specific data point. See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
}
/// Nested message and enum types in `SummaryDataPoint`.
pub mod summary_data_point {
    /// Represents the value at a given quantile of a distribution.
    ///
    /// To record Min and Max values following conventions are used:
    /// - The 1.0 quantile is equivalent to the maximum value observed.
    /// - The 0.0 quantile is equivalent to the minimum value observed.
    ///
    /// See the following issue for more context:
    /// <https://github.com/open-telemetry/opentelemetry-proto/issues/125>
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    // Both fields are plain f64, so the message is also `Copy`.
    #[derive(Clone, Copy, PartialEq, ::prost::Message)]
    pub struct ValueAtQuantile {
        /// The quantile of a distribution. Must be in the interval
        /// \[0.0, 1.0\].
        #[prost(double, tag = "1")]
        pub quantile: f64,
        /// The value at the given quantile of a distribution.
        ///
        /// Quantile values must NOT be negative.
        #[prost(double, tag = "2")]
        pub value: f64,
    }
}
/// A representation of an exemplar, which is a sample input measurement.
/// Exemplars also hold information about the environment when the measurement
/// was recorded, for example the span and trace ID of the active span when the
/// exemplar was recorded.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Exemplar {
    /// The set of key/value pairs that were filtered out by the aggregator, but
    /// recorded alongside the original measurement. Only key/value pairs that were
    /// filtered out by the aggregator should be included.
    #[prost(message, repeated, tag = "7")]
    pub filtered_attributes: ::prost::alloc::vec::Vec<
        super::super::common::v1::KeyValue,
    >,
    /// time_unix_nano is the exact time when this exemplar was recorded
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub time_unix_nano: u64,
    /// (Optional) Span ID of the exemplar trace.
    /// span_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "4")]
    // With the `with-serde` feature, the raw bytes are (de)serialized as a hex
    // string via the crate's custom serializers rather than as a byte array.
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_to_hex_string",
            deserialize_with = "crate::proto::serializers::deserialize_from_hex_string"
        )
    )]
    pub span_id: ::prost::alloc::vec::Vec<u8>,
    /// (Optional) Trace ID of the exemplar trace.
    /// trace_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "5")]
    // Same hex-string serde representation as `span_id` above.
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_to_hex_string",
            deserialize_with = "crate::proto::serializers::deserialize_from_hex_string"
        )
    )]
    pub trace_id: ::prost::alloc::vec::Vec<u8>,
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    #[prost(oneof = "exemplar::Value", tags = "3, 6")]
    pub value: ::core::option::Option<exemplar::Value>,
}
/// Nested message and enum types in `Exemplar`.
pub mod exemplar {
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    ///
    /// The prost tags (3 and 6) are the field numbers of the oneof members in
    /// the upstream .proto definition.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(double, tag = "3")]
        AsDouble(f64),
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// AggregationTemporality defines how a metric aggregator reports aggregated
/// values. It describes how those values relate to the time interval over
/// which they are aggregated.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
// repr(i32): the discriminants are the wire values of the protobuf enum and
// must not be changed.
#[repr(i32)]
pub enum AggregationTemporality {
    /// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
    Unspecified = 0,
    /// DELTA is an AggregationTemporality for a metric aggregator which reports
    /// changes since last report time. Successive metrics contain aggregation of
    /// values from continuous and non-overlapping intervals.
    ///
    /// The values for a DELTA metric are based only on the time interval
    /// associated with one measurement cycle. There is no dependency on
    /// previous measurements like is the case for CUMULATIVE metrics.
    ///
    /// For example, consider a system measuring the number of requests that
    /// it receives and reports the sum of these requests every second as a
    /// DELTA metric:
    ///
    /// 1. The system starts receiving at time=t_0.
    /// 2. A request is received, the system measures 1 request.
    /// 3. A request is received, the system measures 1 request.
    /// 4. A request is received, the system measures 1 request.
    /// 5. The 1 second collection cycle ends. A metric is exported for the
    /// number of requests received over the interval of time t_0 to
    /// t_0+1 with a value of 3.
    /// 6. A request is received, the system measures 1 request.
    /// 7. A request is received, the system measures 1 request.
    /// 8. The 1 second collection cycle ends. A metric is exported for the
    /// number of requests received over the interval of time t_0+1 to
    /// t_0+2 with a value of 2.
    Delta = 1,
    /// CUMULATIVE is an AggregationTemporality for a metric aggregator which
    /// reports changes since a fixed start time. This means that current values
    /// of a CUMULATIVE metric depend on all previous measurements since the
    /// start time. Because of this, the sender is required to retain this state
    /// in some form. If this state is lost or invalidated, the CUMULATIVE metric
    /// values MUST be reset and a new fixed start time following the last
    /// reported measurement time sent MUST be used.
    ///
    /// For example, consider a system measuring the number of requests that
    /// it receives and reports the sum of these requests every second as a
    /// CUMULATIVE metric:
    ///
    /// 1. The system starts receiving at time=t_0.
    /// 2. A request is received, the system measures 1 request.
    /// 3. A request is received, the system measures 1 request.
    /// 4. A request is received, the system measures 1 request.
    /// 5. The 1 second collection cycle ends. A metric is exported for the
    /// number of requests received over the interval of time t_0 to
    /// t_0+1 with a value of 3.
    /// 6. A request is received, the system measures 1 request.
    /// 7. A request is received, the system measures 1 request.
    /// 8. The 1 second collection cycle ends. A metric is exported for the
    /// number of requests received over the interval of time t_0 to
    /// t_0+2 with a value of 5.
    /// 9. The system experiences a fault and loses state.
    /// 10. The system recovers and resumes receiving at time=t_1.
    /// 11. A request is received, the system measures 1 request.
    /// 12. The 1 second collection cycle ends. A metric is exported for the
    /// number of requests received over the interval of time t_1 to
    /// t_0+1 with a value of 1.
    ///
    /// Note: Even though, when reporting changes since last report time, using
    /// CUMULATIVE is valid, it is not recommended. This may cause problems for
    /// systems that do not use start_time to determine when the aggregation
    /// value was reset (e.g. Prometheus).
    Cumulative = 2,
}
827impl AggregationTemporality {
828 /// String value of the enum field names used in the ProtoBuf definition.
829 ///
830 /// The values are not transformed in any way and thus are considered stable
831 /// (if the ProtoBuf definition does not change) and safe for programmatic use.
832 pub fn as_str_name(&self) -> &'static str {
833 match self {
834 Self::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED",
835 Self::Delta => "AGGREGATION_TEMPORALITY_DELTA",
836 Self::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE",
837 }
838 }
839 /// Creates an enum from field names used in the ProtoBuf definition.
840 pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
841 match value {
842 "AGGREGATION_TEMPORALITY_UNSPECIFIED" => Some(Self::Unspecified),
843 "AGGREGATION_TEMPORALITY_DELTA" => Some(Self::Delta),
844 "AGGREGATION_TEMPORALITY_CUMULATIVE" => Some(Self::Cumulative),
845 _ => None,
846 }
847 }
848}
/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
/// bit-field representing 32 distinct boolean flags. Each flag defined in this
/// enum is a bit-mask. To test the presence of a single flag in the flags of
/// a data point, for example, use an expression like:
///
/// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
///
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
// repr(i32): discriminants are protobuf wire values (bit masks) and must not change.
#[repr(i32)]
pub enum DataPointFlags {
    /// The zero value for the enum. Should not be used for comparisons.
    /// Instead use bitwise "and" with the appropriate mask as shown above.
    DoNotUse = 0,
    /// This DataPoint is valid but has no recorded value. This value
    /// SHOULD be used to reflect explicitly missing data in a series, as
    /// for an equivalent to the Prometheus "staleness marker".
    NoRecordedValueMask = 1,
}
870impl DataPointFlags {
871 /// String value of the enum field names used in the ProtoBuf definition.
872 ///
873 /// The values are not transformed in any way and thus are considered stable
874 /// (if the ProtoBuf definition does not change) and safe for programmatic use.
875 pub fn as_str_name(&self) -> &'static str {
876 match self {
877 Self::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE",
878 Self::NoRecordedValueMask => "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
879 }
880 }
881 /// Creates an enum from field names used in the ProtoBuf definition.
882 pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
883 match value {
884 "DATA_POINT_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse),
885 "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK" => Some(Self::NoRecordedValueMask),
886 _ => None,
887 }
888 }
889}