
Commit 73190ae

Add integration test for exponential histogram
1 parent b6a5e5a commit 73190ae

1 file changed: 256 additions, 0 deletions

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from platform import system
from unittest import TestCase

from pytest import mark

from opentelemetry.sdk.metrics import Histogram, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)
from opentelemetry.sdk.metrics.view import (
    ExponentialBucketHistogramAggregation,
)


class TestExponentialBucketHistogramAggregation(TestCase):

    test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751]

    @mark.skipif(
        system() == "Windows",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_delta_temporality(self):

        aggregation = ExponentialBucketHistogramAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Histogram: aggregation},
            preferred_temporality={Histogram: AggregationTemporality.DELTA},
        )

        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        histogram = meter.create_histogram("histogram")

        results = []

        # Before anything is recorded, the reader should return no data.
        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        # Record one value per collection so that, with delta temporality,
        # every data point reflects exactly one measurement.
        for test_value in self.test_values:
            histogram.record(test_value)
            results.append(reader.get_metrics_data())

        metric_data = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
        )

        previous_time_unix_nano = metric_data.time_unix_nano

        """
        self.assertEqual(
            metric_data.bucket_counts,
            (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
        )
        """

        self.assertLess(
            metric_data.start_time_unix_nano,
            previous_time_unix_nano,
        )
        self.assertEqual(metric_data.min, self.test_values[0])
        self.assertEqual(metric_data.max, self.test_values[0])
        self.assertEqual(metric_data.sum, self.test_values[0])

        for index, metrics_data in enumerate(results[1:]):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            # Each delta data point starts where the previous one ended.
            self.assertEqual(
                previous_time_unix_nano, metric_data.start_time_unix_nano
            )
            previous_time_unix_nano = metric_data.time_unix_nano
            """
            self.assertEqual(
                metric_data.bucket_counts,
                tuple(
                    [
                        1 if internal_index == index + 2 else 0
                        for internal_index in range(16)
                    ]
                ),
            )
            """
            self.assertLess(
                metric_data.start_time_unix_nano, metric_data.time_unix_nano
            )
            self.assertEqual(metric_data.min, self.test_values[index + 1])
            self.assertEqual(metric_data.max, self.test_values[index + 1])
            self.assertEqual(metric_data.sum, self.test_values[index + 1])

        results = []

        # With no new measurements, collections should again return no data.
        for _ in range(10):
            results.append(reader.get_metrics_data())

        provider.shutdown()

        for metrics_data in results:
            self.assertIsNone(metrics_data)

    @mark.skipif(
        system() == "Windows",
        reason=(
            "Tests fail because Windows time_ns resolution is too low so "
            "two different time measurements may end up having the exact "
            "same value."
        ),
    )
    def test_synchronous_cumulative_temporality(self):

        aggregation = ExponentialBucketHistogramAggregation()

        reader = InMemoryMetricReader(
            preferred_aggregation={Histogram: aggregation},
            preferred_temporality={
                Histogram: AggregationTemporality.CUMULATIVE
            },
        )

        provider = MeterProvider(metric_readers=[reader])
        meter = provider.get_meter("name", "version")

        histogram = meter.create_histogram("histogram")

        results = []

        # Before anything is recorded, the reader should return no data.
        for _ in range(10):
            results.append(reader.get_metrics_data())

        for metrics_data in results:
            self.assertIsNone(metrics_data)

        results = []

        for test_value in self.test_values:
            histogram.record(test_value)
            results.append(reader.get_metrics_data())

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        # With cumulative temporality every data point keeps the same start
        # time and accumulates all measurements recorded so far.
        for index, metrics_data in enumerate(results):
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            """
            self.assertEqual(
                metric_data.bucket_counts,
                tuple(
                    [
                        (
                            0
                            if internal_index < 1 or internal_index > index + 1
                            else 1
                        )
                        for internal_index in range(16)
                    ]
                ),
            )
            """
            self.assertEqual(metric_data.min, self.test_values[0])
            self.assertEqual(metric_data.max, self.test_values[index])
            self.assertEqual(
                metric_data.sum, sum(self.test_values[: index + 1])
            )

        results = []

        # Further collections without new measurements should keep returning
        # the same cumulative totals.
        for _ in range(10):
            results.append(reader.get_metrics_data())

        provider.shutdown()

        start_time_unix_nano = (
            results[0]
            .resource_metrics[0]
            .scope_metrics[0]
            .metrics[0]
            .data.data_points[0]
            .start_time_unix_nano
        )

        for metrics_data in results:
            metric_data = (
                metrics_data.resource_metrics[0]
                .scope_metrics[0]
                .metrics[0]
                .data.data_points[0]
            )

            self.assertEqual(
                start_time_unix_nano, metric_data.start_time_unix_nano
            )
            """
            self.assertEqual(
                metric_data.bucket_counts,
                (0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0),
            )
            """
            self.assertEqual(metric_data.min, self.test_values[0])
            self.assertEqual(metric_data.max, self.test_values[-1])
            self.assertEqual(metric_data.sum, sum(self.test_values))
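
Note on the commented-out assertions: they describe the fixed 16-slot bucket_counts layout of the explicit-bucket histogram, while an exponential bucket histogram data point reports a scale, a zero count, and offset-based positive and negative buckets instead. The following is a minimal, hypothetical sketch (not part of this commit) of how an assertion against the exponential data point could look; it assumes the SDK's ExponentialHistogramDataPoint exposes count, sum, zero_count and positive.bucket_counts, which may differ in practice.

# Hypothetical sketch, not part of the committed test file. Field names on
# the exponential data point (count, sum, zero_count, positive.bucket_counts)
# are assumed from the SDK's export types.
from opentelemetry.sdk.metrics import Histogram, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)
from opentelemetry.sdk.metrics.view import (
    ExponentialBucketHistogramAggregation,
)

reader = InMemoryMetricReader(
    preferred_aggregation={
        Histogram: ExponentialBucketHistogramAggregation()
    },
    preferred_temporality={Histogram: AggregationTemporality.DELTA},
)
provider = MeterProvider(metric_readers=[reader])
histogram = provider.get_meter("sketch").create_histogram("histogram")

histogram.record(6)

point = (
    reader.get_metrics_data()
    .resource_metrics[0]
    .scope_metrics[0]
    .metrics[0]
    .data.data_points[0]
)

# A single recorded value should land in exactly one positive bucket
# (or the zero bucket), with count and sum reflecting that measurement.
assert point.count == 1
assert point.sum == 6
assert sum(point.positive.bucket_counts) + point.zero_count == 1

provider.shutdown()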
