|
11 | 11 | from pydantic import BaseModel |
12 | 12 | from holmes.core.tools import StructuredToolResult, StructuredToolResultStatus |
13 | 13 | from holmes.plugins.toolsets.prometheus.data_compression import ( |
14 | | - raw_metric_to_compressed_metric, |
15 | | - summarize_metrics, |
| 14 | + simplify_prometheus_metric_object, |
| 15 | + compact_metrics, |
16 | 16 | ) |
17 | 17 | from holmes.plugins.toolsets.prometheus.model import PromResponse |
18 | 18 | from holmes.plugins.toolsets.utils import toolset_name_for_one_liner |
@@ -80,42 +80,39 @@ def __init__(self, toolset: "NewRelicToolset"): |
80 | 80 | ) |
81 | 81 | self._toolset = toolset |
82 | 82 |
|
83 | | - def compress_metrics_data(self, response: PromResponse) -> Optional[str]: |
| 83 | + def compact_metrics_data(self, response: PromResponse) -> Optional[str]: |
84 | 84 | llm_data: Optional[str] = None |
85 | 85 | try: |
86 | | - if self._toolset.config and self._toolset.compress_metrics: |
87 | | - metrics = [ |
88 | | - raw_metric_to_compressed_metric(metric, remove_labels=set()) |
89 | | - for metric in response.data.result |
90 | | - ] |
91 | | - |
92 | | - compressed_data = summarize_metrics(metrics) |
93 | | - original_size = len(json.dumps(response.to_json())) |
94 | | - compressed_size = len(json.dumps(compressed_data)) |
95 | | - compression_ratio = ( |
96 | | - (1 - compressed_size / original_size) * 100 |
97 | | - if original_size > 0 |
98 | | - else 0 |
99 | | - ) |
| 86 | + metrics = [ |
| 87 | + simplify_prometheus_metric_object(metric, remove_labels=set()) |
| 88 | + for metric in response.data.result |
| 89 | + ] |
| 90 | + |
| 91 | + compacted_data = compact_metrics(metrics) |
| 92 | + original_size = len(json.dumps(response.to_json())) |
| 93 | + compacted_size = len(json.dumps(compacted_data)) |
| 94 | + compaction_ratio = ( |
| 95 | + (1 - compacted_size / original_size) * 100 if original_size > 0 else 0 |
| 96 | + ) |
100 | 97 |
|
101 | | - if compression_ratio > self._toolset.compress_metrics_minimum_ratio: |
102 | | - # below this amount it's likely not worth mutating the response |
103 | | - llm_data = compressed_data |
104 | | - logging.info( |
105 | | - f"Compressed Newrelic metrics: {original_size:,} → {compressed_size:,} chars " |
106 | | - f"({compression_ratio:.1f}% reduction)" |
107 | | - ) |
108 | | - else: |
109 | | - logging.info( |
110 | | - f"Compressed Newrelic metrics: {original_size:,} → {compressed_size:,} chars " |
111 | | - f"({compression_ratio:.1f}% reduction). Original data will be used instead." |
112 | | - ) |
| 98 | + if compaction_ratio > self._toolset.compact_metrics_minimum_ratio: |
| 99 | + # below this amount it's likely not worth mutating the response |
| 100 | + llm_data = compacted_data |
| 101 | + logging.debug( |
| 102 | + f"Compressed Newrelic metrics: {original_size:,} → {compacted_size:,} chars " |
| 103 | + f"({compaction_ratio:.1f}% reduction)" |
| 104 | + ) |
| 105 | + else: |
| 106 | + logging.debug( |
| 107 | + f"Compressed Newrelic metrics: {original_size:,} → {compacted_size:,} chars " |
| 108 | + f"({compaction_ratio:.1f}% reduction). Original data will be used instead." |
| 109 | + ) |
113 | 110 | except Exception: |
114 | 111 | logging.warning("Failed to compress newrelic data", exc_info=True) |
115 | 112 |
|
116 | 113 | return llm_data |
117 | 114 |
|
118 | | - def to_new_relic_records( |
| 115 | + def to_prometheus_records( |
119 | 116 | self, |
120 | 117 | records: List[Dict[str, Any]], |
121 | 118 | params: Optional[Dict[str, Any]] = None, |
@@ -175,15 +172,13 @@ def _invoke( |
175 | 172 | if qtype == "metrics" or "timeseries" in query.lower(): |
176 | 173 | enriched_params = dict(params) |
177 | 174 | enriched_params["query"] = query |
178 | | - prom_data = self.to_new_relic_records(result, params=enriched_params) |
| 175 | + prom_data = self.to_prometheus_records(result, params=enriched_params) |
179 | 176 |
|
180 | 177 | return_result = prom_data.to_json() |
181 | | - if len(return_result.get("data", {}).get("results", [])): |
182 | | - return_result = result # type: ignore[assignment] |
183 | 178 | return StructuredToolResult( |
184 | 179 | status=StructuredToolResultStatus.SUCCESS, |
185 | 180 | data=json.dumps(return_result, indent=2), |
186 | | - llm_data=self.compress_metrics_data(prom_data), |
| 181 | + llm_data=self.compact_metrics_data(prom_data), |
187 | 182 | params=params, |
188 | 183 | ) |
189 | 184 |
|
@@ -246,16 +241,16 @@ class NewrelicConfig(BaseModel): |
246 | 241 | nr_api_key: Optional[str] = None |
247 | 242 | nr_account_id: Optional[str] = None |
248 | 243 | is_eu_datacenter: Optional[bool] = False |
249 | | - compress_metrics: bool = True |
250 | | - compress_metrics_minimum_ratio: int = 30 # 20 means 20% size reduction |
| 244 | + compact_metrics: bool = True |
|  245 | +    compact_metrics_minimum_ratio: int = 30  # e.g. 30 means a 30% size reduction |
251 | 246 |
|
252 | 247 |
|
253 | 248 | class NewRelicToolset(Toolset): |
254 | 249 | nr_api_key: Optional[str] = None |
255 | 250 | nr_account_id: Optional[str] = None |
256 | 251 | is_eu_datacenter: bool = False |
257 | | - compress_metrics: bool = True |
258 | | - compress_metrics_minimum_ratio: int = 30 |
| 252 | + compact_metrics: bool = True |
| 253 | + compact_metrics_minimum_ratio: int = 30 |
259 | 254 |
|
260 | 255 | def __init__(self): |
261 | 256 | super().__init__( |
@@ -286,10 +281,8 @@ def prerequisites_callable( |
286 | 281 | self.nr_account_id = nr_config.nr_account_id |
287 | 282 | self.nr_api_key = nr_config.nr_api_key |
288 | 283 | self.is_eu_datacenter = nr_config.is_eu_datacenter or False |
289 | | - self.compress_metrics = nr_config.compress_metrics |
290 | | - self.compress_metrics_minimum_ratio = ( |
291 | | - nr_config.compress_metrics_minimum_ratio |
292 | | - ) |
| 284 | + self.compact_metrics = nr_config.compact_metrics |
| 285 | + self.compact_metrics_minimum_ratio = nr_config.compact_metrics_minimum_ratio |
293 | 286 |
|
294 | 287 | if not self.nr_account_id or not self.nr_api_key: |
295 | 288 | return False, "New Relic account ID or API key is missing" |
|
0 commit comments