
Commit 54a0197

Only return predict result when all metrics have predict finished (#8)
1 parent 4267cae · commit 54a0197

File tree

2 files changed: +20 −3 lines


baseline/fetcher.py

Lines changed: 8 additions & 2 deletions
@@ -169,7 +169,9 @@ def fetch_service_metrics_with_rangs(self, service_name: str, normal: bool, metr
 
         results = self.fetch_data(f"{self.base_address}/graphql", payload)['result']['results']
         if len(results) == 0:
+            logger.debug(f"No data found for {metric_name}(service: {service_name}) from {start} to {end}")
             return prev_data, 0
+        logger.debug(f"Fetch {len(results)} data points for {metric_name}(service: {service_name}) from {start} to {end}")
 
         if prev_data is None:
             df = pd.DataFrame()
@@ -213,7 +215,8 @@ def fetch_service_metrics_with_rangs(self, service_name: str, normal: bool, metr
                 df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
                 count += 1
             prev_data.df = df
-            logger.info(f"Fetched {count} data points for {metric_name}(service: {service_name}) from {min_date} to {max_date}")
+            logger.info(f"Fetched {count} data points for {metric_name}(service: {service_name}) from {min_date} to {max_date}, "
+                        f"original query time range({self.conf.server.down_sampling}): {start} to {end}")
             return prev_data, count
         elif prev_data.multiple is not None:
             df = prev_data.df
@@ -237,7 +240,8 @@ def fetch_service_metrics_with_rangs(self, service_name: str, normal: bool, metr
                 df = pd.concat([df, pd.DataFrame([row])])
                 count += 1
             prev_data.df = df
-            logger.info(f"Fetched {count} data points for {metric_name}(service: {service_name}) from {min_date} to {max_date}")
+            logger.info(f"Fetched {count} data points for {metric_name}(service: {service_name}) from {min_date} to {max_date}, "
+                        f"original query time range({self.conf.server.down_sampling}): {start} to {end}")
 
         return prev_data, count
 
@@ -258,6 +262,8 @@ def fetch_layer_services(self, layer: str) -> list[tuple[str, bool]]:
         names = []
         for service in services:
             names.append((service['label'], bool(service['normal'])))
+        if logger.isEnabledFor(logging.DEBUG):
+            logger.debug(f"Fetch {names} services from layer {layer}")
         return names
 
     def query_need_period(self) -> int:
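A note on the `logger.isEnabledFor(logging.DEBUG)` guard added in `fetch_layer_services`: f-strings are evaluated eagerly, so an unguarded `logger.debug(f"...{names}...")` would format the whole `names` list even when DEBUG is disabled. A minimal standalone sketch of the pattern (the logger name and input shape are hypothetical, not from the diff):

import logging

logger = logging.getLogger("baseline.fetcher")  # hypothetical logger name

def collect_service_names(layer: str, services: list[dict]) -> list[tuple[str, bool]]:
    # Mirror of the loop in the diff: collect (label, normal) pairs.
    names = [(service['label'], bool(service['normal'])) for service in services]
    # The guard skips building the debug message entirely when DEBUG is off,
    # so the cost of interpolating a potentially large list is only paid
    # when the output would actually be emitted.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(f"Fetch {names} services from layer {layer}")
    return names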

baseline/query.py

Lines changed: 12 additions & 1 deletion
@@ -66,12 +66,18 @@ async def queryPredictedMetrics(self, request: AlarmBaselineRequest, context):
         logger.info(
             f"receive query predict metrics query, total service with metrics count: {len(request.serviceMetricNames)}, "
             f"start time: {request.startTimeBucket}, end time: {request.endTimeBucket}, step: {request.step}")
+        # check that the requested metrics are supported
+        service_must_contains_metrics: dict[str, list[str]] = {}
+        for service_metrics in request.serviceMetricNames:
+            service_must_contains_metrics[service_metrics.serviceName] = list(set(service_metrics.metricNames).
+                                                                               intersection(self.support_metrics_names))
+
         if logger.isEnabledFor(logging.DEBUG):
             info = [{
                 'service': service_metrics.serviceName,
                 'metrics': [m for m in service_metrics.metricNames]
             } for service_metrics in request.serviceMetricNames]
-            logger.debug(f"total service with metrics: {info}")
+            logger.debug(f"total service with metrics ready to query: {info}")
 
         results: dict[str, dict[str, list[PredictMeterResult]]] = {}
         for serviceWithMetrics in request.serviceMetricNames:
@@ -93,6 +99,11 @@ async def queryPredictedMetrics(self, request: AlarmBaselineRequest, context):
             predictions = []
             for metric_name, result in metricsWithPredictValues.items():
                 predictions.append(convert_response_metrics(service, metric_name, result, request.step))
+
+            # if the predicted metrics count does not equal the requested count, skip this service
+            if len(service_must_contains_metrics[service]) != len(predictions):
+                continue
+
             serviceMetrics.append(AlarmBaselineServiceMetric(
                 serviceName=service,
                 predictions=predictions
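Restated as a self-contained sketch, the change gates the response per service: a service is returned only when every supported metric it requested has a finished prediction, never a partial set. The dict/list shapes and sample metric names below are hypothetical stand-ins for the gRPC request and prediction objects; `support_metrics_names` follows the diff.

# A minimal sketch of the all-or-nothing check, assuming plain dicts in place
# of the request/response objects used in the commit.
support_metrics_names = {"service_cpm", "service_resp_time"}  # hypothetical supported set

def gate_results(requested: dict[str, list[str]],
                 predicted: dict[str, dict[str, list[float]]]) -> dict[str, dict[str, list[float]]]:
    # Per service, keep only the requested metrics that are actually supported.
    must_contain = {
        service: set(metrics) & support_metrics_names
        for service, metrics in requested.items()
    }
    results = {}
    for service, metric_values in predicted.items():
        # Partial results are dropped: if any supported, requested metric is
        # still missing a prediction, the whole service is skipped.
        if len(must_contain.get(service, set())) != len(metric_values):
            continue
        results[service] = metric_values
    return results

# "svc-b" asked for two supported metrics but only one finished, so it is omitted.
requested = {"svc-a": ["service_cpm"], "svc-b": ["service_cpm", "service_resp_time"]}
predicted = {"svc-a": {"service_cpm": [1.0, 2.0]}, "svc-b": {"service_cpm": [3.0]}}
assert gate_results(requested, predicted) == {"svc-a": {"service_cpm": [1.0, 2.0]}}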

0 commit comments
