author     LeaveMyYard <zhukovpavel2001@gmail.com>    2024-04-30 19:23:24 +0300
committer  LeaveMyYard <zhukovpavel2001@gmail.com>    2024-04-30 19:23:24 +0300
commit     eb84c954dc831449f09c9e42d233f8ff8eba855c (patch)
tree       785ab07dc9dc73aa31653f285e45e6a2bbfc6f62
parent     d4adcf8ad60347f4db85973ed80d86c37b0e36b4 (diff)
Logging improvement
-rw-r--r--  robusta_krr/core/integrations/kubernetes/cluster_loader/__init__.py |  4
-rw-r--r--  robusta_krr/core/integrations/prometheus/metrics/memory.py          |  5
-rw-r--r--  robusta_krr/core/runner.py                                          | 19
3 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/robusta_krr/core/integrations/kubernetes/cluster_loader/__init__.py b/robusta_krr/core/integrations/kubernetes/cluster_loader/__init__.py
index b7e86d8..54047a3 100644
--- a/robusta_krr/core/integrations/kubernetes/cluster_loader/__init__.py
+++ b/robusta_krr/core/integrations/kubernetes/cluster_loader/__init__.py
@@ -208,14 +208,14 @@ class KubeAPIWorkloadLoader(BaseWorkloadLoader, IListPodsFallback):
         result = [item for request_result in await asyncio.gather(*requests) for item in request_result.items]

-        logger.debug(f"Found {len(result)} {kind} in {self.cluster}")
+        logger.debug(f"Found {len(result)} {kind}" + (f" for cluster {self.cluster}" if self.cluster else ""))
         return result

     async def _fetch_workload(self, loader: BaseKindLoader) -> list[K8sWorkload]:
         kind = loader.kind
         if not self._should_list_resource(kind):
-            logger.debug(f"Skipping {kind}s in {self.cluster}")
+            logger.debug(f"Skipping {kind}s" + (f" for cluster {self.cluster}" if self.cluster else ""))
             return

         if not self._kind_available[kind]:
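
The two hunks above apply the same pattern: append the cluster name to the log line only when `self.cluster` is set, so single-cluster runs no longer log "... in None". A minimal standalone sketch of the pattern (the `log_found` helper and the sample values are hypothetical, not part of the commit):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    def log_found(kind: str, count: int, cluster: str | None) -> None:
        # Append the cluster suffix only when a cluster name is known;
        # in single-cluster mode `cluster` is None and the suffix is dropped.
        logger.debug(f"Found {count} {kind}" + (f" for cluster {cluster}" if cluster else ""))

    log_found("Deployments", 12, "prod-eu-1")  # Found 12 Deployments for cluster prod-eu-1
    log_found("Deployments", 12, None)         # Found 12 Deployments
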
diff --git a/robusta_krr/core/integrations/prometheus/metrics/memory.py b/robusta_krr/core/integrations/prometheus/metrics/memory.py
index 9f7e30d..97fb2a6 100644
--- a/robusta_krr/core/integrations/prometheus/metrics/memory.py
+++ b/robusta_krr/core/integrations/prometheus/metrics/memory.py
@@ -70,6 +70,7 @@ class MemoryAmountLoader(PrometheusMetric):
             )
         """

+
 # TODO: Need to battle test if this one is correct.
 class MaxOOMKilledMemoryLoader(PrometheusMetric):
     """
@@ -78,7 +79,7 @@ class MaxOOMKilledMemoryLoader(PrometheusMetric):
     warning_on_no_data = False

-    def get_query(self, object: K8sObjectData, duration: str, step: str) -> str:
+    def get_query(self, object: K8sWorkload, duration: str, step: str) -> str:
         pods_selector = "|".join(pod.name for pod in object.pods)
         cluster_label = self.get_prometheus_cluster_label()
         return f"""
@@ -91,7 +92,7 @@ class MaxOOMKilledMemoryLoader(PrometheusMetric):
                         pod=~"{pods_selector}",
                         container="{object.container}"
                         {cluster_label}
-                        }}
+                    }}
                 ) by (pod, container, job)
                 * on(pod, container, job) group_left(reason)
                 max(
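
Besides tightening the `}}` alignment, this hunk fixes the annotation to `K8sWorkload` (the object the loader actually receives, with `.pods` and `.container`). For context, `pods_selector` is a PromQL regex alternation over the workload's pod names; a rough sketch of the assembly, with invented pod names and the query trimmed down to the selector part:

    # Invented sample pod names; the real ones come from object.pods.
    pod_names = ["krr-demo-7d4b9-abc12", "krr-demo-7d4b9-def34"]
    pods_selector = "|".join(pod_names)  # -> 'krr-demo-7d4b9-abc12|krr-demo-7d4b9-def34'

    # Doubled braces escape literal { } inside the f-string, as in the hunk above.
    query = f"""
    max(
        kube_pod_container_resource_limits{{
            pod=~"{pods_selector}",
            container="app"
        }}
    ) by (pod, container, job)
    """
    print(query)
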
diff --git a/robusta_krr/core/runner.py b/robusta_krr/core/runner.py
index a52f1be..787982f 100644
--- a/robusta_krr/core/runner.py
+++ b/robusta_krr/core/runner.py
@@ -184,8 +184,9 @@ class Runner:
             prometheus_loader = self.connector.get_prometheus(cluster)
         except PrometheusNotFound:
             logger.error(
-                f"Wasn't able to connect to any Prometheus service"
-                f' for cluster {cluster}' if cluster is not None else ""
+                f"Wasn't able to connect to any Prometheus service" f" for cluster {cluster}"
+                if cluster is not None
+                else ""
                 "\nTry using port-forwarding and/or setting the url manually (using the -p flag.).\n"
                 "For more information, see 'Giving the Explicit Prometheus URL' at "
                 "https://github.com/robusta-dev/krr?tab=readme-ov-file#usage"
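
One caveat with the reflowed call: implicit string-literal concatenation binds tighter than the conditional expression, so the three trailing literals attach to the `else ""` branch. With a cluster set, only the first sentence is logged; with no cluster, only the help text is. A sketch of an unambiguous way to build the same message, hoisting the conditional into its own variable (the sample `cluster` value is assumed):

    import logging

    logger = logging.getLogger(__name__)
    cluster = "prod-eu-1"  # or None in single-cluster mode

    # A named variable keeps the precedence explicit.
    cluster_part = f" for cluster {cluster}" if cluster is not None else ""
    logger.error(
        "Wasn't able to connect to any Prometheus service"
        + cluster_part
        + "\nTry using port-forwarding and/or setting the url manually (using the -p flag.).\n"
        "For more information, see 'Giving the Explicit Prometheus URL' at "
        "https://github.com/robusta-dev/krr?tab=readme-ov-file#usage"
    )
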
@@ -205,7 +206,10 @@ class Runner:
             )
             return True  # We can try to continue without history range

-        logger.debug(f"History range for {cluster}: {history_range}")
+        logger.debug(
+            f"History range{f' for cluster {cluster}' if cluster else ''}: "
+            f"({history_range[0]})-({history_range[1]})"
+        )

         enough_data = self.strategy.settings.history_range_enough(history_range)
         if not enough_data:
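
`history_range_enough` itself is not part of this diff; purely as a hypothetical illustration of what such a check over a `(start, end)` tuple could look like:

    from datetime import datetime, timedelta

    def history_range_enough(history_range: tuple[datetime, datetime], required: timedelta) -> bool:
        # Hypothetical: treat the range as sufficient when it spans at least `required`.
        start, end = history_range
        return end - start >= required

    print(history_range_enough((datetime(2024, 4, 1), datetime(2024, 4, 29)), timedelta(days=14)))  # True
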
@@ -247,7 +251,10 @@ class Runner:
     async def _collect_result(self) -> Result:
         clusters = await self.connector.list_clusters()
-        logger.info(f"Clusters available: {', '.join(clusters)}")
+        if clusters is None:
+            logger.info("Can not list clusters, single cluster mode.")
+        else:
+            logger.info(f"Clusters available: {', '.join(clusters)}")

         if clusters and len(clusters) > 1 and settings.prometheus_url:
             # this can only happen for multi-cluster querying a single centeralized prometheus
@@ -282,7 +289,9 @@ class Runner:
         # We gather all workloads from all clusters in parallel (asyncio.gather)
         # Then we chain all workloads together (itertools.chain)
         workloads = list(
-            itertools.chain(*await asyncio.gather(*[loader.list_workloads() for loader in workload_loaders.values()]))
+            itertools.chain(
+                *await asyncio.gather(*[loader.list_workloads() for loader in workload_loaders.values()])
+            )
         )

         # Then we gather all recommendations for all workloads in parallel (asyncio.gather)
         scans = await asyncio.gather(*[self._gather_object_allocations(k8s_object) for k8s_object in workloads])
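
The gather-then-chain pattern above fans out one `list_workloads()` call per loader, awaits them all concurrently, and flattens the resulting list of lists into a single workload list. A self-contained sketch with two stub loaders standing in for the real ones:

    import asyncio
    import itertools

    async def loader_a() -> list[str]:  # stand-in for loader.list_workloads()
        return ["deployment/a", "statefulset/b"]

    async def loader_b() -> list[str]:
        return ["daemonset/c"]

    async def main() -> None:
        loaders = [loader_a, loader_b]
        # Run every loader concurrently, then flatten the per-loader lists.
        nested = await asyncio.gather(*[loader() for loader in loaders])
        workloads = list(itertools.chain(*nested))
        print(workloads)  # ['deployment/a', 'statefulset/b', 'daemonset/c']

    asyncio.run(main())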