summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLeaveMyYard <33721692+LeaveMyYard@users.noreply.github.com>2023-07-11 15:46:49 +0300
committerLeaveMyYard <33721692+LeaveMyYard@users.noreply.github.com>2023-07-11 15:46:49 +0300
commitc137f76e2a6a3572d4cdcaa0fa12909ac858923d (patch)
tree1e90870b77e186ac46b35b40a4bd583e7d8aa3ad
parentcd74b96d6eb2adf8bbd0b1365e37ec026fbb3170 (diff)
parentd120a07c71cfdb2d24de6f912f4256273b4bf364 (diff)
Merge branch 'main' into hpa-v1-fallback
-rw-r--r--docker/README.md12
-rw-r--r--docker/aws.Dockerfile28
-rw-r--r--pyproject.toml2
-rw-r--r--robusta_krr/core/integrations/kubernetes.py34
-rw-r--r--robusta_krr/core/integrations/prometheus/loader.py14
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics/base_filtered_metric.py1
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics/base_metric.py16
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics/cpu_metric.py3
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics/memory_metric.py12
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics_service/base_metric_service.py5
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics_service/prometheus_metrics_service.py12
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics_service/thanos_metrics_service.py8
-rw-r--r--robusta_krr/core/integrations/prometheus/metrics_service/victoria_metrics_service.py8
-rw-r--r--robusta_krr/core/integrations/rollout.py167
-rw-r--r--robusta_krr/core/runner.py7
-rw-r--r--robusta_krr/formatters/table.py2
-rw-r--r--robusta_krr/main.py1
-rw-r--r--robusta_krr/utils/configurable.py6
-rw-r--r--robusta_krr/utils/service_discovery.py7
-rw-r--r--tests/conftest.py2
20 files changed, 297 insertions, 50 deletions
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..a1bf1da
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,12 @@
+# Dockerfiles for specific clouds
+
+This directory will include Dockerfiles for various cloud providers.
+
+## AWS
+
+To use the `krr` container on AWS, the image needs `awscli` installed in it.
+The `aws.Dockerfile` is a modified `krr` Dockerfile which additionally includes:
+ - installation of curl & unzip
+ - installation of awscli
+
+
diff --git a/docker/aws.Dockerfile b/docker/aws.Dockerfile
new file mode 100644
index 0000000..13d79b8
--- /dev/null
+++ b/docker/aws.Dockerfile
@@ -0,0 +1,28 @@
+# Use the official Python 3.9 slim image as the base image
+FROM python:3.9-slim as builder
+
+# Set the working directory
+WORKDIR /app
+
+# Add arm64 package architecture support
+RUN apt-get update && \
+ dpkg --add-architecture arm64
+
+COPY ./requirements.txt requirements.txt
+
+# Install the project dependencies
+RUN pip install -r requirements.txt
+
+# Install curl and unzip for awscli
+RUN apt-get -y update; apt-get -y install curl; apt-get -y install unzip
+
+# Download awscli and unzip it
+RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
+ unzip awscliv2.zip && \
+ ./aws/install
+
+# Copy the rest of the application code
+COPY . .
+
+# Run the application: 'python krr.py simple'
+ENTRYPOINT ["python", "krr.py", "simple"]
diff --git a/pyproject.toml b/pyproject.toml
index 8d269fb..d64551e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,7 +17,7 @@ multi_line_output = 3
include_trailing_comma = true
[tool.mypy]
-plugins = "numpy.typing.mypy_plugin"
+plugins = "numpy.typing.mypy_plugin,pydantic.mypy"
[tool.poetry.scripts]
krr = "robusta_krr.main:run"
diff --git a/robusta_krr/core/integrations/kubernetes.py b/robusta_krr/core/integrations/kubernetes.py
index 1ffd3f4..12844ae 100644
--- a/robusta_krr/core/integrations/kubernetes.py
+++ b/robusta_krr/core/integrations/kubernetes.py
@@ -1,21 +1,21 @@
import asyncio
-from concurrent.futures import ThreadPoolExecutor
import itertools
+from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Union
from kubernetes import client, config # type: ignore
-from kubernetes.client import ApiException # type: ignore
+from kubernetes.client import ApiException
from kubernetes.client.models import (
V1Container,
V1DaemonSet,
V1DaemonSetList,
V1Deployment,
V1DeploymentList,
+ V1Job,
V1JobList,
V1LabelSelector,
- V1PodList,
V1Pod,
- V1Job,
+ V1PodList,
V1StatefulSet,
V1StatefulSetList,
V1HorizontalPodAutoscalerList,
@@ -23,11 +23,13 @@ from kubernetes.client.models import (
V2HorizontalPodAutoscalerList,
)
-from robusta_krr.core.models.objects import K8sObjectData, PodData, HPAData
+from robusta_krr.core.models.objects import HPAData, K8sObjectData, PodData
from robusta_krr.core.models.result import ResourceAllocations
from robusta_krr.utils.configurable import Configurable
+from .rollout import RolloutAppsV1Api
+
AnyKubernetesAPIObject = Union[V1Deployment, V1DaemonSet, V1StatefulSet, V1Pod, V1Job]
HPAKey = tuple[str, str, str]
@@ -45,6 +47,7 @@ class ClusterLoader(Configurable):
else None
)
self.apps = client.AppsV1Api(api_client=self.api_client)
+ self.rollout = RolloutAppsV1Api(api_client=self.api_client)
self.batch = client.BatchV1Api(api_client=self.api_client)
self.core = client.CoreV1Api(api_client=self.api_client)
self.autoscaling_v1 = client.AutoscalingV1Api(api_client=self.api_client)
@@ -64,10 +67,12 @@ class ClusterLoader(Configurable):
self.__hpa_list = await self.__list_hpa()
objects_tuple = await asyncio.gather(
self._list_deployments(),
+ self._list_rollouts(),
self._list_all_statefulsets(),
self._list_all_daemon_set(),
self._list_all_jobs(),
)
+
except Exception as e:
self.error(f"Error trying to list pods in cluster {self.cluster}: {e}")
self.debug_exception()
@@ -150,6 +155,25 @@ class ClusterLoader(Configurable):
]
)
+ async def _list_rollouts(self) -> list[K8sObjectData]:
+ try:
+ ret: V1DeploymentList = await asyncio.to_thread(self.rollout.list_rollout_for_all_namespaces, watch=False)
+ except ApiException as e:
+ if e.status == 404:
+ self.debug(f"Rollout API not available in {self.cluster}")
+ return []
+ raise
+
+ self.debug(f"Found {len(ret.items)} rollouts in {self.cluster}")
+
+ return await asyncio.gather(
+ *[
+ self.__build_obj(item, container)
+ for item in ret.items
+ for container in item.spec.template.spec.containers
+ ]
+ )
+
async def _list_all_statefulsets(self) -> list[K8sObjectData]:
self.debug(f"Listing statefulsets in {self.cluster}")
loop = asyncio.get_running_loop()
diff --git a/robusta_krr/core/integrations/prometheus/loader.py b/robusta_krr/core/integrations/prometheus/loader.py
index 143fb63..590bc83 100644
--- a/robusta_krr/core/integrations/prometheus/loader.py
+++ b/robusta_krr/core/integrations/prometheus/loader.py
@@ -1,8 +1,6 @@
-import asyncio
import datetime
-from typing import Optional, no_type_check
-
from concurrent.futures import ThreadPoolExecutor
+from typing import Optional
from kubernetes import config as k8s_config
from kubernetes.client.api_client import ApiClient
@@ -49,10 +47,12 @@ class MetricsLoader(Configurable):
if cluster is not None
else None
)
- self.loader = self.get_metrics_service(config, api_client=self.api_client, cluster=cluster)
- if not self.loader:
+ loader = self.get_metrics_service(config, api_client=self.api_client, cluster=cluster)
+ if loader is None:
raise PrometheusNotFound("No Prometheus or metrics service found")
+ self.loader = loader
+
self.info(f"{self.loader.name()} connected successfully for {cluster or 'default'} cluster")
def get_metrics_service(
@@ -68,9 +68,11 @@ class MetricsLoader(Configurable):
self.echo(f"{service_name} found")
loader.validate_cluster_name()
return loader
- except MetricsNotFound as e:
+ except MetricsNotFound:
self.debug(f"{service_name} not found")
+ return None
+
async def gather_data(
self,
object: K8sObjectData,
diff --git a/robusta_krr/core/integrations/prometheus/metrics/base_filtered_metric.py b/robusta_krr/core/integrations/prometheus/metrics/base_filtered_metric.py
index c7ba3e7..5927aa4 100644
--- a/robusta_krr/core/integrations/prometheus/metrics/base_filtered_metric.py
+++ b/robusta_krr/core/integrations/prometheus/metrics/base_filtered_metric.py
@@ -1,4 +1,5 @@
from typing import Any, Optional
+
from robusta_krr.core.abstract.strategies import Metric
from .base_metric import BaseMetricLoader, QueryType
diff --git a/robusta_krr/core/integrations/prometheus/metrics/base_metric.py b/robusta_krr/core/integrations/prometheus/metrics/base_metric.py
index 8bf263c..46150cb 100644
--- a/robusta_krr/core/integrations/prometheus/metrics/base_metric.py
+++ b/robusta_krr/core/integrations/prometheus/metrics/base_metric.py
@@ -2,11 +2,13 @@ from __future__ import annotations
import abc
import asyncio
-from concurrent.futures import ThreadPoolExecutor
import datetime
-from typing import TYPE_CHECKING, Callable, Optional, TypeVar
import enum
+from concurrent.futures import ThreadPoolExecutor
+from typing import TYPE_CHECKING, Callable, Optional, TypeVar
+
import numpy as np
+
from robusta_krr.core.abstract.strategies import Metric, ResourceHistoryData
from robusta_krr.core.models.config import Config
from robusta_krr.core.models.objects import K8sObjectData
@@ -15,7 +17,8 @@ from robusta_krr.utils.configurable import Configurable
if TYPE_CHECKING:
from .. import CustomPrometheusConnect
- MetricsDictionary = dict[str, type[BaseMetricLoader]]
+
+MetricsDictionary = dict[str, type["BaseMetricLoader"]]
class QueryType(str, enum.Enum):
@@ -72,6 +75,9 @@ class BaseMetricLoader(Configurable, abc.ABC):
pass
+ def get_query_type(self) -> QueryType:
+ return QueryType.QueryRange
+
def get_graph_query(self, object: K8sObjectData, resolution: Optional[str]) -> str:
"""
This method should be implemented by all subclasses to provide a query string in the metadata to produce relevant graphs.
@@ -158,7 +164,7 @@ class BaseMetricLoader(Configurable, abc.ABC):
step=self._step_to_string(step),
)
result = await self.query_prometheus(metric=metric, query_type=query_type)
- # adding the query in the results for a graph
+ # adding the query in the results for a graph
metric.query = self.get_graph_query(object, resolution)
if result == []:
@@ -173,7 +179,7 @@ class BaseMetricLoader(Configurable, abc.ABC):
)
@staticmethod
- def get_by_resource(resource: str, strategy: Optional[str]) -> type[BaseMetricLoader]:
+ def get_by_resource(resource: str, strategy: str) -> type[BaseMetricLoader]:
"""
Fetches the metric loader corresponding to the specified resource.
diff --git a/robusta_krr/core/integrations/prometheus/metrics/cpu_metric.py b/robusta_krr/core/integrations/prometheus/metrics/cpu_metric.py
index bf27622..a604652 100644
--- a/robusta_krr/core/integrations/prometheus/metrics/cpu_metric.py
+++ b/robusta_krr/core/integrations/prometheus/metrics/cpu_metric.py
@@ -1,9 +1,10 @@
from typing import Optional
+
from robusta_krr.core.models.allocations import ResourceType
from robusta_krr.core.models.objects import K8sObjectData
from .base_filtered_metric import BaseFilteredMetricLoader
-from .base_metric import bind_metric, QueryType
+from .base_metric import QueryType, bind_metric
@bind_metric(ResourceType.CPU)
diff --git a/robusta_krr/core/integrations/prometheus/metrics/memory_metric.py b/robusta_krr/core/integrations/prometheus/metrics/memory_metric.py
index a8a4521..fa4fd87 100644
--- a/robusta_krr/core/integrations/prometheus/metrics/memory_metric.py
+++ b/robusta_krr/core/integrations/prometheus/metrics/memory_metric.py
@@ -1,9 +1,10 @@
from typing import Optional
+
from robusta_krr.core.models.allocations import ResourceType
from robusta_krr.core.models.objects import K8sObjectData
from .base_filtered_metric import BaseFilteredMetricLoader
-from .base_metric import bind_metric, QueryType, override_metric
+from .base_metric import QueryType, bind_metric, override_metric
@bind_metric(ResourceType.Memory)
@@ -23,9 +24,10 @@ class MemoryMetricLoader(BaseFilteredMetricLoader):
def get_query_type(self) -> QueryType:
return QueryType.QueryRange
+
# This is a temporary solutions, metric loaders will be moved to strategy in the future
@override_metric("simple", ResourceType.Memory)
-class MemoryMetricLoader(MemoryMetricLoader):
+class SimpleMemoryMetricLoader(MemoryMetricLoader):
"""
A class that overrides the memory metric on the simple strategy.
"""
@@ -35,17 +37,17 @@ class MemoryMetricLoader(MemoryMetricLoader):
cluster_label = self.get_prometheus_cluster_label()
resolution_formatted = f"[{resolution}]" if resolution else ""
return (
- f"max(max_over_time(container_memory_working_set_bytes{{"
+ f"max_over_time(container_memory_working_set_bytes{{"
f'namespace="{object.namespace}", '
f'pod=~"{pods_selector}", '
f'container="{object.container}"'
f"{cluster_label}}}"
f"{resolution_formatted}"
- f")) by (container, pod, job, id)"
+ f")"
)
def get_query_type(self) -> QueryType:
return QueryType.Query
- def get_graph_query(self, object: K8sObjectData, resolution: Optional[str]) -> str:
+ def get_graph_query(self, object: K8sObjectData, resolution: Optional[str]) -> str:
return super().get_query(object, resolution)
diff --git a/robusta_krr/core/integrations/prometheus/metrics_service/base_metric_service.py b/robusta_krr/core/integrations/prometheus/metrics_service/base_metric_service.py
index 4337e5e..e2654e8 100644
--- a/robusta_krr/core/integrations/prometheus/metrics_service/base_metric_service.py
+++ b/robusta_krr/core/integrations/prometheus/metrics_service/base_metric_service.py
@@ -1,6 +1,6 @@
import abc
-from concurrent.futures import ThreadPoolExecutor
import datetime
+from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional
from kubernetes.client.api_client import ApiClient
@@ -42,7 +42,7 @@ class MetricsService(Configurable, abc.ABC):
return classname.replace("MetricsService", "") if classname != MetricsService.__name__ else classname
@abc.abstractmethod
- async def get_cluster_names(self) -> Optional[List[str]]:
+ def get_cluster_names(self) -> Optional[List[str]]:
...
@abc.abstractmethod
@@ -51,7 +51,6 @@ class MetricsService(Configurable, abc.ABC):
object: K8sObjectData,
resource: ResourceType,
period: datetime.timedelta,
- *,
step: datetime.timedelta = datetime.timedelta(minutes=30),
) -> ResourceHistoryData:
...
diff --git a/robusta_krr/core/integrations/prometheus/metrics_service/prometheus_metrics_service.py b/robusta_krr/core/integrations/prometheus/metrics_service/prometheus_metrics_service.py
index c4c280b..49daf87 100644
--- a/robusta_krr/core/integrations/prometheus/metrics_service/prometheus_metrics_service.py
+++ b/robusta_krr/core/integrations/prometheus/metrics_service/prometheus_metrics_service.py
@@ -1,7 +1,7 @@
import asyncio
-from concurrent.futures import ThreadPoolExecutor
import datetime
from typing import List, Optional, Type
+from concurrent.futures import ThreadPoolExecutor
from kubernetes.client import ApiClient
from prometheus_api_client import PrometheusApiClientException
@@ -11,14 +11,14 @@ from robusta_krr.core.abstract.strategies import ResourceHistoryData
from robusta_krr.core.models.config import Config
from robusta_krr.core.models.objects import K8sObjectData, PodData
from robusta_krr.core.models.result import ResourceType
-from robusta_krr.utils.service_discovery import ServiceDiscovery
+from robusta_krr.utils.service_discovery import MetricsServiceDiscovery
from ..metrics import BaseMetricLoader
-from ..prometheus_client import CustomPrometheusConnect
+from ..prometheus_client import ClusterNotSpecifiedException, CustomPrometheusConnect
from .base_metric_service import MetricsNotFound, MetricsService
-class PrometheusDiscovery(ServiceDiscovery):
+class PrometheusDiscovery(MetricsServiceDiscovery):
def find_metrics_url(self, *, api_client: Optional[ApiClient] = None) -> Optional[str]:
"""
Finds the Prometheus URL using selectors.
@@ -60,7 +60,7 @@ class PrometheusMetricsService(MetricsService):
*,
cluster: Optional[str] = None,
api_client: Optional[ApiClient] = None,
- service_discovery: Type[ServiceDiscovery] = PrometheusDiscovery,
+ service_discovery: Type[MetricsServiceDiscovery] = PrometheusDiscovery,
executor: Optional[ThreadPoolExecutor] = None,
) -> None:
super().__init__(config=config, api_client=api_client, cluster=cluster, executor=executor)
@@ -87,7 +87,7 @@ class PrometheusMetricsService(MetricsService):
if self.auth_header:
headers = {"Authorization": self.auth_header}
- elif not self.config.inside_cluster:
+ elif not self.config.inside_cluster and self.api_client is not None:
self.api_client.update_params_for_auth(headers, {}, ["BearerToken"])
self.prometheus = CustomPrometheusConnect(url=self.url, disable_ssl=not self.ssl_enabled, headers=headers)
diff --git a/robusta_krr/core/integrations/prometheus/metrics_service/thanos_metrics_service.py b/robusta_krr/core/integrations/prometheus/metrics_service/thanos_metrics_service.py
index 42066ae..f1de8b2 100644
--- a/robusta_krr/core/integrations/prometheus/metrics_service/thanos_metrics_service.py
+++ b/robusta_krr/core/integrations/prometheus/metrics_service/thanos_metrics_service.py
@@ -1,15 +1,15 @@
+from concurrent.futures import ThreadPoolExecutor
from typing import Optional
from kubernetes.client import ApiClient
-from requests.exceptions import ConnectionError, HTTPError
-from concurrent.futures import ThreadPoolExecutor
+
from robusta_krr.core.models.config import Config
-from robusta_krr.utils.service_discovery import ServiceDiscovery
+from robusta_krr.utils.service_discovery import MetricsServiceDiscovery
from .prometheus_metrics_service import MetricsNotFound, PrometheusMetricsService
-class ThanosMetricsDiscovery(ServiceDiscovery):
+class ThanosMetricsDiscovery(MetricsServiceDiscovery):
def find_metrics_url(self, *, api_client: Optional[ApiClient] = None) -> Optional[str]:
"""
Finds the Thanos URL using selectors.
diff --git a/robusta_krr/core/integrations/prometheus/metrics_service/victoria_metrics_service.py b/robusta_krr/core/integrations/prometheus/metrics_service/victoria_metrics_service.py
index 9c15cab..fd9f8a9 100644
--- a/robusta_krr/core/integrations/prometheus/metrics_service/victoria_metrics_service.py
+++ b/robusta_krr/core/integrations/prometheus/metrics_service/victoria_metrics_service.py
@@ -1,15 +1,15 @@
-from typing import Optional
from concurrent.futures import ThreadPoolExecutor
+from typing import Optional
+
from kubernetes.client import ApiClient
-from requests.exceptions import ConnectionError, HTTPError
from robusta_krr.core.models.config import Config
-from robusta_krr.utils.service_discovery import ServiceDiscovery
+from robusta_krr.utils.service_discovery import MetricsServiceDiscovery
from .prometheus_metrics_service import MetricsNotFound, PrometheusMetricsService
-class VictoriaMetricsDiscovery(ServiceDiscovery):
+class VictoriaMetricsDiscovery(MetricsServiceDiscovery):
def find_metrics_url(self, *, api_client: Optional[ApiClient] = None) -> Optional[str]:
"""
Finds the Victoria Metrics URL using selectors.
diff --git a/robusta_krr/core/integrations/rollout.py b/robusta_krr/core/integrations/rollout.py
new file mode 100644
index 0000000..3ac5513
--- /dev/null
+++ b/robusta_krr/core/integrations/rollout.py
@@ -0,0 +1,167 @@
+from kubernetes import client
+import six
+
+from kubernetes.client.exceptions import ( # noqa: F401
+ ApiTypeError,
+
+)
+
+
+class RolloutAppsV1Api(client.AppsV1Api):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def list_rollout_for_all_namespaces(self, **kwargs): # noqa: E501
+ """list_rollout_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_rollout_for_all_namespaces(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: V1RolloutList
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+ kwargs['_return_http_data_only'] = True
+ return self.list_rollout_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
+
+ def list_rollout_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
+ """list_rollout_for_all_namespaces # noqa: E501
+
+ list or watch objects of kind Deployment # noqa: E501
+ This method makes a synchronous HTTP request by default. To make an
+ asynchronous HTTP request, please pass async_req=True
+ >>> thread = api.list_deployment_for_all_namespaces_with_http_info(async_req=True)
+ >>> result = thread.get()
+
+ :param async_req bool: execute request asynchronously
+ :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.
+ :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
+ :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
+ :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
+ :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
+ :param str pretty: If 'true', then the output is pretty printed.
+ :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset
+ :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
+ :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
+ :param _return_http_data_only: response data without head status code
+ and headers
+ :param _preload_content: if False, the urllib3.HTTPResponse object will
+ be returned without reading/decoding response
+ data. Default is True.
+ :param _request_timeout: timeout setting for this request. If one
+ number provided, it will be total request
+ timeout. It can also be a pair (tuple) of
+ (connection, read) timeouts.
+ :return: tuple(V1DeploymentList, status_code(int), headers(HTTPHeaderDict))
+ If the method is called asynchronously,
+ returns the request thread.
+ """
+
+ local_var_params = locals()
+
+ all_params = [
+ 'allow_watch_bookmarks',
+ '_continue',
+ 'field_selector',
+ 'label_selector',
+ 'limit',
+ 'pretty',
+ 'resource_version',
+ 'resource_version_match',
+ 'timeout_seconds',
+ 'watch'
+ ]
+ all_params.extend(
+ [
+ 'async_req',
+ '_return_http_data_only',
+ '_preload_content',
+ '_request_timeout'
+ ]
+ )
+
+ for key, val in six.iteritems(local_var_params['kwargs']):
+ if key not in all_params:
+ raise ApiTypeError(
+ "Got an unexpected keyword argument '%s'"
+ " to method list_deployment_for_all_namespaces" % key
+ )
+ local_var_params[key] = val
+ del local_var_params['kwargs']
+
+ collection_formats = {}
+
+ path_params = {}
+
+ query_params = []
+ if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
+ query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
+ if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
+ query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
+ if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
+ query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
+ if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
+ query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
+ if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
+ query_params.append(('limit', local_var_params['limit'])) # noqa: E501
+ if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
+ query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
+ if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
+ query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
+ if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501
+ query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501
+ if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
+ query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
+ if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
+ query_params.append(('watch', local_var_params['watch'])) # noqa: E501
+
+ header_params = {}
+
+ form_params = []
+ local_var_files = {}
+
+ body_params = None
+ # HTTP header `Accept`
+ header_params['Accept'] = self.api_client.select_header_accept(
+ ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
+
+ # Authentication setting
+ auth_settings = ['BearerToken'] # noqa: E501
+
+ return self.api_client.call_api(
+ '/apis/argoproj.io/v1alpha1/rollouts', 'GET',
+ path_params,
+ query_params,
+ header_params,
+ body=body_params,
+ post_params=form_params,
+ files=local_var_files,
+ response_type='V1DeploymentList', # noqa: E501
+ auth_settings=auth_settings,
+ async_req=local_var_params.get('async_req'),
+ _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
+ _preload_content=local_var_params.get('_preload_content', True),
+ _request_timeout=local_var_params.get('_request_timeout'),
+ collection_formats=collection_formats)
diff --git a/robusta_krr/core/runner.py b/robusta_krr/core/runner.py
index 3cdf7b9..68ceb18 100644
--- a/robusta_krr/core/runner.py
+++ b/robusta_krr/core/runner.py
@@ -1,8 +1,7 @@
import asyncio
import math
-from typing import Optional, Union
-
from concurrent.futures import ThreadPoolExecutor
+from typing import Optional, Union
from robusta_krr.core.abstract.strategies import ResourceRecommendation, RunResult
from robusta_krr.core.integrations.kubernetes import KubernetesLoader
@@ -65,7 +64,7 @@ class Runner(Configurable):
Formatter = self.config.Formatter
formatted = result.format(Formatter)
self.echo("\n", no_prefix=True)
- self.print_result(formatted, rich=Formatter.__rich_console__)
+ self.print_result(formatted, rich=getattr(Formatter, "__rich_console__", False))
def __get_resource_minimal(self, resource: ResourceType) -> float:
if resource == ResourceType.CPU:
@@ -204,5 +203,5 @@ class Runner(Configurable):
self._process_result(result)
except ClusterNotSpecifiedException as e:
self.error(e)
- except Exception as e:
+ except Exception:
self.console.print_exception(extra_lines=1, max_frames=10)
diff --git a/robusta_krr/formatters/table.py b/robusta_krr/formatters/table.py
index 846732a..a1c6e42 100644
--- a/robusta_krr/formatters/table.py
+++ b/robusta_krr/formatters/table.py
@@ -1,5 +1,5 @@
import itertools
-from typing import Any, Optional
+from typing import Any
from rich.table import Table
diff --git a/robusta_krr/main.py b/robusta_krr/main.py
index 03b4f21..185a819 100644
--- a/robusta_krr/main.py
+++ b/robusta_krr/main.py
@@ -6,7 +6,6 @@ from datetime import datetime
from typing import List, Literal, Optional, Union
from uuid import UUID
-
import typer
import urllib3
diff --git a/robusta_krr/utils/configurable.py b/robusta_krr/utils/configurable.py
index 8954139..54e71ff 100644
--- a/robusta_krr/utils/configurable.py
+++ b/robusta_krr/utils/configurable.py
@@ -1,6 +1,6 @@
import abc
from inspect import getframeinfo, stack
-from typing import Literal
+from typing import Literal, Union
from rich.console import Console
@@ -93,9 +93,9 @@ class Configurable(abc.ABC):
self.echo(message, type="WARNING")
- def error(self, message: str = "") -> None:
+ def error(self, message: Union[str, Exception] = "") -> None:
"""
Echoes an error message to the user
"""
- self.echo(message, type="ERROR")
+ self.echo(str(message), type="ERROR")
diff --git a/robusta_krr/utils/service_discovery.py b/robusta_krr/utils/service_discovery.py
index 42890d3..c1e2763 100644
--- a/robusta_krr/utils/service_discovery.py
+++ b/robusta_krr/utils/service_discovery.py
@@ -1,3 +1,4 @@
+from abc import ABC, abstractmethod
from typing import Optional
from cachetools import TTLCache
@@ -82,3 +83,9 @@ class ServiceDiscovery(Configurable):
return ingress_url
return None
+
+
+class MetricsServiceDiscovery(ServiceDiscovery, ABC):
+ @abstractmethod
+ def find_metrics_url(self, *, api_client: Optional[ApiClient] = None) -> Optional[str]:
+ pass
diff --git a/tests/conftest.py b/tests/conftest.py
index af43124..3b89e88 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,6 +1,6 @@
import random
from datetime import datetime, timedelta
-from unittest.mock import AsyncMock, PropertyMock, patch
+from unittest.mock import AsyncMock, patch
import numpy as np
import pytest