Commit

squish (#46)

PietroPasotti authored Sep 11, 2024
1 parent c4740fc commit aad960e
Showing 5 changed files with 377 additions and 5 deletions.
12 changes: 12 additions & 0 deletions tests/test_coordinated_workers/conftest.py
@@ -0,0 +1,12 @@
from pathlib import Path
from typing import Generator
from unittest.mock import patch

import pytest


@pytest.fixture(autouse=True)
def root_ca_cert(tmp_path: Path) -> Generator[Path, None, None]:
    # Prevent the charm's _update_tls_certificates method from trying to write to our local filesystem
with patch("src.cosl.coordinated_workers.worker.ROOT_CA_CERT", new=tmp_path / "rootcacert"):
yield tmp_path / "rootcacert"
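For reference, a minimal sketch (not part of this commit) of how a test can lean on this autouse fixture; the test name and the write are hypothetical stand-ins for what the worker code would do:

    def test_ca_cert_written_under_tmp_path(root_ca_cert: Path):
        # the autouse fixture patched ROOT_CA_CERT to point under tmp_path,
        # so any CA-cert write performed by worker code lands there
        root_ca_cert.write_text("-----BEGIN CERTIFICATE-----")
        assert root_ca_cert.exists()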
175 changes: 175 additions & 0 deletions tests/test_coordinated_workers/test_coordinator.py
@@ -0,0 +1,175 @@
import ops
import pytest
from ops import Framework
from scenario import Container, Context, Relation, State

from src.cosl.coordinated_workers.coordinator import (
ClusterRolesConfig,
Coordinator,
S3NotFoundError,
)
from src.cosl.coordinated_workers.interface import ClusterRequirerAppData


@pytest.fixture
def coordinator_state():
requires_relations = {
endpoint: Relation(endpoint=endpoint, interface=interface["interface"])
for endpoint, interface in {
"my-certificates": {"interface": "certificates"},
"my-logging": {"interface": "loki_push_api"},
"my-tracing": {"interface": "tracing"},
}.items()
}
requires_relations["my-s3"] = Relation(
"my-s3",
interface="s3",
remote_app_data={
"endpoint": "s3",
"bucket": "foo-bucket",
"access-key": "my-access-key",
"secret-key": "my-secret-key",
},
)
requires_relations["cluster_worker0"] = Relation(
"my-cluster",
remote_app_name="worker0",
remote_app_data=ClusterRequirerAppData(role="read").dump(),
)
requires_relations["cluster_worker1"] = Relation(
"my-cluster",
remote_app_name="worker1",
remote_app_data=ClusterRequirerAppData(role="write").dump(),
)
requires_relations["cluster_worker2"] = Relation(
"my-cluster",
remote_app_name="worker2",
remote_app_data=ClusterRequirerAppData(role="backend").dump(),
)

provides_relations = {
endpoint: Relation(endpoint=endpoint, interface=interface["interface"])
for endpoint, interface in {
"my-dashboards": {"interface": "grafana_dashboard"},
"my-metrics": {"interface": "prometheus_scrape"},
}.items()
}

return State(
containers=[
Container("nginx", can_connect=True),
Container("nginx-prometheus-exporter", can_connect=True),
],
relations=list(requires_relations.values()) + list(provides_relations.values()),
)


@pytest.fixture()
def coordinator_charm(request):
class MyCoordinator(ops.CharmBase):
META = {
"name": "foo-app",
"requires": {
"my-certificates": {"interface": "certificates"},
"my-cluster": {"interface": "cluster"},
"my-logging": {"interface": "loki_push_api"},
"my-tracing": {"interface": "tracing"},
"my-s3": {"interface": "s3"},
},
"provides": {
"my-dashboards": {"interface": "grafana_dashboard"},
"my-metrics": {"interface": "prometheus_scrape"},
},
"containers": {
"nginx": {"type": "oci-image"},
"nginx-prometheus-exporter": {"type": "oci-image"},
},
}

def __init__(self, framework: Framework):
super().__init__(framework)
            # Note: here it is a good idea not to use a context manager, because the Coordinator is "ops-aware"
self.coordinator = Coordinator(
charm=self,
                # Roles were taken from loki-coordinator-k8s-operator
roles_config=ClusterRolesConfig(
roles={"all", "read", "write", "backend"},
meta_roles={"all": {"all", "read", "write", "backend"}},
minimal_deployment={
"read",
"write",
"backend",
},
recommended_deployment={
"read": 3,
"write": 3,
"backend": 3,
},
),
external_url="https://foo.example.com",
worker_metrics_port=123,
endpoints={
"certificates": "my-certificates",
"cluster": "my-cluster",
"grafana-dashboards": "my-dashboards",
"logging": "my-logging",
"metrics": "my-metrics",
"tracing": "my-tracing",
"s3": "my-s3",
},
nginx_config=lambda coordinator: f"nginx configuration for {coordinator.name}",
workers_config=lambda coordinator: f"workers configuration for {coordinator.name}",
# nginx_options: Optional[NginxMappingOverrides] = None,
# is_coherent: Optional[Callable[[ClusterProvider, ClusterRolesConfig], bool]] = None,
# is_recommended: Optional[Callable[[ClusterProvider, ClusterRolesConfig], bool]] = None,
# tracing_receivers: Optional[Callable[[], Optional[Dict[str, str]]]] = None,
)

return MyCoordinator


def test_worker_roles_subset_of_minimal_deployment(
coordinator_state: State, coordinator_charm: ops.CharmBase
):
    # Test that the deployment is incoherent when the combined worker roles do not cover the minimal deployment roles

# GIVEN a coordinator_charm
ctx = Context(coordinator_charm, meta=coordinator_charm.META)

# AND a coordinator_state defining relations to worker charms with incomplete distributed roles
missing_backend_worker_relation = [
relation
for relation in coordinator_state.relations
if relation.remote_app_name != "worker2"
]

# WHEN we process any event
with ctx.manager(
"update-status",
state=coordinator_state.replace(relations=missing_backend_worker_relation),
) as mgr:
charm: coordinator_charm = mgr.charm

        # THEN the deployment is not coherent
assert not charm.coordinator.is_coherent


def test_without_s3_integration_raises_error(
coordinator_state: State, coordinator_charm: ops.CharmBase
):
# Test that a charm without an s3 integration raises S3NotFoundError

# GIVEN a coordinator charm without an s3 integration
ctx = Context(coordinator_charm, meta=coordinator_charm.META)
relations_without_s3 = [
relation for relation in coordinator_state.relations if relation.endpoint != "my-s3"
]

# WHEN we process any event
with ctx.manager(
"update-status",
state=coordinator_state.replace(relations=relations_without_s3),
) as mgr:
        # THEN accessing the _s3_config property raises an S3NotFoundError
with pytest.raises(S3NotFoundError):
mgr.charm.coordinator._s3_config
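A complementary sketch (not part of this commit): with all three worker relations left in place, the same property should report a coherent deployment, since read, write, and backend together cover the minimal deployment:

    def test_minimal_deployment_is_coherent(coordinator_state, coordinator_charm):
        ctx = Context(coordinator_charm, meta=coordinator_charm.META)
        # the unmodified coordinator_state relates one worker per required role
        with ctx.manager("update-status", state=coordinator_state) as mgr:
            assert mgr.charm.coordinator.is_coherent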
133 changes: 133 additions & 0 deletions tests/test_coordinated_workers/test_nginx.py
@@ -0,0 +1,133 @@
import logging
import tempfile

import pytest
from ops import CharmBase
from scenario import Container, Context, ExecOutput, Mount, State

from src.cosl.coordinated_workers.nginx import (
CA_CERT_PATH,
CERT_PATH,
KEY_PATH,
NGINX_CONFIG,
Nginx,
)

logger = logging.getLogger(__name__)


@pytest.fixture
def certificate_mounts():
temp_files = {}
for path in {KEY_PATH, CERT_PATH, CA_CERT_PATH}:
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_files[path] = temp_file

mounts = {}
for cert_path, temp_file in temp_files.items():
mounts[cert_path] = Mount(cert_path, temp_file.name)

# TODO: Do we need to clean up the temp files since delete=False was set?
return mounts
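One possible answer to the TODO above (a sketch, not part of this commit): turn the fixture into a yield fixture and unlink the files on teardown, since delete=False leaves them behind:

    import os

    @pytest.fixture
    def certificate_mounts():
        temp_files = {}
        for path in {KEY_PATH, CERT_PATH, CA_CERT_PATH}:
            temp_files[path] = tempfile.NamedTemporaryFile(delete=False)

        yield {path: Mount(path, tf.name) for path, tf in temp_files.items()}

        # teardown: remove the files that delete=False left on disk
        for tf in temp_files.values():
            tf.close()
            os.unlink(tf.name)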


@pytest.fixture
def nginx_context():
return Context(CharmBase, meta={"name": "foo", "containers": {"nginx": {"type": "oci-image"}}})


def test_certs_on_disk(certificate_mounts: dict, nginx_context: Context):
# GIVEN any charm with a container
ctx = nginx_context

# WHEN we process any event
with ctx.manager(
"update-status",
state=State(containers=[Container("nginx", can_connect=True, mounts=certificate_mounts)]),
) as mgr:
charm = mgr.charm
nginx = Nginx(charm, lambda: "foo_string", None)

# THEN the certs exist on disk
assert nginx.are_certificates_on_disk


def test_certs_deleted(certificate_mounts: dict, nginx_context: Context):
# Test deleting the certificates.

# GIVEN any charm with a container
ctx = nginx_context

# WHEN we process any event
with ctx.manager(
"update-status",
state=State(containers=[Container("nginx", can_connect=True, mounts=certificate_mounts)]),
) as mgr:
charm = mgr.charm
nginx = Nginx(charm, lambda: "foo_string", None)

# AND when we call delete_certificates
nginx.delete_certificates()

# THEN the certs get deleted from disk
assert not nginx.are_certificates_on_disk


def test_reload_calls_nginx_binary_successfully(nginx_context: Context):
# Test that the reload method calls the nginx binary without error.

# GIVEN any charm with a container
ctx = nginx_context

# WHEN we process any event
with ctx.manager(
"update-status",
state=State(
containers=[
Container(
"nginx",
can_connect=True,
exec_mock={("nginx", "-s", "reload"): ExecOutput(return_code=0)},
)
]
),
) as mgr:
charm = mgr.charm
nginx = Nginx(charm, lambda: "foo_string", None)

# AND when we call reload
        # THEN the nginx binary is used rather than a container restart
assert nginx.reload() is None


def test_has_config_changed(nginx_context: Context):
# Test changing the nginx config and catching the change.

    # GIVEN any charm with a container and an nginx config file
test_config = tempfile.NamedTemporaryFile(delete=False, mode="w+")
ctx = nginx_context
# AND when we write to the config file
with open(test_config.name, "w") as f:
f.write("foo")

# WHEN we process any event
with ctx.manager(
"update-status",
state=State(
containers=[
Container(
"nginx",
can_connect=True,
mounts={"config": Mount(NGINX_CONFIG, test_config.name)},
)
]
),
) as mgr:
charm = mgr.charm
nginx = Nginx(charm, lambda: "foo_string", None)

        # AND a config different from the current one is provided
new_config = "bar"

# THEN the _has_config_changed method correctly determines that foo != bar
assert nginx._has_config_changed(new_config)
55 changes: 55 additions & 0 deletions tests/test_coordinated_workers/test_roles_config.py
@@ -0,0 +1,55 @@
import pytest

from src.cosl.coordinated_workers.coordinator import ClusterRolesConfig, ClusterRolesConfigError


def test_meta_role_keys_not_in_roles():
"""Meta roles keys must be a subset of roles."""
# WHEN `meta_roles` has a key that is not specified in `roles`
# THEN instantiation raises a ClusterRolesConfigError
with pytest.raises(ClusterRolesConfigError):
ClusterRolesConfig(
roles={"read"},
meta_roles={"I AM NOT A SUBSET OF ROLES": {"read"}},
minimal_deployment={"read"},
recommended_deployment={"read": 3},
)


def test_meta_role_values_not_in_roles():
"""Meta roles values must be a subset of roles."""
# WHEN `meta_roles` has a value that is not specified in `roles`
# THEN instantiation raises a ClusterRolesConfigError
with pytest.raises(ClusterRolesConfigError):
ClusterRolesConfig(
roles={"read"},
meta_roles={"read": {"I AM NOT A SUBSET OF ROLES"}},
minimal_deployment={"read"},
recommended_deployment={"read": 3},
)


def test_minimal_deployment_roles_not_in_roles():
"""Minimal deployment roles must be a subset of roles."""
# WHEN `minimal_deployment` has a value that is not specified in `roles`
# THEN instantiation raises a ClusterRolesConfigError
with pytest.raises(ClusterRolesConfigError):
ClusterRolesConfig(
roles={"read"},
meta_roles={"read": {"read"}},
minimal_deployment={"I AM NOT A SUBSET OF ROLES"},
recommended_deployment={"read": 3},
)


def test_recommended_deployment_roles_not_in_roles():
"""Recommended deployment roles must be a subset of roles."""
# WHEN `recommended_deployment` has a value that is not specified in `roles`
# THEN instantiation raises a ClusterRolesConfigError
with pytest.raises(ClusterRolesConfigError):
ClusterRolesConfig(
roles={"read"},
meta_roles={"read": {"read"}},
minimal_deployment={"read"},
recommended_deployment={"I AM NOT A SUBSET OF ROLES": 3},
)
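For contrast, a sketch (not part of this commit) of the happy path, in which every key and value is drawn from `roles` and instantiation succeeds:

    def test_consistent_roles_config_instantiates():
        # no key or value falls outside `roles`, so no ClusterRolesConfigError is raised
        ClusterRolesConfig(
            roles={"read", "write"},
            meta_roles={"read": {"read"}},
            minimal_deployment={"read"},
            recommended_deployment={"read": 3, "write": 3},
        )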
7 changes: 2 additions & 5 deletions tox.ini
@@ -73,10 +73,10 @@ deps =
  deepdiff
  fs
  pytest
+ pytest-cov
  ops
  PyYAML
  typing_extensions
- coverage[toml]
  ops-scenario<7.0.0
  cryptography
  jsonschema
@@ -90,7 +90,4 @@ setenv =
  commands =
      python -m doctest {[vars]src_path}/cosl/mandatory_relation_pairs.py
      /usr/bin/env sh -c 'stat cos-tool-amd64 > /dev/null 2>&1 || curl -L -O https://github.com/canonical/cos-tool/releases/latest/download/cos-tool-amd64'
-     coverage run \
-         --source={toxinidir} \
-         -m pytest -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tst_path}
-     coverage report
+     pytest --cov-report=html:.cover --cov {[vars]src_path} -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tst_path}
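The practical effect: coverage collection moves out of a separate coverage run / coverage report pair and into pytest itself via the pytest-cov plugin. An equivalent standalone invocation (a sketch; src/ and tests/ stand in for {[vars]src_path} and {[vars]tst_path}) would be roughly:

    pytest --cov=src --cov-report=html:.cover -v --tb native tests/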
