From d78e1d88f2254145076e7a627260c044f300be92 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Thu, 5 Aug 2021 12:12:54 +0200 Subject: [PATCH 1/2] Add test cases for service restarts --- Cargo.lock | 24 ++++++ Cargo.toml | 2 + tests/environment.rs | 3 +- tests/repository.rs | 3 +- tests/restart.rs | 163 +++++++++++++++++++++++++++++++++++++ tests/util/services.rs | 17 ++-- tests/util/test_package.rs | 63 ++++++++------ 7 files changed, 237 insertions(+), 38 deletions(-) create mode 100644 tests/restart.rs diff --git a/Cargo.lock b/Cargo.lock index 5333494..04fa8ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,8 +19,10 @@ dependencies = [ "integration-test-commons", "k8s-openapi", "nix", + "rstest", "serde", "serde_json", + "serde_yaml", "sha2", "tar", "tokio", @@ -1527,12 +1529,34 @@ dependencies = [ "winapi", ] +[[package]] +name = "rstest" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2288c66aeafe3b2ed227c981f364f9968fa952ef0b30e84ada4486e7ee24d00a" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote", + "rustc_version", + "syn", +] + [[package]] name = "rustc-serialize" version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "ryu" version = "1.0.5" diff --git a/Cargo.toml b/Cargo.toml index a778588..3967d48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,8 +14,10 @@ http = "0.2" integration-test-commons = { git = "https://github.com/stackabletech/integration-test-commons.git", tag = "0.3.0" } k8s-openapi = { version = "0.12", default-features = false, features = ["v1_21"] } nix = "0.22" +rstest = "0.11" serde = { version = "1.0", features = 
["derive"] } serde_json = "1.0" +serde_yaml = "0.8" sha2 = "0.9" tar = "0.4" tokio = { version = "1.10", features = ["macros", "rt-multi-thread"] } diff --git a/tests/environment.rs b/tests/environment.rs index 5742428..35c06a2 100644 --- a/tests/environment.rs +++ b/tests/environment.rs @@ -39,8 +39,9 @@ async fn kubeconfig_should_be_set() -> Result<()> { let repository_result = StackableRepositoryInstance::new(&repository, &client).await; result.combine(&repository_result); + let pod_definition = job.pod("agent-service-integration-test-kubeconfig"); let pod_result = client - .create(&job.pod_spec("agent-service-integration-test-kubeconfig")) + .create(&serde_yaml::to_string(&pod_definition).unwrap()) .await; result.combine(&pod_result); diff --git a/tests/repository.rs b/tests/repository.rs index bd2944a..554946d 100644 --- a/tests/repository.rs +++ b/tests/repository.rs @@ -73,8 +73,9 @@ async fn invalid_or_unreachable_repositories_should_be_ignored() -> Result<()> { StackableRepositoryInstance::new(&repository_with_service, &client).await; result.combine(&repository3_result); + let pod_definition = service.pod("agent-service-integration-test-repository"); let pod_result = client - .create::<Pod>(&service.pod_spec("agent-service-integration-test-repository")) + .create::<Pod>(&serde_yaml::to_string(&pod_definition).unwrap()) .await; result.combine(&pod_result); diff --git a/tests/restart.rs b/tests/restart.rs new file mode 100644 index 0000000..f949309 --- /dev/null +++ b/tests/restart.rs @@ -0,0 +1,163 @@ +mod util; + +use anyhow::Result; +use integration_test_commons::test::prelude::*; +use rstest::rstest; +use util::{ + repository::{StackableRepository, StackableRepositoryInstance}, + result::TestResult, + services::exit_service, +}; +use uuid::Uuid; + +#[rstest] +#[case::failing_service_should_be_restarted_on_restart_policy_always( + "failing_service", + "Always", + "expect_restart" +)] +#[case::failing_service_should_be_restarted_on_restart_policy_onfailure( 
"failing_service", + "OnFailure", + "expect_restart" +)] +#[case::failing_service_should_not_be_restarted_on_restart_policy_never( + "failing_service", + "Never", + "expect_no_restart" +)] +#[case::succeeding_service_should_be_restarted_on_restart_policy_always( + "succeeding_service", + "Always", + "expect_restart" +)] +#[case::succeeding_service_should_not_be_restarted_on_restart_policy_onfailure( + "succeeding_service", + "OnFailure", + "expect_no_restart" +)] +#[case::succeeding_service_should_not_be_restarted_on_restart_policy_never( + "succeeding_service", + "Never", + "expect_no_restart" +)] +#[tokio::test] +async fn service_should_be_restarted_according_to_the_restart_policy( + #[case] service: &str, + #[case] restart_policy: &str, + #[case] expected_behavior: &str, +) -> Result<()> { + let client = KubeClient::new().await?; + let mut result = TestResult::default(); + + let (repository_result, pod_result) = set_up( + &client, + &mut result, + match service { + "succeeding_service" => true, + "failing_service" => false, + other => panic!("invalid parameter: {}", other), + }, + restart_policy, + ) + .await; + + match expected_behavior { + "expect_restart" => verify_restart(&client, &mut result, &pod_result).await, + "expect_no_restart" => verify_no_restart(&client, &mut result, &pod_result).await, + other => panic!("invalid parameter: {}", other), + } + + tear_down(&client, &mut result, repository_result, pod_result).await; + + result.into() +} + +async fn set_up( + client: &KubeClient, + result: &mut TestResult, + succeeding: bool, + restart_policy: &str, +) -> (Result<StackableRepositoryInstance>, Result<Pod>) { + let service = exit_service(if succeeding { 0 } else { 1 }); + + let repository = StackableRepository { + name: format!("restart-test-repository-{}", Uuid::new_v4()), + packages: vec![service.to_owned()], + }; + let repository_result = StackableRepositoryInstance::new(&repository, client).await; + result.combine(&repository_result); + + let mut pod_definition = 
service.pod(&format!( + "agent-service-integration-test-restart-{}", + Uuid::new_v4() + )); + pod_definition + .spec + .get_or_insert_with(Default::default) + .restart_policy + .replace(String::from(restart_policy)); + + let pod_result = client + .create(&serde_yaml::to_string(&pod_definition).unwrap()) + .await; + result.combine(&pod_result); + + (repository_result, pod_result) +} + +async fn tear_down( + client: &KubeClient, + result: &mut TestResult, + repository_result: Result<StackableRepositoryInstance>, + pod_result: Result<Pod>, +) { + if let Ok(pod) = pod_result { + let deletion_result = client.delete(pod).await; + result.combine(&deletion_result); + } + if let Ok(repository) = repository_result { + let close_result = repository.close(client).await; + result.combine(&close_result); + } +} + +async fn verify_restart(client: &KubeClient, result: &mut TestResult, pod_result: &Result<Pod>) { + if let Ok(pod) = &pod_result { + let verify_status_result = client + .verify_status(pod, |pod| { + pod.status + .as_ref() + .and_then(|status| status.container_statuses.first()) + .filter(|container_status| container_status.restart_count > 3) + .is_some() + }) + .await; + result.combine(&verify_status_result); + } +} + +async fn verify_no_restart(client: &KubeClient, result: &mut TestResult, pod_result: &Result<Pod>) { + if let Ok(pod) = &pod_result { + let verify_status_result = client + .verify_status(pod, |pod| { + let phase = pod.status.as_ref().and_then(|status| status.phase.as_ref()); + phase == Some(&String::from("Succeeded")) || phase == Some(&String::from("Failed")) + }) + .await; + result.combine(&verify_status_result); + + let get_status_result = client.get_status(pod).await; + result.combine(&get_status_result); + + if let Ok(pod) = get_status_result { + let restart_count_result = pod + .status + .as_ref() + .and_then(|status| status.container_statuses.first()) + .filter(|container_status| container_status.restart_count == 0) + .ok_or("Restart count is not 0."); + result.combine(&restart_count_result); 
+ } + } +} diff --git a/tests/util/services.rs b/tests/util/services.rs index 74a97de..f46c72c 100644 --- a/tests/util/services.rs +++ b/tests/util/services.rs @@ -47,22 +47,21 @@ pub fn echo_service() -> TestPackage { } } -/// The exit-service terminates immediately with the exit code contained -/// in the environment variable `EXIT_CODE`. If the environment variable -/// is not set then the exit code is 0. +/// The exit-service terminates immediately with the given exit code. #[allow(dead_code)] -pub fn exit_service() -> TestPackage { +pub fn exit_service(exit_code: i8) -> TestPackage { TestPackage { - name: String::from("exit-service"), + name: format!("exit-service-{}", exit_code), version: String::from("1.0.0"), job: true, - script: String::from(indoc!( + script: formatdoc!( " #!/bin/sh - exit ${EXIT_CODE:-0} - " - )), + exit {} + ", + exit_code + ), } } diff --git a/tests/util/test_package.rs b/tests/util/test_package.rs index 1d23fff..7ab2e20 100644 --- a/tests/util/test_package.rs +++ b/tests/util/test_package.rs @@ -1,4 +1,8 @@ +use std::collections::BTreeMap; + use flate2::{write::GzEncoder, Compression}; +use integration_test_commons::test::prelude::{Container, Pod, PodSpec, Toleration}; +use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; /// Package with a shell script used for testing #[derive(Clone, Debug)] @@ -43,32 +47,37 @@ impl TestPackage { } /// Creates a pod specification for this package - pub fn pod_spec(&self, pod_name: &str) -> String { - format!( - " - apiVersion: v1 - kind: Pod - metadata: - name: {pod_name} - spec: - containers: - - name: {package_name} - image: {package_name}:{package_version} - command: - - {command} - nodeSelector: - kubernetes.io/arch: stackable-linux - restartPolicy: {restart_policy} - tolerations: - - key: kubernetes.io/arch - operator: Equal - value: stackable-linux - ", - command = self.command(), - package_name = self.name, - package_version = self.version, - pod_name = pod_name, - restart_policy = 
if self.job { "Never" } else { "Always" }, - ) + pub fn pod(&self, pod_name: &str) -> Pod { + Pod { + metadata: ObjectMeta { + name: Some(String::from(pod_name)), + ..Default::default() + }, + spec: Some(PodSpec { + containers: vec![Container { + name: self.name.to_owned(), + image: Some(format!("{}:{}", self.name, self.version)), + command: vec![self.command()], + ..Default::default() + }], + node_selector: { + let mut selectors = BTreeMap::new(); + selectors.insert( + String::from("kubernetes.io/arch"), + String::from("stackable-linux"), + ); + selectors + }, + restart_policy: Some(String::from(if self.job { "Never" } else { "Always" })), + tolerations: vec![Toleration { + key: Some(String::from("kubernetes.io/arch")), + operator: Some(String::from("Equal")), + value: Some(String::from("stackable-linux")), + ..Default::default() + }], + ..Default::default() + }), + ..Default::default() + } } } From 10e4bf00760dda12df57abf6818f78553763f1d2 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Thu, 12 Aug 2021 16:49:41 +0200 Subject: [PATCH 2/2] Increase timeouts --- tests/service.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/service.rs b/tests/service.rs index 4319307..2c5d7e9 100644 --- a/tests/service.rs +++ b/tests/service.rs @@ -4,7 +4,8 @@ use std::{fmt, time::Duration}; #[test] fn service_should_be_started_successfully() { - let client = TestKubeClient::new(); + let mut client = TestKubeClient::new(); + client.timeouts().delete = Duration::from_secs(60); setup_repository(&client); @@ -37,7 +38,8 @@ fn service_should_be_started_successfully() { #[test] fn host_ip_and_node_ip_should_be_set() { - let client = TestKubeClient::new(); + let mut client = TestKubeClient::new(); + client.timeouts().delete = Duration::from_secs(60); setup_repository(&client); @@ -89,7 +91,7 @@ fn restart_after_ungraceful_shutdown_should_succeed() { let mut client = TestKubeClient::new(); // delete must await the end of the termination grace period - 
client.timeouts().delete += termination_grace_period; + client.timeouts().delete = Duration::from_secs(60) + termination_grace_period; setup_repository(&client);