From c9ccb28be880cf97ee4e6c4a1aaf8ef89acba5a8 Mon Sep 17 00:00:00 2001 From: Michael Aspinwall Date: Thu, 26 Jun 2025 18:09:15 +0000 Subject: [PATCH] test reapply of pods for the same resource claim --- examples/repeatresourceclaimtemplate.yaml | 64 +++++++++++++++++++++++ tests/e2e.bats | 40 ++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 examples/repeatresourceclaimtemplate.yaml diff --git a/examples/repeatresourceclaimtemplate.yaml b/examples/repeatresourceclaimtemplate.yaml new file mode 100644 index 00000000..a2209d10 --- /dev/null +++ b/examples/repeatresourceclaimtemplate.yaml @@ -0,0 +1,64 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: resource.k8s.io/v1beta1 +kind: DeviceClass +metadata: + name: multinic +spec: + selectors: + - cel: + expression: device.driver == "dra.net" +--- +apiVersion: resource.k8s.io/v1beta1 +kind: ResourceClaimTemplate +metadata: + name: reapply-interfaces-template +spec: + spec: + devices: + requests: + - name: reapply-interfaces-template + deviceClassName: multinic + selectors: + - cel: + expression: device.attributes["dra.net"].ifName == "dummy8" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: server-deployment-reapply + labels: + app: reapplyApp +spec: + replicas: 1 + selector: + matchLabels: + app: reapplyApp + template: + metadata: + labels: + app: reapplyApp + spec: + resourceClaims: + - name: reapply-interfaces + resourceClaimTemplateName: reapply-interfaces-template + containers: + - name: agnhost + image: registry.k8s.io/e2e-test-images/agnhost:2.54 + args: + - netexec + - --http-port=80 + ports: + - containerPort: 80 diff --git a/tests/e2e.bats b/tests/e2e.bats index 5d8ad27a..f472cb05 100644 --- a/tests/e2e.bats +++ b/tests/e2e.bats @@ -255,3 +255,43 @@ load 'test_helper/bats-assert/load' kubectl delete -f "$BATS_TEST_DIRNAME"/../examples/deviceclass.yaml kubectl delete -f "$BATS_TEST_DIRNAME"/../examples/resourceclaimtemplate_double.yaml } + +@test "reapply pod with dummy resource claim" { + docker exec "$CLUSTER_NAME"-worker bash -c "ip link add dummy8 type dummy" + docker exec "$CLUSTER_NAME"-worker bash -c "ip link set up dummy8" + docker exec "$CLUSTER_NAME"-worker bash -c "ip addr add 169.254.169.14/32 dev dummy8" + + # Apply the resource claim template and deployment + kubectl apply -f "$BATS_TEST_DIRNAME"/../examples/repeatresourceclaimtemplate.yaml + kubectl wait --timeout=30s --for=condition=ready pods -l app=reapplyApp + POD_NAME=$(kubectl get pods -l app=reapplyApp -o name) + run kubectl exec $POD_NAME -- ip addr show dummy8 + assert_success + assert_output --partial "169.254.169.14" + # TODO list the specific 
resourceclaim and the networkdata + run kubectl get resourceclaims -o yaml + assert_success + assert_output --partial "169.254.169.14" + + # Delete the deployment and wait for the resource claims to be removed + kubectl delete deployment/server-deployment-reapply --wait --timeout=30s + kubectl wait --for delete pod -l app=reapplyApp + + # Reapply the IP; dummy devices do not have the ability to reclaim the IP + # when moved back into the host network namespace. + docker exec "$CLUSTER_NAME"-worker bash -c "ip addr add 169.254.169.14/32 dev dummy8" + + # Reapply the deployment; it should reclaim the device + kubectl apply -f "$BATS_TEST_DIRNAME"/../examples/repeatresourceclaimtemplate.yaml + kubectl wait --timeout=30s --for=condition=ready pods -l app=reapplyApp + POD_NAME=$(kubectl get pods -l app=reapplyApp -o name) + run kubectl exec $POD_NAME -- ip addr show dummy8 + assert_success + assert_output --partial "169.254.169.14" + # TODO list the specific resourceclaim and the networkdata + run kubectl get resourceclaims -o yaml + assert_success + assert_output --partial "169.254.169.14" + + kubectl delete -f "$BATS_TEST_DIRNAME"/../examples/repeatresourceclaimtemplate.yaml +}