Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,14 @@ tests:
- env: OPENSTACK_CLOUD
resource_type: openstack-vh-mecha-central-quota-slice
workflow: hypershift-openstack-aws-conformance
- as: e2e-openstack-nested-conformance
minimum_interval: 240h
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For now, let's just use a periodic job that doesn't run very often, because it's resource-intensive. We can iterate on that later, but I think it's fine for now.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it'll run every 10 days for now, let me know if you think it's not frequent enough.

steps:
cluster_profile: openstack-vexxhost
env:
TECH_PREVIEW_NO_UPGRADE: "true"
TEST_SUITE: experimental/reliability/minimal
workflow: hypershift-openstack-nested-conformance
zz_generated_metadata:
branch: release-4.19
org: openshift
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1357,6 +1357,89 @@ periodics:
- name: result-aggregator
secret:
secretName: result-aggregator
# Prowgen-generated periodic (see ci.openshift.io/generator label) that runs the
# e2e-openstack-nested-conformance target at most once every 10 days
# (minimum_interval: 240h) and reports failures to #shiftstack-bot.
# Do not hand-edit job internals; regenerate with prowgen.
- agent: kubernetes
  cluster: build01
  decorate: true
  decoration_config:
    skip_cloning: true
  extra_refs:
  - base_ref: release-4.19
    org: openshift
    repo: hypershift
  labels:
    ci-operator.openshift.io/cloud: openstack-vexxhost
    ci-operator.openshift.io/cloud-cluster-profile: openstack-vexxhost
    ci-operator.openshift.io/variant: periodics
    ci.openshift.io/generator: prowgen
    job-release: "4.19"
    pj-rehearse.openshift.io/can-be-rehearsed: "true"
  minimum_interval: 240h
  name: periodic-ci-openshift-hypershift-release-4.19-periodics-e2e-openstack-nested-conformance
  reporter_config:
    slack:
      channel: '#shiftstack-bot'
      job_states_to_report:
      - failure
      - error
      report_template: ':volcano: Job *{{.Spec.Job}}* ended with *{{.Status.State}}*.
        <{{.Status.URL}}|View logs> :volcano:'
  spec:
    containers:
    - args:
      - --gcs-upload-secret=/secrets/gcs/service-account.json
      - --image-import-pull-secret=/etc/pull-secret/.dockerconfigjson
      - --lease-server-credentials-file=/etc/boskos/credentials
      - --report-credentials-file=/etc/report/credentials
      - --secret-dir=/secrets/ci-pull-credentials
      - --target=e2e-openstack-nested-conformance
      - --variant=periodics
      command:
      - ci-operator
      image: ci-operator:latest
      imagePullPolicy: Always
      name: ""
      resources:
        requests:
          cpu: 10m
      volumeMounts:
      - mountPath: /etc/boskos
        name: boskos
        readOnly: true
      - mountPath: /secrets/ci-pull-credentials
        name: ci-pull-credentials
        readOnly: true
      - mountPath: /secrets/gcs
        name: gcs-credentials
        readOnly: true
      - mountPath: /secrets/manifest-tool
        name: manifest-tool-local-pusher
        readOnly: true
      - mountPath: /etc/pull-secret
        name: pull-secret
        readOnly: true
      - mountPath: /etc/report
        name: result-aggregator
        readOnly: true
    serviceAccountName: ci-operator
    volumes:
    - name: boskos
      secret:
        items:
        - key: credentials
          path: credentials
        secretName: boskos-credentials
    - name: ci-pull-credentials
      secret:
        secretName: ci-pull-credentials
    - name: manifest-tool-local-pusher
      secret:
        secretName: manifest-tool-local-pusher
    - name: pull-secret
      secret:
        secretName: registry-pull-credentials
    - name: result-aggregator
      secret:
        secretName: result-aggregator
- agent: kubernetes
cluster: build09
cron: 0 8 * * *
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@ workflow:
SKIP_MONITOR_TEST: "true"
TEST_CSI_DRIVER_MANIFEST: ""
API_FIP_ENABLED: "false"
INGRESS_FIP_ENABLED: "true"
INGRESS_FIP_ENABLED: "false"
HCP_INGRESS_FIP_ENABLED: "true"
pre:
- ref: ipi-install-rbac
- chain: hypershift-setup-nested-management-cluster
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,19 @@ if [[ $ENABLE_ICSP == "true" ]]; then
COMMAND+=(--image-content-sources "${SHARED_DIR}/mgmt_icsp.yaml")
fi

if [ -f "${SHARED_DIR}/INGRESS_IP" ]; then
INGRESS_IP=$(<"${SHARED_DIR}/INGRESS_IP")
COMMAND+=(--openstack-ingress-floating-ip "${INGRESS_IP}")
# Pass a pre-provisioned floating IP for the hosted cluster's Ingress when one
# was published earlier in the job — presumably by the floating-IP provisioning
# step (see openstack-provision-floatingips); verify against that step.
if [ -f "${SHARED_DIR}/HCP_INGRESS_IP" ]; then
HCP_INGRESS_IP=$(<"${SHARED_DIR}/HCP_INGRESS_IP")
COMMAND+=(--openstack-ingress-floating-ip "${HCP_INGRESS_IP}")
fi

# Append any caller-supplied extra CLI arguments verbatim.
if [[ -n $EXTRA_ARGS ]]; then
COMMAND+=("${EXTRA_ARGS}")
fi

# Optionally place etcd on a specific CSI storage class instead of the default.
if [[ -n ${ETCD_STORAGE_CLASS} ]]; then
COMMAND+=(--etcd-storage-class "${ETCD_STORAGE_CLASS}")
fi

if [[ $HYPERSHIFT_CREATE_CLUSTER_RENDER == "true" ]]; then
"${COMMAND[@]}" --render > "${SHARED_DIR}/hypershift_create_cluster_render.yaml"
exit 0
Expand All @@ -62,23 +66,3 @@ export CLUSTER_NAME
oc wait --timeout=30m --for=condition=Available --namespace=clusters "hostedcluster/${CLUSTER_NAME}"
echo "Cluster became available, creating kubeconfig"
bin/hypershift create kubeconfig --namespace=clusters --name="${CLUSTER_NAME}" > "${SHARED_DIR}/nested_kubeconfig"

# This block is for when we need to discover what floating IP was picked for Ingress.
# We don't need to run this in the case of a pre-defined floating IP.
if [ ! -f "${SHARED_DIR}/INGRESS_IP" ]; then
export KUBECONFIG=${SHARED_DIR}/nested_kubeconfig
timeout 25m bash -c '
echo "Waiting for router-default to have an IP"
until [[ "$(oc -n openshift-ingress get service router-default -o jsonpath="{.status.loadBalancer.ingress[0].ip}")" != "" ]]; do
sleep 15
echo "router-default does not exist yet, retrying..."
done
'
INGRESS_IP=$(oc -n openshift-ingress get service router-default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
if [[ -z "${INGRESS_IP}" ]]; then
echo "Ingress IP was not found"
exit 1
fi
echo "${INGRESS_IP}" > "${SHARED_DIR}/INGRESS_IP"
echo "Ingress IP was found: ${INGRESS_IP}"
fi
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ ref:
- name: ENABLE_ICSP
default: "false"
documentation: "if true, add image content sources config(path=${SHARED_DIR}/mgmt_icsp.yaml)"
- name: ETCD_STORAGE_CLASS
default: ""
documentation: "Name of a CSI storage class where etcd will be stored instead of the default one."
- name: HYPERSHIFT_HC_RELEASE_IMAGE
default: ""
documentation: "Release image used for the HostedCluster. Empty by default it will use release:latest"
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
"path": "hypershift/openstack/nested/conformance/hypershift-openstack-nested-conformance-workflow.yaml",
"owners": {
"approvers": [
"openstack-approvers"
],
"reviewers": [
"openstack-reviewers"
]
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
workflow:
  as: hypershift-openstack-nested-conformance
  documentation: |-
    Sets up all the mgmt/infra Shift On Stack cluster prerequisites necessary for hypershift openstack guest clusters.
    Then runs openshift e2e tests.
    This workflow can be used for regular conformance tests or CSI, you'll need to set
    TEST_SUITE and also TEST_CSI_DRIVER_MANIFEST if testing CSI.

    Learn more about HyperShift here: https://github.com/openshift/hypershift

    Track HyperShift's development here: https://issues.redhat.com/projects/HOSTEDCP
  steps:
    env:
      BASE_DOMAIN: shiftstack.devcluster.openshift.com
      CONFIG_TYPE: "minimal"
      ETCD_STORAGE_CLASS: "lvms-vg1"
      HCP_INGRESS_FIP_ENABLED: "true"
      HYPERSHIFT_BASE_DOMAIN: shiftstack.devcluster.openshift.com
      HYPERSHIFT_NODE_COUNT: "2"
      LVM_CLUSTER_DEVICE_PATH: "/dev/vdb"
      LVM_CLUSTER_WIPE_DEVICE: "true"
      LVM_OPERATOR_SUB_CHANNEL: stable-4.17
      REQUIRED_DEFAULT_STORAGECLASS: "standard-csi"
      SKIP_MONITOR_TEST: "true"
      TEST_CSI_DRIVER_MANIFEST: ""
      # Quoted like the other env values: a bare 1 would be parsed as a YAML
      # integer, while step env values are strings.
      WORKER_REPLICAS: "1"
    pre:
    - chain: ipi-openstack-pre
    - chain: storage-conf-csi-optional-topolvm
    - ref: hypershift-install
    - ref: hypershift-openstack-create-hostedcluster
    - ref: hypershift-openstack-create-wait
    - ref: storage-obj-save
    - ref: storage-conf-csi-openstack-cinder
    - ref: storage-conf-csi-openstack-manila
    test:
    - chain: hypershift-conformance
    post:
    - ref: storage-obj-check
    - chain: hypershift-dump
    - chain: gather-core-dump
    - ref: hypershift-openstack-destroy
    - chain: ipi-openstack-post
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ chain:
- ref: openstack-provision-bastionproxy
- ref: openstack-provision-mirror
- ref: openstack-conf-proxy
- ref: openstack-conf-externalnetworkid
- ref: openstack-provision-vips-ports
- ref: openstack-provision-floatingips
- ref: load-balancer
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ chain:
- ref: openstack-provision-bastionproxy
- ref: openstack-provision-mirror
- ref: openstack-conf-proxy
- ref: openstack-conf-externalnetworkid
- ref: openstack-provision-vips-ports
- ref: openstack-provision-floatingips
- ref: load-balancer
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ chain:
- ref: openstack-provision-bastionproxy
- ref: openstack-provision-mirror
- ref: openstack-conf-proxy
- ref: openstack-conf-externalnetworkid
- ref: openstack-provision-vips-ports
- ref: openstack-provision-floatingips
- ref: load-balancer
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,7 @@ TMP_DIR=$(mktemp -d)
# Reuse the cluster name published by an earlier step when available; otherwise
# derive a deterministic 20-character name from the Prow job ID so every step
# of the same job computes the same name.
# (The diff artifact left the replaced two-line HASH/substring version
# interleaved here; only the post-image single-line derivation is kept.)
if [ -f "${SHARED_DIR}/CLUSTER_NAME" ]; then
  CLUSTER_NAME=$(<"${SHARED_DIR}"/CLUSTER_NAME)
else
  CLUSTER_NAME="$(echo -n "$PROW_JOB_ID"|sha256sum|cut -c-20)"
fi

echo "Getting the hosted zone ID for domain: ${BASE_DOMAIN}"
Expand Down Expand Up @@ -58,6 +57,20 @@ if [ -f "${SHARED_DIR}/INGRESS_IP" ]; then
cp "${TMP_DIR}/dns_ingress.json" "${SHARED_DIR}/dns_up.json"
fi

# Create/refresh the wildcard Ingress DNS record for the hosted control plane,
# if a floating IP for it was provisioned earlier in the job.
if [ -f "${SHARED_DIR}/HCP_INGRESS_IP" ]; then
  # Hosted Cluster name always depends on the following pattern (must match the
  # derivation used when the HostedCluster was created).
  HOSTED_CLUSTER_NAME="$(echo -n "$PROW_JOB_ID"|sha256sum|cut -c-20)"
  HCP_INGRESS_IP=$(<"${SHARED_DIR}"/HCP_INGRESS_IP)
  # Pick the record type from the HCP floating IP itself; the original code
  # tested ${INGRESS_IP} here, which belongs to the management cluster and may
  # be unset, silently producing an AAAA record for an IPv4 address.
  if [[ "${HCP_INGRESS_IP}" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    HCP_INGRESS_RECORD_TYPE="A"
  else
    HCP_INGRESS_RECORD_TYPE="AAAA"
  fi
  echo "Creating HCP INGRESS DNS $HCP_INGRESS_RECORD_TYPE record for *.apps.$HOSTED_CLUSTER_NAME.$BASE_DOMAIN"
  # Append an UPSERT change to the accumulated Route 53 change batch.
  jq '.Changes += [{"Action": "UPSERT", "ResourceRecordSet": {"Name": "*.apps.'${HOSTED_CLUSTER_NAME}'.'${BASE_DOMAIN}'.", "Type": "'${HCP_INGRESS_RECORD_TYPE}'", "TTL": 300, "ResourceRecords": [{"Value": "'${HCP_INGRESS_IP}'"}]}}]' "${SHARED_DIR}/dns_up.json" > "${TMP_DIR}/dns_hcp_ingress.json"
  cp "${TMP_DIR}/dns_hcp_ingress.json" "${SHARED_DIR}/dns_up.json"
fi

if [ -f "${SHARED_DIR}/MIRROR_REGISTRY_IP" ]; then
MIRROR_REGISTRY_IP=$(<"${SHARED_DIR}"/MIRROR_REGISTRY_IP)
if [[ "${MIRROR_REGISTRY_IP}" =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ CLUSTER_NAME="$(<"${SHARED_DIR}/CLUSTER_NAME")"
OPENSTACK_EXTERNAL_NETWORK="${OPENSTACK_EXTERNAL_NETWORK:-$(<"${SHARED_DIR}/OPENSTACK_EXTERNAL_NETWORK")}"

collect_artifacts() {
for f in API_IP INGRESS_IP DELETE_FIPS; do
for f in API_IP INGRESS_IP HCP_INGRESS_IP DELETE_FIPS; do
if [[ -f "${SHARED_DIR}/${f}" ]]; then
cp "${SHARED_DIR}/${f}" "${ARTIFACT_DIR}/"
fi
Expand Down Expand Up @@ -45,3 +45,15 @@ if [[ "${INGRESS_FIP_ENABLED}" == "true" ]]; then
jq -r '.floating_ip_address' <<<"$INGRESS_FIP" > "${SHARED_DIR}/INGRESS_IP"
jq -r '.id' <<<"$INGRESS_FIP" >> "${SHARED_DIR}/DELETE_FIPS"
fi

# Optionally create a dedicated floating IP for the hosted control plane's
# Ingress, tagged with the Prow cluster/job identifiers.
if [[ "${HCP_INGRESS_FIP_ENABLED}" == "true" ]]; then
echo "Creating floating IP for Hypershift Ingress"
# -c limits the JSON output to the two fields consumed below.
HCP_INGRESS_FIP="$(openstack floating ip create \
--description "${CLUSTER_NAME}.hcp-ingress-fip" \
--tag "PROW_CLUSTER_NAME=${CLUSTER_NAME}" \
--tag "PROW_JOB_ID=${PROW_JOB_ID}" \
"$OPENSTACK_EXTERNAL_NETWORK" \
--format json -c floating_ip_address -c id)"
# Publish the address for later steps and append the FIP id to DELETE_FIPS —
# presumably consumed by the teardown step; verify against the destroy chain.
jq -r '.floating_ip_address' <<<"$HCP_INGRESS_FIP" > "${SHARED_DIR}/HCP_INGRESS_IP"
jq -r '.id' <<<"$HCP_INGRESS_FIP" >> "${SHARED_DIR}/DELETE_FIPS"
fi
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ ref:
default: "true"
documentation: |-
Whether to create a floating IP for the Ingress endpoint. Defaults to true.
- name: HCP_INGRESS_FIP_ENABLED
default: "false"
documentation: |-
Whether to create a floating IP for the Hypershift Ingress endpoint. Defaults to false.
documentation: |-
Creates the requested floating IPs in OPENSTACK_EXTERNAL_NETWORK with the description
set to "${SHARED_DIR/CLUSTER_NAME".api-fip and
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ spec:
deviceClasses:
- name: vg1
deviceSelector:
paths:
- ${LVM_CLUSTER_DEVICE_PATH}
forceWipeDevicesAndDestroyAllData: ${LVM_CLUSTER_WIPE_DEVICE}
paths:
- ${LVM_CLUSTER_DEVICE_PATH}
default: true
thinPoolConfig:
name: thin-pool-1
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@ ref:
default: "/dev/vda"
documentation: |-
The deviceSelector path of the lvmcluster.
- name: LVM_CLUSTER_WIPE_DEVICE
default: "false"
documentation: |-
Whether or not the device needs a data wipe.
documentation: |-
The storage-create-lvm-cluster step creates lvmcluster and
waiting for it become ready to use.