diff --git a/ci-operator/step-registry/upi/conf/gcp/upi-conf-gcp-commands.sh b/ci-operator/step-registry/upi/conf/gcp/upi-conf-gcp-commands.sh index e698ba17300be..02331540747b9 100755 --- a/ci-operator/step-registry/upi/conf/gcp/upi-conf-gcp-commands.sh +++ b/ci-operator/step-registry/upi/conf/gcp/upi-conf-gcp-commands.sh @@ -12,16 +12,16 @@ if [[ -z "$RELEASE_IMAGE_LATEST" ]]; then echo "RELEASE_IMAGE_LATEST is an empty string, exiting" exit 1 fi -export OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE=${RELEASE_IMAGE_LATEST} +export OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE="${RELEASE_IMAGE_LATEST}" # Ensure ignition assets are configured with the correct invoker to track CI jobs. -export OPENSHIFT_INSTALL_INVOKER=openshift-internal-ci/${JOB_NAME_SAFE}/${BUILD_ID} +export OPENSHIFT_INSTALL_INVOKER="openshift-internal-ci/${JOB_NAME_SAFE}/${BUILD_ID}" -export GOOGLE_CLOUD_KEYFILE_JSON=${CLUSTER_PROFILE_DIR}/gce.json +export GOOGLE_CLOUD_KEYFILE_JSON="${CLUSTER_PROFILE_DIR}/gce.json" dir=/tmp/installer -mkdir "${dir}/" -pushd ${dir} +mkdir "${dir}" +pushd "${dir}" cp -t "${dir}" \ "${SHARED_DIR}/install-config.yaml" @@ -29,12 +29,12 @@ cp -t "${dir}" \ if [[ -s "${SHARED_DIR}/xpn.json" ]]; then echo "Reading variables from ${SHARED_DIR}/xpn.json..." 
IS_XPN=1 - HOST_PROJECT=$(jq -r '.hostProject' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_NETWORK=$(jq -r '.clusterNetwork' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_COMPUTE_SUBNET=$(jq -r '.computeSubnet' ${SHARED_DIR}/xpn.json) + HOST_PROJECT="$(jq -r '.hostProject' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_NETWORK="$(jq -r '.clusterNetwork' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_COMPUTE_SUBNET="$(jq -r '.computeSubnet' "${SHARED_DIR}/xpn.json")" - HOST_PROJECT_NETWORK_NAME=$(basename ${HOST_PROJECT_NETWORK}) - HOST_PROJECT_COMPUTE_SUBNET_NAME=$(basename ${HOST_PROJECT_COMPUTE_SUBNET}) + HOST_PROJECT_NETWORK_NAME="$(basename "${HOST_PROJECT_NETWORK}")" + HOST_PROJECT_COMPUTE_SUBNET_NAME="$(basename "${HOST_PROJECT_COMPUTE_SUBNET}")" fi ### Empty the compute pool (optional) diff --git a/ci-operator/step-registry/upi/deprovision/gcp/upi-deprovision-gcp-commands.sh b/ci-operator/step-registry/upi/deprovision/gcp/upi-deprovision-gcp-commands.sh index fa90c2a316de5..bf3364d8003cb 100755 --- a/ci-operator/step-registry/upi/deprovision/gcp/upi-deprovision-gcp-commands.sh +++ b/ci-operator/step-registry/upi/deprovision/gcp/upi-deprovision-gcp-commands.sh @@ -10,62 +10,62 @@ export HOME=/tmp export GOOGLE_CLOUD_KEYFILE_JSON="${CLUSTER_PROFILE_DIR}/gce.json" gcloud auth activate-service-account --key-file="${GOOGLE_CLOUD_KEYFILE_JSON}" -gcloud config set project "$(jq -r .gcp.projectID ${SHARED_DIR}/metadata.json)" +gcloud config set project "$(jq -r .gcp.projectID "${SHARED_DIR}/metadata.json")" dir=/tmp/installer -mkdir -p "${dir}/" -pushd ${dir} +mkdir -p "${dir}" +pushd "${dir}" if [[ ! -s "${SHARED_DIR}/metadata.json" ]]; then echo "Skipping: ${SHARED_DIR}/metadata.json not found." 
exit fi BASE_DOMAIN='origin-ci-int-gce.dev.openshift.com' -CLUSTER_NAME=$(jq -r .clusterName ${SHARED_DIR}/metadata.json) -INFRA_ID=$(jq -r .infraID ${SHARED_DIR}/metadata.json) +CLUSTER_NAME="$(jq -r .clusterName "${SHARED_DIR}/metadata.json")" +INFRA_ID="$(jq -r .infraID "${SHARED_DIR}/metadata.json")" ### Read XPN config, if exists if [[ -s "${SHARED_DIR}/xpn.json" ]]; then echo "Reading variables from ${SHARED_DIR}/xpn.json..." IS_XPN=1 - HOST_PROJECT=$(jq -r '.hostProject' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_PRIVATE_ZONE_NAME=$(jq -r '.privateZoneName' "${SHARED_DIR}/xpn.json") + HOST_PROJECT="$(jq -r '.hostProject' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_PRIVATE_ZONE_NAME="$(jq -r '.privateZoneName' "${SHARED_DIR}/xpn.json")" fi # Delete the bootstrap deployment, but expect it to error. echo "$(date -u --rfc-3339=seconds) - Deleting bootstrap deployment (errors when bootstrap-complete)..." set +e -gcloud deployment-manager deployments delete -q ${INFRA_ID}-bootstrap +gcloud deployment-manager deployments delete -q "${INFRA_ID}-bootstrap" set -e # Delete the deployments that should always exist. echo "$(date -u --rfc-3339=seconds) - Deleting worker, control-plane, and infra deployments..." -gcloud deployment-manager deployments delete -q ${INFRA_ID}-{worker,control-plane,infra} +gcloud deployment-manager deployments delete -q "${INFRA_ID}"-{worker,control-plane,infra} # Only delete these deployments when they are expected to exist. if [[ ! -v IS_XPN ]]; then echo "$(date -u --rfc-3339=seconds) - Deleting security and vpc deployments..." 
- gcloud deployment-manager deployments delete -q ${INFRA_ID}-{security,vpc} + gcloud deployment-manager deployments delete -q "${INFRA_ID}"-{security,vpc} fi # Delete XPN DNS entries if [[ -v IS_XPN ]]; then set +e if [ -f transaction.yaml ]; then rm transaction.yaml; fi - gcloud --project="${HOST_PROJECT}" dns record-sets transaction start --zone ${HOST_PROJECT_PRIVATE_ZONE_NAME} + gcloud --project="${HOST_PROJECT}" dns record-sets transaction start --zone "${HOST_PROJECT_PRIVATE_ZONE_NAME}" while read -r line; do - DNSNAME=$(echo $line | jq -r '.name') - DNSTTL=$(echo $line | jq -r '.ttl') - DNSTYPE=$(echo $line | jq -r '.type') - DNSDATA=$(echo $line | jq -r '.rrdatas[]') - gcloud --project="${HOST_PROJECT}" dns record-sets transaction remove --zone ${HOST_PROJECT_PRIVATE_ZONE_NAME} --name ${DNSNAME} --ttl ${DNSTTL} --type ${DNSTYPE} ${DNSDATA}; + DNSNAME=$(echo "${line}" | jq -r '.name') + DNSTTL=$(echo "${line}" | jq -r '.ttl') + DNSTYPE=$(echo "${line}" | jq -r '.type') + DNSDATA=$(echo "${line}" | jq -r '.rrdatas[]') + gcloud --project="${HOST_PROJECT}" dns record-sets transaction remove --zone "${HOST_PROJECT_PRIVATE_ZONE_NAME}" --name "${DNSNAME}" --ttl "${DNSTTL}" --type "${DNSTYPE}" "${DNSDATA}" done < <(gcloud --project="${HOST_PROJECT}" dns record-sets list --zone="${HOST_PROJECT_PRIVATE_ZONE_NAME}" --filter="name:.${CLUSTER_NAME}.${BASE_DOMAIN}." --format=json | jq -c '.[]') # Delete the SRV record - gcloud --project=${HOST_PROJECT} dns record-sets transaction remove \ - --name _etcd-server-ssl._tcp.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type SRV --zone ${HOST_PROJECT_PRIVATE_ZONE_NAME} \ + gcloud "--project=${HOST_PROJECT}" dns record-sets transaction remove \ + --name "_etcd-server-ssl._tcp.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type SRV --zone "${HOST_PROJECT_PRIVATE_ZONE_NAME}" \ "0 10 2380 etcd-0.${CLUSTER_NAME}.${BASE_DOMAIN}." \ "0 10 2380 etcd-1.${CLUSTER_NAME}.${BASE_DOMAIN}." \ "0 10 2380 etcd-2.${CLUSTER_NAME}.${BASE_DOMAIN}." 
- gcloud --project="${HOST_PROJECT}" dns record-sets transaction execute --zone ${HOST_PROJECT_PRIVATE_ZONE_NAME} + gcloud --project="${HOST_PROJECT}" dns record-sets transaction execute --zone "${HOST_PROJECT_PRIVATE_ZONE_NAME}" set -e fi diff --git a/ci-operator/step-registry/upi/gcp/nested/post/upi-gcp-nested-post-commands.sh b/ci-operator/step-registry/upi/gcp/nested/post/upi-gcp-nested-post-commands.sh index c35dfc943ae44..5f70bf8fa2f8a 100644 --- a/ci-operator/step-registry/upi/gcp/nested/post/upi-gcp-nested-post-commands.sh +++ b/ci-operator/step-registry/upi/gcp/nested/post/upi-gcp-nested-post-commands.sh @@ -2,13 +2,13 @@ set -eo pipefail -INSTANCE_PREFIX=${NAMESPACE}-${JOB_NAME_HASH} +INSTANCE_PREFIX="${NAMESPACE}-${JOB_NAME_HASH}" function teardown() { # This is for running the gcloud commands mock-nss.sh gcloud auth activate-service-account \ - --quiet --key-file ${CLUSTER_PROFILE_DIR}/gce.json + --quiet --key-file "${CLUSTER_PROFILE_DIR}/gce.json" gcloud --quiet config set project "${GOOGLE_PROJECT_ID}" gcloud --quiet config set compute/zone "${GOOGLE_COMPUTE_ZONE}" gcloud --quiet config set compute/region "${GOOGLE_COMPUTE_REGION}" @@ -24,4 +24,4 @@ function teardown() { } trap 'teardown' EXIT -trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM \ No newline at end of file +trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM diff --git a/ci-operator/step-registry/upi/gcp/nested/pre/upi-gcp-nested-pre-commands.sh b/ci-operator/step-registry/upi/gcp/nested/pre/upi-gcp-nested-pre-commands.sh index 80b6bc2ad909e..66375f240ff2e 100644 --- a/ci-operator/step-registry/upi/gcp/nested/pre/upi-gcp-nested-pre-commands.sh +++ b/ci-operator/step-registry/upi/gcp/nested/pre/upi-gcp-nested-pre-commands.sh @@ -3,18 +3,18 @@ set -euo pipefail trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM -INSTANCE_PREFIX=${NAMESPACE}-${JOB_NAME_HASH} 
+INSTANCE_PREFIX="${NAMESPACE}-${JOB_NAME_HASH}" echo "$(date -u --rfc-3339=seconds) - Configuring VM on GCP..." mkdir -p "${HOME}"/.ssh mock-nss.sh # gcloud compute will use this key rather than create a new one -cp ${CLUSTER_PROFILE_DIR}/ssh-privatekey ${HOME}/.ssh/google_compute_engine -chmod 0600 ${HOME}/.ssh/google_compute_engine -cp ${CLUSTER_PROFILE_DIR}/ssh-publickey ${HOME}/.ssh/google_compute_engine.pub +cp "${CLUSTER_PROFILE_DIR}/ssh-privatekey" "${HOME}/.ssh/google_compute_engine" +chmod 0600 "${HOME}/.ssh/google_compute_engine" +cp "${CLUSTER_PROFILE_DIR}/ssh-publickey" "${HOME}/.ssh/google_compute_engine.pub" -gcloud auth activate-service-account --quiet --key-file ${CLUSTER_PROFILE_DIR}/gce.json +gcloud auth activate-service-account --quiet --key-file "${CLUSTER_PROFILE_DIR}/gce.json" gcloud --quiet config set project "${GOOGLE_PROJECT_ID}" gcloud --quiet config set compute/zone "${GOOGLE_COMPUTE_ZONE}" gcloud --quiet config set compute/region "${GOOGLE_COMPUTE_REGION}" @@ -41,4 +41,4 @@ gcloud compute instances create "${INSTANCE_PREFIX}" \ --boot-disk-type pd-ssd \ --boot-disk-size 256GB \ --subnet "${INSTANCE_PREFIX}" \ - --network "${INSTANCE_PREFIX}" \ No newline at end of file + --network "${INSTANCE_PREFIX}" diff --git a/ci-operator/step-registry/upi/install/gcp/upi-install-gcp-commands.sh b/ci-operator/step-registry/upi/install/gcp/upi-install-gcp-commands.sh index 005317a37fcb9..1afb1fa84e86c 100755 --- a/ci-operator/step-registry/upi/install/gcp/upi-install-gcp-commands.sh +++ b/ci-operator/step-registry/upi/install/gcp/upi-install-gcp-commands.sh @@ -8,27 +8,27 @@ trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wa export HOME=/tmp -export SSH_PRIV_KEY_PATH=${CLUSTER_PROFILE_DIR}/ssh-privatekey -export OPENSHIFT_INSTALL_INVOKER=openshift-internal-ci/${JOB_NAME_SAFE}/${BUILD_ID} +export SSH_PRIV_KEY_PATH="${CLUSTER_PROFILE_DIR}/ssh-privatekey" +export 
OPENSHIFT_INSTALL_INVOKER="openshift-internal-ci/${JOB_NAME_SAFE}/${BUILD_ID}" echo "$(date -u --rfc-3339=seconds) - Configuring gcloud..." -export GOOGLE_CLOUD_KEYFILE_JSON=${CLUSTER_PROFILE_DIR}/gce.json +export GOOGLE_CLOUD_KEYFILE_JSON="${CLUSTER_PROFILE_DIR}/gce.json" gcloud auth activate-service-account --key-file="${GOOGLE_CLOUD_KEYFILE_JSON}" -gcloud config set project "$(jq -r .gcp.projectID ${SHARED_DIR}/metadata.json)" +gcloud config set project "$(jq -r .gcp.projectID "${SHARED_DIR}/metadata.json")" echo "$(date -u --rfc-3339=seconds) - Copying config from shared dir..." dir=/tmp/installer -mkdir -p "${dir}/auth/" -pushd ${dir} +mkdir -p "${dir}/auth" +pushd "${dir}" cp -t "${dir}" \ "${SHARED_DIR}/install-config.yaml" \ "${SHARED_DIR}/metadata.json" \ - ${SHARED_DIR}/*.ign + "${SHARED_DIR}"/*.ign cp -t "${dir}/auth" \ "${SHARED_DIR}/kubeadmin-password" \ "${SHARED_DIR}/kubeconfig" cp -t "${dir}" \ - /var/lib/openshift-install/upi/${CLUSTER_TYPE}/* + "/var/lib/openshift-install/upi/${CLUSTER_TYPE}"/* tar -xzf "${SHARED_DIR}/.openshift_install_state.json.tgz" function backoff() { @@ -43,8 +43,8 @@ function backoff() { if [[ $attempt -gt 5 ]]; then break fi - echo "command failed, retrying in $(( 2 ** $attempt )) seconds" - sleep $(( 2 ** $attempt )) + echo "command failed, retrying in $(( 2 ** attempt )) seconds" + sleep $(( 2 ** attempt )) done return $failed } @@ -52,23 +52,23 @@ function backoff() { ## Export variables to be used in examples below. echo "$(date -u --rfc-3339=seconds) - Exporting variables..." BASE_DOMAIN='origin-ci-int-gce.dev.openshift.com' -BASE_DOMAIN_ZONE_NAME=$(gcloud dns managed-zones list --filter "DNS_NAME=${BASE_DOMAIN}." --format json | jq -r .[0].name) +BASE_DOMAIN_ZONE_NAME="$(gcloud dns managed-zones list --filter "DNS_NAME=${BASE_DOMAIN}." 
--format json | jq -r .[0].name)" NETWORK_CIDR='10.0.0.0/16' MASTER_SUBNET_CIDR='10.0.0.0/19' WORKER_SUBNET_CIDR='10.0.32.0/19' KUBECONFIG="${dir}/auth/kubeconfig" export KUBECONFIG -CLUSTER_NAME=$(jq -r .clusterName metadata.json) -INFRA_ID=$(jq -r .infraID metadata.json) -PROJECT_NAME=$(jq -r .gcp.projectID metadata.json) -REGION=$(jq -r .gcp.region metadata.json) -ZONE_0=$(gcloud compute regions describe ${REGION} --format=json | jq -r .zones[0] | cut -d "/" -f9) -ZONE_1=$(gcloud compute regions describe ${REGION} --format=json | jq -r .zones[1] | cut -d "/" -f9) -ZONE_2=$(gcloud compute regions describe ${REGION} --format=json | jq -r .zones[2] | cut -d "/" -f9) +CLUSTER_NAME="$(jq -r .clusterName metadata.json)" +INFRA_ID="$(jq -r .infraID metadata.json)" +PROJECT_NAME="$(jq -r .gcp.projectID metadata.json)" +REGION="$(jq -r .gcp.region metadata.json)" +ZONE_0="$(gcloud compute regions describe "${REGION}" --format=json | jq -r .zones[0] | cut -d "/" -f9)" +ZONE_1="$(gcloud compute regions describe "${REGION}" --format=json | jq -r .zones[1] | cut -d "/" -f9)" +ZONE_2="$(gcloud compute regions describe "${REGION}" --format=json | jq -r .zones[2] | cut -d "/" -f9)" -MASTER_IGNITION=$(cat master.ign) -WORKER_IGNITION=$(cat worker.ign) +MASTER_IGNITION="$(cat master.ign)" +WORKER_IGNITION="$(cat worker.ign)" echo "Using infra_id: ${INFRA_ID}" @@ -76,25 +76,25 @@ echo "Using infra_id: ${INFRA_ID}" if [[ -s "${SHARED_DIR}/xpn.json" ]]; then echo "Reading variables from ${SHARED_DIR}/xpn.json..." 
IS_XPN=1 - HOST_PROJECT=$(jq -r '.hostProject' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_NETWORK=$(jq -r '.clusterNetwork' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_COMPUTE_SUBNET=$(jq -r '.computeSubnet' ${SHARED_DIR}/xpn.json) - HOST_PROJECT_CONTROL_SUBNET=$(jq -r '.controlSubnet' "${SHARED_DIR}/xpn.json") - HOST_PROJECT_COMPUTE_SERVICE_ACCOUNT=$(jq -r '.computeServiceAccount' "${SHARED_DIR}/xpn.json") - HOST_PROJECT_CONTROL_SERVICE_ACCOUNT=$(jq -r '.controlServiceAccount' "${SHARED_DIR}/xpn.json") - HOST_PROJECT_PRIVATE_ZONE_NAME=$(jq -r '.privateZoneName' "${SHARED_DIR}/xpn.json") + HOST_PROJECT="$(jq -r '.hostProject' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_NETWORK="$(jq -r '.clusterNetwork' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_COMPUTE_SUBNET="$(jq -r '.computeSubnet' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_CONTROL_SUBNET="$(jq -r '.controlSubnet' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_COMPUTE_SERVICE_ACCOUNT="$(jq -r '.computeServiceAccount' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_CONTROL_SERVICE_ACCOUNT="$(jq -r '.controlServiceAccount' "${SHARED_DIR}/xpn.json")" + HOST_PROJECT_PRIVATE_ZONE_NAME="$(jq -r '.privateZoneName' "${SHARED_DIR}/xpn.json")" else # Set HOST_PROJECT to the cluster project so commands with `--project` work in both scenarios. - HOST_PROJECT=${PROJECT_NAME} + HOST_PROJECT="${PROJECT_NAME}" fi ## Create the VPC echo "$(date -u --rfc-3339=seconds) - Creating the VPC..." if [[ -v IS_XPN ]]; then echo "$(date -u --rfc-3339=seconds) - Using pre-existing XPN VPC..." 
- CLUSTER_NETWORK=${HOST_PROJECT_NETWORK} - COMPUTE_SUBNET=${HOST_PROJECT_COMPUTE_SUBNET} - CONTROL_SUBNET=${HOST_PROJECT_CONTROL_SUBNET} + CLUSTER_NETWORK="${HOST_PROJECT_NETWORK}" + COMPUTE_SUBNET="${HOST_PROJECT_COMPUTE_SUBNET}" + CONTROL_SUBNET="${HOST_PROJECT_CONTROL_SUBNET}" else cat <<EOF >01_vpc.yaml imports: @@ -109,12 +109,12 @@ resources: worker_subnet_cidr: '${WORKER_SUBNET_CIDR}' EOF - gcloud deployment-manager deployments create ${INFRA_ID}-vpc --config 01_vpc.yaml + gcloud deployment-manager deployments create "${INFRA_ID}-vpc" --config 01_vpc.yaml ## Configure VPC variables - CLUSTER_NETWORK=$(gcloud compute networks describe ${INFRA_ID}-network --format json | jq -r .selfLink) - CONTROL_SUBNET=$(gcloud compute networks subnets describe ${INFRA_ID}-master-subnet --region=${REGION} --format json | jq -r .selfLink) - COMPUTE_SUBNET=$(gcloud compute networks subnets describe ${INFRA_ID}-worker-subnet --region=${REGION} --format json | jq -r .selfLink) + CLUSTER_NETWORK="$(gcloud compute networks describe "${INFRA_ID}-network" --format json | jq -r .selfLink)" + CONTROL_SUBNET="$(gcloud compute networks subnets describe "${INFRA_ID}-master-subnet" "--region=${REGION}" --format json | jq -r .selfLink)" + COMPUTE_SUBNET="$(gcloud compute networks subnets describe "${INFRA_ID}-worker-subnet" "--region=${REGION}" --format json | jq -r .selfLink)" fi ## Create DNS entries and load balancers @@ -193,39 +193,39 @@ resources: EOF fi -gcloud deployment-manager deployments create ${INFRA_ID}-infra --config 02_infra.yaml +gcloud deployment-manager deployments create "${INFRA_ID}-infra" --config 02_infra.yaml ## Configure infra variables if [ -f 02_lb_int.py ]; then # workflow using internal load balancers # https://github.com/openshift/installer/pull/3270 - CLUSTER_IP=$(gcloud compute addresses describe ${INFRA_ID}-cluster-ip --region=${REGION} --format json | jq -r .address) + CLUSTER_IP="$(gcloud compute addresses describe "${INFRA_ID}-cluster-ip" 
"--region=${REGION}" --format json | jq -r .address)" else # for workflow before internal load balancers - CLUSTER_IP=$(gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address) + CLUSTER_IP="$(gcloud compute addresses describe "${INFRA_ID}-cluster-public-ip" "--region=${REGION}" --format json | jq -r .address)" fi -CLUSTER_PUBLIC_IP=$(gcloud compute addresses describe ${INFRA_ID}-cluster-public-ip --region=${REGION} --format json | jq -r .address) +CLUSTER_PUBLIC_IP="$(gcloud compute addresses describe "${INFRA_ID}-cluster-public-ip" "--region=${REGION}" --format json | jq -r .address)" ### Add internal DNS entries echo "$(date -u --rfc-3339=seconds) - Adding internal DNS entries..." if [ -f transaction.yaml ]; then rm transaction.yaml; fi -gcloud --project="${HOST_PROJECT}" dns record-sets transaction start --zone ${PRIVATE_ZONE_NAME} -gcloud --project="${HOST_PROJECT}" dns record-sets transaction add ${CLUSTER_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${PRIVATE_ZONE_NAME} -gcloud --project="${HOST_PROJECT}" dns record-sets transaction add ${CLUSTER_IP} --name api-int.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${PRIVATE_ZONE_NAME} -gcloud --project="${HOST_PROJECT}" dns record-sets transaction execute --zone ${PRIVATE_ZONE_NAME} +gcloud --project="${HOST_PROJECT}" dns record-sets transaction start --zone "${PRIVATE_ZONE_NAME}" +gcloud --project="${HOST_PROJECT}" dns record-sets transaction add "${CLUSTER_IP}" --name "api.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type A --zone "${PRIVATE_ZONE_NAME}" +gcloud --project="${HOST_PROJECT}" dns record-sets transaction add "${CLUSTER_IP}" --name "api-int.${CLUSTER_NAME}.${BASE_DOMAIN}." 
--ttl 60 --type A --zone "${PRIVATE_ZONE_NAME}" +gcloud --project="${HOST_PROJECT}" dns record-sets transaction execute --zone "${PRIVATE_ZONE_NAME}" ### Add external DNS entries (optional) echo "$(date -u --rfc-3339=seconds) - Adding external DNS entries..." if [ -f transaction.yaml ]; then rm transaction.yaml; fi -gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} -gcloud dns record-sets transaction add ${CLUSTER_PUBLIC_IP} --name api.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${BASE_DOMAIN_ZONE_NAME} -gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} +gcloud dns record-sets transaction start --zone "${BASE_DOMAIN_ZONE_NAME}" +gcloud dns record-sets transaction add "${CLUSTER_PUBLIC_IP}" --name "api.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type A --zone "${BASE_DOMAIN_ZONE_NAME}" +gcloud dns record-sets transaction execute --zone "${BASE_DOMAIN_ZONE_NAME}" ## Create firewall rules and IAM roles echo "$(date -u --rfc-3339=seconds) - Creating service accounts and firewall rules..." if [[ -v IS_XPN ]]; then echo "$(date -u --rfc-3339=seconds) - Using pre-existing XPN firewall rules..." echo "$(date -u --rfc-3339=seconds) - using pre-existing XPN service accounts..." 
- MASTER_SERVICE_ACCOUNT=${HOST_PROJECT_CONTROL_SERVICE_ACCOUNT} - WORKER_SERVICE_ACCOUNT=${HOST_PROJECT_COMPUTE_SERVICE_ACCOUNT} + MASTER_SERVICE_ACCOUNT="${HOST_PROJECT_CONTROL_SERVICE_ACCOUNT}" + WORKER_SERVICE_ACCOUNT="${HOST_PROJECT_COMPUTE_SERVICE_ACCOUNT}" elif [ -f 03_firewall.py ]; then # for workflow using 03_iam.py and 03_firewall.py # https://github.com/openshift/installer/pull/2574 cat <<EOF >03_security.yaml @@ -246,8 +246,8 @@ resources: infra_id: '${INFRA_ID}' EOF else # for workflow before splitting out 03_firewall.py - MASTER_NAT_IP=$(gcloud compute addresses describe ${INFRA_ID}-master-nat-ip --region ${REGION} --format json | jq -r .address) - WORKER_NAT_IP=$(gcloud compute addresses describe ${INFRA_ID}-worker-nat-ip --region ${REGION} --format json | jq -r .address) + MASTER_NAT_IP="$(gcloud compute addresses describe "${INFRA_ID}-master-nat-ip" --region "${REGION}" --format json | jq -r .address)" + WORKER_NAT_IP="$(gcloud compute addresses describe "${INFRA_ID}-worker-nat-ip" --region "${REGION}" --format json | jq -r .address)" cat <<EOF >03_security.yaml imports: - path: 03_security.py @@ -264,39 +264,39 @@ EOF fi if [[ -f 03_security.yaml ]]; then - gcloud deployment-manager deployments create ${INFRA_ID}-security --config 03_security.yaml + gcloud deployment-manager deployments create "${INFRA_ID}-security" --config 03_security.yaml ## Configure security variables - MASTER_SERVICE_ACCOUNT=$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email') - WORKER_SERVICE_ACCOUNT=$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." --format json | jq -r '.[0].email') + MASTER_SERVICE_ACCOUNT="$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-m@${PROJECT_NAME}." --format json | jq -r '.[0].email')" + WORKER_SERVICE_ACCOUNT="$(gcloud iam service-accounts list --filter "email~^${INFRA_ID}-w@${PROJECT_NAME}." 
--format json | jq -r '.[0].email')" ## Add required roles to IAM service accounts echo "$(date -u --rfc-3339=seconds) - Adding required roles to IAM service accounts..." - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.instanceAdmin" - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkAdmin" - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.securityAdmin" - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/iam.serviceAccountUser" - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/storage.admin" - - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.viewer" - backoff gcloud projects add-iam-policy-binding ${PROJECT_NAME} --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/storage.admin" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.instanceAdmin" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.networkAdmin" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/compute.securityAdmin" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/iam.serviceAccountUser" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${MASTER_SERVICE_ACCOUNT}" --role "roles/storage.admin" 
+ + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/compute.viewer" + backoff gcloud projects add-iam-policy-binding "${PROJECT_NAME}" --member "serviceAccount:${WORKER_SERVICE_ACCOUNT}" --role "roles/storage.admin" fi ## Generate a service-account-key for signing the bootstrap.ign url -gcloud iam service-accounts keys create service-account-key.json --iam-account=${MASTER_SERVICE_ACCOUNT} +gcloud iam service-accounts keys create service-account-key.json "--iam-account=${MASTER_SERVICE_ACCOUNT}" ## Create the cluster image. echo "$(date -u --rfc-3339=seconds) - Creating the cluster image..." -IMAGE_SOURCE=$(cat /var/lib/openshift-install/rhcos.json | jq -r .gcp.url) +IMAGE_SOURCE="$(jq -r .gcp.url /var/lib/openshift-install/rhcos.json)" gcloud compute images create "${INFRA_ID}-rhcos-image" --source-uri="${IMAGE_SOURCE}" -CLUSTER_IMAGE=$(gcloud compute images describe ${INFRA_ID}-rhcos-image --format json | jq -r .selfLink) +CLUSTER_IMAGE="$(gcloud compute images describe "${INFRA_ID}-rhcos-image" --format json | jq -r .selfLink)" ## Upload the bootstrap.ign to a new bucket echo "$(date -u --rfc-3339=seconds) - Uploading the bootstrap.ign to a new bucket..." -gsutil mb gs://${INFRA_ID}-bootstrap-ignition -gsutil cp bootstrap.ign gs://${INFRA_ID}-bootstrap-ignition/ +gsutil mb "gs://${INFRA_ID}-bootstrap-ignition" +gsutil cp bootstrap.ign "gs://${INFRA_ID}-bootstrap-ignition/" -BOOTSTRAP_IGN=$(gsutil signurl -d 1h service-account-key.json gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign | grep "^gs:" | awk '{print $5}') +BOOTSTRAP_IGN="$(gsutil signurl -d 1h service-account-key.json "gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign" | grep "^gs:" | awk '{print $5}')" ## Launch temporary bootstrap resources echo "$(date -u --rfc-3339=seconds) - Launching temporary bootstrap resources..." 
@@ -318,22 +318,22 @@ resources: bootstrap_ign: '${BOOTSTRAP_IGN}' EOF -gcloud deployment-manager deployments create ${INFRA_ID}-bootstrap --config 04_bootstrap.yaml +gcloud deployment-manager deployments create "${INFRA_ID}-bootstrap" --config 04_bootstrap.yaml ## Add the bootstrap instance to the load balancers echo "$(date -u --rfc-3339=seconds) - Adding the bootstrap instance to the load balancers..." if [ -f 02_lb_int.py ]; then # for workflow using internal load balancers # https://github.com/openshift/installer/pull/3270 # https://github.com/openshift/installer/pull/3309 - gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-bootstrap-instance-group --zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap - gcloud compute backend-services add-backend ${INFRA_ID}-api-internal-backend-service --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-instance-group --instance-group-zone=${ZONE_0} + gcloud compute instance-groups unmanaged add-instances "${INFRA_ID}-bootstrap-instance-group" "--zone=${ZONE_0}" "--instances=${INFRA_ID}-bootstrap" + gcloud compute backend-services add-backend "${INFRA_ID}-api-internal-backend-service" "--region=${REGION}" "--instance-group=${INFRA_ID}-bootstrap-instance-group" "--instance-group-zone=${ZONE_0}" else # for workflow before internal load balancers - gcloud compute target-pools add-instances ${INFRA_ID}-ign-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-bootstrap - gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone=${ZONE_0} --instances=${INFRA_ID}-bootstrap + gcloud compute target-pools add-instances "${INFRA_ID}-ign-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-bootstrap" + gcloud compute target-pools add-instances "${INFRA_ID}-api-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-bootstrap" fi -BOOTSTRAP_IP=$(gcloud compute addresses describe --region ${REGION} ${INFRA_ID}-bootstrap-public-ip --format json | jq -r 
.address) -GATHER_BOOTSTRAP_ARGS="--bootstrap ${BOOTSTRAP_IP}" +BOOTSTRAP_IP="$(gcloud compute addresses describe --region "${REGION}" "${INFRA_ID}-bootstrap-public-ip" --format json | jq -r .address)" +GATHER_BOOTSTRAP_ARGS=('--bootstrap' "${BOOTSTRAP_IP}") ## Launch permanent control plane echo "$(date -u --rfc-3339=seconds) - Launching permanent control plane..." @@ -357,12 +357,12 @@ resources: ignition: '${MASTER_IGNITION}' EOF -gcloud deployment-manager deployments create ${INFRA_ID}-control-plane --config 05_control_plane.yaml +gcloud deployment-manager deployments create "${INFRA_ID}-control-plane" --config 05_control_plane.yaml ## Determine name of master nodes # https://github.com/openshift/installer/pull/3713 set +e -fgrep -e '-master-0' 05_control_plane.py +grep -Fe '-master-0' 05_control_plane.py ret="$?" set -e if [[ "$ret" == 0 ]]; then @@ -374,47 +374,47 @@ else fi ## Configure control plane variables -MASTER0_IP=$(gcloud compute instances describe ${INFRA_ID}-${MASTER}-0 --zone ${ZONE_0} --format json | jq -r .networkInterfaces[0].networkIP) -MASTER1_IP=$(gcloud compute instances describe ${INFRA_ID}-${MASTER}-1 --zone ${ZONE_1} --format json | jq -r .networkInterfaces[0].networkIP) -MASTER2_IP=$(gcloud compute instances describe ${INFRA_ID}-${MASTER}-2 --zone ${ZONE_2} --format json | jq -r .networkInterfaces[0].networkIP) +MASTER0_IP="$(gcloud compute instances describe "${INFRA_ID}-${MASTER}-0" --zone "${ZONE_0}" --format json | jq -r .networkInterfaces[0].networkIP)" +MASTER1_IP="$(gcloud compute instances describe "${INFRA_ID}-${MASTER}-1" --zone "${ZONE_1}" --format json | jq -r .networkInterfaces[0].networkIP)" +MASTER2_IP="$(gcloud compute instances describe "${INFRA_ID}-${MASTER}-2" --zone "${ZONE_2}" --format json | jq -r .networkInterfaces[0].networkIP)" -GATHER_BOOTSTRAP_ARGS="${GATHER_BOOTSTRAP_ARGS} --master ${MASTER0_IP} --master ${MASTER1_IP} --master ${MASTER2_IP}" +GATHER_BOOTSTRAP_ARGS+=('--master' "${MASTER0_IP}" '--master' 
"${MASTER1_IP}" '--master' "${MASTER2_IP}") ## Add DNS entries for control plane etcd echo "$(date -u --rfc-3339=seconds) - Adding DNS entries for control plane etcd..." if [ -f transaction.yaml ]; then rm transaction.yaml; fi -gcloud --project=${HOST_PROJECT} dns record-sets transaction start --zone ${PRIVATE_ZONE_NAME} -gcloud --project=${HOST_PROJECT} dns record-sets transaction add ${MASTER0_IP} --name etcd-0.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${PRIVATE_ZONE_NAME} -gcloud --project=${HOST_PROJECT} dns record-sets transaction add ${MASTER1_IP} --name etcd-1.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${PRIVATE_ZONE_NAME} -gcloud --project=${HOST_PROJECT} dns record-sets transaction add ${MASTER2_IP} --name etcd-2.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 60 --type A --zone ${PRIVATE_ZONE_NAME} -gcloud --project=${HOST_PROJECT} dns record-sets transaction add \ +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction start --zone "${PRIVATE_ZONE_NAME}" +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction add "${MASTER0_IP}" --name "etcd-0.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type A --zone "${PRIVATE_ZONE_NAME}" +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction add "${MASTER1_IP}" --name "etcd-1.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type A --zone "${PRIVATE_ZONE_NAME}" +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction add "${MASTER2_IP}" --name "etcd-2.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type A --zone "${PRIVATE_ZONE_NAME}" +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction add \ "0 10 2380 etcd-0.${CLUSTER_NAME}.${BASE_DOMAIN}." \ "0 10 2380 etcd-1.${CLUSTER_NAME}.${BASE_DOMAIN}." \ "0 10 2380 etcd-2.${CLUSTER_NAME}.${BASE_DOMAIN}." \ - --name _etcd-server-ssl._tcp.${CLUSTER_NAME}.${BASE_DOMAIN}. 
--ttl 60 --type SRV --zone ${PRIVATE_ZONE_NAME} -gcloud --project=${HOST_PROJECT} dns record-sets transaction execute --zone ${PRIVATE_ZONE_NAME} + --name "_etcd-server-ssl._tcp.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 60 --type SRV --zone "${PRIVATE_ZONE_NAME}" +gcloud "--project=${HOST_PROJECT}" dns record-sets transaction execute --zone "${PRIVATE_ZONE_NAME}" ## Add control plane instances to load balancers echo "$(date -u --rfc-3339=seconds) - Adding control plane instances to load balancers..." if [ -f 02_lb_int.py ]; then # for workflow using internal load balancers # https://github.com/openshift/installer/pull/3270 - gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_0}-instance-group --zone=${ZONE_0} --instances=${INFRA_ID}-${MASTER}-0 - gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_1}-instance-group --zone=${ZONE_1} --instances=${INFRA_ID}-${MASTER}-1 - gcloud compute instance-groups unmanaged add-instances ${INFRA_ID}-master-${ZONE_2}-instance-group --zone=${ZONE_2} --instances=${INFRA_ID}-${MASTER}-2 + gcloud compute instance-groups unmanaged add-instances "${INFRA_ID}-master-${ZONE_0}-instance-group" "--zone=${ZONE_0}" "--instances=${INFRA_ID}-${MASTER}-0" + gcloud compute instance-groups unmanaged add-instances "${INFRA_ID}-master-${ZONE_1}-instance-group" "--zone=${ZONE_1}" "--instances=${INFRA_ID}-${MASTER}-1" + gcloud compute instance-groups unmanaged add-instances "${INFRA_ID}-master-${ZONE_2}-instance-group" "--zone=${ZONE_2}" "--instances=${INFRA_ID}-${MASTER}-2" else # for workflow before internal load balancers - gcloud compute target-pools add-instances ${INFRA_ID}-ign-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-${MASTER}-0 - gcloud compute target-pools add-instances ${INFRA_ID}-ign-target-pool --instances-zone="${ZONE_1}" --instances=${INFRA_ID}-${MASTER}-1 - gcloud compute target-pools add-instances ${INFRA_ID}-ign-target-pool --instances-zone="${ZONE_2}" 
--instances=${INFRA_ID}-${MASTER}-2 + gcloud compute target-pools add-instances "${INFRA_ID}-ign-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-${MASTER}-0" + gcloud compute target-pools add-instances "${INFRA_ID}-ign-target-pool" "--instances-zone=${ZONE_1}" "--instances=${INFRA_ID}-${MASTER}-1" + gcloud compute target-pools add-instances "${INFRA_ID}-ign-target-pool" "--instances-zone=${ZONE_2}" "--instances=${INFRA_ID}-${MASTER}-2" fi ### Add control plane instances to external load balancer target pools (optional) -gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-${MASTER}-0 -gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_1}" --instances=${INFRA_ID}-${MASTER}-1 -gcloud compute target-pools add-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_2}" --instances=${INFRA_ID}-${MASTER}-2 +gcloud compute target-pools add-instances "${INFRA_ID}-api-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-${MASTER}-0" +gcloud compute target-pools add-instances "${INFRA_ID}-api-target-pool" "--instances-zone=${ZONE_1}" "--instances=${INFRA_ID}-${MASTER}-1" +gcloud compute target-pools add-instances "${INFRA_ID}-api-target-pool" "--instances-zone=${ZONE_2}" "--instances=${INFRA_ID}-${MASTER}-2" ## Launch additional compute nodes echo "$(date -u --rfc-3339=seconds) - Launching additional compute nodes..." 
-mapfile -t ZONES < <(gcloud compute regions describe ${REGION} --format=json | jq -r .zones[] | cut -d '/' -f9) +mapfile -t ZONES < <(gcloud compute regions describe "${REGION}" --format=json | jq -r .zones[] | cut -d '/' -f9) cat <<EOF >06_worker.yaml imports: - path: 06_worker.py @@ -437,7 +437,7 @@ for compute in {0..2}; do EOF done; -gcloud deployment-manager deployments create ${INFRA_ID}-worker --config 06_worker.yaml +gcloud deployment-manager deployments create "${INFRA_ID}-worker" --config 06_worker.yaml ## Monitor for `bootstrap-complete` echo "$(date -u --rfc-3339=seconds) - Monitoring for bootstrap to complete" @@ -448,13 +448,13 @@ wait "$!" ret="$?" set -e -if [ $ret -ne 0 ]; then +if [ "$ret" -ne 0 ]; then set +e # Attempt to gather bootstrap logs. echo "$(date -u --rfc-3339=seconds) - Bootstrap failed, attempting to gather bootstrap logs..." - openshift-install --dir=${dir} gather bootstrap --key "${SSH_PRIV_KEY_PATH}" ${GATHER_BOOTSTRAP_ARGS} + openshift-install "--dir=${dir}" gather bootstrap --key "${SSH_PRIV_KEY_PATH}" "${GATHER_BOOTSTRAP_ARGS[@]}" sed 's/password: .*/password: REDACTED/' "${dir}/.openshift_install.log" >>"${ARTIFACT_DIR}/.openshift_install.log" - cp log-bundle-*.tar.gz ${ARTIFACT_DIR} + cp log-bundle-*.tar.gz "${ARTIFACT_DIR}" set -e exit "$ret" fi @@ -464,14 +464,14 @@ echo "$(date -u --rfc-3339=seconds) - Bootstrap complete, destroying bootstrap r if [ -f 02_lb_int.py ]; then # for workflow using internal load balancers # https://github.com/openshift/installer/pull/3270 # https://github.com/openshift/installer/pull/3309 - gcloud compute backend-services remove-backend ${INFRA_ID}-api-internal-backend-service --region=${REGION} --instance-group=${INFRA_ID}-bootstrap-instance-group --instance-group-zone=${ZONE_0} + gcloud compute backend-services remove-backend "${INFRA_ID}-api-internal-backend-service" "--region=${REGION}" "--instance-group=${INFRA_ID}-bootstrap-instance-group" "--instance-group-zone=${ZONE_0}" else # for workflow 
before internal load balancers - gcloud compute target-pools remove-instances ${INFRA_ID}-ign-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-bootstrap - gcloud compute target-pools remove-instances ${INFRA_ID}-api-target-pool --instances-zone="${ZONE_0}" --instances=${INFRA_ID}-bootstrap + gcloud compute target-pools remove-instances "${INFRA_ID}-ign-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-bootstrap" + gcloud compute target-pools remove-instances "${INFRA_ID}-api-target-pool" "--instances-zone=${ZONE_0}" "--instances=${INFRA_ID}-bootstrap" fi -gsutil rm gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign -gsutil rb gs://${INFRA_ID}-bootstrap-ignition -gcloud deployment-manager deployments delete -q ${INFRA_ID}-bootstrap +gsutil rm "gs://${INFRA_ID}-bootstrap-ignition/bootstrap.ign" +gsutil rb "gs://${INFRA_ID}-bootstrap-ignition" +gcloud deployment-manager deployments delete -q "${INFRA_ID}-bootstrap" ## Approving the CSR requests for nodes echo "$(date -u --rfc-3339=seconds) - Approving the CSR requests for nodes..." @@ -488,10 +488,10 @@ approve_csrs & if [[ -v IS_XPN ]]; then echo "$(date -u --rfc-3339=seconds) - Waiting for the default-router to have an external ip..." set +e - ROUTER_IP=$(oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}') + ROUTER_IP="$(oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}')" while [[ "$ROUTER_IP" == "" || "$ROUTER_IP" == "<pending>" ]]; do sleep 10; - ROUTER_IP=$(oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}') + ROUTER_IP="$(oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}')" done set -e fi @@ -500,14 +500,14 @@ fi if [[ -v IS_XPN ]]; then echo "$(date -u --rfc-3339=seconds) - Creating default router DNS entries..." 
if [ -f transaction.yaml ]; then rm transaction.yaml; fi - gcloud --project=${HOST_PROJECT} dns record-sets transaction start --zone ${PRIVATE_ZONE_NAME} - gcloud --project=${HOST_PROJECT} dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 300 --type A --zone ${PRIVATE_ZONE_NAME} - gcloud --project=${HOST_PROJECT} dns record-sets transaction execute --zone ${PRIVATE_ZONE_NAME} + gcloud "--project=${HOST_PROJECT}" dns record-sets transaction start --zone "${PRIVATE_ZONE_NAME}" + gcloud "--project=${HOST_PROJECT}" dns record-sets transaction add "${ROUTER_IP}" --name "*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 300 --type A --zone "${PRIVATE_ZONE_NAME}" + gcloud "--project=${HOST_PROJECT}" dns record-sets transaction execute --zone "${PRIVATE_ZONE_NAME}" if [ -f transaction.yaml ]; then rm transaction.yaml; fi - gcloud dns record-sets transaction start --zone ${BASE_DOMAIN_ZONE_NAME} - gcloud dns record-sets transaction add ${ROUTER_IP} --name \*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}. --ttl 300 --type A --zone ${BASE_DOMAIN_ZONE_NAME} - gcloud dns record-sets transaction execute --zone ${BASE_DOMAIN_ZONE_NAME} + gcloud dns record-sets transaction start --zone "${BASE_DOMAIN_ZONE_NAME}" + gcloud dns record-sets transaction add "${ROUTER_IP}" --name "*.apps.${CLUSTER_NAME}.${BASE_DOMAIN}." --ttl 300 --type A --zone "${BASE_DOMAIN_ZONE_NAME}" + gcloud dns record-sets transaction execute --zone "${BASE_DOMAIN_ZONE_NAME}" fi ## Monitor for cluster completion