#!/bin/bash

set -x

TMP_DIR=$(mktemp -d "$(pwd)/XXXXXXXXXXXX")
TMP_BASENAME=$(basename "${TMP_DIR}")
GOPATH="/go"
WORK_DIR="${GOPATH}/src/github.com/Azure/aks-engine"

# Assumes we're running from the git root of aks-engine
if [ "${BUILD_AKS_ENGINE}" = "true" ]; then
  docker run --rm \
    -v "$(pwd)":${WORK_DIR} \
    -w ${WORK_DIR} \
    "${DEV_IMAGE}" make build-binary || exit 1
fi

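# Write the API model input to a temp file under the repo root so the
# bind-mounted test containers can read it by relative path.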
cat > "${TMP_DIR}/apimodel-input.json" <<END
${API_MODEL_INPUT}
END

echo "Running E2E tests against a cluster built with the following API model:"
cat "${TMP_DIR}/apimodel-input.json"

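# Keep the cluster around after the first test pass if it will be reused for
# upgrade and/or scale scenarios; otherwise honor CLEANUP_ON_EXIT as-is.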
CLEANUP_AFTER_DEPLOYMENT=${CLEANUP_ON_EXIT}
if [ "${UPGRADE_CLUSTER}" = "true" ]; then
  CLEANUP_AFTER_DEPLOYMENT="false"
elif [ "${SCALE_CLUSTER}" = "true" ]; then
  CLEANUP_AFTER_DEPLOYMENT="false"
fi

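# Build the ginkgo skip regexes for the follow-up test passes: scenario-specific
# skips are OR'd onto the base GINKGO_SKIP, and the post-upgrade pass also
# inherits the scale-down skips when scale testing is enabled.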
if [ -n "${GINKGO_SKIP}" ]; then
  if [ -n "${GINKGO_SKIP_AFTER_SCALE_DOWN}" ]; then
    SKIP_AFTER_SCALE_DOWN="${GINKGO_SKIP}|${GINKGO_SKIP_AFTER_SCALE_DOWN}"
  else
    SKIP_AFTER_SCALE_DOWN="${GINKGO_SKIP}"
  fi
  if [ -n "${GINKGO_SKIP_AFTER_SCALE_UP}" ]; then
    SKIP_AFTER_SCALE_UP="${GINKGO_SKIP}|${GINKGO_SKIP_AFTER_SCALE_UP}"
  else
    SKIP_AFTER_SCALE_UP="${GINKGO_SKIP}"
  fi
  if [ "${SCALE_CLUSTER}" = "true" ]; then
    SKIP_AFTER_UPGRADE="${GINKGO_SKIP}|${SKIP_AFTER_SCALE_DOWN}"
  else
    SKIP_AFTER_UPGRADE="${GINKGO_SKIP}"
  fi
else
  SKIP_AFTER_SCALE_DOWN="${GINKGO_SKIP_AFTER_SCALE_DOWN}"
  SKIP_AFTER_SCALE_UP="${GINKGO_SKIP_AFTER_SCALE_UP}"
  if [ "${SCALE_CLUSTER}" = "true" ]; then
    SKIP_AFTER_UPGRADE="${SKIP_AFTER_SCALE_DOWN}"
  else
    SKIP_AFTER_UPGRADE=""
  fi
fi

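# Deploy a cluster from the API model and run the full E2E suite against it.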
docker run --rm \
-v "$(pwd)":${WORK_DIR} \
-w ${WORK_DIR} \
-e CLUSTER_DEFINITION=${TMP_BASENAME}/apimodel-input.json \
-e CLIENT_ID="${CLIENT_ID}" \
-e CLIENT_SECRET="${CLIENT_SECRET}" \
-e CLIENT_OBJECTID="${CLIENT_OBJECTID}" \
-e TENANT_ID="${TENANT_ID}" \
-e SUBSCRIPTION_ID="${SUBSCRIPTION_ID}" \
-e ORCHESTRATOR=kubernetes \
-e ORCHESTRATOR_RELEASE="${ORCHESTRATOR_RELEASE}" \
-e CREATE_VNET="${CREATE_VNET}" \
-e TIMEOUT="${E2E_TEST_TIMEOUT}" \
-e CLEANUP_ON_EXIT="${CLEANUP_AFTER_DEPLOYMENT}" \
-e SKIP_LOGS_COLLECTION="${SKIP_LOGS_COLLECTION}" \
-e REGIONS="${REGION_OPTIONS}" \
-e WINDOWS_NODE_IMAGE_GALLERY="${WINDOWS_NODE_IMAGE_GALLERY}" \
-e WINDOWS_NODE_IMAGE_NAME="${WINDOWS_NODE_IMAGE_NAME}" \
-e WINDOWS_NODE_IMAGE_RESOURCE_GROUP="${WINDOWS_NODE_IMAGE_RESOURCE_GROUP}" \
-e WINDOWS_NODE_IMAGE_SUBSCRIPTION_ID="${WINDOWS_NODE_IMAGE_SUBSCRIPTION_ID}" \
-e WINDOWS_NODE_IMAGE_VERSION="${WINDOWS_NODE_IMAGE_VERSION}" \
-e WINDOWS_NODE_VHD_URL="${WINDOWS_NODE_VHD_URL}" \
-e IS_JENKINS="${IS_JENKINS}" \
-e SKIP_TEST="${SKIP_TESTS}" \
-e GINKGO_FOCUS="${GINKGO_FOCUS}" \
-e GINKGO_SKIP="${GINKGO_SKIP}" \
"${DEV_IMAGE}" make test-kubernetes || exit 1

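# Derive the resource group (newest _output directory) and region (second
# dash-delimited field of that name) of the cluster that was just deployed,
# then rebuild aks-engine from UPGRADE_FORK/UPGRADE_BRANCH so the scale and
# upgrade commands below run against that build.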
if [ "${UPGRADE_CLUSTER}" = "true" ] || [ "${SCALE_CLUSTER}" = "true" ]; then
  # shellcheck disable=SC2012
  RESOURCE_GROUP=$(ls -dt1 _output/* | head -n 1 | cut -d/ -f2)
  # shellcheck disable=SC2012
  REGION=$(ls -dt1 _output/* | head -n 1 | cut -d/ -f2 | cut -d- -f2)
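  # Roughly one run in four, wipe the bookkeeping tags from every VM and VMSS in the
  # resource group so that scale and upgrade are exercised against untagged resources.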
  if [ $(( RANDOM % 4 )) -eq 3 ]; then
    echo "Removing bookkeeping tags from VMs in resource group ${RESOURCE_GROUP} ..."
    az login --username "${CLIENT_ID}" --password "${CLIENT_SECRET}" --tenant "${TENANT_ID}" --service-principal > /dev/null
    for vm_type in vm vmss; do
      for vm in $(az $vm_type list -g $RESOURCE_GROUP --subscription $SUBSCRIPTION_ID --query '[].name' -o table | tail -n +3); do
        az $vm_type update -n $vm -g $RESOURCE_GROUP --subscription $SUBSCRIPTION_ID --set tags={} > /dev/null
      done
    done
  fi
  git remote add $UPGRADE_FORK https://github.com/$UPGRADE_FORK/aks-engine.git
  git fetch $UPGRADE_FORK
  git branch -D $UPGRADE_FORK/$UPGRADE_BRANCH
  git checkout -b $UPGRADE_FORK/$UPGRADE_BRANCH --track $UPGRADE_FORK/$UPGRADE_BRANCH
  git pull
  docker run --rm \
    -v "$(pwd)":${WORK_DIR} \
    -w ${WORK_DIR} \
    "${DEV_IMAGE}" make build-binary > /dev/null 2>&1 || exit 1
else
  exit 0
fi

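# Scale every agent pool down to a single node, then re-run the test suite against the
# existing cluster with the post-scale-down skip settings (no cleanup, no log collection).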
if [ "${SCALE_CLUSTER}" = "true" ]; then
  for nodepool in $(echo "${API_MODEL_INPUT}" | jq -r '.properties.agentPoolProfiles[].name'); do
    docker run --rm \
      -v "$(pwd)":${WORK_DIR} \
      -w ${WORK_DIR} \
      -e RESOURCE_GROUP=$RESOURCE_GROUP \
      -e REGION=$REGION \
      ${DEV_IMAGE} \
      ./bin/aks-engine scale \
      --subscription-id $SUBSCRIPTION_ID \
      --deployment-dir _output/$RESOURCE_GROUP \
      --location $REGION \
      --resource-group $RESOURCE_GROUP \
      --master-FQDN "$RESOURCE_GROUP.$REGION.cloudapp.azure.com" \
      --node-pool $nodepool \
      --new-node-count 1 \
      --auth-method client_secret \
      --client-id ${CLIENT_ID} \
      --client-secret ${CLIENT_SECRET} || exit 1
  done

  docker run --rm \
    -v "$(pwd)":${WORK_DIR} \
    -w ${WORK_DIR} \
    -e CLIENT_ID=${CLIENT_ID} \
    -e CLIENT_SECRET=${CLIENT_SECRET} \
    -e CLIENT_OBJECTID=${CLIENT_OBJECTID} \
    -e TENANT_ID=${TENANT_ID} \
    -e SUBSCRIPTION_ID=$SUBSCRIPTION_ID \
    -e ORCHESTRATOR=kubernetes \
    -e NAME=$RESOURCE_GROUP \
    -e TIMEOUT=${E2E_TEST_TIMEOUT} \
    -e CLEANUP_ON_EXIT=false \
    -e REGIONS=$REGION \
    -e IS_JENKINS=${IS_JENKINS} \
    -e SKIP_LOGS_COLLECTION=true \
    -e GINKGO_SKIP="${SKIP_AFTER_SCALE_DOWN}" \
    -e GINKGO_FOCUS="${GINKGO_FOCUS}" \
    -e SKIP_TEST=${SKIP_TESTS_AFTER_SCALE_DOWN} \
    ${DEV_IMAGE} make test-kubernetes || exit 1
fi

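# Upgrade the cluster through each target version in UPGRADE_VERSIONS, re-running the
# test suite with the post-upgrade skip settings after every upgrade step.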
if [ "${UPGRADE_CLUSTER}" = "true" ]; then
  for ver_target in $UPGRADE_VERSIONS; do
    docker run --rm \
      -v "$(pwd)":${WORK_DIR} \
      -w ${WORK_DIR} \
      -e RESOURCE_GROUP=$RESOURCE_GROUP \
      -e REGION=$REGION \
      ${DEV_IMAGE} \
      ./bin/aks-engine upgrade --force \
      --subscription-id $SUBSCRIPTION_ID \
      --deployment-dir _output/$RESOURCE_GROUP \
      --location $REGION \
      --resource-group $RESOURCE_GROUP \
      --upgrade-version $ver_target \
      --vm-timeout 20 \
      --auth-method client_secret \
      --client-id ${CLIENT_ID} \
      --client-secret ${CLIENT_SECRET} || exit 1

    docker run --rm \
      -v "$(pwd)":${WORK_DIR} \
      -w ${WORK_DIR} \
      -e CLIENT_ID=${CLIENT_ID} \
      -e CLIENT_SECRET=${CLIENT_SECRET} \
      -e CLIENT_OBJECTID=${CLIENT_OBJECTID} \
      -e TENANT_ID=${TENANT_ID} \
      -e SUBSCRIPTION_ID=$SUBSCRIPTION_ID \
      -e ORCHESTRATOR=kubernetes \
      -e NAME=$RESOURCE_GROUP \
      -e TIMEOUT=${E2E_TEST_TIMEOUT} \
      -e CLEANUP_ON_EXIT=false \
      -e REGIONS=$REGION \
      -e IS_JENKINS=${IS_JENKINS} \
      -e SKIP_LOGS_COLLECTION=${SKIP_LOGS_COLLECTION} \
      -e GINKGO_SKIP="${SKIP_AFTER_UPGRADE}" \
      -e GINKGO_FOCUS="${GINKGO_FOCUS}" \
      -e SKIP_TEST=${SKIP_TESTS_AFTER_UPGRADE} \
      ${DEV_IMAGE} make test-kubernetes || exit 1
  done
fi

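# Scale every agent pool back up to NODE_COUNT nodes and run a final test pass; this
# pass honors the original CLEANUP_ON_EXIT setting so the cluster can be torn down.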
if [ "${SCALE_CLUSTER}" = "true" ]; then
  for nodepool in $(echo "${API_MODEL_INPUT}" | jq -r '.properties.agentPoolProfiles[].name'); do
    docker run --rm \
      -v "$(pwd)":${WORK_DIR} \
      -w ${WORK_DIR} \
      -e RESOURCE_GROUP=$RESOURCE_GROUP \
      -e REGION=$REGION \
      ${DEV_IMAGE} \
      ./bin/aks-engine scale \
      --subscription-id $SUBSCRIPTION_ID \
      --deployment-dir _output/$RESOURCE_GROUP \
      --location $REGION \
      --resource-group $RESOURCE_GROUP \
      --master-FQDN "$RESOURCE_GROUP.$REGION.cloudapp.azure.com" \
      --node-pool $nodepool \
      --new-node-count $NODE_COUNT \
      --auth-method client_secret \
      --client-id ${CLIENT_ID} \
      --client-secret ${CLIENT_SECRET} || exit 1
  done

  docker run --rm \
    -v "$(pwd)":${WORK_DIR} \
    -w ${WORK_DIR} \
    -e CLIENT_ID=${CLIENT_ID} \
    -e CLIENT_SECRET=${CLIENT_SECRET} \
    -e CLIENT_OBJECTID=${CLIENT_OBJECTID} \
    -e TENANT_ID=${TENANT_ID} \
    -e SUBSCRIPTION_ID=$SUBSCRIPTION_ID \
    -e ORCHESTRATOR=kubernetes \
    -e NAME=$RESOURCE_GROUP \
    -e TIMEOUT=${E2E_TEST_TIMEOUT} \
    -e CLEANUP_ON_EXIT=${CLEANUP_ON_EXIT} \
    -e REGIONS=$REGION \
    -e IS_JENKINS=${IS_JENKINS} \
    -e SKIP_LOGS_COLLECTION=${SKIP_LOGS_COLLECTION} \
    -e GINKGO_SKIP="${SKIP_AFTER_SCALE_UP}" \
    -e GINKGO_FOCUS="${GINKGO_FOCUS}" \
    -e SKIP_TEST=${SKIP_TESTS_AFTER_SCALE_DOWN} \
    ${DEV_IMAGE} make test-kubernetes || exit 1
fi