diff --git a/.drone.local.yml b/.drone.local.yml index 90d39fb933..02626a709d 100644 --- a/.drone.local.yml +++ b/.drone.local.yml @@ -1,44 +1,44 @@ ---- workspace: base: /go path: src/github.com/vmware/vic pipeline: - clone: - image: plugins/git - tags: true - # dont clone submodules - recursive: false - vic-integration-test-on-pr: - image: ${TEST_BUILD_IMAGE=gcr.io/eminent-nation-87317/vic-integration-test:1.42} + image: '${TEST_BUILD_IMAGE=gcr.io/eminent-nation-87317/vic-integration-test:1.44}' pull: true environment: BIN: bin GOPATH: /go SHELL: /bin/bash - VIC_ESX_TEST_URL: ${VIC_ESX_TEST_URL} LOG_TEMP_DIR: install-logs - DRONE_SERVER: ${DRONE_SERVER} - GITHUB_AUTOMATION_API_KEY: ${GITHUB_AUTOMATION_API_KEY} - DRONE_TOKEN: ${DRONE_TOKEN} - TEST_URL_ARRAY: ${TEST_URL_ARRAY} - TEST_USERNAME: ${TEST_USERNAME} - TEST_PASSWORD: ${TEST_PASSWORD} - TEST_DATASTORE: ${TEST_DATASTORE} - TEST_TIMEOUT: ${TEST_TIMEOUT} - GOVC_INSECURE: true - GOVC_USERNAME: ${TEST_USERNAME} - GOVC_PASSWORD: ${TEST_PASSWORD} - GOVC_DATASTORE: ${TEST_DATASTORE} - GS_PROJECT_ID: ${GS_PROJECT_ID} - GS_CLIENT_EMAIL: ${GS_CLIENT_EMAIL} - GS_PRIVATE_KEY: ${GS_PRIVATE_KEY} - DEBUG_VCH: ${DEBUG_VCH} - DOMAIN: ${DOMAIN} + secrets: + - bridge_network + - debug_vch + - drone_server + - drone_token + - drone_machine + - github_automation_api_key + - gs_client_email + - gs_private_key + - gs_project_id + - public_network + - registry_password + - reporting_server_url + - syslog_passwd + - syslog_server + - syslog_user + - test_datastore + - test_resource + - test_timeout + - test_password + - test_url_array + - test_username commands: + - 'export GOVC_INSECURE=true' + - 'export GOVC_USERNAME=${TEST_USERNAME}' + - 'export GOVC_PASSWORD=${TEST_PASSWORD}' + - 'export GOVC_DATASTORE=${TEST_DATASTORE}' + - 'export DOMAIN=${DOMAIN}' - tests/integration-test.sh - # - pybot tests/test-cases/Group1-Docker-Commands - # - pybot tests/test-cases/Group1-Docker-Commands/1-01-Docker-Info.robot volumes: - /tmp diff 
--git a/.drone.yml b/.drone.yml index 607f52f8d8..9bc7cb3429 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,85 +1,89 @@ -# After any change to this file you MUST re-sign and checkin the .drone.yml.sig -# When you are ready to submit a pull request, you must regenerate .drone.yml.sig for the vmware/vic repo: -# $ export DRONE_SERVER=https://ci.vcna.io -# $ export DRONE_TOKEN= -# $ drone sign vmware/vic -# The secrets file is in our local git repo. Ask mhagen for access. ---- workspace: + base: /go path: src/github.com/vmware/vic pipeline: + clone: image: plugins/git tags: true - # dont clone submodules recursive: false - clone-pr: - image: harbor.ci.drone.local/library/git-clone:1.0 - pull: true - environment: - DRONE_PULL_REQUEST: ${DRONE_PULL_REQUEST} - when: - event: [ pull_request ] display-status: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true environment: BIN: bin SHELL: /bin/bash - COMMIT: ${DRONE_COMMIT} commands: - - echo "Expecting ${DRONE_COMMIT}" - - git log -5 - - git log -1 --pretty=oneline | grep "^${DRONE_COMMIT}" > /dev/null && echo 'Build matches' || (git log -1 --pretty=oneline | grep "Merge ${DRONE_COMMIT}" > /dev/null && echo 'Build is of a merge commit' || (echo 'Build does not match!' 
&& exit 1)) + - 'export COMMIT=${DRONE_COMMIT}' + - 'echo "Expecting ${DRONE_COMMIT}"' + - 'git log -5' + - 'git log -1 --pretty=oneline | grep "^${DRONE_COMMIT}" > /dev/null && echo ''Build matches'' || (git log -1 --pretty=oneline | grep "Merge ${DRONE_COMMIT}" > /dev/null && echo ''Build is of a merge commit'' || (echo ''Build does not match!'' && exit 1))' wait-for-build: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true environment: BIN: bin GOPATH: /go SHELL: /bin/bash - TEST_URL_ARRAY: ${TEST_URL_ARRAY} - DRONE_SERVER: ${DRONE_SERVER} - DRONE_TOKEN: ${DRONE_TOKEN} + secrets: + - drone_server + - drone_token + - test_url_array commands: - tests/wait_until_previous_builds_complete.sh + check-org-membership: + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' + pull: true + environment: + BIN: bin + GOPATH: /go + SHELL: /bin/bash + secrets: + - github_automation_api_key + commands: + - echo ${DRONE_COMMIT_AUTHOR} + - /bin/bash -c '[[ ! 
$(curl --silent "https://api.github.com/orgs/vmware/members/${DRONE_COMMIT_AUTHOR}?access_token=$GITHUB_AUTOMATION_API_KEY") ]]' + when: + status: success + vic-engine: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true environment: BIN: bin GOPATH: /go SHELL: /bin/bash - DRONE_SERVER: ${DRONE_SERVER} - DRONE_TOKEN: ${DRONE_TOKEN} - TEST_URL_ARRAY: ${TEST_URL_ARRAY} - TEST_USERNAME: ${TEST_USERNAME} - TEST_PASSWORD: ${TEST_PASSWORD} - REGISTRY_PASSWORD: ${REGISTRY_PASSWORD} - BUILD_NUMBER: ${DRONE_BUILD_NUMBER} - COMMIT: ${DRONE_COMMIT} + secrets: + - drone_server + - drone_token + - registry_password + - test_password + - test_url_array + - test_username commands: - - make mark - - make all - - make sincemark - - make mark - - echo `ls vendor/github.com/vmware/govmomi/vim25/methods` - - echo `ls vendor/github.com/vmware/govmomi/vim25/types` - - echo `ls vendor/github.com/docker/docker/vendor/github.com/opencontainers/runc/libcontainer/system` - - export VIC_ESX_URL_ARRAY="`tests/get_test_url.sh`" + - 'export BUILD_NUMBER=${DRONE_BUILD_NUMBER}' + - 'export COMMIT=${DRONE_COMMIT}' + - 'make mark' + - 'make all' + - 'make sincemark' + - 'make mark' + - 'echo `ls vendor/github.com/vmware/govmomi/vim25/methods`' + - 'echo `ls vendor/github.com/vmware/govmomi/vim25/types`' + - 'echo `ls vendor/github.com/docker/docker/vendor/github.com/opencontainers/runc/libcontainer/system`' + - 'export VIC_ESX_URL_ARRAY="`tests/get_test_url.sh`"' - tests/unit-test-check.sh - - make sincemark + - 'make sincemark' when: - status: success + status: success vic-integration-test-on-pr: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true privileged: true environment: @@ -87,83 +91,90 @@ pipeline: GOPATH: /go SHELL: /bin/bash LOG_TEMP_DIR: install-logs - GITHUB_AUTOMATION_API_KEY: 
${GITHUB_AUTOMATION_API_KEY} - DRONE_SERVER: ${DRONE_SERVER} - DRONE_TOKEN: ${DRONE_TOKEN} - DRONE_MACHINE: ${DRONE_MACHINE} - TEST_URL_ARRAY: ${TEST_URL_ARRAY} - TEST_USERNAME: ${TEST_USERNAME} - TEST_PASSWORD: ${TEST_PASSWORD} - REGISTRY_PASSWORD: ${REGISTRY_PASSWORD} - TEST_DATASTORE: ${TEST_DATASTORE} - TEST_TIMEOUT: ${TEST_TIMEOUT} - REPORTING_SERVER_URL: ${REPORTING_SERVER_URL} - GOVC_INSECURE: true - GOVC_USERNAME: ${TEST_USERNAME} - GOVC_PASSWORD: ${TEST_PASSWORD} - GOVC_DATASTORE: ${TEST_DATASTORE} - GS_PROJECT_ID: ${GS_PROJECT_ID} - GS_CLIENT_EMAIL: ${GS_CLIENT_EMAIL} - GS_PRIVATE_KEY: ${GS_PRIVATE_KEY} - DOMAIN: ${CI_DOMAIN} - SYSLOG_SERVER: ${SYSLOG_SERVER} - SYSLOG_USER: ${SYSLOG_USER} - SYSLOG_PASSWD: ${SYSLOG_PASSWD} + secrets: + - bridge_network + - ci_domain + - debug_vch + - drone_server + - drone_token + - drone_machine + - github_automation_api_key + - gs_client_email + - gs_private_key + - gs_project_id + - public_network + - registry_password + - reporting_server_url + - syslog_passwd + - syslog_server + - syslog_user + - test_datastore + - test_resource + - test_timeout + - test_password + - test_url_array + - test_username commands: - - . 
./tests/ci-env.sh # set CI env variable values - - make mark + - export GOVC_INSECURE=true + - export GOVC_USERNAME=$TEST_USERNAME + - export GOVC_PASSWORD=$TEST_PASSWORD + - export GOVC_DATASTORE=$TEST_DATASTORE + - export DOMAIN=$CI_DOMAIN + - echo $DOMAIN + - 'make mark' - tests/integration-test.sh - - make sincemark + - 'make sincemark' volumes: - /tmp - - /home/vic:/ci when: - status: success + status: success vic-ui: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true environment: - BUILD_NUMBER: ${DRONE_BUILD_NUMBER} - BIN: bin - GS_PROJECT_ID: ${GS_PROJECT_ID} - GS_CLIENT_EMAIL: ${GS_CLIENT_EMAIL} - GS_PRIVATE_KEY: ${GS_PRIVATE_KEY} + BIN: bin + secrets: + - gs_client_email + - gs_private_key + - gs_project_id commands: - - mkdir -p $BIN/ui - - export LATEST_VIC_UI_BUILD="$(gsutil ls -l 'gs://vic-ui-builds' | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap ' ' PIPE; head -1))" - - export VIC_UI_URL=$(echo $LATEST_VIC_UI_BUILD | xargs | cut -d " " -f 3 | sed "s/gs:\/\//https:\/\/storage.googleapis.com\//") - - wget -nv $VIC_UI_URL -P $BIN - - tar -xvzf $BIN/vic_ui_*.tar.gz - - ls -la ./$BIN && ./$BIN/ui/sync-vic-ui-version.sh -p bin/ 2>&1 - - rm $BIN/vic_ui_*.tar.gz + - 'export BUILD_NUMBER=${DRONE_BUILD_NUMBER}' + - 'mkdir -p $BIN/ui' + - 'export LATEST_VIC_UI_BUILD="$(gsutil ls -l ''gs://vic-ui-builds'' | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap '' '' PIPE; head -1))"' + - 'export VIC_UI_URL=$(echo $LATEST_VIC_UI_BUILD | xargs | cut -d " " -f 3 | sed "s|gs://|https://storage.googleapis.com/|")' + - 'wget -nv $VIC_UI_URL -P $BIN' + - 'tar -xvzf $BIN/vic_ui_*.tar.gz' + - 'ls -la ./$BIN && ./$BIN/ui/sync-vic-ui-version.sh -p bin/ 2>&1' + - 'rm $BIN/vic_ui_*.tar.gz' when: - status: success - branch: [ master ] + status: success + branch: [master] vic-ui-release: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 + image: 
'wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.44' pull: true environment: - BUILD_NUMBER: ${DRONE_BUILD_NUMBER} BIN: bin - GS_PROJECT_ID: ${GS_PROJECT_ID} - GS_CLIENT_EMAIL: ${GS_CLIENT_EMAIL} - GS_PRIVATE_KEY: ${GS_PRIVATE_KEY} + secrets: + - gs_client_email + - gs_private_key + - gs_project_id commands: - - mkdir -p $BIN/ui - - export LATEST_VIC_UI_RELEASE="$(gsutil ls -l 'gs://vic-ui-releases' | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap ' ' PIPE; head -1))" - - export VIC_UI_URL=$(echo $LATEST_VIC_UI_RELEASE | xargs | cut -d " " -f 3 | sed "s/gs:\/\//https:\/\/storage.googleapis.com\//") - - wget -nv $VIC_UI_URL -P $BIN - - tar -xvzf $BIN/vic_ui_*.tar.gz - - ls -la ./$BIN && ./$BIN/ui/sync-vic-ui-version.sh -p bin/ 2>&1 - - rm $BIN/vic_ui_*.tar.gz + - 'export BUILD_NUMBER=${DRONE_BUILD_NUMBER}' + - 'mkdir -p $BIN/ui' + - 'export LATEST_VIC_UI_RELEASE="$(gsutil ls -l ''gs://vic-ui-releases'' | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap '' '' PIPE; head -1))"' + - 'export VIC_UI_URL=$(echo $LATEST_VIC_UI_RELEASE | xargs | cut -d " " -f 3 | sed "s|gs://|https://storage.googleapis.com/|")' + - 'wget -nv $VIC_UI_URL -P $BIN' + - 'tar -xvzf $BIN/vic_ui_*.tar.gz' + - 'ls -la ./$BIN && ./$BIN/ui/sync-vic-ui-version.sh -p bin/ 2>&1' + - 'rm $BIN/vic_ui_*.tar.gz' when: - status: success - branch: [ releases/*, refs/tags/* ] + status: success + branch: ['releases/*', 'refs/tags/*'] bundle: - image: harbor.ci.drone.local/library/golang:1.8 + image: 'gcr.io/eminent-nation-87317/golang:1.8' pull: true environment: BIN: bin @@ -171,181 +182,163 @@ pipeline: GOPATH: /go SHELL: /bin/bash commands: - - make mark - - rm -rf $BIN_TEMP_DIR - - mkdir -p $BIN_TEMP_DIR - - mv $BIN/ui $BIN_TEMP_DIR - - cp LICENSE $BIN_TEMP_DIR - - cp doc/bundle/README $BIN_TEMP_DIR - - cp $BIN/vic-machine* $BIN_TEMP_DIR - - cp $BIN/vic-ui* $BIN_TEMP_DIR - - cp $BIN/appliance.iso $BIN_TEMP_DIR - - cp $BIN/bootstrap.iso $BIN_TEMP_DIR - - tar czvf 
$BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz -C $BIN vic - - shasum -a 256 $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz - - shasum -a 1 $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz - - md5sum $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz - - du -ks $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz | awk '{print $1 / 1024}' | { read x; echo $x MB; } - - mkdir bundle - - mkdir bundle-release - - cp $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz bundle - - cp $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz bundle-release/vic_`git describe --tags $(git rev-list --tags --max-count=1)`.tar.gz - - make sincemark + - 'make mark' + - 'rm -rf $BIN_TEMP_DIR' + - 'mkdir -p $BIN_TEMP_DIR' + - 'mv $BIN/ui $BIN_TEMP_DIR' + - 'cp LICENSE $BIN_TEMP_DIR' + - 'cp doc/bundle/README $BIN_TEMP_DIR' + - 'cp $BIN/vic-machine* $BIN_TEMP_DIR' + - 'cp $BIN/vic-ui* $BIN_TEMP_DIR' + - 'cp $BIN/appliance.iso $BIN_TEMP_DIR' + - 'cp $BIN/bootstrap.iso $BIN_TEMP_DIR' + - 'tar czvf $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz -C $BIN vic' + - 'shasum -a 256 $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz' + - 'shasum -a 1 $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz' + - 'md5sum $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz' + - 'du -ks $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz | awk ''{print $1 / 1024}'' | { read x; echo $x MB; }' + - 'mkdir bundle' + - 'mkdir bundle-release' + - 'cp $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz bundle' + - 'cp $BIN/vic_${DRONE_BUILD_NUMBER}.tar.gz bundle-release/vic_`git describe --tags $(git rev-list --tags --max-count=1)`.tar.gz' + - 'make sincemark' when: repo: vmware/vic - event: [ push, tag ] - branch: [ master, releases/*, refs/tags/* ] + event: [push, tag] + branch: [master, 'releases/*', 'refs/tags/*'] publish-gcs-builds-on-pass: - image: maplain/drone-gcs:latest + image: 'victest/drone-gcs:1' pull: true source: bundle target: vic-engine-builds acl: - - allUsers:READER - cache_control: public,max-age=3600 + - 'allUsers:READER' + cache_control: 'public,max-age=3600' when: repo: vmware/vic - event: [ push ] - branch: [ master, releases/* ] + event: [push] + 
branch: [master, 'releases/*'] status: success publish-gcs-builds-on-fail: - image: maplain/drone-gcs:latest + image: 'victest/drone-gcs:1' pull: true source: bundle target: vic-engine-failed-builds acl: - - allUsers:READER - cache_control: public,max-age=3600 + - 'allUsers:READER' + cache_control: 'public,max-age=3600' when: repo: vmware/vic - event: [ push ] - branch: [ master, releases/* ] + event: [push] + branch: [master, 'releases/*'] status: failure publish-gcs-releases: - image: maplain/drone-gcs:latest + image: 'victest/drone-gcs:1' pull: true source: bundle-release target: vic-engine-releases acl: - - allUsers:READER - cache_control: public,max-age=3600 + - 'allUsers:READER' + cache_control: 'public,max-age=3600' when: repo: vmware/vic - event: [ push, tag ] - branch: [ refs/tags/* ] + event: [push, tag] + branch: ['refs/tags/*'] status: success publish-vic-machine-server-dev: image: plugins/gcr repo: eminent-nation-87317/vic-machine-server dockerfile: cmd/vic-machine-server/Dockerfile + secrets: + - gs_client_email + - gs_private_key + - gs_private_key_id + - gs_project_id tags: - dev - json_key: > - { - "type": "service_account", - "project_id": "${GS_PROJECT_ID}", - "private_key_id": "${GS_PRIVATE_KEY_ID}", - "private_key": "${GS_PRIVATE_KEY}", - "client_email": "${GS_CLIENT_EMAIL}", - "client_id": "${GS_PROJECT_ID}", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "" - } + json_key: "{ \"type\": \"service_account\",\n \"project_id\": \"$GS_PROJECT_ID\",\n \"private_key_id\": \"$GS_PRIVATE_KEY_ID\",\n \"private_key\": \"$GS_PRIVATE_KEY\",\n \"client_email\": \"$GS_CLIENT_EMAIL\",\n \"client_id\": \"$GS_PROJECT_ID\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": 
\"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"\"\n}\n" when: repo: vmware/vic - event: [ push ] - branch: [ master ] + event: [push] + branch: [master] status: success publish-vic-machine-server-releases: image: plugins/gcr repo: eminent-nation-87317/vic-machine-server dockerfile: cmd/vic-machine-server/Dockerfile + secrets: + - gs_client_email + - gs_private_key + - gs_private_key_id + - gs_project_id tags: - latest - json_key: > - { - "type": "service_account", - "project_id": "${GS_PROJECT_ID}", - "private_key_id": "${GS_PRIVATE_KEY_ID}", - "private_key": "${GS_PRIVATE_KEY}", - "client_email": "${GS_CLIENT_EMAIL}", - "client_id": "${GS_PROJECT_ID}", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "" - } + json_key: "{ \"type\": \"service_account\",\n \"project_id\": \"$GS_PROJECT_ID\",\n \"private_key_id\": \"$GS_PRIVATE_KEY_ID\",\n \"private_key\": \"$GS_PRIVATE_KEY\",\n \"client_email\": \"$GS_CLIENT_EMAIL\",\n \"client_id\": \"$GS_PROJECT_ID\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"\"\n}\n" when: repo: vmware/vic - event: [ push, tag ] - branch: [ refs/tags/*, releases/* ] + event: [push, tag] + branch: ['refs/tags/*', 'releases/*'] status: success trigger-downstream: - image: harbor.ci.drone.local/library/vic-downstream-trigger:1.0 + image: 'gcr.io/eminent-nation-87317/vic-downstream-trigger:1.0' environment: - DRONE_SERVER: ${DRONE_SERVER} - DRONE_TOKEN: ${DRONE_TOKEN} SHELL: /bin/bash + secrets: + - drone_server + - drone_token when: repo: vmware/vic - event: [ push, tag ] - branch: [ master, releases/*, refs/tags/* ] + event: [push, tag] + branch: 
[master, 'releases/*', 'refs/tags/*'] status: success report-coverage: - image: robertstettner/drone-codecov - token: ${CODECOV_TOKEN} - files: - - .cover/cover.out + image: robertstettner/drone-codecov + secrets: + - codecov_token + files: + - .cover/cover.out notify-slack-on-fail: image: plugins/slack - webhook: ${SLACK_URL} + secrets: + - source: slack_url + target: slack_webhook username: drone - template: > - Build https://ci.vcna.io/vmware/vic/{{ build.number }} by {{ build.author }} finished with a {{ build.status }} status. Logs: https://console.cloud.google.com/m/cloudstorage/b/vic-ci-logs/o/integration_logs_{{ build.number }}_{{ build.commit }}.zip?authuser=1 + template: "Build https://ci-vic.vmware.com/vmware/vic/{{ build.number }} by {{ build.author }} finished with a {{ build.status }} status. Logs: https://console.cloud.google.com/m/cloudstorage/b/vic-ci-logs/o/integration_logs_{{ build.number }}_{{ build.commit }}.zip?authuser=1\n" when: repo: vmware/vic - branch: [ master, releases/*, refs/tags/* ] + event: [push, tag, deployment] + branch: [master, 'releases/*', 'refs/tags/*'] status: failure notify-slack-on-pass: image: plugins/slack - webhook: ${SLACK_URL} + secrets: + - source: slack_url + target: slack_webhook username: drone - template: > - Build https://ci.vcna.io/vmware/vic/{{ build.number }} by {{ build.author }} finished with a {{ build.status }} status, find the build at: https://storage.googleapis.com/vic-engine-builds/vic_{{ build.number }}.tar.gz + template: "Build https://ci-vic.vmware.com/vmware/vic/{{ build.number }} by {{ build.author }} finished with a {{ build.status }} status, find the build at: https://storage.googleapis.com/vic-engine-builds/vic_{{ build.number }}.tar.gz\n" when: repo: vmware/vic - branch: [ master, releases/* ] + event: [push, tag, deployment] + branch: [master, 'releases/*'] status: success notify-slack-on-successful-tag: image: plugins/slack - webhook: ${SLACK_URL} + secrets: + - source: slack_url + target: 
slack_webhook username: drone - template: > - The latest version of VIC engine has been released, find the build here: https://console.cloud.google.com/storage/browser/vic-engine-releases + template: "The latest version of VIC engine has been released, find the build here: https://console.cloud.google.com/storage/browser/vic-engine-releases\n" when: repo: vmware/vic - branch: [ refs/tags/* ] - status: success - - pass-rate: - image: harbor.ci.drone.local/library/vic-integration-test:1.42 - pull: true - environment: - BIN: bin - SHELL: /bin/bash - GITHUB_AUTOMATION_API_KEY: ${GITHUB_AUTOMATION_API_KEY} - SLACK_URL: ${SLACK_URL} - commands: - - tests/pass-rate.sh + event: [push, tag, deployment] + branch: ['refs/tags/*'] diff --git a/.drone.yml.sig b/.drone.yml.sig deleted file mode 100644 index 91da2e7f86..0000000000 --- a/.drone.yml.sig +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiJ9.IyBBZnRlciBhbnkgY2hhbmdlIHRvIHRoaXMgZmlsZSB5b3UgTVVTVCByZS1zaWduIGFuZCBjaGVja2luIHRoZSAuZHJvbmUueW1sLnNpZwojIFdoZW4geW91IGFyZSByZWFkeSB0byBzdWJtaXQgYSBwdWxsIHJlcXVlc3QsIHlvdSBtdXN0IHJlZ2VuZXJhdGUgLmRyb25lLnltbC5zaWcgZm9yIHRoZSB2bXdhcmUvdmljIHJlcG86CiMgJCBleHBvcnQgRFJPTkVfU0VSVkVSPWh0dHBzOi8vY2kudmNuYS5pbwojICQgZXhwb3J0IERST05FX1RPS0VOPTx0b2tlbj4KIyAkIGRyb25lIHNpZ24gdm13YXJlL3ZpYwojIFRoZSBzZWNyZXRzIGZpbGUgaXMgaW4gb3VyIGxvY2FsIGdpdCByZXBvLiAgQXNrIG1oYWdlbiBmb3IgYWNjZXNzLgoKLS0tCndvcmtzcGFjZToKICBiYXNlOiAvZ28KICBwYXRoOiBzcmMvZ2l0aHViLmNvbS92bXdhcmUvdmljCgpwaXBlbGluZToKICBjbG9uZToKICAgIGltYWdlOiBwbHVnaW5zL2dpdAogICAgdGFnczogdHJ1ZQogICAgIyBkb250IGNsb25lIHN1Ym1vZHVsZXMKICAgIHJlY3Vyc2l2ZTogZmFsc2UKICBjbG9uZS1wcjoKICAgIGltYWdlOiBoYXJib3IuY2kuZHJvbmUubG9jYWwvbGlicmFyeS9naXQtY2xvbmU6MS4wCiAgICBwdWxsOiB0cnVlCiAgICBlbnZpcm9ubWVudDoKICAgICAgRFJPTkVfUFVMTF9SRVFVRVNUOiAgJHtEUk9ORV9QVUxMX1JFUVVFU1R9CiAgICB3aGVuOgogICAgICBldmVudDogWyBwdWxsX3JlcXVlc3QgXQoKICBkaXNwbGF5LXN0YXR1czoKICAgIGltYWdlOiBoYXJib3IuY2kuZHJvbmUubG9jYWwvbGlicmFyeS92aWMtaW50ZWdyYXRpb24tdGVzdDoxLjQyCiAgICBwdWxsOiB0cnVlCiAgICBlbnZpcm9ub
WVudDoKICAgICAgQklOOiBiaW4KICAgICAgU0hFTEw6IC9iaW4vYmFzaAogICAgICBDT01NSVQ6ICR7RFJPTkVfQ09NTUlUfQogICAgY29tbWFuZHM6CiAgICAgIC0gZWNobyAiRXhwZWN0aW5nICR7RFJPTkVfQ09NTUlUfSIKICAgICAgLSBnaXQgbG9nIC01CiAgICAgIC0gZ2l0IGxvZyAtMSAtLXByZXR0eT1vbmVsaW5lIHwgZ3JlcCAiXiR7RFJPTkVfQ09NTUlUfSIgPiAvZGV2L251bGwgJiYgZWNobyAnQnVpbGQgbWF0Y2hlcycgfHwgKGdpdCBsb2cgLTEgLS1wcmV0dHk9b25lbGluZSB8IGdyZXAgIk1lcmdlICR7RFJPTkVfQ09NTUlUfSIgPiAvZGV2L251bGwgJiYgZWNobyAnQnVpbGQgaXMgb2YgYSBtZXJnZSBjb21taXQnIHx8IChlY2hvICdCdWlsZCBkb2VzIG5vdCBtYXRjaCEnICYmIGV4aXQgMSkpCgogIHdhaXQtZm9yLWJ1aWxkOgogICAgaW1hZ2U6IGhhcmJvci5jaS5kcm9uZS5sb2NhbC9saWJyYXJ5L3ZpYy1pbnRlZ3JhdGlvbi10ZXN0OjEuNDIKICAgIHB1bGw6IHRydWUKICAgIGVudmlyb25tZW50OgogICAgICBCSU46IGJpbgogICAgICBHT1BBVEg6IC9nbwogICAgICBTSEVMTDogL2Jpbi9iYXNoCiAgICAgIFRFU1RfVVJMX0FSUkFZOiAke1RFU1RfVVJMX0FSUkFZfQogICAgICBEUk9ORV9TRVJWRVI6ICR7RFJPTkVfU0VSVkVSfQogICAgICBEUk9ORV9UT0tFTjogJHtEUk9ORV9UT0tFTn0KICAgIGNvbW1hbmRzOgogICAgICAtIHRlc3RzL3dhaXRfdW50aWxfcHJldmlvdXNfYnVpbGRzX2NvbXBsZXRlLnNoCgogIHZpYy1lbmdpbmU6CiAgICBpbWFnZTogaGFyYm9yLmNpLmRyb25lLmxvY2FsL2xpYnJhcnkvdmljLWludGVncmF0aW9uLXRlc3Q6MS40MgogICAgcHVsbDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJJTjogYmluCiAgICAgIEdPUEFUSDogL2dvCiAgICAgIFNIRUxMOiAvYmluL2Jhc2gKICAgICAgRFJPTkVfU0VSVkVSOiAgJHtEUk9ORV9TRVJWRVJ9CiAgICAgIERST05FX1RPS0VOOiAgJHtEUk9ORV9UT0tFTn0KICAgICAgVEVTVF9VUkxfQVJSQVk6ICAke1RFU1RfVVJMX0FSUkFZfQogICAgICBURVNUX1VTRVJOQU1FOiAgJHtURVNUX1VTRVJOQU1FfQogICAgICBURVNUX1BBU1NXT1JEOiAgJHtURVNUX1BBU1NXT1JEfQogICAgICBSRUdJU1RSWV9QQVNTV09SRDogICR7UkVHSVNUUllfUEFTU1dPUkR9CiAgICAgIEJVSUxEX05VTUJFUjogJHtEUk9ORV9CVUlMRF9OVU1CRVJ9CiAgICAgIENPTU1JVDogJHtEUk9ORV9DT01NSVR9CiAgICBjb21tYW5kczoKICAgICAgLSBtYWtlIG1hcmsKICAgICAgLSBtYWtlIGFsbAogICAgICAtIG1ha2Ugc2luY2VtYXJrCiAgICAgIC0gbWFrZSBtYXJrCiAgICAgIC0gZWNobyBgbHMgdmVuZG9yL2dpdGh1Yi5jb20vdm13YXJlL2dvdm1vbWkvdmltMjUvbWV0aG9kc2AKICAgICAgLSBlY2hvIGBscyB2ZW5kb3IvZ2l0aHViLmNvbS92bXdhcmUvZ292bW9taS92aW0yNS90eXBlc2AKICAgICAgLSBlY2hvIGBscyB2ZW5kb3IvZ2l0aHViLmNvbS9kb2NrZXIvZG9ja2VyL
3ZlbmRvci9naXRodWIuY29tL29wZW5jb250YWluZXJzL3J1bmMvbGliY29udGFpbmVyL3N5c3RlbWAKICAgICAgLSBleHBvcnQgVklDX0VTWF9VUkxfQVJSQVk9ImB0ZXN0cy9nZXRfdGVzdF91cmwuc2hgIgogICAgICAtIHRlc3RzL3VuaXQtdGVzdC1jaGVjay5zaAogICAgICAtIG1ha2Ugc2luY2VtYXJrCiAgICB3aGVuOgogICAgICBzdGF0dXM6ICBzdWNjZXNzCgogIHZpYy1pbnRlZ3JhdGlvbi10ZXN0LW9uLXByOgogICAgaW1hZ2U6IGhhcmJvci5jaS5kcm9uZS5sb2NhbC9saWJyYXJ5L3ZpYy1pbnRlZ3JhdGlvbi10ZXN0OjEuNDIKICAgIHB1bGw6IHRydWUKICAgIHByaXZpbGVnZWQ6IHRydWUKICAgIGVudmlyb25tZW50OgogICAgICBCSU46IGJpbgogICAgICBHT1BBVEg6IC9nbwogICAgICBTSEVMTDogL2Jpbi9iYXNoCiAgICAgIExPR19URU1QX0RJUjogaW5zdGFsbC1sb2dzCiAgICAgIEdJVEhVQl9BVVRPTUFUSU9OX0FQSV9LRVk6ICAke0dJVEhVQl9BVVRPTUFUSU9OX0FQSV9LRVl9CiAgICAgIERST05FX1NFUlZFUjogICR7RFJPTkVfU0VSVkVSfQogICAgICBEUk9ORV9UT0tFTjogICR7RFJPTkVfVE9LRU59CiAgICAgIERST05FX01BQ0hJTkU6ICAke0RST05FX01BQ0hJTkV9CiAgICAgIFRFU1RfVVJMX0FSUkFZOiAgJHtURVNUX1VSTF9BUlJBWX0KICAgICAgVEVTVF9VU0VSTkFNRTogICR7VEVTVF9VU0VSTkFNRX0KICAgICAgVEVTVF9QQVNTV09SRDogICR7VEVTVF9QQVNTV09SRH0KICAgICAgUkVHSVNUUllfUEFTU1dPUkQ6ICAke1JFR0lTVFJZX1BBU1NXT1JEfQogICAgICBURVNUX0RBVEFTVE9SRTogJHtURVNUX0RBVEFTVE9SRX0KICAgICAgVEVTVF9USU1FT1VUOiAke1RFU1RfVElNRU9VVH0KICAgICAgUkVQT1JUSU5HX1NFUlZFUl9VUkw6ICR7UkVQT1JUSU5HX1NFUlZFUl9VUkx9CiAgICAgIEdPVkNfSU5TRUNVUkU6IHRydWUKICAgICAgR09WQ19VU0VSTkFNRTogICR7VEVTVF9VU0VSTkFNRX0KICAgICAgR09WQ19QQVNTV09SRDogICR7VEVTVF9QQVNTV09SRH0KICAgICAgR09WQ19EQVRBU1RPUkU6ICR7VEVTVF9EQVRBU1RPUkV9CiAgICAgIEdTX1BST0pFQ1RfSUQ6ICR7R1NfUFJPSkVDVF9JRH0KICAgICAgR1NfQ0xJRU5UX0VNQUlMOiAke0dTX0NMSUVOVF9FTUFJTH0KICAgICAgR1NfUFJJVkFURV9LRVk6ICR7R1NfUFJJVkFURV9LRVl9CiAgICAgIERPTUFJTjogJHtDSV9ET01BSU59CiAgICAgIFNZU0xPR19TRVJWRVI6ICR7U1lTTE9HX1NFUlZFUn0KICAgICAgU1lTTE9HX1VTRVI6ICR7U1lTTE9HX1VTRVJ9CiAgICAgIFNZU0xPR19QQVNTV0Q6ICR7U1lTTE9HX1BBU1NXRH0KICAgIGNvbW1hbmRzOgogICAgICAtIC4gLi90ZXN0cy9jaS1lbnYuc2ggIyBzZXQgQ0kgZW52IHZhcmlhYmxlIHZhbHVlcwogICAgICAtIG1ha2UgbWFyawogICAgICAtIHRlc3RzL2ludGVncmF0aW9uLXRlc3Quc2gKICAgICAgLSBtYWtlIHNpbmNlbWFyawogICAgdm9sdW1lczoKICAgICAgLSAvdG1wCiAgICAgIC0gL2hvb
WUvdmljOi9jaQogICAgd2hlbjoKICAgICAgc3RhdHVzOiAgc3VjY2VzcwoKICB2aWMtdWk6CiAgICBpbWFnZTogaGFyYm9yLmNpLmRyb25lLmxvY2FsL2xpYnJhcnkvdmljLWludGVncmF0aW9uLXRlc3Q6MS40MgogICAgcHVsbDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJVSUxEX05VTUJFUjogJHtEUk9ORV9CVUlMRF9OVU1CRVJ9CiAgICAgIEJJTjogYmluCiAgICAgIEdTX1BST0pFQ1RfSUQ6ICR7R1NfUFJPSkVDVF9JRH0KICAgICAgR1NfQ0xJRU5UX0VNQUlMOiAke0dTX0NMSUVOVF9FTUFJTH0KICAgICAgR1NfUFJJVkFURV9LRVk6ICR7R1NfUFJJVkFURV9LRVl9CiAgICBjb21tYW5kczoKICAgICAgLSBta2RpciAtcCAkQklOL3VpCiAgICAgIC0gZXhwb3J0IExBVEVTVF9WSUNfVUlfQlVJTEQ9IiQoZ3N1dGlsIGxzIC1sICdnczovL3ZpYy11aS1idWlsZHMnIHwgZ3JlcCAtdiBUT1RBTCB8IGdyZXAgdmljXyB8IHNvcnQgLWsyIC1yIHwgKHRyYXAgJyAnIFBJUEU7IGhlYWQgLTEpKSIKICAgICAgLSBleHBvcnQgVklDX1VJX1VSTD0kKGVjaG8gJExBVEVTVF9WSUNfVUlfQlVJTEQgfCB4YXJncyB8IGN1dCAtZCAiICIgLWYgMyB8IHNlZCAicy9nczpcL1wvL2h0dHBzOlwvXC9zdG9yYWdlLmdvb2dsZWFwaXMuY29tXC8vIikKICAgICAgLSB3Z2V0IC1udiAkVklDX1VJX1VSTCAtUCAkQklOCiAgICAgIC0gdGFyIC14dnpmICRCSU4vdmljX3VpXyoudGFyLmd6CiAgICAgIC0gbHMgLWxhIC4vJEJJTiAmJiAuLyRCSU4vdWkvc3luYy12aWMtdWktdmVyc2lvbi5zaCAtcCBiaW4vIDI-JjEKICAgICAgLSBybSAkQklOL3ZpY191aV8qLnRhci5negogICAgd2hlbjoKICAgICAgc3RhdHVzOiAgc3VjY2VzcwogICAgICBicmFuY2g6IFsgbWFzdGVyIF0KCiAgdmljLXVpLXJlbGVhc2U6CiAgICBpbWFnZTogaGFyYm9yLmNpLmRyb25lLmxvY2FsL2xpYnJhcnkvdmljLWludGVncmF0aW9uLXRlc3Q6MS40MgogICAgcHVsbDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJVSUxEX05VTUJFUjogJHtEUk9ORV9CVUlMRF9OVU1CRVJ9CiAgICAgIEJJTjogYmluCiAgICAgIEdTX1BST0pFQ1RfSUQ6ICR7R1NfUFJPSkVDVF9JRH0KICAgICAgR1NfQ0xJRU5UX0VNQUlMOiAke0dTX0NMSUVOVF9FTUFJTH0KICAgICAgR1NfUFJJVkFURV9LRVk6ICR7R1NfUFJJVkFURV9LRVl9CiAgICBjb21tYW5kczoKICAgICAgLSBta2RpciAtcCAkQklOL3VpCiAgICAgIC0gZXhwb3J0IExBVEVTVF9WSUNfVUlfUkVMRUFTRT0iJChnc3V0aWwgbHMgLWwgJ2dzOi8vdmljLXVpLXJlbGVhc2VzJyB8IGdyZXAgLXYgVE9UQUwgfCBncmVwIHZpY18gfCBzb3J0IC1rMiAtciB8ICh0cmFwICcgJyBQSVBFOyBoZWFkIC0xKSkiCiAgICAgIC0gZXhwb3J0IFZJQ19VSV9VUkw9JChlY2hvICRMQVRFU1RfVklDX1VJX1JFTEVBU0UgfCB4YXJncyB8IGN1dCAtZCAiICIgLWYgMyB8IHNlZCAicy9nczpcL1wvL2h0dHBzOlwvXC9zdG9yYWdlLmdvb2dsZWFwaXMuY29tXC8vI
ikKICAgICAgLSB3Z2V0IC1udiAkVklDX1VJX1VSTCAtUCAkQklOCiAgICAgIC0gdGFyIC14dnpmICRCSU4vdmljX3VpXyoudGFyLmd6CiAgICAgIC0gbHMgLWxhIC4vJEJJTiAmJiAuLyRCSU4vdWkvc3luYy12aWMtdWktdmVyc2lvbi5zaCAtcCBiaW4vIDI-JjEKICAgICAgLSBybSAkQklOL3ZpY191aV8qLnRhci5negogICAgd2hlbjoKICAgICAgc3RhdHVzOiAgc3VjY2VzcwogICAgICBicmFuY2g6IFsgcmVsZWFzZXMvKiwgcmVmcy90YWdzLyogXQoKICBidW5kbGU6CiAgICBpbWFnZTogaGFyYm9yLmNpLmRyb25lLmxvY2FsL2xpYnJhcnkvZ29sYW5nOjEuOAogICAgcHVsbDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJJTjogYmluCiAgICAgIEJJTl9URU1QX0RJUjogYmluL3ZpYwogICAgICBHT1BBVEg6IC9nbwogICAgICBTSEVMTDogL2Jpbi9iYXNoCiAgICBjb21tYW5kczoKICAgICAgLSBtYWtlIG1hcmsKICAgICAgLSBybSAtcmYgJEJJTl9URU1QX0RJUgogICAgICAtIG1rZGlyIC1wICRCSU5fVEVNUF9ESVIKICAgICAgLSBtdiAkQklOL3VpICRCSU5fVEVNUF9ESVIKICAgICAgLSBjcCBMSUNFTlNFICRCSU5fVEVNUF9ESVIKICAgICAgLSBjcCBkb2MvYnVuZGxlL1JFQURNRSAkQklOX1RFTVBfRElSCiAgICAgIC0gY3AgJEJJTi92aWMtbWFjaGluZSogJEJJTl9URU1QX0RJUgogICAgICAtIGNwICRCSU4vdmljLXVpKiAkQklOX1RFTVBfRElSCiAgICAgIC0gY3AgJEJJTi9hcHBsaWFuY2UuaXNvICRCSU5fVEVNUF9ESVIKICAgICAgLSBjcCAkQklOL2Jvb3RzdHJhcC5pc28gJEJJTl9URU1QX0RJUgogICAgICAtIHRhciBjenZmICRCSU4vdmljXyR7RFJPTkVfQlVJTERfTlVNQkVSfS50YXIuZ3ogLUMgJEJJTiB2aWMKICAgICAgLSBzaGFzdW0gLWEgMjU2ICRCSU4vdmljXyR7RFJPTkVfQlVJTERfTlVNQkVSfS50YXIuZ3oKICAgICAgLSBzaGFzdW0gLWEgMSAkQklOL3ZpY18ke0RST05FX0JVSUxEX05VTUJFUn0udGFyLmd6CiAgICAgIC0gbWQ1c3VtICRCSU4vdmljXyR7RFJPTkVfQlVJTERfTlVNQkVSfS50YXIuZ3oKICAgICAgLSBkdSAta3MgJEJJTi92aWNfJHtEUk9ORV9CVUlMRF9OVU1CRVJ9LnRhci5neiB8IGF3ayAne3ByaW50ICQxIC8gMTAyNH0nIHwgeyByZWFkIHg7IGVjaG8gJHggTUI7IH0KICAgICAgLSBta2RpciBidW5kbGUKICAgICAgLSBta2RpciBidW5kbGUtcmVsZWFzZQogICAgICAtIGNwICRCSU4vdmljXyR7RFJPTkVfQlVJTERfTlVNQkVSfS50YXIuZ3ogYnVuZGxlCiAgICAgIC0gY3AgJEJJTi92aWNfJHtEUk9ORV9CVUlMRF9OVU1CRVJ9LnRhci5neiBidW5kbGUtcmVsZWFzZS92aWNfYGdpdCBkZXNjcmliZSAtLXRhZ3MgJChnaXQgcmV2LWxpc3QgLS10YWdzIC0tbWF4LWNvdW50PTEpYC50YXIuZ3oKICAgICAgLSBtYWtlIHNpbmNlbWFyawogICAgd2hlbjoKICAgICAgcmVwbzogdm13YXJlL3ZpYwogICAgICBldmVudDogWyBwdXNoLCB0YWcgXQogICAgICBicmFuY2g6IFsgbWFzdGVyLCByZWxlY
XNlcy8qLCByZWZzL3RhZ3MvKiBdCgogIHB1Ymxpc2gtZ2NzLWJ1aWxkcy1vbi1wYXNzOgogICAgaW1hZ2U6IG1hcGxhaW4vZHJvbmUtZ2NzOmxhdGVzdAogICAgcHVsbDogdHJ1ZQogICAgc291cmNlOiBidW5kbGUKICAgIHRhcmdldDogdmljLWVuZ2luZS1idWlsZHMKICAgIGFjbDoKICAgICAgLSBhbGxVc2VyczpSRUFERVIKICAgIGNhY2hlX2NvbnRyb2w6IHB1YmxpYyxtYXgtYWdlPTM2MDAKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS92aWMKICAgICAgZXZlbnQ6IFsgcHVzaCBdCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHJlbGVhc2VzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgcHVibGlzaC1nY3MtYnVpbGRzLW9uLWZhaWw6CiAgICBpbWFnZTogbWFwbGFpbi9kcm9uZS1nY3M6bGF0ZXN0CiAgICBwdWxsOiB0cnVlCiAgICBzb3VyY2U6IGJ1bmRsZQogICAgdGFyZ2V0OiB2aWMtZW5naW5lLWZhaWxlZC1idWlsZHMKICAgIGFjbDoKICAgICAgLSBhbGxVc2VyczpSRUFERVIKICAgIGNhY2hlX2NvbnRyb2w6IHB1YmxpYyxtYXgtYWdlPTM2MDAKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS92aWMKICAgICAgZXZlbnQ6IFsgcHVzaCBdCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHJlbGVhc2VzLyogXQogICAgICBzdGF0dXM6IGZhaWx1cmUKCiAgcHVibGlzaC1nY3MtcmVsZWFzZXM6CiAgICBpbWFnZTogbWFwbGFpbi9kcm9uZS1nY3M6bGF0ZXN0CiAgICBwdWxsOiB0cnVlCiAgICBzb3VyY2U6IGJ1bmRsZS1yZWxlYXNlCiAgICB0YXJnZXQ6IHZpYy1lbmdpbmUtcmVsZWFzZXMKICAgIGFjbDoKICAgICAgLSBhbGxVc2VyczpSRUFERVIKICAgIGNhY2hlX2NvbnRyb2w6IHB1YmxpYyxtYXgtYWdlPTM2MDAKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS92aWMKICAgICAgZXZlbnQ6IFsgcHVzaCwgdGFnIF0KICAgICAgYnJhbmNoOiBbIHJlZnMvdGFncy8qIF0KICAgICAgc3RhdHVzOiBzdWNjZXNzCgogIHB1Ymxpc2gtdmljLW1hY2hpbmUtc2VydmVyLWRldjoKICAgIGltYWdlOiBwbHVnaW5zL2djcgogICAgcmVwbzogZW1pbmVudC1uYXRpb24tODczMTcvdmljLW1hY2hpbmUtc2VydmVyCiAgICBkb2NrZXJmaWxlOiBjbWQvdmljLW1hY2hpbmUtc2VydmVyL0RvY2tlcmZpbGUKICAgIHRhZ3M6CiAgICAgIC0gZGV2CiAgICBqc29uX2tleTogPgogICAgICB7CiAgICAgICAgInR5cGUiOiAic2VydmljZV9hY2NvdW50IiwKICAgICAgICAicHJvamVjdF9pZCI6ICIke0dTX1BST0pFQ1RfSUR9IiwKICAgICAgICAicHJpdmF0ZV9rZXlfaWQiOiAiJHtHU19QUklWQVRFX0tFWV9JRH0iLAogICAgICAgICJwcml2YXRlX2tleSI6ICIke0dTX1BSSVZBVEVfS0VZfSIsCiAgICAgICAgImNsaWVudF9lbWFpbCI6ICIke0dTX0NMSUVOVF9FTUFJTH0iLAogICAgICAgICJjbGllbnRfaWQiOiAiJHtHU19QUk9KRUNUX0lEfSIsCiAgICAgICAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoM
i9hdXRoIiwKICAgICAgICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgICAgICAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICAgICAgICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICIiCiAgICAgIH0KICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS92aWMKICAgICAgZXZlbnQ6IFsgcHVzaCBdCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIgXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgcHVibGlzaC12aWMtbWFjaGluZS1zZXJ2ZXItcmVsZWFzZXM6CiAgICBpbWFnZTogcGx1Z2lucy9nY3IKICAgIHJlcG86IGVtaW5lbnQtbmF0aW9uLTg3MzE3L3ZpYy1tYWNoaW5lLXNlcnZlcgogICAgZG9ja2VyZmlsZTogY21kL3ZpYy1tYWNoaW5lLXNlcnZlci9Eb2NrZXJmaWxlCiAgICB0YWdzOgogICAgICAtIGxhdGVzdAogICAganNvbl9rZXk6ID4KICAgICAgewogICAgICAgICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgICAgICAgInByb2plY3RfaWQiOiAiJHtHU19QUk9KRUNUX0lEfSIsCiAgICAgICAgInByaXZhdGVfa2V5X2lkIjogIiR7R1NfUFJJVkFURV9LRVlfSUR9IiwKICAgICAgICAicHJpdmF0ZV9rZXkiOiAiJHtHU19QUklWQVRFX0tFWX0iLAogICAgICAgICJjbGllbnRfZW1haWwiOiAiJHtHU19DTElFTlRfRU1BSUx9IiwKICAgICAgICAiY2xpZW50X2lkIjogIiR7R1NfUFJPSkVDVF9JRH0iLAogICAgICAgICJhdXRoX3VyaSI6ICJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20vby9vYXV0aDIvYXV0aCIsCiAgICAgICAgInRva2VuX3VyaSI6ICJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20vby9vYXV0aDIvdG9rZW4iLAogICAgICAgICJhdXRoX3Byb3ZpZGVyX3g1MDlfY2VydF91cmwiOiAiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vb2F1dGgyL3YxL2NlcnRzIiwKICAgICAgICAiY2xpZW50X3g1MDlfY2VydF91cmwiOiAiIgogICAgICB9CiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvdmljCiAgICAgIGV2ZW50OiBbIHB1c2gsIHRhZyBdCiAgICAgIGJyYW5jaDogWyByZWZzL3RhZ3MvKiwgcmVsZWFzZXMvKiBdCiAgICAgIHN0YXR1czogc3VjY2VzcwoKICB0cmlnZ2VyLWRvd25zdHJlYW06CiAgICBpbWFnZTogaGFyYm9yLmNpLmRyb25lLmxvY2FsL2xpYnJhcnkvdmljLWRvd25zdHJlYW0tdHJpZ2dlcjoxLjAKICAgIGVudmlyb25tZW50OgogICAgICBEUk9ORV9TRVJWRVI6ICR7RFJPTkVfU0VSVkVSfQogICAgICBEUk9ORV9UT0tFTjogJHtEUk9ORV9UT0tFTn0KICAgICAgU0hFTEw6IC9iaW4vYmFzaAogICAgd2hlbjoKICAgICAgcmVwbzogdm13YXJlL3ZpYwogICAgICBldmVudDogWyBwdXNoLCB0YWcgXQogICAgICBicmFuY2g6IFsgbWFzdGVyLCByZWxlYXNlcy8qLCByZWZzL3RhZ3MvKiBdCiAgICAgIHN0YXR1czogc3VjY2VzcwoKICByZXBvc
nQtY292ZXJhZ2U6CiAgICAgaW1hZ2U6IHJvYmVydHN0ZXR0bmVyL2Ryb25lLWNvZGVjb3YKICAgICB0b2tlbjogJHtDT0RFQ09WX1RPS0VOfQogICAgIGZpbGVzOgogICAgICAgLSAuY292ZXIvY292ZXIub3V0CgogIG5vdGlmeS1zbGFjay1vbi1mYWlsOgogICAgaW1hZ2U6IHBsdWdpbnMvc2xhY2sKICAgIHdlYmhvb2s6ICR7U0xBQ0tfVVJMfQogICAgdXNlcm5hbWU6IGRyb25lCiAgICB0ZW1wbGF0ZTogPgogICAgICBCdWlsZCBodHRwczovL2NpLnZjbmEuaW8vdm13YXJlL3ZpYy97eyBidWlsZC5udW1iZXIgfX0gYnkge3sgYnVpbGQuYXV0aG9yIH19IGZpbmlzaGVkIHdpdGggYSB7eyBidWlsZC5zdGF0dXMgfX0gc3RhdHVzLiBMb2dzOiBodHRwczovL2NvbnNvbGUuY2xvdWQuZ29vZ2xlLmNvbS9tL2Nsb3Vkc3RvcmFnZS9iL3ZpYy1jaS1sb2dzL28vaW50ZWdyYXRpb25fbG9nc197eyBidWlsZC5udW1iZXIgfX1fe3sgYnVpbGQuY29tbWl0IH19LnppcD9hdXRodXNlcj0xCiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvdmljCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHJlbGVhc2VzLyosIHJlZnMvdGFncy8qIF0KICAgICAgc3RhdHVzOiBmYWlsdXJlCgogIG5vdGlmeS1zbGFjay1vbi1wYXNzOgogICAgaW1hZ2U6IHBsdWdpbnMvc2xhY2sKICAgIHdlYmhvb2s6ICR7U0xBQ0tfVVJMfQogICAgdXNlcm5hbWU6IGRyb25lCiAgICB0ZW1wbGF0ZTogPgogICAgICBCdWlsZCBodHRwczovL2NpLnZjbmEuaW8vdm13YXJlL3ZpYy97eyBidWlsZC5udW1iZXIgfX0gYnkge3sgYnVpbGQuYXV0aG9yIH19IGZpbmlzaGVkIHdpdGggYSB7eyBidWlsZC5zdGF0dXMgfX0gc3RhdHVzLCBmaW5kIHRoZSBidWlsZCBhdDogaHR0cHM6Ly9zdG9yYWdlLmdvb2dsZWFwaXMuY29tL3ZpYy1lbmdpbmUtYnVpbGRzL3ZpY197eyBidWlsZC5udW1iZXIgfX0udGFyLmd6CiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvdmljCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHJlbGVhc2VzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgbm90aWZ5LXNsYWNrLW9uLXN1Y2Nlc3NmdWwtdGFnOgogICAgaW1hZ2U6IHBsdWdpbnMvc2xhY2sKICAgIHdlYmhvb2s6ICR7U0xBQ0tfVVJMfQogICAgdXNlcm5hbWU6IGRyb25lCiAgICB0ZW1wbGF0ZTogPgogICAgICBUaGUgbGF0ZXN0IHZlcnNpb24gb2YgVklDIGVuZ2luZSBoYXMgYmVlbiByZWxlYXNlZCwgZmluZCB0aGUgYnVpbGQgaGVyZTogaHR0cHM6Ly9jb25zb2xlLmNsb3VkLmdvb2dsZS5jb20vc3RvcmFnZS9icm93c2VyL3ZpYy1lbmdpbmUtcmVsZWFzZXMKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS92aWMKICAgICAgYnJhbmNoOiBbIHJlZnMvdGFncy8qIF0KICAgICAgc3RhdHVzOiBzdWNjZXNzCgogIHBhc3MtcmF0ZToKICAgIGltYWdlOiBoYXJib3IuY2kuZHJvbmUubG9jYWwvbGlicmFyeS92aWMtaW50ZWdyYXRpb24tdGVzdDoxLjQyCiAgICBwdWxsOiB0cnVlCiAgICBlbnZpcm9ubWVudDoKI
CAgICAgQklOOiBiaW4KICAgICAgU0hFTEw6IC9iaW4vYmFzaAogICAgICBHSVRIVUJfQVVUT01BVElPTl9BUElfS0VZOiAgJHtHSVRIVUJfQVVUT01BVElPTl9BUElfS0VZfQogICAgICBTTEFDS19VUkw6ICR7U0xBQ0tfVVJMfQogICAgY29tbWFuZHM6CiAgICAgIC0gdGVzdHMvcGFzcy1yYXRlLnNoCg.sIsrIg-oxbFr5LySiuBbDHBI5D6XPVuasoIlkQwDorE \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d102d37a8e..ec386aceb8 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -14,11 +14,13 @@ Refer to CONTRIBUTING.MD for more details. Fixes # diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ad12581d01..c509a7ff34 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -110,7 +110,7 @@ command in the VIC repo's root directory: curl https://cdn.rawgit.com/tommarshall/git-good-commit/v0.6.1/hook.sh > .git/hooks/commit-msg && chmod +x .git/hooks/commit-msg ``` -[dronevic]:https://ci.vcna.io/vmware/vic +[dronevic]:https://ci-vic.vmware.com/vmware/vic [dronesrc]:https://github.com/drone/drone [dronecli]:http://readme.drone.io/0.5/install/cli/ [commithook]:https://github.com/tommarshall/git-good-commit @@ -121,13 +121,15 @@ Automated testing uses [Drone][dronesrc]. Pull requests must pass unit tests and integration tests before being merged into the master branch. A standard PR builds the project and runs unit and regression tests. To customize the integration test suite that runs in your pull request, -you can use three keywords in your PR title or commit message: +you can use these keywords in your PR body: - To skip running tests (e.g. for a work-in-progress PR), use `[ci skip]` or `[skip ci]`. - To run the full test suite, use `[full ci]`. - To run _one_ integration test or group, use `[specific ci=$test]`. This will run the regression test as well. 
Examples: - To run the `1-01-Docker-Info` suite: `[specific ci=1-01-Docker-Info]` - To run all suites under the `Group1-Docker-Commands` group: `[specific ci=Group1-Docker-Commands]` +- To skip running the unit tests, use `[skip unit]`. +- To fail fast (make normal failures fatal) during the integration testing, use `[fast fail]`. You can run the tests locally before making a PR or view the Drone build results for [unit tests and integration tests][dronevic]. @@ -137,9 +139,9 @@ running tests. Add `WIP` (work in progress) to the PR title to alert reviewers t If your Drone build needs to be restarted, fork the build: ```shell export DRONE_TOKEN= -export DRONE_SERVER=https://ci.vcna.io +export DRONE_SERVER=https://ci-vic.vmware.com -drone build start --fork vmware/vic +drone build start vmware/vic ``` ### Testing locally @@ -149,13 +151,13 @@ Developers need to install [Drone CLI][dronecli]. #### Unit tests ``` shell -drone exec --repo.trusted --secret VIC_ESX_TEST_URL=":@" .drone.yml +VIC_ESX_TEST_URL=":@" drone exec .drone.yml ``` If you don't have a running ESX, tests requiring an ESX can be skipped with the following: ``` shell -drone exec --repo.trusted +drone exec ``` #### Integration tests @@ -181,17 +183,14 @@ discussion. For VIC engineers, you should set the priority based on the below guidelines. Everyone else, do not set the priority of a new issue. #### Priorities -Indicate the VMware business priority of the bug. Priority drives *when* a bug will be fixed. This field is optional for filers but must be filled out by bug owner. -priority/p0 - Bugs that NEED to be fixed immediately as they either block meaningful testing or are release stoppers for the current release. - -priority/p1 - Bugs that NEED to be fixed by the assigned phase of the current release. - -priority/p2 - Bugs that SHOULD be fixed by the assigned phase of the current release, time permitting. - -priority/p3 - Bugs that SHOULD be fixed by a given release, time permitting. 
- -priority/p4 - Bugs that SHOULD be fixed in a future (to be determined) release. +| Priority | Bugs | Features | Non Bugs | +| -------- | ---- | -------- | -------- | +| priority/p0 | Bugs that NEED to be fixed immediately as they either block meaningful testing or are release stoppers for the current release. | No Feature should be p0. | An issue that is not a bug and is blocking meaningful testing. eg. builds are failing because the syslog server is out of space. | +| priority/p1 | Bugs that NEED to be fixed by the assigned phase of the current release. | A feature that is required for the next release, typically an anchor feature; a large feature that is the focus for the release and drives the release date. | An issue that must be fixed for the next release. eg. Track build success rates. | +| priority/p2 | Bugs that SHOULD be fixed by the assigned phase of the current release, time permitting. | A feature that is desired for the next release, typically a pebble; a feature that has been approved for inclusion but is not considered the anchor feature or is considered good to have for the anchor feature. | An issue that we should fix in the next release. eg. A typo in the UI. | +| priority/p3 | Bugs that SHOULD be fixed by a given release, time permitting. | A feature that can be fixed in the next release. eg. Migrate to a new kernel version. Or a feature that is nice to have for a pebble. | An issue that can be fixed in the next release. eg. Low hanging productivity improvements. | +| priority/p4 | Bugs that SHOULD be fixed in a future (to be determined) release. | An issue or feature that will be fixed in a future release. | An issue or feature that will be fixed in a future release. 
| ### Not Ready diff --git a/Makefile b/Makefile index 25b6d34865..c618f1bfcf 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ endif # Caches dependencies to speed repeated calls define godeps $(call assert,$(call gmsl_compatible,1 1 7), Wrong GMSL version) \ - $(if $(filter-out push push-portlayer push-docker push-vic-init push-vicadmin focused-test test check clean distclean mrrobot mark sincemark .DEFAULT,$(MAKECMDGOALS)), \ + $(if $(filter-out push push-portlayer push-docker push-vic-init push-vicadmin focused-test test check clean distclean mrrobot mark sincemark local-ci-test .DEFAULT,$(MAKECMDGOALS)), \ $(if $(call defined,dep_cache,$(dir $1)),,$(info Generating dependency set for $(dir $1))) \ $(or \ $(if $(call defined,dep_cache,$(dir $1)), $(debug Using cached Go dependencies) $(wildcard $1) $(call get,dep_cache,$(dir $1))), @@ -299,6 +299,10 @@ push-vic-init: push-vicadmin: $(BASE_DIR)/infra/scripts/replace-running-components.sh vicadmin +local-ci-test: + @echo running CI tests locally... + infra/scripts/local-ci.sh + focused-test: # test only those packages that have changes infra/scripts/focused-test.sh $(REMOTE) @@ -352,7 +356,7 @@ $(imagec): $(call godeps,cmd/imagec/*.go) $(portlayerapi-client) $(docker-engine-api): $(portlayerapi-client) $(admiralapi-client) $$(call godeps,cmd/docker/*.go) ifeq ($(OS),linux) - @echo Building docker-engine-api server... + @echo building docker-engine-api server... @$(TIME) $(GO) build $(RACE) -ldflags "$(LDFLAGS)" -o $@ ./cmd/docker else @echo skipping docker-engine-api server, cannot build on non-linux @@ -360,12 +364,13 @@ endif $(docker-engine-api-test): $$(call godeps,cmd/docker/*.go) $(portlayerapi-client) ifeq ($(OS),linux) - @echo Building docker-engine-api server for test... + @echo building docker-engine-api server for test... @$(TIME) $(GO) test -c -coverpkg github.com/vmware/vic/lib/...,github.com/vmware/vic/pkg/... 
-outputdir /tmp -coverprofile docker-engine-api.cov -o $@ ./cmd/docker else @echo skipping docker-engine-api server for test, cannot build on non-linux endif + # Common portlayer dependencies between client and server PORTLAYER_DEPS ?= lib/apiservers/portlayer/swagger.json \ lib/apiservers/portlayer/restapi/configure_port_layer.go \ @@ -505,7 +510,7 @@ distro: all mrrobot: @rm -rf *.xml *.html *.log *.zip VCH-0-* -clean: +clean: cleandeps @echo removing binaries @rm -rf $(BIN)/* @echo removing Go object files @@ -537,3 +542,7 @@ clean: distclean: clean @echo removing binaries @rm -rf $(BIN) + +cleandeps: + @echo removing dependency cache + @rm -rf .godeps_cache diff --git a/README.md b/README.md index 9878c4fd9d..7070d41b13 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://ci.vcna.io/api/badges/vmware/vic/status.svg)](https://ci.vcna.io/vmware/vic) [![codecov](https://codecov.io/gh/vmware/vic/branch/master/graph/badge.svg)](https://codecov.io/gh/vmware/vic) [![Download](https://img.shields.io/badge/download-latest-blue.svg)](https://github.com/vmware/vic/releases/latest) [![Go Report Card](https://goreportcard.com/badge/github.com/vmware/vic)](https://goreportcard.com/report/github.com/vmware/vic) +[![Build Status](https://ci-vic.vmware.com/api/badges/vmware/vic/status.svg)](https://ci-vic.vmware.com/vmware/vic) [![codecov](https://codecov.io/gh/vmware/vic/branch/master/graph/badge.svg)](https://codecov.io/gh/vmware/vic) [![Download](https://img.shields.io/badge/download-latest-blue.svg)](https://github.com/vmware/vic/releases/latest) [![Go Report Card](https://goreportcard.com/badge/github.com/vmware/vic)](https://goreportcard.com/report/github.com/vmware/vic) # vSphere Integrated Containers Engine @@ -53,7 +53,7 @@ Building the project is done with a combination of make and containers, with gol To build as closely as possible to the formal build: ```shell -drone exec --repo.trusted +drone exec ``` To build inside a Docker container: 
@@ -86,6 +86,22 @@ To run unit tests after a successful build, issue the following: make test ``` +Running "make" every time causes Go dependency regeneration for each component, so that "make" can rebuild only those components that are changed. However, such regeneration may take significant amount of time when it is not really needed. To fight that developers can use cached dependencies that can be enabled by defining the environment variable VIC_CACHE_DEPS. As soon as it is set, infra/scripts/go-deps.sh will read cached version of dependencies if those exist. + +```shell +export VIC_CACHE_DEPS=1 +``` + +This is important to note that as soon as you add a new package or an internal project dependency that didn't exist before, those dependencies +should be regenerated to reflect latest changes. It can be done just by running: + +```shell +make cleandeps +``` + +After that next "make" run will regenerate dependencies from scratch. + + ## Managing vendor/ directory To build the VIC Engine dependencies, ensure `GOPATH` is set, then issue the following. @@ -134,9 +150,9 @@ The iso image will be created in `$BIN` ## Building with CI -[dronevic]:https://ci.vcna.io/vmware/vic +[dronevic]:https://ci-vic.vmware.com/vmware/vic [dronesrc]:https://github.com/drone/drone -[dronecli]:http://readme.drone.io/0.5/install/cli/ +[dronecli]:http://docs.drone.io/cli-installation/ PRs to this repository will trigger builds on our [Drone CI][dronevic]. @@ -144,7 +160,7 @@ To build locally with Drone: Ensure that you have Docker 1.6 or higher installed. Install the [Drone command line tools][dronecli]. -From the root directory of the `vic` repository run `drone exec --repo.trusted` +From the root directory of the `vic` repository run `drone exec` ## Common Build Problems 1. 
Builds may fail when building either the appliance.iso or bootstrap.iso with the error: `cap_set_file failed - Operation not supported` diff --git a/cmd/gandalf/main.go b/cmd/gandalf/main.go index e9641c4cd1..91289582bc 100644 --- a/cmd/gandalf/main.go +++ b/cmd/gandalf/main.go @@ -46,7 +46,7 @@ const ( DefaultRepo = "vic" DefaultBranch = "master" - DefaultFellows = "caglar10ur,mhagen,mwilliamson,fabio" + DefaultFellows = "mhagen,mwilliamson,ghicken" // 4 disabling merges to master automagically DroneFailureMessage = "finished with a failure status, find the logs" diff --git a/cmd/kubelet-starter/main.go b/cmd/kubelet-starter/main.go index 45ee714962..89fbc358e0 100644 --- a/cmd/kubelet-starter/main.go +++ b/cmd/kubelet-starter/main.go @@ -22,7 +22,7 @@ import ( "os/exec" "strconv" - log "github.com/Sirupsen/logrus" + log "github.com/Sirupsen/logrus" "github.com/vmware/vic/lib/config" "github.com/vmware/vic/lib/constants" @@ -86,7 +86,7 @@ func main() { portlayerPort = strconv.Itoa(constants.DebugPortLayerPort) } - clientIP, err := util.ClientIP() + clientIP, err := util.ClientIP() if err != nil { op.Fatalf("Cannot get Client IP err: %s", err) @@ -114,9 +114,9 @@ func main() { kubeletName := os.Getenv("KUBELET_NAME") - op.Infof("Executing kubelet: %s %s %s %s %s %s %s", KubeletPath, "--provider", "mock", "--kubeconfig", KubeletConfigFile, "--nodename", kubeletName) + op.Infof("Executing kubelet: %s %s %s %s %s %s %s", KubeletPath, "--provider", "vic", "--kubeconfig", KubeletConfigFile, "--nodename", kubeletName) /* #nosec */ - kubeletCmd := exec.Command(KubeletPath, "--provider", "mock", "--kubeconfig", KubeletConfigFile, "--nodename", kubeletName) + kubeletCmd := exec.Command(KubeletPath, "--provider", "vic", "--kubeconfig", KubeletConfigFile, "--nodename", kubeletName) output, err := kubeletCmd.CombinedOutput() op.Infof("Output: %s, Error: %s", string(output), err) } diff --git a/doc/design/vic-machine/service.md b/doc/design/vic-machine/service.md new file mode 
100644 index 0000000000..7aac0594bd --- /dev/null +++ b/doc/design/vic-machine/service.md @@ -0,0 +1,508 @@ +1. [Overview](#overview) +2. [Design goals and project scope](#design-goals-and-project-scope) +3. [Implementation Decisions](#implementation-decisions) + 1. [Technology Choices](#technology-choices) + 2. [Delivery](#delivery) + 3. [Cardinality](#cardinality) + 4. [Service Upgrade](#service-upgrade) + 5. [Compatibility](#compatibility) + 6. [Authentication](#authentication) + 7. [Certificate Management](#certificate-management) + 1. [Host Certificates](#host-certificates) + 2. [Client Certificates](#client-certificates) + 8. [Log Management](#log-management) + 1. [Operation logs](#operation-logs) + 2. [Server logs](#server-logs) + 9. [Cross-Origin Requests & Cross-Site Request Forgery](#cross-origin-requests--cross-site-request-forgery) + 10. [ISO Management](#iso-management) + 11. [Communicating modifiability](#communicating-modifiability) + 12. [Use of a query parameter for compute-resource](#use-of-a-query-parameter-for-compute-resource) + 13. [Use of a request body for delete](#use-of-a-request-body-for-delete) +4. [The REST API](#the-rest-api) + 1. [API Versioning](#api-versioning) + 2. [Headers](#headers) + 3. [Resources](#resources) + 4. [Query Parameters](#query-parameters) + 5. [Operations](#operations) + 1. [Get version information](#get-version-information) + 2. [List VCHs](#list-vchs) + 3. [Create a VCH](#create-a-vch) + 4. [Inspect a VCH](#inspect-a-vch) + 5. [Access the creation and reconfiguration log for a VCH](#access-the-creation-and-reconfiguration-log-for-a-vch) + 6. [Access the host certificate for a VCH](#access-the-host-certificate-for-a-vch) + 7. [Reconfigure a VCH](#reconfigure-a-vch) + 8. [Delete a VCH](#delete-a-vch) + 9. [Upgrade a VCH](#upgrade-a-vch) + 10. [Debug a VCH](#debug-a-vch) + 11. [View firewall rule settings](#view-firewall-rule-settings) + 12. [Update firewall rules](#update-firewall-rules) +5. 
[The WebSockets API](#the-websockets-api) +6. [Proposed changes to existing functionality](#proposed-changes-to-existing-functionality) + 1. [Support for authenticating via SAML or with a session key in vic-machine](#support-for-authenticating-via-saml-or-with-a-session-key-in-vic-machine) + 2. [VCH creation spawns a custom task](#vch-creation-spawns-a-custom-task) + 3. [Refactoring of vic-machine](#refactoring-of-vic-machine) + 4. [Deprecation of demo VCH installer](#deprecation-of-demo-vch-installer) + 5. [Client certificate management using Admiral](#client-certificate-management-using-admiral) + 6. [Reduce friction around use of operations credentials](#reduce-friction-around-use-of-operations-credentials) + 7. [Associating contact information with a VCH](#associating-contact-information-with-a-vch) +7. [Testing](#testing) +8. [See Also](#see-also) + + +## Overview + +The `vic-machine-{darwin,linux,windows}` command line utilities are used to perform Virtual Container Host (VCH) lifecycle operations. + +The `vic-machine-server` exposes similar functionality via a REST API to allow for a wider range of interaction models, such as the integration of VCH management into web interfaces (including the vSphere H5 client). + +## Design goals and project scope + +This API will become a public, documented API at some point in the future (and, to some extent, all APIs are public), but functionality will be delivered incrementally and the first versions will be versioned in a way that communicates breaking changes are expected and backwards compatibility may not be maintained. + +The initial priority is for this API to implement sufficient functionality to enable development of a vSphere plugin, as described here: https://vmware.invisionapp.com/share/GDC9QEDAZ. This may require functionality which is _not_ currently implemented by the `vic-machine` command line utilities. 
+ +The API may not initially _implement_ all functionality currently implemented by the `vic-machine` command line utilities. However, to avoid painting ourselves into a corner, it is desirable to consider all functionality in the design. Further, the _design_ should consider plausible future work to ensure that eventual extension of the API is possible. + +Because this API would likely be used in conjunction with (or by developers familiar with) other VMware APIs, it is desirable to provide a similar "look & feel" where possible. + +Because this API may be used in conjunction with (or by developers familiar with) other container-related APIs, providing a similar "look & feel" to those APIs is also desirable. + +This document will attempt to adhere to VMware standards and guidance around API design. + +## Implementation Decisions + +### Technology Choices + +The API will be defined using OpenAPI (Swagger), today's de facto standard for REST APIs. + +The service will be built in Go, using `go-swagger`. This allows any developer on the team to contribute, maintain, and modify the service with a minimal learning curve. + +WebSockets will be used to provide streaming information about the status of long-running operations so that sophisticated clients do not need to poll for updates. See "[The WebSockets API](#the-websockets-api)" for more information. + +Unit tests will be defined using the Testify libraries. + +End-to-end tests will be defined using the Robot Framework. + +### Delivery + +The new service is implemented as an additional flavor of `vic-machine`: `vic-machine-server`. This standalone linux binary (a swagger server) is invoked with port and certificate information to serve the REST API on one or more interfaces. + +This service is packaged as a container and included in the VIC OVA, following existing best practices. + +The service does not require access to a persistent data directory, but it is useful to provide one as a destination for log files. 
+ +A configuration file (stored on the OVA's data directory) may eventually be used to provide the service with high-level configuration information, such as a syslog server and perhaps the admiral CA certificate (see "[Client certificate management using Admiral](#client-certificate-management-using-admiral)"). + +### Cardinality + +It is not assumed that a single instance of `vic-machine-server` will run in a given environment or will manage a given vSphere resource. It is also expected that one or more instances of `vic-machine-server` and the `vic-machine` CLI will be used side-by-side. This means that in-process locking will not be sufficient to protect resources from concurrent access or modification. + +In the future, specific use cases may be identified for multiple instances of `vic-machine-server` around availability, scalability, or isolation. + +### Service Upgrade + +The VIC OVA is upgraded in a side-by-side fashion: + +1. The new OVA is deployed. +2. The old OVA is shut down. +3. The data disk is moved from the old to the new. +4. A data migration script is run. + * In the case of upgrading to the first version of the OVA that includes the `vic-machine` server, sane default values could be assumed (e.g., by copying syslog settings from another service). +5. The new OVA is started. + +This does involve downtime of the service (but not of the VCHs or containers). Clients (including the H5 plugin) should handle this appropriately. 
+ +### Compatibility + +We expect the compatibility between `vic-machine-server` and VCHs to be similar to `vic-machine-{darwin,linux,windows}`: + + * Creation will only be supported for VCH of the same version + * Reconfigure will only be supported for VCH of the same version + * Upgrade will only be supported for VCH of the same or lesser versions + * Rollback will only be supported to the same VCH version + * Deletion will be supported for VCH of the same version, and will be best-effort for older versions + +We expect the vSphere H5 client plugin to support a single version of `vic-machine-server` and be upgraded in lock-step. + + - [ ] Do we need the plugin to provide at least basic support for the "N+1" version of `vic-machine-server` as well so that it remains functional between when the OVA is upgraded and when the plugin is upgraded? + +### Authentication + +As with the `vic-machine` CLI, vSphere credentials must be supplied each time an operation is invoked using `vic-machine-server`. The service itself will not store or manage credentials. This means that **all** operations must either be explicitly tied to a vSphere operation or unauthenticated. This is similar to the model for the `vic-machine` CLI, where all operations require vSphere credentials except for those to display help and version information. + +See "[Headers](#headers)" for information about how credentials will be passed, "[Certificate Management](#certificate-management)" for a discussion of how this design influences the design for certificate management, and "[Log Management](#log-management)" for a discussion of how this design influences the design for log persistence and access. + +### Certificate Management + +When invoking the `vic-machine` CLI, access to PKI files is managed out-of-band. PKI files can be placed on the filesystem, and paths passed to the CLI. Generated PKI files are placed on the filesystem for subsequent access, and the OS handles access control for those files. 
+ +With the REST API, these workflows need to be handled explicitly: the REST API must allow PKI files to be supplied as a part of requests and must allow for retrieval of generated PKI files, with appropriate access controls. + +#### Host Certificates + +Host certificates are persisted in the VCH's guest info and are available via the vSphere API today. `vic-machine-server` allows users to access them via its API, regardless of how the VCH was created. + +#### Client Certificates + +Due to the complexity of ensuring the secrecy of client certificate private keys, the API will not support generation of client certificates. (If this becomes a requirement, generated certificates could be protected with a user-supplied passphrase and stored in the same manner as log files.) + +For an alternative approach that allows for creation of VCHs without requiring users to specify client certificates, see "[Client certificate management using Admiral](#client-certificate-management-using-admiral)". + +### Log Management + +#### Operation logs + +When invoking the `vic-machine` CLI, real-time information is provided to stdout and log files can be persisted on the filesystem. The REST API needs to provide equivalent functionality. + +Logs will be streamed to the VCH's datastore folder as `vic-machine-server` executes. Access to logs is then restricted to those vSphere users who can read those files from the datastore. + +#### Server logs + +Logs for the server itself are written to a configurable directory. These logs include server lifecycle information as well as information about each request. Operation IDs are used to associate a request with the log messages related to the handling of that request. + +Logs are not rotated by the server. When deployed as a part of the OVA, `logrotate` is used to provide this functionality. 
+ +### Cross-Origin Requests & Cross-Site Request Forgery + +Because the vSphere H5 client plugin and the `vic-machine-server` will be served from different hosts, the H5 client plugin will be making cross-origin requests. As these would normally be prevented by the browser's same-origin policy, the service must support responding to all requests with an `Access-Control-Allow-Origin` header with an appropriate value, and respond appropriately to `Options` requests for all resources. + +The same-origin policy is intended to prevent cross-origin requests because the browser might inadvertently provide credentials on behalf of the user. Given that each individual request to the API must include credentials, and that we won't be using cookies or persisting those credentials in any other way, the same-origin policy does not provide protection in our case. + +To avoid additional configuration complexity, we will use a combination of `Access-Control-Allow-Origin: *`, ` Access-Control-Allow-Credentials: false` (the default value), and `Access-Control-Allow-Headers: Authorization, X-VMWARE-TICKET` to express that cross-origin requests are allowed from any origin, and that an `Authorization` or `X-VMWARE-TICKET` header may be included, but that the user-agent should not include cookies or HTTP authentication information based on the user agent's previous interactions with the API. This `Access-Control-Allow-Credentials` restriction will not affect the H5 client plugin, as it will be _explicitly_ including credentials by setting an `Authorization` or `X-VMWARE-TICKET` header, not relying on `XMLHttpRequest.withCredentials`. + +### ISO Management + +Several VCH operations (create, configure, and upgrade) take a pair of ISOs as input. These ISOs are used for booting the VCH appliance and container VMs respectively. + +The current implementation involves maintaining the master ISOs on the OVA and duplicating these ISOs for each VCH that is created. 
We may wish to change this behavior in the future to make it easier for customers to build their own bootstrap ISO, or to transition to something like direct boot. + +Even considering only the current model, uploading these ISOs as a part of service API calls would introduce complexity and inefficiency. + +For simplicity and flexibility, we will provide an API that lists "flavors" of ISOs which are known to the OVA and which may be used for API operations. By default, a single "flavor" will be included with the OVA: the stock appliance and bootstrap ISOs shipped with that release. We will provide instructions for users who wish to add additional "flavors" of the appliance ISO (e.g., in order to use a RHEL kernel or systemd), which might involve putting the ISOs in a particular directory on the OVA or adding paths to the ISOs in some sort of manifest file. For now, custom bootstrap ISOs will not be supported (but it is easy to imagine adding support following this same pattern). + +In the future, it may be desirable to improve our handling of ISOs to reduce duplication (e.g., by storing them on the datastore instead of within the OVA). This is, however, orthogonal to the introduction of an API. + + - [ ] Come up with a better term than "flavor". + +### Communicating modifiability + +VCH properties may or may not be modifiable for a variety of reasons. Some properties, such as the id, may never be modifiable. Other properties may not be modifiable without a power-cycle of the VCH. Yet others may be dependent on the state of other resources, such as whether containers are using an attached network. + +In all cases, the server would enforce unmodifiability when performing an operation, but to provide a good experience for direct and indirect users of the API, it would be helpful to communicate which properties of a given VCH are not currently modifiable and why. 
(That is, it's better to grey out a field in the interface and provide a help tooltip with an explanation than to allow users to attempt an operation which will inevitably fail.) + +A variety of approaches exist for this: + + * Capturing the general mutability rules in a formal language so that clients can evaluate those rules against the current state of the resource. This powerful pattern would allow interfaces built on the API to clearly communicate why a field cannot be modified, and offer options for remediation, without attempting the modification. However, this would require substantial effort for both the client and server. + * Evaluating mutability rules on the server to determine point-in-time mutability, and including that information as a part of GET requests. This reduces the burden on the client, while still allowing for a good user experience. However, this significantly bloats the API and leads to GET/PUT asymmetry. + * Documenting the mutability rules in a human-readable way. This would allow implementors of interfaces built on the API to read the rules and select some or all of them to evaluate in their client code. This can be cumbersome when clients wish to support multiple versions of the API, but seems to be the approach used by most common REST APIs today. + +With each of these approaches, it is also possible for the client or server to express a level of intrusiveness. For example, a server might communicate "the VCH must be restarted to modify this" instead of simply "this cannot be modified in the current state." Similarly, a client might communicate "I want to make this change, even if it requires restarting the VCH" or "I want to make this change, even if it requires powering off all containers." + +More complex logic can be introduced in the future, but in the interest of simplicity of the API and ease of implementation, it seems desirable to start simply with documenting the mutability rules in a human-readable way. 
As a next step, it may be useful to allow clients to communicate "I want to make this change, even if it requires restarting the VCH." + +### Use of a query parameter for compute-resource + +A datacenter represents an aggregation of resources within which a VCH may exist (spanning compute, storage, and networking), and is therefore included as a hierarchical path element. However, the compute-resource (i.e., cluster or resource pool) is, conceptually, a one-dimensional filter. Analogous filters for storage-resource or networking-resource could exist to identify VCHs using a particular datastore or network respectively. Expressing such filters as query parameters avoids the incorrect connotation of hierarchy that a path element would imply, and ensures composability. + +(In the future, one could even imagine a more flexible filter mechanism that allowed for filtering on _any_ property of a VCH. Under such a model, compute-resource could be deprecated in favor of a more verbose expression, or viewed as a shorthand.) + +### Use of a request body for delete + +While the semantics for including a body with a delete request are not generally defined ([RFC 7231](https://tools.ietf.org/html/rfc7231#section-4.3.5)), there does not seem to be a better way to influence the behavior of the operation. (Use of query parameters or path segments would be contrary to the [stylistic pattern](https://www.ics.uci.edu/~fielding/pubs/dissertation/rest_arch_style.htm) of uniform interface.) + +An alternative approach would be to model deletion as an action, but that may not be as intuitive for some consumers of the API. + +## The REST API + +### API Versioning + + - [ ] To do + +### Headers + +Where possible, the standard `Authorization` header will be used for authentication. 
+ +Currently, only one scheme is supported: + + * "basic", which will allow direct authentication with username and password + +Eventually, another may be added: + + * "Bearer", which will allow authentication via SAML, including from the H5 client plugin. + +Additionally, a session ticket may be specified using the `X-VMWARE-TICKET` header. + +### Resources + +The base resource for all API operations will be `/container`. + +Additional resources will exist to represent: + +1. a vSphere target (ESX, vCenter, or Datacenter) + * The root resource followed by `/target/{target-network-address}` + * The target-network-address parameter must be a valid network address (FQDN or IP address) of a vSphere Server (ESX or vCenter) + * The root resource followed by `/target/{target-network-address}/datacenter/{datacenter-id}` + * The target-network-address parameter must be a valid network address (FQDN or IP address) of a vCenter Server + * The datacenter-id parameter must be an identifier for a resource of type Datacenter located within that vCenter Server +2. the collection of VCHs within (1) + * Any resource from (1) followed by `/vch` +3. a VCH within (2) + * Any resource from (2) followed by `/{vch-id}` +4. a host certificate within (3) + * Any resource from (3) followed by `/certificate` +5. log data within (3) + * Any resource from (3) followed by `/log` + +Note: Given the use cases for this API, the exclusive use of identifiers (vs. names) seems acceptable. If necessary, lookup-by-name can be implemented using the `filter.names` query pattern from the vSphere REST API. + +### Query Parameters + +For all requests to all resources except the root resource: An optional "thumbprint" parameter will be supported to allow an API client to indicate the expected thumbprint of the target vSphere system. This parameter need not be supplied if the target system has a certificate signed by a trusted certificate authority. 
There will be no equivalent to the "force" command-line argument. + + - [ ] Figure out how certificate authority management will work. (Presumably there's something this can piggyback on.) + +For many requests, as detailed below: An optional "compute-resource" parameter will be supported to scope a request to a particular compute resource within a vSphere target. This is equivalent to the "compute-resource" command-line argument except that it takes an identifier instead of a name. (For context, see [Use of a query parameter for compute-resource](#use-of-a-query-parameter-for-compute-resource).) + +### Operations + +#### Get version information +``` +GET /container + +GET /container/version +``` + +A `GET` request on the base resource will return a JSON object containing metadata. Initially, the only piece of metadata included will be the version number. Eventually, this may include a list of known appliance ISOs. + +A `GET` request on the `version` sub-resource will return just the version. + + - [ ] Should this also capture the required vSphere permissions for various operations? (If so, how?) + +Corresponding CLI: `vic-machine-{darwin,linux,windows} version` + +#### Display static message +``` +GET /container/hello +``` + +A `GET` request on the `hello` resource will return a static "welcome" message for users who have been redirected to the server to accept SSL/TLS certificates. + +Corresponding CLI: N/A + +#### List VCHs +``` +GET /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch?[thumbprint={thumbprint}]&[compute-resource={compute-resource}] +``` + +Making a `GET` request on `/vch` under a target and optionally a datacenter will return information about the VCHs on that target, in that datacenter. 
+ + - [ ] Pagination + +Corresponding CLI: `vic-machine-{darwin,linux,windows} ls` + +#### Create a VCH +``` +POST /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch?[thumbprint={thumbprint}] +``` + +Making a `POST` request on `/vch` under a target and optionally a datacenter will create a VCH on that target, in that datacenter. Information about the VCH will be provided in the body of the request in a format similar to this. + +Note that validation of the request occurs synchronously, with any errors being returned using an appropriate response code and status. Eventually, portions of the creation will proceed asynchronously, with errors being reported via a vSphere task that is returned once the synchronous validation is complete. (See "[VCH creation spawns a custom task](#vch-creation-spawns-a-custom-task)".) + +Corresponding CLI: `vic-machine-{darwin,linux,windows} create` + +#### Inspect a VCH +``` +GET /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?[thumbprint={thumbprint}] +``` + +Making a `GET` request on a VCH resource will return information about the VCH. Information about the VCH will be provided in the body of the response in the same format as create. + +Corresponding CLI: `vic-machine-{darwin,linux,windows} inspect` + +#### Access the creation and reconfiguration log for a VCH +``` +GET /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}/log?[thumbprint={thumbprint}] +``` + +Making a `GET` request on `/log` under a VCH resource will return the contents of that VCH's log. The log is created during VCH creation and appended to during subsequent operations. This request is different than most others in that the return type is `text/plain`. + +Note that log information will be _persisted_ in multiple files (a timestamped file for each creation/mutation operation). The contents of those files will be combined into a single stream for consumption. 
In the future, more granular access could be provided. + +Corresponding CLI: N/A + +#### Access the host certificate for a VCH +``` +GET /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}/certificate?[thumbprint={thumbprint}] +``` + +Making a `GET` request on `/certificate` under a VCH resource will return the certificate the VCH uses when acting as a server, which clients may wish to access to download and add to a trust store. This request is different than most others in that the return type is `application/x-pem-file`. + +Corresponding CLI: N/A + +#### Reconfigure a VCH +``` +PUT /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?[thumbprint={thumbprint}] + +PATCH /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?[thumbprint={thumbprint}] +``` + +Making a `PUT` request on a VCH resource will update that VCH's configuration. Information about the VCH will be provided in the body of the request in the same format as create. + +In trying to strike a balance between the Robustness Principle and the Principle of Least Astonishment, we will allow for fields which cannot be modified to appear in the body of a `PUT` as long as the value of those fields match the current state of the object. This allows us to be relatively liberal in what we accept while avoiding the potential surprise of having edits dropped from the request. When the value of a field which cannot be modified does not match the current state, a 409 Conflict will be returned. To preserve the idempotency requirement for `PUT`, modifications to mutable portions of the body must not cause immutable portions of the body to change as a side-effect. + +Making a `PATCH` request on a VCH resource (with a body as described in RFC 7396) will update a subset of that VCH's configuration. 
+ +As `PATCH` is an explicit request to update a set of fields, fields which cannot be modified must not appear in the body of the `PATCH` request, even if the modification would be a no-op. + +Corresponding CLI: `vic-machine-{darwin,linux,windows} configure` + +#### Delete a VCH +``` +DELETE /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?[thumbprint={thumbprint}] +``` + +Making a `DELETE` request on a VCH resource will delete that VCH. By default, the VCH and any powered off containers will be deleted. A request body may be provided to indicate whether powered on containers and/or volume stores should be deleted. + +Corresponding CLI: `vic-machine-{darwin,linux,windows} delete` + +#### Upgrade a VCH +``` +POST /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?action=upgrade&[thumbprint={thumbprint}] +``` + +Making a `POST` request on a VCH resource with an action of upgrade will initiate an upgrade of the VCH. The body of the request will be a JSON object containing the following optional properties: `bootstrap-iso` (a reference to a known bootstrap ISO on the OVA) and `rollback` (a boolean value). + +Corresponding CLI: `vic-machine-{darwin,linux,windows} upgrade` + +#### Debug a VCH +``` +POST /container/target/{target-network-address}/[datacenter/{datacenter-id}]/vch/{vch-id}?action=debug&[thumbprint={thumbprint}] +``` + +Making a `POST` request on a VCH resource with an action of debug will modify the debug settings for the VCH. The body of the request will be a JSON object containing the following optional properties: `enable-ssh` (a boolean value), `authorized-key` (a string representation of a public key), `rootpw` (a string). 
+ +Corresponding CLI: `vic-machine-{darwin,linux,windows} debug` + +#### View firewall rule settings +``` +GET /container/target/{target-network-address}/[datacenter/{datacenter-id}]?[thumbprint={thumbprint}]&[compute-resource={compute-resource}] +``` + +Making a `GET` request on a vSphere target (with an optional datacenter and compute resource) will return information about the state of the host firewall on those resources. This allows a user to easily determine whether the hosts are in an appropriate state, and allows interfaces to display alerts when they are not. This also provides some measure of symmetry with the update firewall rules operation below. + +Corresponding CLI: N/A + +#### Update firewall rules +``` +POST /container/target/{target-network-address}/[datacenter/{datacenter-id}]?action=firewall:[allow|deny]&[thumbprint={thumbprint}]&[compute-resource={compute-resource}] +``` + +Making a `POST` request on a vSphere target (with an optional datacenter and compute resource) with an action of `firewall:allow` or `firewall:deny` will update the host firewall on those resources. + +Corresponding CLI: `vic-machine-{darwin,linux,windows} update` + +## The WebSockets API + +A WebSockets-based API will be used to provide streaming access to log data. + + - [ ] Specify this. + +## Proposed changes to existing functionality + +### VCH creation spawns a custom task + +Currently, VCH creation consists of three main steps: + +1. Validation of the request and the state of the system. +2. A series of vSphere operations to create the VCH. +3. Starting the container VM and its services. + +A single parent task could be created for the middle portion of this workflow, to be used as a starting point for a user wishing to query for the status of the creation operation. + +When initiating VCH creation via the service, the first (validation) step would occur synchronously. 
Once the second (vSphere) step begins and the custom task is created, the API would return the handle to the vSphere task, which tracks the asynchronous portion of the request. + +### Refactoring of `vic-machine` + +The CLI and REST API should each be a thin layer around a common idiomatic Go API. That is, the CLI and REST API should be interaction and translation layers which do not include "business logic". Refactoring may be necessary to achieve this. + +Eventually, the CLI could be re-imagined and implemented as a client of the REST API. The advantages and disadvantages of such a change should be carefully considered. + +### Deprecation of demo VCH installer + +The demo VCH installer is currently shipped as a part of the OVA as a container with a web application that listens on port 1337. It provides users with a simple web interface to provision a VCH for demo/testing purposes by invoking the `vic-machine` CLI. + +Re-implementing this UI using the new API would be feasible. However, it seems to make sense to consider the use cases for this application in the design of the vSphere H5 client plugin. And as there are no known backwards compatibility guarantees (or important use cases/workflows which would be impacted), it would make sense to deprecate and eventually remove the demo VCH installer once the vSphere H5 client plugin is available. + +### Client certificate management using Admiral + +Currently, `vic-machine` supports using user-specified client certificates or generating certificates for users to use. As an alternate model, it could delegate client certificate management to Admiral. + +As an outline of how this might work: + + * When Admiral is installed, it could generate a CA certificate and expose that certificate's public key (or make use of the vSphere CA). + * When a VCH is created, the Admiral CA certificate's public key could be supplied as the client certificate CA. 
+ * When a user is created in Admiral, it could generate a client certificate signed by its client CA certificate (or sign a public key supplied by the user). + * When a VCH is authenticating a request, the Admiral CA certificate's public key (from guest info) could be blended with dynamic configuration information from Admiral to limit access to only those users who have been granted access to the VCH's project in Admiral. + * When a user accesses a VCH, they would use the client certificate assigned by Admiral. + +This provides several key benefits: + + * Each user would have a single certificate which authorizes them to use all VCHs they have been granted access to. + * Operations performed on a VCH can be tied to a user. + * This would help address the lack of certificate revocation mechanisms and allow Admiral to be used to dynamically manage user authorization. + +Most relevantly, and perhaps most importantly: this means that a VCH creation workflow does not require upload or download of client certificates, allowing for a simplified user experience. + +The work that would be required for this would include: + +1. Discovery/lookup of the Admiral CA certificate by `vic-machine-service`. +2. Certificate blending within the VCH. (Validating that the client certificates supplied by Admiral's dynamic configuration are signed by a client CA certificate configured on the VCH, and then limiting access to only those client certificates.) +3. Per-user client certificate generation (or signing) within Admiral. +4. Support for per-user client certificates in Admiral dynamic config. +5. To allow operations performed on a VCH to be tied to a user: + 1. Expanding operation logging across the personality/portlayer boundary. + 2. Logging the public details of the client certificate used for authorization. 
+ +### Reduce friction around use of operations credentials + +Because a VCH must be able to interact with vSphere for a variety of operations on behalf of its users (which may not be vSphere administrators), it requires access to a set of vSphere credentials. + +When using the `vic-machine` CLI, the default behavior is to persist the credentials which were used for the VCH creation itself. Alternatively, administrators may supply a set of "operations credentials". + +This default option cannot be supported for API consumers using a session to authenticate, including the UI. As baseline functionality, we can require that operations credentials are supplied when authenticating with the API in this way. An administrator may choose to supply their own credentials as the operations credentials if they wish to mimic the CLI's default behavior. + +Requiring administrators to supply operations credentials works, but is likely to be a point of friction. Beyond the basic inconvenience of treating more information as required in the wizard, an operations user needs to have a non-trivial set of permissions which introduces an additional source of user error. + +Support for automatically creating a per-VCH solution user and assigning it the correct permissions seems like the best way to address these pain points. This design also has other advantages: solution users are the accepted mechanism for machine-to-machine interaction, solution users authenticate using a certificate instead of a password, and this would allow us to follow the principle of least privilege by default. + +However, issuing requests as a solution user is believed to present significant technical challenges due to immature golang support for SOAP and SAML. + +As an interim solution, we can: + +1. Introduce logic to validate that supplied operations credentials have the necessary permissions, so that missing permissions can be identified early and clearly. +2. 
Allow administrators to have vic-machine attempt to automatically grant any missing permissions, so that permissions can be corrected quickly and easily. +3. Support automatically creating a non-solution user on behalf of the administrator, and granting it a minimal set of permissions, to further reduce the work required of the administrator. + +Eventually, we can then: + +4. Enhance the container operations code to support use of a solution user and update the automatic creation logic to create a solution user. + +Each of these items builds on the previous, and all three act as building blocks towards #4. Each of these items would be useful for UI, API, and CLI. + +### Associating contact information with a VCH + +The original VCH configuration had an "environment contact" and an "administrative contact", which were stored using vSphere notes. This allowed a vSphere administrator to associate their contact information with a VCH, so that other administrators would know who to contact before making changes to the system. + +This information could also be included on the vicadmin page, even for unauthenticated users, to enable users of all types to know who to talk to for help. (Perhaps the contact information should be free-form to allow for things like referring to a ticket-tracking system.) + + - [ ] Customers with large environments must have a way to manage ownership information of entities. Can we learn from that? + +## Testing + +The service will require two types of tests: + +1. Unit tests to verify functionality of handler logic. +2. End-to-end tests to verify the API functionality from a client's point of view (and to serve as the first "client", and as a secondary form of documentation). + +Additionally, appropriate testing will be needed for each of the items in the "proposed changes to existing VIC functionality" section. 
diff --git a/infra/integration-image/Dockerfile b/infra/integration-image/Dockerfile index 42913b371e..d036af2409 100644 --- a/infra/integration-image/Dockerfile +++ b/infra/integration-image/Dockerfile @@ -4,9 +4,9 @@ # docker tag vic-test gcr.io/eminent-nation-87317/vic-integration-test:1.x # gcloud auth login # gcloud docker -- push gcr.io/eminent-nation-87317/vic-integration-test:1.x -# open vpn to CI cluster then run: -# docker tag vic-test 192.168.31.15/library/vic-integration-test:1.x -# docker push 192.168.31.15/library/vic-integration-test:1.x +# download and install harbor certs, then docker login, then: +# docker tag vic-test wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.x +# docker push wdc-harbor-ci.eng.vmware.com/default-project/vic-integration-test:1.x FROM golang:1.8 @@ -83,7 +83,7 @@ RUN curl -fsSLO https://releases.hashicorp.com/packer/0.12.2/packer_0.12.2_linux unzip packer_0.12.2_linux_amd64.zip -d /usr/bin && \ rm packer_0.12.2_linux_amd64.zip -RUN curl http://downloads.drone.io/release/linux/amd64/drone.tar.gz | tar zx && \ +RUN wget https://github.com/drone/drone-cli/releases/download/v0.8.3/drone_linux_amd64.tar.gz && tar zxf drone_linux_amd64.tar.gz && \ install -t /usr/local/bin drone RUN curl -sSL https://github.com/vmware/govmomi/releases/download/v0.16.0/govc_linux_amd64.gz | gzip -d > /usr/local/bin/govc && \ diff --git a/infra/machines/devbox/README.md b/infra/machines/devbox/README.md index 97eef41a44..ebbf222ce1 100644 --- a/infra/machines/devbox/README.md +++ b/infra/machines/devbox/README.md @@ -94,7 +94,7 @@ cd $GOPATH/src/github.com/vmware/vic ### Local Drone CI test ``` shell -drone exec -trusted -cache +drone exec ``` ## Devbox on ESX diff --git a/infra/scripts/ci-logs.sh b/infra/scripts/ci-logs.sh index 93ac9022a2..c8b2f8058f 100755 --- a/infra/scripts/ci-logs.sh +++ b/infra/scripts/ci-logs.sh @@ -61,7 +61,7 @@ shift $((OPTIND-1)) build="$1" job="$2" -export 
DRONE_SERVER=${DRONE_SERVER:-https://ci.vcna.io} +export DRONE_SERVER=${DRONE_SERVER:-https://ci-vic.vmware.com} if [ -z "$DRONE_TOKEN" ] ; then echo "DRONE_TOKEN not set (available at $DRONE_SERVER/settings/profile)" diff --git a/infra/scripts/go-deps.sh b/infra/scripts/go-deps.sh index fa06f799cf..0f48b97266 100755 --- a/infra/scripts/go-deps.sh +++ b/infra/scripts/go-deps.sh @@ -20,20 +20,36 @@ # # pkg This is github.com/vmware/vic/cmd/imagec for example # +# If VIC_CACHE_DEPS environment variable is defined, this script will attempt to read +# cached dependencies from disk if those exist. If they are not cached, dependencies will be +# regenerated and cached. + +cache_dir=.godeps_cache pkg=$1 flags=$2 +cachedname=`echo .$1.godeps_cache | sed 's/\//_/g'` if [ -d "$pkg" ]; then - if [[ "$flags" == *d* ]] - then + + if [[ "$flags" == *d* ]]; then # Only output if make is given the '-d' flag echo "Generating deps for $pkg" >&2 fi - go list -f '{{join .Deps "\n"}}' github.com/vmware/vic/"$pkg" 2>/dev/null | \ - xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' 2>/dev/null | \ - sed -e 's:github.com/vmware/vic/\(.*\)$:\1/*:' + if [ -n "$VIC_CACHE_DEPS" ]; then + mkdir -p $cache_dir + if [ ! 
-f $cache_dir/$cachedname ]; then + go list -f '{{join .Deps "\n"}}' github.com/vmware/vic/"$pkg" 2>/dev/null | \ + xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' 2>/dev/null | \ + sed -e 's:github.com/vmware/vic/\(.*\)$:\1/*:' > "$cache_dir/$cachedname" + fi + cat "$cache_dir/$cachedname" + else + go list -f '{{join .Deps "\n"}}' github.com/vmware/vic/"$pkg" 2>/dev/null | \ + xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' 2>/dev/null | \ + sed -e 's:github.com/vmware/vic/\(.*\)$:\1/*:' + fi else if [[ "$flags" == *d* ]] then diff --git a/infra/scripts/local-ci.sh b/infra/scripts/local-ci.sh new file mode 100755 index 0000000000..69b08e49d0 --- /dev/null +++ b/infra/scripts/local-ci.sh @@ -0,0 +1,215 @@ +#!/bin/bash +# Copyright 2018 VMware, Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +##### defaults +secretsfile="" +docker_test="Group1-Docker-Commands" +target_vch="" +odir="ci-results" +ci_container="gcr.io/eminent-nation-87317/vic-integration-test:1.44" +github_api_key="" +test_url="" +test_datastore="" +test_username="" +test_password="" +BASE_DIR=$(dirname $(readlink -f "$BASH_SOURCE")) +vic_dir=${BASE_DIR}/../../ + +##### utility functions +function usage() { + echo "Usage: $0 [options]" 1>&2 + echo + echo " Options can be provided by commandline argument, environment variable, or a secrets yaml file containing the" + echo " variables. 
If a secrets file is not provided, this script will attempt to retrieve some info from govc, such as" + echo " TEST_URL, TEST_USERNAME, and TEST_PASSWORD." + echo + echo " options:" + echo " -t DOCKER_TEST (or env var)" + echo " -v TARGET_VCH (or env var) name of VCH" + echo " -f SECRETS_FILE (or env var)" + echo " -g GITHUB_API_KEY (or env var)" + echo " -u TEST_URL (or env var or in secretsfile)" + echo " -s TEST_DATASTORE (or env var or in secretsfile)" + echo " -n TEST_USERNAME (or env var or in secretsfile)" + echo " -p TEST_PASSWORD (or env var or in secretsfile)" + echo " -d debug dumps out all the inputs and results of the resulting options" + echo + echo " example:" + echo " $0 -t Group1-Docker-Commands -v my_vch -s test.secrets.nested -g xxxxxx" + echo " $0 -t 1-01-Docker-Info.robot -v my_vch -s test.secrets.nested -g xxxxxx" + echo + echo " DOCKER_TEST=Group1-Docker-Commands/1-01-Docker-Info.robot TARGET_VCH=my_vch SECRETS_FILE=test.secrets.nested $0" + echo + echo " $0 -s test.secrets.nested (all params defined in secrets file)" + exit 1 +} + +function GetGovcParamsFromEnv() { + echo "Getting params from GOVC var" + test_username=$(govc env | grep GOVC_USERNAME | cut -d= -f2) + test_password=$(govc env | grep GOVC_PASSWORD | cut -d= -f2) + test_url=$(govc env | grep GOVC_URL | cut -d= -f2) +} + +function GetParamsFromSecrets() { + echo "Getting params from the secrets file" + secrets_api_key="$(grep 'GITHUB_AUTOMATION_API_KEY' ${secretsfile} | awk '{ print $2 }')" + secrets_url="$(grep 'TEST_URL_ARRAY' ${secretsfile} | awk '{ print $2 }')" + secrets_datastore="$(grep -E '\s+TEST_DATASTORE' ${secretsfile} | awk '{ print $2 }')" + secrets_username="$(grep 'TEST_USERNAME' ${secretsfile} | awk '{ print $2 }')" + secrets_password="$(grep 'TEST_PASSWORD' ${secretsfile} | awk '{ print $2 }')" + secrets_docker_test="$(grep 'DOCKER_TEST' ${secretsfile} | awk '{ print $2 }')" + secrets_target_vch="$(grep 'TARGET_VCH' ${secretsfile} | awk '{ print $2 }')" +} + 
+function DoWork() { + mkdir -p $odir + + testsContainer=$(docker create -it \ + -w /vic \ + -v "$vic_dir:/vic" \ + -e GOVC_URL="$ip" \ + -e GOVC_INSECURE=1 \ + -e GITHUB_AUTOMATION_API_KEY=${github_api_key}\ + -e TEST_URL_ARRAY=${test_url}\ + -e TEST_DATASTORE=${test_datastore}\ + -e TEST_USERNAME=${test_username}\ + -e TEST_PASSWORD=${test_password}\ + -e TARGET_VCH=${target_vch}\ + -e DEBUG_VCH=1\ + ${ci_container}\ + bash -c "pybot -d /vic/${odir} /vic/tests/test-cases/"$docker_test"") + + docker start -ai $testsContainer +} + +function DebugInputDump() { + echo "Environment Variables" + echo "---------------------" + echo "SECRETS_FILE="${SECRETS_FILE} + echo "TARGET_VCH="${TARGET_VCH} + echo "DOCKER_TEST="${DOCKER_TEST} + echo "GITHUB_API_KEY="${GITHUB_API_KEY} + echo "TEST_URL="${TEST_URL} + echo "TEST_DATASTORE"=${TEST_DATASTORE} + echo "TEST_USERNAME"=${TEST_USERNAME} + echo "TEST_PASSWORD"=${TEST_PASSWORD} + echo + echo "Arguments" + echo "---------------------" + echo "SECRETS_FILE="$secretsfile + echo "TARGET_VCH="$target_vch + echo "DOCKER_TEST="$docker_Test + echo "GITHUB_API_KEY="$github_api_key + echo "TEST_URL="$test_url + echo "TEST_DATASTORE"=$test_datastore + echo "TEST_USERNAME"=$test_username + echo "TEST_PASSWORD"=$test_password + echo + echo "Secrets file" + echo "---------------------" + echo "TARGET_VCH="$secrets_target_vch + echo "DOCKER_TEST="$secrets_docker_test + echo "GITHUB_API_KEY="$secrets_api_key + echo "TEST_URL="$secrets_url + echo "TEST_DATASTORE"=$secrets_datastore + echo "TEST_USERNAME"=$secrets_username + echo "TEST_PASSWORD"=$secrets_password +} + +function DebugDump() { + echo + echo "=====================" + echo "SECRETS_FILE="$secretsfile + echo "TARGET_VCH="$target_vch + echo "DOCKER_TEST="$docker_test + echo "GITHUB_API_KEY="$github_api_key + echo "TEST_URL="$test_url + echo "TEST_DATASTORE"=$test_datastore + echo "TEST_USERNAME"=$test_username + echo "TEST_PASSWORD"=$test_password + echo "vic-dir"=$vic_dir +} + 
+##### Get command line arguments +while getopts f:t:v:g:u:s:n:p:d flag +do + case $flag in + f) + secretsfile=$OPTARG + ;; + t) + docker_test="$OPTARG" + ;; + v) + target_vch=$OPTARG + ;; + g) + github_api_key=$OPTARG + ;; + u) + test_url=$OPTARG + ;; + s) + test_datastore=$OPTARG + ;; + n) + test_username=$OPTARG + ;; + p) + test_password=$OPTARG + ;; + d) + debug_enabled=1 + ;; + *) + usage + ;; + esac +done + +##### Preconditions... + +# There is a priority in the preconditions. First, environment variable. Second, secrets file. Third, command line argument. + +secretsfile=${SECRETS_FILE:-$secretsfile} +if [[ -z $secretsfile ]] ; then + GetGovcParamsFromEnv +else + GetParamsFromSecrets +fi + +if [[ ! -z ${debug_enabled} ]] ; then + DebugInputDump +fi + +target_vch=${TARGET_VCH:-$secrets_target_vch} +docker_test=${DOCKER_TEST:-$secrets_docker_test} +github_api_key=${GITHUB_API_KEY:-$secrets_api_key} +test_url=${TEST_URL:-$secrets_url} +test_datastore=${TEST_DATASTORE:-$secrets_datastore} +test_username=${TEST_USERNAME:-$secrets_username} +test_password=${TEST_PASSWORD:-$secrets_password} +if [[ -z ${target_vch} ]] || [[ -z "${docker_test}" ]] || [[ -z $github_api_key ]] || [[ -z $test_url ]] || [[ -z $test_datastore ]] || [[ -z $test_password ]] ; then + usage +fi + +if [[ ! 
-z ${debug_enabled} ]] ; then + DebugDump +fi + +##### The actual work +DoWork diff --git a/isos/base/photon-local.repo b/isos/base/photon-local.repo index 2630a336eb..9f49760af4 100644 --- a/isos/base/photon-local.repo +++ b/isos/base/photon-local.repo @@ -1,5 +1,5 @@ [photon] name=VMware Photon Linux 1.0(x86_64) -baseurl=http://192.168.31.16/photon +baseurl=http://wdc-yum-builder-ci.eng.vmware.com/photon gpgcheck=0 enabled=1 diff --git a/isos/base/photon-updates-local.repo b/isos/base/photon-updates-local.repo index 1e8e8c091b..c106519550 100644 --- a/isos/base/photon-updates-local.repo +++ b/isos/base/photon-updates-local.repo @@ -1,5 +1,5 @@ [photon-updates] name=VMware Photon Linux 1.0(x86_64) -baseurl=http://192.168.31.16/photon-updates +baseurl=http://wdc-yum-builder-ci.eng.vmware.com/photon-updates gpgcheck=0 enabled=1 diff --git a/isos/bootstrap/bootstrap b/isos/bootstrap/bootstrap index c133136e8b..610579c14d 100755 --- a/isos/bootstrap/bootstrap +++ b/isos/bootstrap/bootstrap @@ -31,7 +31,9 @@ mkdir -p /mnt/containerfs echo "Waiting for rootfs" while [ ! -e /dev/disk/by-label/containerfs ]; do sleep 0.1;done -if mount -t ext4 /dev/disk/by-label/containerfs ${MOUNTPOINT}; then +# https://github.com/vmware/vic/issues/6379 +# grab dmesg output and dump to debug log if mount doesn't occur in a useful timeframe (2min) +if timeout --signal=KILL 2m mount -t ext4 /dev/disk/by-label/containerfs ${MOUNTPOINT}; then # ensure mountpoint exists mkdir -p ${MOUNTPOINT}/.tether @@ -105,6 +107,11 @@ else # TODO: what do we do here? 
we really need to somehow report an error # fail hard echo "Unable to chroot into container filesystem" + + # dump dmesg data in case there's a system problem injecting or loading the root filesystem + dmesg + # because dmesg is long and will wrap over console + echo "dmesg dump due to root filesystem mount failure" fi # Shut the system down diff --git a/lib/apiservers/engine/API-COMPATIBILITY.md b/lib/apiservers/engine/API-COMPATIBILITY.md deleted file mode 100644 index ef3ab28b5b..0000000000 --- a/lib/apiservers/engine/API-COMPATIBILITY.md +++ /dev/null @@ -1,405 +0,0 @@ -# Docker Remote API Compatibility -VIC attempts to be compatible with the Docker remote API; however, there are some differences. The API differences are documented below, grouped together by functional areas. - -Here are the [Docker v1.22 docs](https://docs.docker.com/engine/reference/api/docker_remote_api_v1.22/) for the readers convenience. Only the calls that will have differences in implementation are documented below. 
- -# Support Matrix - -## Container API Support - -|API|support?| -|---|---| -|List Containers|YES| -|Create a Container|YES| -|inspect a container| YES| -|List processes running inside a container|YES| -|Get Container Logs|YES| -|Inspect changes on a containers filesystem|Future| -|Export a container|Future| -|Get Container stats based on resource usage|Future| -|Resize a container TTY|YES| -|Start a Container|YES| -|Stop a container|YES| -|Restart a Container|YES| -|kill a container|YES| -|Update a Container|*Maybe*| -|Rename a Container|YES| -|Pause a Container|*maybe*| -|Unpause a Container|*maybe*| -|Attach a Container|YES| -|Attach a Container(websocket)|*maybe*| -|wait a container|YES| -|Remove a Container|YES| -|Copy files or folders from a Container|YES| -|Retrieve information about files and folders in a container|YES| -|Get and archive of a filesystem resource in a container|YES| -|Extract an archive of files or folders to a driectory in a container|YES| - -## Image API Support - -|API|support?| -|---|---| -|List Images|YES| -|Build image from a Dockerfile|Future| -|Create an Image|YES| -|Inspect and Image |YES| -|Get the History of an Image|YES| -|Push an image on the registry|Future| -|Tag an image into a repository|YES| -|Remove and Image|YES| -|Search Images|YES| - -## Misc API Support - -|API|support?| -|---|---| -|Check auth Configuration|YES| -|Display System-wide Information|YES| -|Show the docker version information|YES| -|Ping the Docker server|YES| -|Create a New Image from a containers changes|Future| -|Monitor Docker Events|YES| -|Get a tarball containing all images in a repository|Future| -|Get a tarball containing all images|Future| -|Load a Tarball with a set of images and tags into docker|YES| -|Exec Create |*maybe*| -|Exec Start |*maybe*| -|Exec Resize|*maybe*| -|Exec Inspect|*maybe*| - - -# Container APIs - -## Create a container - -``` -POST /containers/create -``` - -### Json Fields - -**misc** - -|field|description|supported?| 
-|---|---|---| -|AttachStdin|Attach to Stdin|YES| -|AttachStdout|Attach to STDOUT|YES| -|AttachStderr|Attach to STDERR|YES| -|Cmd|Array of strings which specify commands to run|YES| -|Domainname|string to use for the containers domain|YES| -|Env|Set environment variables|YES| -|Entrypoint=""|Overwrite the default ENTRYPOINT of the image|YES| -|Hostname|Container host name|YES| -|Image|Image name to be used for the container|*maybe*| -|OpenStdin|Keep STDIN open even if not attached|YES| -|StdinOnce|close stdin once single attached client disconnects|YES| -|NetworkDisabled|boolean value that controls the presence of a network for the container.|YES| -|ExposedPorts|Object mapping to an empty object |YES| -|StopSignal|Signal to stop the container. can be a string or unsigned integer| *maybe*| -|Tty|Allocate a pseudo-TTY|*diff*| -|User|Username or UID|*diff*| -|WorkingDir|Working directory inside the container|YES| - - -**NetworkSettings** - -|Param|Docker|VIC| -|---|---|---| -|"Bridge"|""|?| -|"SandboxID"|""|?| -|"HairpinMode"|""|?| -|"LinkLocalIPv6Address"|""|?| -|"LinkLocalIPv6PrefixLen"|""|?| -|"Ports"|""|?| -|"SandboxKey"|""|?| -|"SecondaryIPAddresses"|""|?| -|"SecondaryIPv6Addresses"|""|?| -|"EndpointID"|""|?| -|"Gateway"|""|?| -|"GlobalIPv6Address"|""|?| -|"GlobalIPv6PrefixLen"|""|?| -|"IPAddress"|""|?| -|"IPPrefixLen"|""|?| -|"IPv6Gateway"|""|?| -|"MacAddress"|""|?| -|"Networks"|-|VIC will have some form of this data| - - - -**Host Config field** - -|field|description|supported| -|---|---|---| -|field|description|Support| -|Binds|volume binds|*diff*| -|BlkioWeight|Block IO weight (relative weight) accepts a weight value between 10 and 1000.|NO| -|BlkioWeightDevice|Block IO weight (relative device weight) in the form of: "BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]|NO| -|"BlkioDeviceReadBps"|Limit read rate (bytes per second) from a device in the form of: "BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}], for example: 
"BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"|NO| -|BlkioDeviceWriteBps|Limit write rate (bytes per second) to a device in the form of: "BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"|NO| -|"BlkioDeviceReadIOps"|Limit read rate (IO per second) from a device in the form of: "BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]|NO| -|BlkioDeviceWiiteIOps|Limit write rate (IO per second) to a device in the form of: "BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]|NO| -|CapAdd|A list of kernel capabilities to add to the container.|NO| -|CapDrop|A list of kernel capabilities to drop from the container.|NO| -|ContainerIDFile|-|NO| -|CpusetCpus|CPUs in which to allow execution (e.g., 0-3, 0,1).|NO| -|CpusetMems|Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.|NO| -|CpuShares|CPU shares (relative weight).|NO| -|CpuPeriod|The length of a CPU period in microseconds.|NO| -|Devices|A list of devices to add to the container specified as a JSON object in the form { "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}|NO| -|Dns|A list of DNS servers for the container to use.|YES| -|DnsOptions|A list of DNS options|NO| -|DnsSearch|A list of DNS search domains|YES| -|ExtraHosts|A list of hostnames/IP mappings to add to the container’s /etc/hosts file. Specified in the form ["hostname:IP"]|NO| -|IpcMode|-|NO| -|Links|A list of links for the container. Each link entry should be in the form of container_name:alias|NO| -|LxcConf|-|NO| -|Memory|Memory limit in bytes|NO| -|MemorySwap|Total memory limit (memory + swap); set -1 to enable unlimited swap. 
You must use this with memory and make the swap value larger than memory.|NO| -|MemoryReservation|Memory soft limit in bytes|NO| -|KernelMemory|Kernel memory limit in bytes.|NO| -|OomKillDisable|Boolean value, whether to disable OOM Killer for the container or not.|NO| -|OomScoreAdj|An integer value containing the score given to the container in order to tune OOM killer preferences.|NO| -|NetworkMode|Sets the networking mode for the container. Supported standard values are: bridge, host, none, and container:. Any other value is taken as a custom network’s name to which this container should connect to.|*diff*| -|PortBindings|A map of exposed container ports and the host port they should map to. A JSON object in the form { /: [{ "HostPort": "" }] } Take note that port is specified as a string and not an integer value.|NO| -|Privileged|Gives the container full access to the host. Specified as a boolean value.|NO, vms are the isolation unit in VIC and the commands inside of the container has access to the host's vm| -|ReadonlyRootfs|Mount the container’s root filesystem as read only. Specified as a boolean value.|NO| -|PublishAllPorts|Allocates a random host port for all of a container’s exposed ports. Specified as a boolean value.|NO| -|RestartPolicy|The behavior to apply when the container exits.|NO| -|LogConfig|Log configuration for the container, specified as a JSON object in the form { "Type": "", "Config": {"key1": "val1"}}. Available types: json-file, syslog, journald, gelf, awslogs, splunk, none. json-file logging driver.|YES| -|SecurityOpt|A list of string values to customize labels for MLS systems, such as SELinux.|NO| -|VolumesFrom|A list of volumes to inherit from another container. 
Specified in the form [:]|NO| -|Ulimits|A list of ulimits to set in the container, specified as { "Name": , "Soft": , "Hard": }, for example: Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }|NO| -|VolumeDriver|Driver that this container users to mount volumes.|NO, VIC has its own volume driver| -|ShmSize|Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB|NO| - -**State** - -|Param|Docker|VIC| -|---|---|---| -|"Error"|-|NO| -|"ExitCode"|-|YES| -|"FinishedAt"|-|YES| -|"OOMKilled"|-|?| -|"Dead"|-|YES| -|"Paused"|-|YES| -|"Pid"|-|NO| -|"Restarting"|-|?| -|"Running"|-|?| -|"StartedAt"|-|YES| -|"Status"|-|YES| - - - -**Mounts** - -This is the structure of the `Mounts` field in the request. - -``` - "Mounts": [ - { - "Name": "fac362...80535", - "Source": "/data", - "Destination": "/data", - "Driver": "local", - "Mode": "ro,Z", - "RW": false, - "Propagation": "" - } - ] -``` - -###Response - -The response for the create call is very simple in nature - -it will return these two fields as json object in the body of an HTTP call. - -* __Id__ : this is the id of the created containers. -* __Warnings__ : an array of strings that contain warning messages. - -### Status Codes - -* 201 : no error -* 404 : no such container -* 506 : impossible to attach -* 500 : server error - -## Get container logs - -```GET /containers/(id)/logs``` - -This operation will return StdOut and StdErr logs from the target container. - -### Query parameters - -|Parameter|Description|support| -|---|---|---| -| __follow__ |1/true/True or 0/false/False, defaults to false. This determines if a stream is returned. | YES | -| __stdout__ |1/true/True or 0/false/False, defaults to false. Indicates whether stdout logs are desired. | YES | -| __stderr__ | 1/true/True or 0/false/False, defaults to false. Indicates whether stderr logs are desired.| YES | -| __since__ | integer based UNIX timestamp with which to filter logs. 
Only logs since the timestamp will be returned. Default: 0. | *maybe* | -| __timestamps__ | 1/true/True or 0/false/False, defaults to false. timestamped log lines. Defaults to false.| *maybe* | -| __tail__ | all or . indicates how many lines to output since the end of the logs. | *maybe* | - -### Status Codes - -* 101 : no error, indicates proxy about hijacking -* 200 : no error, no upgrade header from attach found -* 404 : no such container -* 500 : server error - -## Export a container - -```GET /containers/(id)/export``` - -This exports the contents of a container as a flattened file for other than docker use. -__VIC plans to support this in the future__ - -### Response - -HTTP response that returns a binary stream of the flattened file in a tarball. The response is of type application/octet-stream. - -### Response Status Codes - -* 200 : no error -* 404 : no such container -* 500 : server error - -## Attach to a container - -```POST /containers/(id or name)/attach``` - -This call will return a stream of the targeted containers common io streams(stdout, stdin, stderr). It is important to note that this involves a `HTTP UPGRADE` response. - -### Query Parameters - -|parameter| description| supported | -|---|---|---| -|detachKeys|Override key for detaching a container. e.g. a-z| *maybe*| -|logs| 1/true/True or 0/false/False. if `true` returns logs defaults to `false`| *maybe*| -|stream| 1/true/True or 0/false/False. if `true` returns a stream defaults to `false`| YES| -|stdin| 1/true/True or 0/false/False. if `true` and `stream=1` returns stdin as a stream. defaults to `false`| YES| -|stdout| 1/true/True or 0/false/False. if `true` and `stream=1` returns stdout as a stream. defaults to `false`| YES| -|stderr| 1/true/True or 0/false/False. if `true` and `stream=1` returns stderr as a stream. 
defaults to `false`| YES| - - -###Status codes - -* 101 : no error, indicates proxy about hijacking -* 200 : no error, no upgrade header found -* 400 : bad parameter -* 404 : no such container -* 500 : server error - -###Stream Implementation - -__TBD__ - -## Attach to a container (websocket) - -```GET /containers/(id or name)/attach/ws``` - -Handshake according to `RFC 6455` - -### Query Parameters - -|parameter| description| supported | -|---|---|---| -|detachKeys|Override key for detaching a container. e.g. a-z| *maybe*| -|logs| 1/true/True or 0/false/False. if `true` returns logs defaults to `false`| *maybe*| -|stream| 1/true/True or 0/false/False. if `true` returns a stream defaults to `false`| YES| -|stdin| 1/true/True or 0/false/False. if `true` and `stream=1` returns stdin as a stream. defaults to `false`| YES| -|stdout| 1/true/True or 0/false/False. if `true` and `stream=1` returns stdout as a stream. defaults to `false`| YES| -|stderr| 1/true/True or 0/false/False. if `true` and `stream=1` returns stderr as a stream. defaults to `false`| YES| - -###Status codes - -* 101 : no error, indicates proxy about hijacking -* 200 : no error, no upgrade header found -* 400 : bad parameter -* 404 : no such container -* 500 : server error - -## Remove a container - -```Delete /containers/(id or name)``` - -removes indicated container. - -###Query Parameter - -|parameter|description|supported?| -|---|---|---| -|__v__| 1/true/True or 0/false/False. Removes associated volumes of container.| *maybe*| -|__force__| 1/true/True or 0/false/False. Perform a kill then remove operation | YES | - -###Status Codes - -* 204 : no error -* 400 : bad parameter -* 404 : no container -* 500 : server error - -# Image APIs - -## Build image from a Dockerfile - -__Supported in the Future__ - -```POST /build``` - -build an image from a Dockerfile. - -### Request - -the request should probide a tar stream of the file to be used as the docker file. 
- -### Query Parameters - -|parameter| description | Supported? | -|---|---|---| -|dockerfile|Path within build context to the dockerfile.|YES| -| t | tag name for the build. `name:tag`. Default: `latest`. More than one can be provided. |YES| -|remote| GIT repository URI or HTTP/HTTPS URI. if the target is a file, the contents are put into a file called Dockerfile. | YES | -| q | suppress verbose build output| YES | -|nocache| Do not use build cache | *maybe* | -| pull | Attempt to pull latest image even if one is cached | YES | -|rm| remove intermediate containers after build | *maybe* | -|forcerm|always remove intermediate containers| *maybe* | -|memory| memory limit for build| *YES but different*| -|memswap|total memory|NO| -|cpushares|CPU shares|NO| -|cpusetcpus|CPUs in which to allow execution| NO| -|cpuperiod|the length of a cpu period|NO| -|cpuquota|micreseconds of cpu time use in each cpu period|NO| -|buildargs|the key values in this json object are used as the build context for commands run using the `Run` docker command. |YES| -|shmsize| size of `/dev/shm` in bytes. 
Default 64MB|NO| - -### Request Headers - -|header| description|supported?| -|---|---|---| -|Content-type| application/tar | YES | -|x-Registry-Config| JSON config object that allows for providing mapped credentials to different registries that may be needed to build a specific image| YES| - -### Status Codes - -* 200 : no error -* 500 : server error - -# Network APIs - -### List networks -### Inspect network -### Create a network -### Connect a container to a network -### Disconnect a container from a network -### Remove a network - - -# Volume APIs - -### List volumes -### Create a volume -### Inspect a volume -### Remove a volume diff --git a/lib/apiservers/engine/CLI-COMPATIBILITY.md b/lib/apiservers/engine/CLI-COMPATIBILITY.md deleted file mode 100644 index 7c5d97254b..0000000000 --- a/lib/apiservers/engine/CLI-COMPATIBILITY.md +++ /dev/null @@ -1,275 +0,0 @@ -# Docker CLI Compatibility -VIC attempts to be compatible with the Docker client (CLI); however, there are some differences. The CLI differences are documented below, alphabetically. Note, the operations listed as supported but with differences does not mean incompatible. It means most of these operations support either a smaller subset of the parameters or the request/response parameters have different meanings that apply to running in an ESX or vCenter environment. - -The list is organized into 3 separate tables. The first table lists CLI operations that are supported in the first version of VIC. The second table lists operations that VIC may support in the future. 
- -### VIC Support - -|Docker CLI Operation | support viable | technical priority | -|--- | ---| ---| -|attach | YES| P0 - feel good factor and interactive debug | -|cp | YES| P1 - live copy | -|create | *Yes - differences*| P0 - core function: use | -|events | *Yes - differences*| P1 - necessary for automation | -|history | YES| - | -|images | YES| P0 - core function: admin | -|login | YES| - | -|logout | YES| - | -|logs | YES| P0 - core function: use | -|network connect| YES| P2 - necessary only for dynamic topology changes | -|network create| *Yes - differences*| P0 - core function: use/admin | -|network disconnect| YES| P2 - symmetrical with connect | -|network ls| YES| P0 - core function: use | -|network rm| YES| - | -|port | YES| P1 - core function: use - necessity reduced if not using NAT model | -|ps | YES| P0 - core function: use/admin | -|pull | YES| P0 - core function: use | -|rm | YES| P0 - core function: use/admin | -|rmi | YES| P1 - core function: admin - maybe less critical in production | -|run | *Yes - differences*| P0 - core function: use | -|rename | YES| - | -|restart | YES| P0 - core function: use | -|start | YES| P0 - core function: use | -|stop | YES| P0 - core function: use | -|tag| YES| - | -|version | YES| P0 - core function: admin | -|volume create| *Yes - differences*| P0 - core function: use/admin | -|volume inspect| YES| P1 - core function: admin - maybe less critical in production | -|volume ls| YES| P0 - core function: use | -|volume rm| YES| P1 - core function: use/admin - depends on dynamism of volume use | -|exec | YES | P1 - interactive debug | -|pause | YES| - | -|stats | YES| P1 - core function: diagnostics | -|top | YES| - | -|unpause | YES| - | -|wait | YES | P1 - necessary for automation | - -The technical priority ranking is as follows (assessed with regard to production deployment): -P0 - delay GA if not available -P1 - significant reduction in capability - consider slipping -P2 - confined reduction in capability -- - not 
significant in initial use cases - -### Future Support - -|Docker CLI Operation | -|--- | -|build | -|commit | -|diff | -|export | -|import | -|load | -|push | -|save | -|search | - -The following sections dive further into these differences. - -## create - -``` -Usage: docker create [OPTIONS] IMAGE [COMMAND] [ARG...] - -Create a new container -``` - -| params | Docker | VIC | -|---|---|---| -|-a, --attach=[]|Attach to STDIN, STDOUT or STDERR|YES| -|--add-host=[] |Add a custom host-to-IP mapping (host:ip)|YES| -|-c, --cpu-shares=0 | CPU shares (relative weight)|*diff*| -|--cap-add=[]|Add Linux capabilities|NO, VIC containers do not run in cgroups| -|--cap-drop=[]|Drop Linux capabilities|NO, VIC containers do not run in cgroups| -|--cgroup-parent=""|Optional parent cgroup for the container|NO, VIC containers do not run in cgroups| -|--cidfile=""|Write the container ID to the file|YES| -|--cpuset-cpus=""|CPUs in which to allow execution (0-3, 0,1)|NO| -|--device=[]|Add a host device to the container|*diff*| -|--dns=[]|Set custom DNS servers|YES| -|--dns-search=[]|Set custom DNS search domains|YES| -|-e, --env=[]|Set environment variables|YES| -|--entrypoint=""|Overwrite the default ENTRYPOINT of the image|YES| -|--env-file=[]|Read in a file of environment variables|YES| -|--expose=[]|Expose a port or a range of ports|YES| -|-h, --hostname=""|Container host name|YES| -|-i, --interactive=false|Keep STDIN open even if not attached|*maybe*| -|--ipc=""|IPC namespace to use|NO, VIC containers run in separate VMs| -|-l, --label=[]|Set metadata on the container (e.g., --label=com.example.key=value)|YES| -|--label-file=[]|Read in a line delimited file of labels|YES| -|--link=[]|Add link to another container|*maybe*| -|--log-driver=""|Logging driver for container|YES| -|--lxc-conf=[]|Add custom lxc options|NO, VIC containers do not run in cgroups| -|-m, --memory=""|Memory limit|*diff*| -|--mac-address=""|Container MAC address (e.g. 
92:d0:c6:0a:29:33)|YES| -|--name=""|Assign a name to the container|YES| -|--net="bridge"|Set the Network mode for the container|YES| -|-P, --publish-all=false|Publish all exposed ports to random ports|*maybe*| -|-p, --publish=[]|Publish a container's port(s) to the host|YES| -|--privileged=false|Give extended privileges to this container|*diff*| -|--read-only=false|Mount the container's root filesystem as read only|*diff*| -|--restart="no"|Restart policy (no, on-failure[:max-retry], always)|*maybe*| -|--security-opt=[]|Security options|*maybe*| -|-t, --tty=false|Allocate a pseudo-TTY|*diff*| -|-u, --user=""|Username or UID|*diff*| -|-v, --volume=[]|Bind mount a volume|*diff*| -|--volumes-from=[]|Mount volumes from the specified container(s)|*maybe*| -|-w, --workdir=""|Working directory inside the container|YES| - - -## events - -Events **may** may also report ESX and vCenter events. - - -## info - -The information provided by *info* appears similar to those reported by Docker but many of them are specific to vSphere. - -## inspect - -Some parameters **returned** by *inspect* are specific to vSphere. Others are not relevant. - -### Response: - -The response for inspect is a JSON document composed of several inner documents. The following are the primary subdocuments that differs. For the rest of the response, please visit, -``` -https://docs.docker.com/engine/reference/api/docker_remote_api_v1.22/#inspect-a-container. -``` - -**AppArmorProfile** - -A VIC container isolation unit is a VM. Processes running in a VIC container do not use AppArmor profiles. 
- -**HostConfig** - -|Param|Docker|VIC| -|---|---|---| -|"Binds"|volume binds|*diff*| -|"BlkioWeight|Block IO weight (relative weight) accepts a weight value between 10 and 1000.|NO| -|"BlkioWeightDevice"|Block IO weight (relative device weight) in the form of: "BlkioWeightDevice": [{"Path": "device_path", "Weight": weight}]|NO| -|"BlkioDeviceReadBps"|Limit read rate (bytes per second) from a device in the form of: "BlkioDeviceReadBps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceReadBps": [{"Path": "/dev/sda", "Rate": "1024"}]"|NO| -|"BlkioDeviceWriteBps"|Limit write rate (bytes per second) to a device in the form of: "BlkioDeviceWriteBps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceWriteBps": [{"Path": "/dev/sda", "Rate": "1024"}]"|NO| -|"BlkioDeviceReadIOps"|Limit read rate (IO per second) from a device in the form of: "BlkioDeviceReadIOps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceReadIOps": [{"Path": "/dev/sda", "Rate": "1000"}]|NO| -|"BlkioDeviceWiiteIOps"|Limit write rate (IO per second) to a device in the form of: "BlkioDeviceWriteIOps": [{"Path": "device_path", "Rate": rate}], for example: "BlkioDeviceWriteIOps": [{"Path": "/dev/sda", "Rate": "1000"}]|NO| -|"CapAdd"|A list of kernel capabilities to add to the container.|NO| -|"CapDrop"|A list of kernel capabilities to drop from the container.|NO| -|"ContainerIDFile"|-|NO| -|"CpusetCpus"|CPUs in which to allow execution (e.g., 0-3, 0,1).|NO| -|"CpusetMems"|Memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems.|NO| -|"CpuShares"|CPU shares (relative weight).|NO| -|"CpuPeriod"|The length of a CPU period in microseconds.|NO| -|"Devices"|A list of devices to add to the container specified as a JSON object in the form { "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}|NO| -|"Dns"|A list of DNS servers for the container to use.|YES| -|"DnsOptions"|A list of DNS options|NO| -|"DnsSearch"|A list of DNS search domains|YES| -|"ExtraHosts"|A list of hostnames/IP mappings to add to the container’s /etc/hosts file. Specified in the form ["hostname:IP"]|NO| -|"IpcMode"|-|NO| -|"Links"|A list of links for the container. Each link entry should be in the form of container_name:alias|NO| -|"LxcConf"|-|NO| -|"Memory"|Memory limit in bytes|NO| -|"MemorySwap"|Total memory limit (memory + swap); set -1 to enable unlimited swap. You must use this with memory and make the swap value larger than memory.|NO| -|"MemoryReservation"|Memory soft limit in bytes|NO| -|"KernelMemory"|Kernel memory limit in bytes.|NO| -|"OomKillDisable"|Boolean value, whether to disable OOM Killer for the container or not.|NO| -|"OomScoreAdj"|An integer value containing the score given to the container in order to tune OOM killer preferences.|NO| -|"NetworkMode"|Sets the networking mode for the container. Supported standard values are: bridge, host, none, and container:name|id. Any other value is taken as a custom network’s name to which this container should connect to.|*diff*| -|"PortBindings"|A map of exposed container ports and the host port they should map to. A JSON object in the form { /: [{ "HostPort": "" }] } Take note that port is specified as a string and not an integer value.|NO| -|"Privileged"|Gives the container full access to the host. 
Specified as a boolean value.|NO, vms are the isolation unit in VIC and the commands inside of the container has access to the host's vm| -|"ReadonlyRootfs"|Mount the container’s root filesystem as read only. Specified as a boolean value.|NO| -|"PublishAllPorts"|Allocates a random host port for all of a container’s exposed ports. Specified as a boolean value.|NO| -|"RestartPolicy"|The behavior to apply when the container exits.|NO| -|"LogConfig"|Log configuration for the container, specified as a JSON object in the form { "Type": "", "Config": {"key1": "val1"}}. Available types: json-file, syslog, journald, gelf, awslogs, splunk, none. json-file logging driver.|YES| -|"SecurityOpt"|A list of string values to customize labels for MLS systems, such as SELinux.|NO| -|"VolumesFrom"|A list of volumes to inherit from another container. Specified in the form [:]|NO| -|"Ulimits"|A list of ulimits to set in the container, specified as { "Name": , "Soft": , "Hard": }, for example: Ulimits: { "Name": "nofile", "Soft": 1024, "Hard": 2048 }|NO| -|"VolumeDriver"|Driver that this container users to mount volumes.|NO, VIC has its own volume driver| -|"ShmSize"|Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB|NO| - -**misc params** - -|Param|Docker|VIC| -|---|:---|---| -|HostnamePath|e.g. "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname"|This has no meaning in VIC.| -|HostsPath|e.g. "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts"|This has no meaning in VIC| -|LogPath|e.g. 
"/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log"|This has no meaning in VIC| -|"Id"|Image ID, Sha256 content addressable hash|YES| -|"Image"|-|NO?| -|"MountLabel"|-|NO| -|"Name"|Docker's human readable name|YES| -|"Path"|-|NO| -|"ProcessLabel"|-|NO| -|"ResolvConfPath"|-|NO| -|"RestartCount"|-|NO| - -**NetworkSettings** - -|Param|Docker|VIC| -|---|---|---| -|"Bridge"|""|?| -|"SandboxID"|""|?| -|"HairpinMode"|""|?| -|"LinkLocalIPv6Address"|""|?| -|"LinkLocalIPv6PrefixLen"|""|?| -|"Ports"|""|?| -|"SandboxKey"|""|?| -|"SecondaryIPAddresses"|""|?| -|"SecondaryIPv6Addresses"|""|?| -|"EndpointID"|""|?| -|"Gateway"|""|?| -|"GlobalIPv6Address"|""|?| -|"GlobalIPv6PrefixLen"|""|?| -|"IPAddress"|""|?| -|"IPPrefixLen"|""|?| -|"IPv6Gateway"|""|?| -|"MacAddress"|""|?| -|"Networks"|-|VIC will have some form of this data| - -**State** - -|Param|Docker|VIC| -|---|---|---| -|"Error"|-|NO| -|"ExitCode"|-|YES| -|"FinishedAt"|-|YES| -|"OOMKilled"|-|?| -|"Dead"|-|YES| -|"Paused"|-|YES| -|"Pid"|-|NO| -|"Restarting"|-|?| -|"Running"|-|?| -|"StartedAt"|-|YES| -|"Status"|-|YES| - -**Mounts** - -An array of mount points. VIC will provide some form of this. Here is an example of what Docker returns. - -``` - "Mounts": [ - { - "Name": "fac362...80535", - "Source": "/data", - "Destination": "/data", - "Driver": "local", - "Mode": "ro,Z", - "RW": false, - "Propagation": "" - } - ] -``` - -## network create - -The network creation workflow will differ from standard Docker. The -driver option is ignored. All networks in VIC are of type *bridged*. Policies on these networks are administered with vSphere. This makes exposing containers to the external world much simpler. In the future, support for VMWare's NSX will be incorporated. 
For more information on VIC's networking model, please visit, https://github.com/vmware/vic/blob/master/doc/design/networking/MVPnetworking.md - -## volume create - -Much like network creation, volume creation also leverages the vSphere infrastructure. Volumes are just VMDKs created within a VCH. This gives VIC containers a lot of advantages. A container can run on any ESX server that is part of the VCH and it's data volume can be on another ESX server. The container and the data volume can be moved from one ESX server to another within the VCH, and they would be able to connect seamlessly. - -Shared volumes between containers will eventually be supported. Stay tune. - -VIC can also leverage a vSan infrastructure. - -## run - -Docker run is a composite operation for pull, create, start. As such, the parameter support is equivalent to that of docker create. diff --git a/lib/apiservers/engine/COMPOSE-COMPATIBILITY.md b/lib/apiservers/engine/COMPOSE-COMPATIBILITY.md deleted file mode 100644 index 20d02e9ece..0000000000 --- a/lib/apiservers/engine/COMPOSE-COMPATIBILITY.md +++ /dev/null @@ -1,60 +0,0 @@ -# Docker Compose Compatibility -Docker Compose is a tool to define an application consisting of multiple containers that interact with one another. The concept has some similarity with RKT's and Kubernete's pods. - -The way Compose works is by orchestrating the start of containers, attaching volumes, and attaching networks by calling the Docker Remote API. Since Compose uses the Remote API, VIC should support nearly every CLI operation and Compose file options. - -## Config File options - -These are the compose yml config file options support provided by VIC. 
See the following for Docker's compose file documentation: https://docs.docker.com/compose/compose-file/ - -|Compose yml config option|VIC Support| -|---|:---| -|build|No, *caveat| -|context|No, not until VIC support build| -|dockerfile|No, not until VIC support build| -|args|No, not until VIC support build| -|cap_add, cap_drop|?| -|command|Yes| -|cgroup_parent|No, VIC does not support cgroups| -|container_name|Yes| -|devices|No, a VIC container host is a VCH| -|depends_on|Yes| -|dns|Yes| -|dns_search|Yes| -|tmpfs|No, a VIC container host is a VCH| -|entrypoint|Yes| -|env_file|Yes| -|environment|Yes| -|expose|Yes| -|extends|Yes| -|external_links|Yes| -|extra_hosts|Yes| -|image|Yes| -|labels|Yes| -|links|Yes| -|logging|Yes| -|log_driver|Yes| -|log_opt|Yes| -|network_mode|Yes, *see below| -|networks|Yes| -|ipv4_address, ipv6_address|Yes| -|pid|No, a VIC container host is a VCH| -|ports|Yes| -|security_opt|Yes| -|stop_signal|Yes| -|ulimits|?| -|volumes, volume_driver|Yes, *see below| -|volumes_from|?| -|cpu_shares, cpu_quota, cpuset, domainname, hostname, ipc, mac_address, mem_limit, memswap_limit, privileged, read_only, restart, shm_size, stdin_open, tty, user, working_dir|?| -|Volume configuration reference: driver, driver_opts, external|Yes, *see below| -|Network configuration reference: driver, driver_opts, ipam, external|Yes, *see below| - -### network_mode, network_driver - -In VIC, all networks are currently *bridged* mode. Policy around access within those networks are managed via vSphere. This provides a much simpler model for administration. Networks can span multiple ESX hosts within a VCH. These networks can be applied to any container running within the VCH, regardless of which ESX host the container is currently running on. - -The network_mode option is ignored. network_driver option is also ignored. - -### volume_driver - -VIC provides only one volume driver. This option is ignored. 
Data volumes for VIC containers are VMDKs, and they can exist on any ESX host participating in a VCH. During volume creation, VIC will handle the mapping of these VMDKs to the path specified in the docker CLI call. The VIC container and data volume can be on the same ESX host or on different ESX host, as long as the ESX hosts participate in the VCH. This is much more flexible than the standard docker volumes. diff --git a/lib/apiservers/engine/README.md b/lib/apiservers/engine/README.md deleted file mode 100644 index b014e33667..0000000000 --- a/lib/apiservers/engine/README.md +++ /dev/null @@ -1,6 +0,0 @@ -## Introduction -The Docker Engine-API personality server is what VIC calls the server daemon that responds to Docker remote API calls. The primary caller is most likely the Docker CLI or a user using curl. - -The server, itself, builds on top of Docker's Engine-API project. This allows this component to be REST compatible with the Docker Daemon. Once the Engine-API rest server unmarshals the requests into golang structure, execution is handed off to a set of backend code. These backend code validates the request inputs and calls VIC's port layer server. - -VIC calls this a personality server because it translates Docker requests to VIC operations. The port layer server should not know anything about Docker. This allows VIC to add other personality servers in the future. 
diff --git a/lib/apiservers/engine/backends/archive.go b/lib/apiservers/engine/backends/archive.go index fa56fd072e..f173e49a90 100644 --- a/lib/apiservers/engine/backends/archive.go +++ b/lib/apiservers/engine/backends/archive.go @@ -32,6 +32,7 @@ import ( "github.com/vmware/vic/lib/apiservers/engine/backends/cache" viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" + "github.com/vmware/vic/lib/apiservers/engine/errors" "github.com/vmware/vic/lib/apiservers/engine/proxy" "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" vicarchive "github.com/vmware/vic/lib/archive" @@ -45,14 +46,14 @@ import ( // ContainerArchivePath creates an archive of the filesystem resource at the // specified path in the container identified by the given name. Returns a // tar archive of the resource and whether it was a directory or a single file. -func (c *Container) ContainerArchivePath(name string, path string) (io.ReadCloser, *types.ContainerPathStat, error) { +func (c *ContainerBackend) ContainerArchivePath(name string, path string) (io.ReadCloser, *types.ContainerPathStat, error) { defer trace.End(trace.Begin(name)) op := trace.NewOperation(context.Background(), "ContainerArchivePath: %s", name) path = "/" + strings.TrimPrefix(path, "/") vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return nil, nil, NotFoundError(name) + return nil, nil, errors.NotFoundError(name) } stat, err := c.ContainerStatPath(name, path) @@ -62,17 +63,17 @@ func (c *Container) ContainerArchivePath(name string, path string) (io.ReadClose reader, err := c.exportFromContainer(op, vc, path) if err != nil { - if IsResourceInUse(err) { + if errors.IsResourceInUse(err) { err = fmt.Errorf("ContainerArchivePath failed, resource in use: %s", err.Error()) } - return nil, nil, InternalServerError(err.Error()) + return nil, nil, errors.InternalServerError(err.Error()) } return reader, stat, nil } -func (c *Container) exportFromContainer(op trace.Operation, vc 
*viccontainer.VicContainer, path string) (io.ReadCloser, error) { - mounts := mountsFromContainer(vc) +func (c *ContainerBackend) exportFromContainer(op trace.Operation, vc *viccontainer.VicContainer, path string) (io.ReadCloser, error) { + mounts := proxy.MountsFromContainer(vc) mounts = append(mounts, types.MountPoint{Destination: "/"}) readerMap := NewArchiveStreamReaderMap(op, mounts, path) @@ -100,14 +101,14 @@ func (c *Container) exportFromContainer(op trace.Operation, vc *viccontainer.Vic // ContainerCopy performs a deprecated operation of archiving the resource at // the specified path in the container identified by the given name. -func (c *Container) ContainerCopy(name string, res string) (io.ReadCloser, error) { - return nil, fmt.Errorf("%s does not yet implement ContainerCopy", ProductName()) +func (c *ContainerBackend) ContainerCopy(name string, res string) (io.ReadCloser, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "ContainerCopy") } // ContainerExport writes the contents of the container to the given // writer. An error is returned if the container cannot be found. -func (c *Container) ContainerExport(name string, out io.Writer) error { - return fmt.Errorf("%s does not yet implement ContainerExport", ProductName()) +func (c *ContainerBackend) ContainerExport(name string, out io.Writer) error { + return errors.APINotSupportedMsg(ProductName(), "ContainerExport") } // ContainerExtractToDir extracts the given archive to the specified location @@ -116,7 +117,7 @@ func (c *Container) ContainerExport(name string, out io.Writer) error { // be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will // be an error if unpacking the given content would cause an existing directory // to be replaced with a non-directory and vice versa. 
-func (c *Container) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { +func (c *ContainerBackend) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { defer trace.End(trace.Begin(name)) op := trace.NewOperation(context.Background(), "ContainerExtractToDir: %s", name) @@ -124,11 +125,11 @@ func (c *Container) ContainerExtractToDir(name, path string, noOverwriteDirNonDi vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return errors.NotFoundError(name) } err := c.importToContainer(op, vc, path, content) - if err != nil && IsResourceInUse(err) { + if err != nil && errors.IsResourceInUse(err) { op.Errorf("ContainerExtractToDir failed, resource in use: %s", err.Error()) err = fmt.Errorf("Resource in use") @@ -137,15 +138,15 @@ func (c *Container) ContainerExtractToDir(name, path string, noOverwriteDirNonDi return err } -func (c *Container) importToContainer(op trace.Operation, vc *viccontainer.VicContainer, target string, content io.Reader) (err error) { +func (c *ContainerBackend) importToContainer(op trace.Operation, vc *viccontainer.VicContainer, target string, content io.Reader) (err error) { rawReader, err := archive.DecompressStream(content) if err != nil { op.Errorf("Input tar stream to ContainerExtractToDir not recognized: %s", err.Error()) - return StreamFormatNotRecognized() + return errors.StreamFormatNotRecognized() } tarReader := tar.NewReader(rawReader) - mounts := mountsFromContainer(vc) + mounts := proxy.MountsFromContainer(vc) mounts = append(mounts, types.MountPoint{Destination: "/"}) writerMap := NewArchiveStreamWriterMap(op, mounts, target) defer func() { @@ -193,7 +194,7 @@ func (c *Container) importToContainer(op trace.Operation, vc *viccontainer.VicCo // ContainerStatPath stats the filesystem resource at the specified path in the // container identified by the given name. 
-func (c *Container) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { +func (c *ContainerBackend) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { defer trace.End(trace.Begin(name)) op := trace.NewOperation(context.Background(), "ContainerStatPath: %s", name) @@ -201,7 +202,7 @@ func (c *Container) ContainerStatPath(name string, path string) (stat *types.Con vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return nil, NotFoundError(name) + return nil, errors.NotFoundError(name) } // trim / and . off from path and then append / to ensure the format is correct @@ -210,7 +211,7 @@ func (c *Container) ContainerStatPath(name string, path string) (stat *types.Con path = "/" + path } - mounts := mountsFromContainer(vc) + mounts := proxy.MountsFromContainer(vc) mounts = append(mounts, types.MountPoint{Destination: "/"}) // handle the special case of targeting a volume mount point before it exists. @@ -235,16 +236,16 @@ func (c *Container) ContainerStatPath(name string, path string) (stat *types.Con store = constants.VolumeStoreName } - stat, err = c.containerProxy.StatPath(op, store, deviceID, fs) + stat, err = archiveProxy.StatPath(op, store, deviceID, fs) if err != nil { op.Errorf("error getting statpath: %s", err.Error()) switch err := err.(type) { case *storage.StatPathNotFound: - return nil, ResourceNotFoundError(vc.Name, "file or directory") + return nil, errors.ContainerResourceNotFoundError(vc.Name, "file or directory") case *storage.StatPathUnprocessableEntity: - return nil, InternalServerError("failed to process given path") + return nil, errors.InternalServerError("failed to process given path") default: - return nil, InternalServerError(err.Error()) + return nil, errors.InternalServerError(err.Error()) } } diff --git a/lib/apiservers/engine/backends/backends.go b/lib/apiservers/engine/backends/backends.go index 1d946a9a05..7d3d277204 100644 --- 
a/lib/apiservers/engine/backends/backends.go +++ b/lib/apiservers/engine/backends/backends.go @@ -33,7 +33,8 @@ import ( "github.com/vmware/vic/lib/apiservers/engine/backends/cache" "github.com/vmware/vic/lib/apiservers/engine/backends/container" - vicproxy "github.com/vmware/vic/lib/apiservers/engine/proxy" + "github.com/vmware/vic/lib/apiservers/engine/network" + "github.com/vmware/vic/lib/apiservers/engine/proxy" apiclient "github.com/vmware/vic/lib/apiservers/portlayer/client" "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" "github.com/vmware/vic/lib/apiservers/portlayer/client/misc" @@ -69,7 +70,7 @@ var ( vchConfig *dynConfig RegistryCertPool *x509.CertPool - archiveProxy vicproxy.VicArchiveProxy + archiveProxy proxy.VicArchiveProxy eventService *events.Events @@ -142,7 +143,7 @@ func Init(portLayerAddr, product string, port uint, config *config.VirtualContai return err } - archiveProxy = vicproxy.NewArchiveProxy(portLayerClient) + archiveProxy = proxy.NewArchiveProxy(portLayerClient) eventService = events.New() @@ -283,7 +284,7 @@ func syncContainerCache() error { log.Debugf("Updating container cache") backend := NewContainerBackend() - client := backend.containerProxy.Client() + client := PortLayerClient() reqParams := containers.NewGetContainerListParamsWithContext(ctx).WithAll(swag.Bool(true)) containme, err := client.Containers.GetContainerList(reqParams) @@ -295,7 +296,7 @@ func syncContainerCache() error { cc := cache.ContainerCache() var errs []string for _, info := range containme.Payload { - container := ContainerInfoToVicContainer(*info) + container := proxy.ContainerInfoToVicContainer(*info, portLayerName) cc.AddContainer(container) if err = setPortMapping(info, backend, container); err != nil { errs = append(errs, err.Error()) @@ -308,7 +309,7 @@ func syncContainerCache() error { return nil } -func setPortMapping(info *models.ContainerInfo, backend *Container, container *container.VicContainer) error { +func setPortMapping(info 
*models.ContainerInfo, backend *ContainerBackend, container *container.VicContainer) error { if info.ContainerConfig.State == "" { log.Infof("container state is nil") return nil @@ -320,7 +321,7 @@ func setPortMapping(info *models.ContainerInfo, backend *Container, container *c } log.Debugf("Set port mapping for container %q, portmapping %+v", container.Name, container.HostConfig.PortBindings) - client := backend.containerProxy.Client() + client := PortLayerClient() endpointsOK, err := client.Scopes.GetContainerEndpoints( scopes.NewGetContainerEndpointsParamsWithContext(ctx).WithHandleOrID(container.ContainerID)) if err != nil { @@ -328,7 +329,7 @@ func setPortMapping(info *models.ContainerInfo, backend *Container, container *c } for _, e := range endpointsOK.Payload { if len(e.Ports) > 0 && e.Scope == constants.BridgeScopeType { - if err = MapPorts(container, e, container.ContainerID); err != nil { + if err = network.MapPorts(container, e, container.ContainerID); err != nil { log.Errorf(err.Error()) return err } diff --git a/lib/apiservers/engine/backends/build.go b/lib/apiservers/engine/backends/build.go index 06ffd0b16a..f8a32a1420 100644 --- a/lib/apiservers/engine/backends/build.go +++ b/lib/apiservers/engine/backends/build.go @@ -15,17 +15,18 @@ package backends import ( - "fmt" "io" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "golang.org/x/net/context" + + "github.com/vmware/vic/lib/apiservers/engine/errors" ) type Builder struct { } func (b *Builder) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { - return "", fmt.Errorf("%s does not yet implement BuildFromContext", ProductName()) + return "", errors.APINotSupportedMsg(ProductName(), "BuildFromContext") } diff --git a/lib/apiservers/engine/backends/checkpoint.go b/lib/apiservers/engine/backends/checkpoint.go index 23488e32b2..ab88fe6a2c 100644 --- 
a/lib/apiservers/engine/backends/checkpoint.go +++ b/lib/apiservers/engine/backends/checkpoint.go @@ -15,26 +15,26 @@ package backends import ( - "fmt" - "github.com/docker/docker/api/types" + + "github.com/vmware/vic/lib/apiservers/engine/errors" ) -type Checkpoint struct { +type CheckpointBackend struct { } -func NewCheckpointBackend() *Checkpoint { - return &Checkpoint{} +func NewCheckpointBackend() *CheckpointBackend { + return &CheckpointBackend{} } -func (c *Checkpoint) CheckpointCreate(container string, config types.CheckpointCreateOptions) error { - return fmt.Errorf("%s does not yet implement checkpointing", ProductName()) +func (c *CheckpointBackend) CheckpointCreate(container string, config types.CheckpointCreateOptions) error { + return errors.APINotSupportedMsg(ProductName(), "checkpointing") } -func (c *Checkpoint) CheckpointDelete(container string, config types.CheckpointDeleteOptions) error { - return fmt.Errorf("%s does not yet implement checkpointing", ProductName()) +func (c *CheckpointBackend) CheckpointDelete(container string, config types.CheckpointDeleteOptions) error { + return errors.APINotSupportedMsg(ProductName(), "checkpointing") } -func (c *Checkpoint) CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { - return nil, fmt.Errorf("%s does not yet implement checkpointing", ProductName()) +func (c *CheckpointBackend) CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "checkpointing") } diff --git a/lib/apiservers/engine/backends/commit.go b/lib/apiservers/engine/backends/commit.go index 8318453888..1e150e8ae7 100644 --- a/lib/apiservers/engine/backends/commit.go +++ b/lib/apiservers/engine/backends/commit.go @@ -45,6 +45,7 @@ import ( "github.com/docker/docker/reference" "github.com/vmware/vic/lib/apiservers/engine/backends/cache" + "github.com/vmware/vic/lib/apiservers/engine/errors" 
"github.com/vmware/vic/lib/apiservers/portlayer/models" "github.com/vmware/vic/lib/constants" "github.com/vmware/vic/lib/imagec" @@ -55,26 +56,26 @@ import ( // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository. -func (i *Image) Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) { +func (i *ImageBackend) Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) { defer trace.End(trace.Begin(name)) op := trace.NewOperation(context.Background(), "Commit") // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return "", NotFoundError(name) + return "", errors.NotFoundError(name) } // get container info c, err := containerEngine.ContainerInspect(name, false, "") if err != nil { - return "", InternalServerError(err.Error()) + return "", errors.InternalServerError(err.Error()) } container, ok := c.(*types.ContainerJSON) if !ok { - return "", InternalServerError(fmt.Sprintf("Container type assertion failed")) + return "", errors.InternalServerError(fmt.Sprintf("Container type assertion failed")) } if container.State.Running || container.State.Restarting { - return "", ConflictError(fmt.Sprintf("%s does not yet support commit of a running container", ProductName())) + return "", errors.ConflictError(fmt.Sprintf("%s does not yet support commit of a running container", ProductName())) } // TODO: pause container after container.Pause is implemented newConfig, err := dockerfile.BuildFromConfig(config.Config, config.Changes) @@ -92,7 +93,7 @@ func (i *Image) Commit(name string, config *backend.ContainerCommitConfig) (imag return "", err } - rc, err := containerEngine.containerProxy.GetContainerChanges(op, vc, true) + rc, err := containerEngine.GetContainerChanges(op, vc, true) if err != nil { return "", fmt.Errorf("Unable to initialize export stream reader 
for container %s", name) } @@ -122,7 +123,7 @@ func (i *Image) Commit(name string, config *backend.ContainerCommitConfig) (imag for pl := lm.Parent; pl != constants.ScratchLayerID; pl = lm.Parent { // populate manifest layer with existing cached data if lm, err = imagec.LayerCache().Get(pl); err != nil { - return "", InternalServerError(fmt.Sprintf("Failed to get parent image layer %s: %s", pl, err)) + return "", errors.InternalServerError(fmt.Sprintf("Failed to get parent image layer %s: %s", pl, err)) } layers = append(layers, lm) } @@ -206,7 +207,7 @@ func setLayerConfig(lm *imagec.ImageWithMeta, container *types.ContainerJSON, co // the system (if run standalone) host, err := sys.UUID() if err != nil { - return InternalServerError(fmt.Sprintf("Failed to get host name: %s", err)) + return errors.InternalServerError(fmt.Sprintf("Failed to get host name: %s", err)) } if host != "" { @@ -231,7 +232,7 @@ func setLayerConfig(lm *imagec.ImageWithMeta, container *types.ContainerJSON, co m, err := json.Marshal(meta) if err != nil { - return InternalServerError(fmt.Sprintf("Failed to marshal image layer config: %s", err)) + return errors.InternalServerError(fmt.Sprintf("Failed to marshal image layer config: %s", err)) } // layer metadata lm.Meta = string(m) diff --git a/lib/apiservers/engine/backends/container.go b/lib/apiservers/engine/backends/container.go index 287379438d..2e97b98609 100644 --- a/lib/apiservers/engine/backends/container.go +++ b/lib/apiservers/engine/backends/container.go @@ -19,9 +19,7 @@ import ( "fmt" "io" "math/rand" - "net" "net/http" - "os" "path/filepath" "strconv" "strings" @@ -47,27 +45,26 @@ import ( "github.com/docker/docker/utils" "github.com/docker/go-connections/nat" "github.com/docker/go-units" - "github.com/docker/libnetwork/iptables" - "github.com/docker/libnetwork/portallocator" - "github.com/vishvananda/netlink" "github.com/vmware/vic/lib/apiservers/engine/backends/cache" viccontainer 
"github.com/vmware/vic/lib/apiservers/engine/backends/container" "github.com/vmware/vic/lib/apiservers/engine/backends/convert" "github.com/vmware/vic/lib/apiservers/engine/backends/filter" - "github.com/vmware/vic/lib/apiservers/engine/backends/portmap" + engerr "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/engine/network" + "github.com/vmware/vic/lib/apiservers/engine/proxy" "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" "github.com/vmware/vic/lib/apiservers/portlayer/client/scopes" "github.com/vmware/vic/lib/apiservers/portlayer/client/tasks" "github.com/vmware/vic/lib/apiservers/portlayer/models" "github.com/vmware/vic/lib/archive" - "github.com/vmware/vic/lib/config/executor" "github.com/vmware/vic/lib/constants" "github.com/vmware/vic/lib/metadata" "github.com/vmware/vic/pkg/errors" "github.com/vmware/vic/pkg/retry" "github.com/vmware/vic/pkg/trace" "github.com/vmware/vic/pkg/uid" + "github.com/vmware/vic/pkg/vsphere/sys" ) // valid filters as of docker commit 49bf474 @@ -96,7 +93,7 @@ var unSupportedPsFilters = map[string]bool{ } const ( - bridgeIfaceName = "bridge" + //bridgeIfaceName = "bridge" // MemoryAlignMB is the value to which container VM memory must align in order for hotadd to work MemoryAlignMB = 128 @@ -116,23 +113,11 @@ const ( ) var ( - publicIfaceName = "public" - defaultScope struct { sync.Mutex scope string } - portMapper portmap.PortMapper - - // bridge-to-bridge rules, indexed by mapped port; - // this map is used to delete the rule once - // the container stops or is removed - btbRules map[string][]string - - cbpLock sync.Mutex - containerByPort map[string]string // port:containerID - ctx = context.TODO() // allow mocking @@ -140,22 +125,6 @@ var ( ) func init() { - portMapper = portmap.NewPortMapper() - btbRules = make(map[string][]string) - containerByPort = make(map[string]string) - - l, err := netlink.LinkByName(publicIfaceName) - if l == nil { - l, err = 
netlink.LinkByAlias(publicIfaceName) - if err != nil { - log.Errorf("interface %s not found", publicIfaceName) - return - } - } - - // don't use interface alias for iptables rules - publicIfaceName = l.Attrs().Name - // seed the random number generator rand.Seed(time.Now().UTC().UnixNano()) } @@ -167,19 +136,23 @@ func (r containerByCreated) Len() int { return len(r) } func (r containerByCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r containerByCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } -var containerEngine *Container +var containerEngine *ContainerBackend var once sync.Once // Container struct represents the Container -type Container struct { - containerProxy VicContainerProxy +type ContainerBackend struct { + containerProxy proxy.VicContainerProxy + streamProxy proxy.VicStreamProxy + storageProxy proxy.VicStorageProxy } // NewContainerBackend will create a new containerEngine or return the existing -func NewContainerBackend() *Container { +func NewContainerBackend() *ContainerBackend { once.Do(func() { - containerEngine = &Container{ - containerProxy: NewContainerProxy(PortLayerClient(), PortLayerServer(), PortLayerName()), + containerEngine = &ContainerBackend{ + containerProxy: proxy.NewContainerProxy(PortLayerClient(), PortLayerServer(), PortLayerName()), + streamProxy: proxy.NewStreamProxy(PortLayerClient()), + storageProxy: proxy.NewStorageProxy(PortLayerClient()), } }) return containerEngine @@ -189,29 +162,13 @@ const ( defaultEnvPath = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ) -func (c *Container) Handle(id, name string) (string, error) { - resp, err := c.containerProxy.Client().Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(id)) - if err != nil { - switch err := err.(type) { - case *containers.GetNotFound: - cache.ContainerCache().DeleteContainer(id) - return "", NotFoundError(name) - case *containers.GetDefault: - return "", InternalServerError(err.Payload.Message) - default: - 
return "", InternalServerError(err.Error()) - } - } - return resp.Payload, nil -} - // docker's container.execBackend -func (c *Container) TaskInspect(cid, cname, eid string) (*models.TaskInspectResponse, error) { +func (c *ContainerBackend) TaskInspect(cid, cname, eid string) (*models.TaskInspectResponse, error) { // obtain a portlayer client - client := c.containerProxy.Client() + client := PortLayerClient() - handle, err := c.Handle(cid, cname) + handle, err := c.containerProxy.Handle(ctx, cid, cname) if err != nil { return nil, err } @@ -231,11 +188,11 @@ func (c *Container) TaskInspect(cid, cname, eid string) (*models.TaskInspectResp } -func (c *Container) TaskWaitToStart(cid, cname, eid string) error { +func (c *ContainerBackend) TaskWaitToStart(cid, cname, eid string) error { // obtain a portlayer client - client := c.containerProxy.Client() + client := PortLayerClient() - handle, err := c.Handle(cid, cname) + handle, err := c.containerProxy.Handle(ctx, cid, cname) if err != nil { return err } @@ -251,9 +208,9 @@ func (c *Container) TaskWaitToStart(cid, cname, eid string) error { if err != nil { switch err := err.(type) { case *tasks.WaitInternalServerError: - return InternalServerError(err.Payload.Message) + return engerr.InternalServerError(err.Payload.Message) default: - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } @@ -261,47 +218,47 @@ func (c *Container) TaskWaitToStart(cid, cname, eid string) error { } // ContainerExecCreate sets up an exec in a running container. 
-func (c *Container) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { +func (c *ContainerBackend) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { op := trace.NewOperation(context.TODO(), "") defer trace.End(trace.Begin(fmt.Sprintf("%s: name=(%s)", op, name))) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return "", NotFoundError(name) + return "", engerr.NotFoundError(name) } id := vc.ContainerID // Is it running? - state, err := c.containerProxy.State(vc) + state, err := c.containerProxy.State(op, vc) if err != nil { - return "", InternalServerError(err.Error()) + return "", engerr.InternalServerError(err.Error()) } if state.Restarting { - return "", ConflictError(fmt.Sprintf("Container %s is restarting, wait until the container is running", id)) + return "", engerr.ConflictError(fmt.Sprintf("Container %s is restarting, wait until the container is running", id)) } if !state.Running { - return "", ConflictError(fmt.Sprintf("Container %s is not running", id)) + return "", engerr.ConflictError(fmt.Sprintf("Container %s is not running", id)) } op.Debugf("State checks succeeded for exec operation on cotnainer(%s)", id) - handle, err := c.Handle(id, name) + handle, err := c.containerProxy.Handle(op, id, name) if err != nil { op.Error(err) - return "", InternalServerError(err.Error()) + return "", engerr.InternalServerError(err.Error()) } // set up the environment config.Env = setEnvFromImageConfig(config.Tty, config.Env, vc.Config.Env) - handleprime, eid, err := c.containerProxy.CreateExecTask(handle, config) + handleprime, eid, err := c.containerProxy.CreateExecTask(ctx, handle, config) if err != nil { op.Errorf("Failed to create exec task for container(%s) due to error(%s)", id, err) - return "", InternalServerError(err.Error()) + return "", engerr.InternalServerError(err.Error()) } - err = 
c.containerProxy.CommitContainerHandle(handleprime, id, 0) + err = c.containerProxy.CommitContainerHandle(ctx, handleprime, id, 0) if err != nil { op.Errorf("Failed to commit exec handle for container(%s) due to error(%s)", id, err) return "", err @@ -315,12 +272,12 @@ func (c *Container) ContainerExecCreate(name string, config *types.ExecConfig) ( switch err := err.(type) { case *tasks.InspectInternalServerError: op.Debugf("received an internal server error during task inspect: %s", err.Payload.Message) - return "", InternalServerError(err.Payload.Message) + return "", engerr.InternalServerError(err.Payload.Message) case *tasks.InspectConflict: op.Debugf("received a conflict error during task inspect: %s", err.Payload.Message) - return "", ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) + return "", engerr.ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) default: - return "", InternalServerError(err.Error()) + return "", engerr.InternalServerError(err.Error()) } } @@ -334,14 +291,14 @@ func (c *Container) ContainerExecCreate(name string, config *types.ExecConfig) ( // ContainerExecInspect returns low-level information about the exec // command. An error is returned if the exec cannot be found. 
-func (c *Container) ContainerExecInspect(eid string) (*backend.ExecInspect, error) { +func (c *ContainerBackend) ContainerExecInspect(eid string) (*backend.ExecInspect, error) { op := trace.NewOperation(context.TODO(), "") defer trace.End(trace.Begin(fmt.Sprintf("opID=(%s) eid=(%s)", op, eid))) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainerFromExec(eid) if vc == nil { - return nil, NotFoundError(eid) + return nil, engerr.NotFoundError(eid) } id := vc.ContainerID name := vc.Name @@ -351,12 +308,12 @@ func (c *Container) ContainerExecInspect(eid string) (*backend.ExecInspect, erro switch err := err.(type) { case *tasks.InspectInternalServerError: op.Debugf("received an internal server error during task inspect: %s", err.Payload.Message) - return nil, InternalServerError(err.Payload.Message) + return nil, engerr.InternalServerError(err.Payload.Message) case *tasks.InspectConflict: op.Debugf("received a conflict error during task inspect: %s", err.Payload.Message) - return nil, ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) + return nil, engerr.ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) default: - return nil, InternalServerError(err.Error()) + return nil, engerr.InternalServerError(err.Error()) } } @@ -382,13 +339,13 @@ func (c *Container) ContainerExecInspect(eid string) (*backend.ExecInspect, erro // ContainerExecResize changes the size of the TTY of the process // running in the exec with the given name to the given height and // width. 
-func (c *Container) ContainerExecResize(eid string, height, width int) error { +func (c *ContainerBackend) ContainerExecResize(eid string, height, width int) error { defer trace.End(trace.Begin(eid)) // Look up the container eid in the metadata cache to get long ID vc := cache.ContainerCache().GetContainerFromExec(eid) if vc == nil { - return NotFoundError(eid) + return engerr.NotFoundError(eid) } // Call the port layer to resize @@ -396,7 +353,7 @@ func (c *Container) ContainerExecResize(eid string, height, width int) error { plWidth := int32(width) var err error - if err = c.containerProxy.Resize(eid, plHeight, plWidth); err == nil { + if err = c.containerProxy.Resize(ctx, eid, plHeight, plWidth); err == nil { actor := CreateContainerEventActorWithAttributes(vc, map[string]string{ "height": fmt.Sprintf("%d", height), "width": fmt.Sprintf("%d", width), @@ -410,14 +367,14 @@ func (c *Container) ContainerExecResize(eid string, height, width int) error { // ContainerExecStart starts a previously set up exec instance. The // std streams are set up. 
-func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { +func (c *ContainerBackend) ContainerExecStart(ctx context.Context, eid string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { op := trace.NewOperation(ctx, "") defer trace.End(trace.Begin(fmt.Sprintf("opID=(%s) eid=(%s)", op, eid))) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainerFromExec(eid) if vc == nil { - return NotFoundError(eid) + return engerr.NotFoundError(eid) } id := vc.ContainerID name := vc.Name @@ -428,19 +385,19 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io switch err := err.(type) { case *tasks.InspectInternalServerError: op.Debugf("received an internal server error during task inspect: %s", err.Payload.Message) - return InternalServerError(err.Payload.Message) + return engerr.InternalServerError(err.Payload.Message) case *tasks.InspectConflict: op.Debugf("received a conflict error during task inspect: %s", err.Payload.Message) - return ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) + return engerr.ConflictError(fmt.Sprintf("Cannot complete the operation, container %s has been powered off during execution", id)) default: - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } - handle, err := c.Handle(id, name) + handle, err := c.containerProxy.Handle(ctx, id, name) if err != nil { op.Errorf("Failed to obtain handle during exec start for container(%s) due to error: %s", id, err) - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } bindconfig := &models.TaskBindConfig{ @@ -449,14 +406,14 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io } // obtain a portlayer client - client := c.containerProxy.Client() + client := 
PortLayerClient() // call Bind with bindparams bindparams := tasks.NewBindParamsWithContext(ctx).WithConfig(bindconfig) resp, err := client.Tasks.Bind(bindparams) if err != nil { op.Errorf("Failed to bind parameters during exec start for container(%s) due to error: %s", id, err) - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } handle = resp.Payload.Handle.(string) @@ -464,14 +421,14 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io // exec doesn't have separate attach path so we will decide whether we need interaction/runblocking or not attach := ec.OpenStdin || ec.OpenStdout || ec.OpenStderr if attach { - handle, err = c.containerProxy.BindInteraction(handle, name, eid) + handle, err = c.containerProxy.BindInteraction(ctx, handle, name, eid) if err != nil { op.Errorf("Failed to initiate interactivity during exec start for container(%s) due to error: %s", id, err) return err } } - if err := c.containerProxy.CommitContainerHandle(handle, name, 0); err != nil { + if err := c.containerProxy.CommitContainerHandle(ctx, handle, name, 0); err != nil { op.Errorf("Failed to commit handle for container(%s) due to error: %s", id, err) return err } @@ -519,16 +476,16 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io ca.UseStderr = false } - ac := &AttachConfig{ + ac := &proxy.AttachConfig{ ID: eid, ContainerAttachConfig: ca, UseTty: ec.Tty, CloseStdin: true, } - err = c.containerProxy.AttachStreams(ctx, ac, stdin, stdout, stderr) + err = c.streamProxy.AttachStreams(ctx, ac, stdin, stdout, stderr) if err != nil { - if _, ok := err.(DetachError); ok { + if _, ok := err.(engerr.DetachError); ok { op.Infof("Detach detected, tearing down connection") // QUESTION: why are we returning DetachError? 
It doesn't seem like an error @@ -546,7 +503,7 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io } return nil } - if err := retry.Do(operation, IsConflictError); err != nil { + if err := retry.Do(operation, engerr.IsConflictError); err != nil { op.Errorf("Failed to start Exec task for container(%s) due to error (%s)", id, err) return err } @@ -555,22 +512,25 @@ func (c *Container) ContainerExecStart(ctx context.Context, eid string, stdin io // ExecExists looks up the exec instance and returns a bool if it exists or not. // It will also return the error produced by `getConfig` -func (c *Container) ExecExists(eid string) (bool, error) { +func (c *ContainerBackend) ExecExists(eid string) (bool, error) { defer trace.End(trace.Begin(eid)) vc := cache.ContainerCache().GetContainerFromExec(eid) if vc == nil { - return false, NotFoundError(eid) + return false, engerr.NotFoundError(eid) } return true, nil } // ContainerCreate creates a container. -func (c *Container) ContainerCreate(config types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { +func (c *ContainerBackend) ContainerCreate(config types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { defer trace.End(trace.Begin("")) var err error + log.Infof("** createconfig = %#v", config) + log.Infof("** container config = %#v", config.Config) + // get the image from the cache image, err := cache.ImageCache().Get(config.Config.Image) if err != nil { @@ -642,44 +602,44 @@ func (c *Container) ContainerCreate(config types.ContainerCreateConfig) (contain // // returns: // (container id, error) -func (c *Container) containerCreate(vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, error) { +func (c *ContainerBackend) containerCreate(vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, error) { defer trace.End(trace.Begin("Container.containerCreate")) if vc == nil { - return "", 
InternalServerError("Failed to create container") + return "", engerr.InternalServerError("Failed to create container") } - id, h, err := c.containerProxy.CreateContainerHandle(vc, config) + id, h, err := c.containerProxy.CreateContainerHandle(ctx, vc, config) if err != nil { return "", err } - h, err = c.containerProxy.CreateContainerTask(h, id, config) + h, err = c.containerProxy.CreateContainerTask(ctx, h, id, config) if err != nil { return "", err } - h, err = c.containerProxy.AddContainerToScope(h, config) + h, err = c.containerProxy.AddContainerToScope(ctx, h, config) if err != nil { return id, err } - h, err = c.containerProxy.AddInteractionToContainer(h, config) + h, err = c.containerProxy.AddInteractionToContainer(ctx, h, config) if err != nil { return id, err } - h, err = c.containerProxy.AddLoggingToContainer(h, config) + h, err = c.containerProxy.AddLoggingToContainer(ctx, h, config) if err != nil { return id, err } - h, err = c.containerProxy.AddVolumesToContainer(h, config) + h, err = c.storageProxy.AddVolumesToContainer(ctx, h, config) if err != nil { return id, err } - err = c.containerProxy.CommitContainerHandle(h, id, -1) + err = c.containerProxy.CommitContainerHandle(ctx, h, id, -1) if err != nil { return id, err } @@ -691,16 +651,16 @@ func (c *Container) containerCreate(vc *viccontainer.VicContainer, config types. // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. // If a signal is given, then just send it to the container and return. 
-func (c *Container) ContainerKill(name string, sig uint64) error { +func (c *ContainerBackend) ContainerKill(name string, sig uint64) error { defer trace.End(trace.Begin(fmt.Sprintf("%s, %d", name, sig))) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } - err := c.containerProxy.Signal(vc, sig) + err := c.containerProxy.Signal(ctx, vc, sig) if err == nil { actor := CreateContainerEventActorWithAttributes(vc, map[string]string{"signal": fmt.Sprintf("%d", sig)}) @@ -712,19 +672,19 @@ func (c *Container) ContainerKill(name string, sig uint64) error { } // ContainerPause pauses a container -func (c *Container) ContainerPause(name string) error { - return fmt.Errorf("%s does not yet implement ContainerPause", ProductName()) +func (c *ContainerBackend) ContainerPause(name string) error { + return engerr.APINotSupportedMsg(ProductName(), "ContainerPause") } // ContainerResize changes the size of the TTY of the process running // in the container with the given name to the given height and width. 
-func (c *Container) ContainerResize(name string, height, width int) error { +func (c *ContainerBackend) ContainerResize(name string, height, width int) error { defer trace.End(trace.Begin(name)) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } // Call the port layer to resize @@ -732,7 +692,7 @@ func (c *Container) ContainerResize(name string, height, width int) error { plWidth := int32(width) var err error - if err = c.containerProxy.Resize(vc.ContainerID, plHeight, plWidth); err == nil { + if err = c.containerProxy.Resize(ctx, vc.ContainerID, plHeight, plWidth); err == nil { actor := CreateContainerEventActorWithAttributes(vc, map[string]string{ "height": fmt.Sprintf("%d", height), "width": fmt.Sprintf("%d", width), @@ -750,28 +710,28 @@ func (c *Container) ContainerResize(name string, height, width int) error { // timeout, ContainerRestart will wait forever until a graceful // stop. Returns an error if the container cannot be found, or if // there is an underlying error at any stage of the restart. 
-func (c *Container) ContainerRestart(name string, seconds *int) error { +func (c *ContainerBackend) ContainerRestart(name string, seconds *int) error { op := trace.NewOperation(context.Background(), "ContainerRestart - %s", name) defer trace.End(trace.Begin(name, op)) // Look up the container name in the metadata cache ot get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } operation := func() error { - return c.containerProxy.Stop(vc, name, seconds, false) + return c.containerProxy.Stop(ctx, vc, name, seconds, false) } - if err := retry.Do(operation, IsConflictError); err != nil { - return InternalServerError(fmt.Sprintf("Stop failed with: %s", err)) + if err := retry.Do(operation, engerr.IsConflictError); err != nil { + return engerr.InternalServerError(fmt.Sprintf("Stop failed with: %s", err)) } operation = func() error { return c.containerStart(op, name, nil, true) } - if err := retry.Do(operation, IsConflictError); err != nil { - return InternalServerError(fmt.Sprintf("Start failed with: %s", err)) + if err := retry.Do(operation, engerr.IsConflictError); err != nil { + return engerr.InternalServerError(fmt.Sprintf("Start failed with: %s", err)) } actor := CreateContainerEventActorWithAttributes(vc, map[string]string{}) @@ -783,14 +743,14 @@ func (c *Container) ContainerRestart(name string, seconds *int) error { // ContainerRm removes the container id from the filesystem. An error // is returned if the container is not found, or if the remove // fails. If the remove succeeds, the container name is released, and -// network links are removed. -func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) error { +// vicnetwork links are removed. 
+func (c *ContainerBackend) ContainerRm(name string, config *types.ContainerRmConfig) error { defer trace.End(trace.Begin(name)) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } id := vc.ContainerID secs := 0 @@ -802,33 +762,33 @@ func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) er return err } } else { - state, err := c.containerProxy.State(vc) + state, err := c.containerProxy.State(ctx, vc) if err != nil { - if IsNotFoundError(err) { + if engerr.IsNotFoundError(err) { // remove container from persona cache, but don't return error to the user cache.ContainerCache().DeleteContainer(id) return nil } - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } switch state.Status { - case ContainerError: + case proxy.ContainerError: // force stop if container state is error to make sure container is deletable later - c.containerProxy.Stop(vc, name, &secs, true) + c.containerProxy.Stop(ctx, vc, name, &secs, true) case "Starting": // if we are starting let the user know they must use the force return derr.NewRequestConflictError(fmt.Errorf("The container is starting. To remove use -f")) - case ContainerRunning: + case proxy.ContainerRunning: running = true } - handle, err := c.Handle(id, name) + handle, err := c.containerProxy.Handle(ctx, id, name) if err != nil { return err } - _, err = c.containerProxy.UnbindContainerFromNetwork(vc, handle) + _, err = c.containerProxy.UnbindContainerFromNetwork(ctx, vc, handle) if err != nil { return err } @@ -838,26 +798,24 @@ func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) er // once to prevent retries from degrading performance. 
if !running { operation := func() error { - return c.containerProxy.Remove(vc, config) + return c.containerProxy.Remove(ctx, vc, config) } - return retry.Do(operation, IsConflictError) + return retry.Do(operation, engerr.IsConflictError) } - return c.containerProxy.Remove(vc, config) + return c.containerProxy.Remove(ctx, vc, config) } // cleanupPortBindings gets port bindings for the container and // unmaps ports if the cVM that previously bound them isn't powered on -func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { +func (c *ContainerBackend) cleanupPortBindings(vc *viccontainer.VicContainer) error { defer trace.End(trace.Begin(vc.ContainerID)) for ctrPort, hostPorts := range vc.HostConfig.PortBindings { for _, hostPort := range hostPorts { hPort := hostPort.HostPort - cbpLock.Lock() - mappedCtr, mapped := containerByPort[hPort] - cbpLock.Unlock() + mappedCtr, mapped := network.ContainerWithPort(hPort) if !mapped { continue } @@ -870,9 +828,9 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { // port bindings were cleaned up by another operation. continue } - state, err := c.containerProxy.State(cc) + state, err := c.containerProxy.State(ctx, cc) if err != nil { - if IsNotFoundError(err) { + if engerr.IsNotFoundError(err) { log.Debugf("container(%s) not found in portLayer, removing from persona cache", cc.ContainerID) // we have a container in the persona cache, but it's been removed from the portLayer // which is the source of truth -- so remove from the persona cache after this func @@ -880,7 +838,7 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { defer cache.ContainerCache().DeleteContainer(cc.ContainerID) } else { // we have issues of an unknown variety...return.. 
- return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } @@ -890,7 +848,7 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { } log.Debugf("Unmapping ports for powered off / removed container %q", mappedCtr) - err = UnmapPorts(cc.ContainerID, vc) + err = network.UnmapPorts(cc.ContainerID, vc) if err != nil { return fmt.Errorf("Failed to unmap host port %s for container %q: %s", hPort, mappedCtr, err) @@ -901,14 +859,14 @@ func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error { } // ContainerStart starts a container. -func (c *Container) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { +func (c *ContainerBackend) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { op := trace.NewOperation(context.Background(), "ContainerStart - %s", name) defer trace.End(trace.Begin(name, op)) operation := func() error { return c.containerStart(op, name, hostConfig, true) } - if err := retry.Do(operation, IsConflictError); err != nil { + if err := retry.Do(operation, engerr.IsConflictError); err != nil { op.Debugf("Container start failed due to error - %s", err.Error()) return err } @@ -916,19 +874,19 @@ func (c *Container) ContainerStart(name string, hostConfig *containertypes.HostC return nil } -func (c *Container) containerStart(op trace.Operation, name string, hostConfig *containertypes.HostConfig, bind bool) error { +func (c *ContainerBackend) containerStart(op trace.Operation, name string, hostConfig *containertypes.HostConfig, bind bool) error { var err error // Get an API client to the portlayer - client := c.containerProxy.Client() + client := PortLayerClient() // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } 
if !vc.TryLock(APITimeout) { - return ConcurrentAPIError(name, "ContainerStart") + return engerr.ConcurrentAPIError(name, "ContainerStart") } defer vc.Unlock() id := vc.ContainerID @@ -947,15 +905,15 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * } // get a handle to the container - handle, err := c.Handle(id, name) + handle, err := c.containerProxy.Handle(ctx, id, name) if err != nil { return err } var endpoints []*models.EndpointConfig - // bind network + // bind vicnetwork if bind { - op.Debugf("Binding network to container %s", id) + op.Debugf("Binding vicnetwork to container %s", id) var bindRes *scopes.BindContainerOK bindRes, err = client.Scopes.BindContainer(scopes.NewBindContainerParamsWithContext(ctx).WithHandle(handle)) @@ -963,11 +921,11 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * switch err := err.(type) { case *scopes.BindContainerNotFound: cache.ContainerCache().DeleteContainer(id) - return NotFoundError(name) + return engerr.NotFoundError(name) case *scopes.BindContainerInternalServerError: - return InternalServerError(err.Payload.Message) + return engerr.InternalServerError(err.Payload.Message) default: - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } @@ -998,11 +956,11 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * switch err := err.(type) { case *containers.StateChangeNotFound: cache.ContainerCache().DeleteContainer(id) - return NotFoundError(name) + return engerr.NotFoundError(name) case *containers.StateChangeDefault: - return InternalServerError(err.Payload.Message) + return engerr.InternalServerError(err.Payload.Message) default: - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } @@ -1012,14 +970,14 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * if bind { scope, e := 
c.findPortBoundNetworkEndpoint(hostConfig, endpoints) if scope != nil && scope.ScopeType == constants.BridgeScopeType { - if err = MapPorts(vc, e, id); err != nil { - return InternalServerError(fmt.Sprintf("error mapping ports: %s", err)) + if err = network.MapPorts(vc, e, id); err != nil { + return engerr.InternalServerError(fmt.Sprintf("error mapping ports: %s", err)) } defer func() { if err != nil { op.Debugf("Unbinding ports for %s due to error - %s", id, err.Error()) - UnmapPorts(id, vc) + network.UnmapPorts(id, vc) } }() } @@ -1032,13 +990,13 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * switch err := err.(type) { case *containers.CommitNotFound: cache.ContainerCache().DeleteContainer(id) - return NotFoundError(name) + return engerr.NotFoundError(name) case *containers.CommitConflict: - return ConflictError(err.Payload.Message) + return engerr.ConflictError(err.Payload.Message) case *containers.CommitDefault: - return InternalServerError(err.Payload.Message) + return engerr.InternalServerError(err.Payload.Message) default: - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } @@ -1047,212 +1005,7 @@ func (c *Container) containerStart(op trace.Operation, name string, hostConfig * return nil } -// requestHostPort finds a free port on the host -func requestHostPort(proto string) (int, error) { - pa := portallocator.Get() - return pa.RequestPortInRange(nil, proto, 0, 0) -} - -type portMapping struct { - intHostPort int - strHostPort string - portProto nat.Port -} - -// unrollPortMap processes config for mapping/unmapping ports e.g. 
from hostconfig.PortBindings -func unrollPortMap(portMap nat.PortMap) ([]*portMapping, error) { - var portMaps []*portMapping - for i, pb := range portMap { - - proto, port := nat.SplitProtoPort(string(i)) - nport, err := nat.NewPort(proto, port) - if err != nil { - return nil, err - } - - // iterate over all the ports in pb []nat.PortBinding - for i := range pb { - var hostPort int - var hPort string - if pb[i].HostPort == "" { - // use a random port since no host port is specified - hostPort, err = requestHostPort(proto) - if err != nil { - log.Errorf("could not find available port on host") - return nil, err - } - log.Infof("using port %d on the host for port mapping", hostPort) - - // update the hostconfig - pb[i].HostPort = strconv.Itoa(hostPort) - - } else { - hostPort, err = strconv.Atoi(pb[i].HostPort) - if err != nil { - return nil, err - } - } - hPort = strconv.Itoa(hostPort) - portMaps = append(portMaps, &portMapping{ - intHostPort: hostPort, - strHostPort: hPort, - portProto: nport, - }) - } - } - return portMaps, nil -} - -// MapPorts maps ports defined in bridge endpoint for containerID -func MapPorts(vc *viccontainer.VicContainer, endpoint *models.EndpointConfig, containerID string) error { - if endpoint == nil { - return fmt.Errorf("invalid endpoint") - } - - var containerIP net.IP - containerIP = net.ParseIP(endpoint.Address) - if containerIP == nil { - return fmt.Errorf("invalid endpoint address %s", endpoint.Address) - } - - portMap := addIndirectEndpointsToPortMap([]*models.EndpointConfig{endpoint}, nil) - log.Debugf("Mapping ports of %q on endpoint %s: %v", containerID, endpoint.Name, portMap) - if len(portMap) == 0 { - return nil - } - - mappings, err := unrollPortMap(portMap) - if err != nil { - return err - } - - // cannot occur direct under the lock below because unmap ports take a lock. 
- defer func() { - if err != nil { - // if we didn't succeed then make sure we clean up - UnmapPorts(containerID, vc) - } - }() - - cbpLock.Lock() - defer cbpLock.Unlock() - vc.NATMap = portMap - - for _, p := range mappings { - // update mapped ports - if containerByPort[p.strHostPort] == containerID { - log.Debugf("Skipping mapping for already mapped port %s for %s", p.strHostPort, containerID) - continue - } - - if err = portMapper.MapPort(nil, p.intHostPort, p.portProto.Proto(), containerIP.String(), p.portProto.Int(), publicIfaceName, bridgeIfaceName); err != nil { - return err - } - - // bridge-to-bridge pin hole for traffic from containers for exposed port - if err = interBridgeTraffic(portmap.Map, p.strHostPort, p.portProto.Proto(), containerIP.String(), p.portProto.Port()); err != nil { - return err - } - - // update mapped ports - containerByPort[p.strHostPort] = containerID - log.Debugf("mapped port %s for container %s", p.strHostPort, containerID) - } - return nil -} - -// UnmapPorts unmaps ports defined in hostconfig if it's mapped for this container -func UnmapPorts(id string, vc *viccontainer.VicContainer) error { - portMap := vc.NATMap - log.Debugf("UnmapPorts for %s: %v", vc.ContainerID, portMap) - - if len(portMap) == 0 { - return nil - } - - mappings, err := unrollPortMap(vc.NATMap) - if err != nil { - return err - } - - cbpLock.Lock() - defer cbpLock.Unlock() - vc.NATMap = nil - - for _, p := range mappings { - // check if we should actually unmap based on current mappings - mappedID, mapped := containerByPort[p.strHostPort] - if !mapped { - log.Debugf("skipping already unmapped %s", p.strHostPort) - continue - } - if mappedID != id { - log.Debugf("port is mapped for container %s, not %s, skipping", mappedID, id) - continue - } - - if err = portMapper.UnmapPort(nil, p.intHostPort, p.portProto.Proto(), p.portProto.Int(), publicIfaceName, bridgeIfaceName); err != nil { - log.Warnf("failed to unmap port %s: %s", p.strHostPort, err) - continue - } - 
- // bridge-to-bridge pin hole for traffic from containers for exposed port - if err = interBridgeTraffic(portmap.Unmap, p.strHostPort, "", "", ""); err != nil { - log.Warnf("failed to undo bridge-to-bridge pinhole %s: %s", p.strHostPort, err) - continue - } - - // update mapped ports - delete(containerByPort, p.strHostPort) - log.Debugf("unmapped port %s", p.strHostPort) - } - return nil -} - -// interBridgeTraffic enables traffic for exposed port from one bridge network to another -func interBridgeTraffic(op portmap.Operation, hostPort, proto, containerAddr, containerPort string) error { - switch op { - case portmap.Map: - switch proto { - case "udp", "tcp": - default: - return fmt.Errorf("unknown protocol: %s", proto) - } - - // rule to allow connections from bridge interface for the - // specific mapped port. has to inserted at the top of the - // chain rather than appended to supersede bridge-to-bridge - // traffic blocking - baseArgs := []string{"-t", string(iptables.Filter), - "-i", bridgeIfaceName, - "-o", bridgeIfaceName, - "-p", proto, - "-d", containerAddr, - "--dport", containerPort, - "-j", "ACCEPT", - } - - args := append([]string{string(iptables.Insert), "VIC", "1"}, baseArgs...) - if _, err := iptables.Raw(args...); err != nil && !os.IsExist(err) { - return err - } - - btbRules[hostPort] = baseArgs - case portmap.Unmap: - if args, ok := btbRules[hostPort]; ok { - args = append([]string{string(iptables.Delete), "VIC"}, args...) 
- if _, err := iptables.Raw(args...); err != nil && !os.IsNotExist(err) { - return err - } - - delete(btbRules, hostPort) - } - } - - return nil -} - -func (c *Container) defaultScope() string { +func (c *ContainerBackend) defaultScope() string { defaultScope.Lock() defer defaultScope.Unlock() @@ -1260,7 +1013,7 @@ func (c *Container) defaultScope() string { return defaultScope.scope } - client := c.containerProxy.Client() + client := PortLayerClient() listRes, err := client.Scopes.List(scopes.NewListParamsWithContext(ctx).WithIDName("default")) if err != nil { log.Error(err) @@ -1276,12 +1029,12 @@ func (c *Container) defaultScope() string { return defaultScope.scope } -func (c *Container) findPortBoundNetworkEndpoint(hostconfig *containertypes.HostConfig, endpoints []*models.EndpointConfig) (*models.ScopeConfig, *models.EndpointConfig) { +func (c *ContainerBackend) findPortBoundNetworkEndpoint(hostconfig *containertypes.HostConfig, endpoints []*models.EndpointConfig) (*models.ScopeConfig, *models.EndpointConfig) { if len(hostconfig.PortBindings) == 0 { return nil, nil } - // check if the port binding network is a bridge type + // check if the port binding vicnetwork is a bridge type listRes, err := PortLayerClient().Scopes.List(scopes.NewListParamsWithContext(ctx).WithIDName(hostconfig.NetworkMode.NetworkName())) if err != nil { log.Error(err) @@ -1294,11 +1047,11 @@ func (c *Container) findPortBoundNetworkEndpoint(hostconfig *containertypes.Host } if listRes.Payload[0].ScopeType != constants.BridgeScopeType { - log.Warnf("port binding for network %s is not bridge type", hostconfig.NetworkMode.NetworkName()) + log.Warnf("port binding for vicnetwork %s is not bridge type", hostconfig.NetworkMode.NetworkName()) return listRes.Payload[0], nil } - // look through endpoints to find the container's IP on the network that has the port binding + // look through endpoints to find the container's IP on the vicnetwork that has the port binding for _, e := range endpoints { 
if hostconfig.NetworkMode.NetworkName() == e.Scope || (hostconfig.NetworkMode.IsDefault() && e.Scope == c.defaultScope()) { return listRes.Payload[0], e @@ -1314,13 +1067,13 @@ func (c *Container) findPortBoundNetworkEndpoint(hostconfig *containertypes.Host // will wait for a graceful termination. An error is returned if the // container is not found, is already stopped, or if there is a // problem stopping the container. -func (c *Container) ContainerStop(name string, seconds *int) error { +func (c *ContainerBackend) ContainerStop(name string, seconds *int) error { defer trace.End(trace.Begin(name)) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } if seconds == nil { @@ -1332,12 +1085,12 @@ func (c *Container) ContainerStop(name string, seconds *int) error { } operation := func() error { - return c.containerProxy.Stop(vc, name, seconds, true) + return c.containerProxy.Stop(ctx, vc, name, seconds, true) } config := retry.NewBackoffConfig() config.MaxElapsedTime = maxElapsedTime - if err := retry.DoWithConfig(operation, IsConflictError, config); err != nil { + if err := retry.DoWithConfig(operation, engerr.IsConflictError, config); err != nil { return err } @@ -1348,13 +1101,13 @@ func (c *Container) ContainerStop(name string, seconds *int) error { } // ContainerUnpause unpauses a container -func (c *Container) ContainerUnpause(name string) error { - return fmt.Errorf("%s does not yet implement ContainerUnpause", ProductName()) +func (c *ContainerBackend) ContainerUnpause(name string) error { + return engerr.APINotSupportedMsg(ProductName(), "ContainerUnpause") } // ContainerUpdate updates configuration of the container -func (c *Container) ContainerUpdate(name string, hostConfig *containertypes.HostConfig) (containertypes.ContainerUpdateOKBody, error) { - return containertypes.ContainerUpdateOKBody{}, fmt.Errorf("%s 
does not yet implement ontainerUpdate", ProductName()) +func (c *ContainerBackend) ContainerUpdate(name string, hostConfig *containertypes.HostConfig) (containertypes.ContainerUpdateOKBody, error) { + return containertypes.ContainerUpdateOKBody{}, engerr.APINotSupportedMsg(ProductName(), "ContainerUpdate") } // ContainerWait stops processing until the given container is @@ -1362,16 +1115,16 @@ func (c *Container) ContainerUpdate(name string, hostConfig *containertypes.Host // successful stop, the exit code of the container is returned. On a // timeout, an error is returned. If you want to wait forever, supply // a negative duration for the timeout. -func (c *Container) ContainerWait(name string, timeout time.Duration) (int, error) { +func (c *ContainerBackend) ContainerWait(name string, timeout time.Duration) (int, error) { defer trace.End(trace.Begin(fmt.Sprintf("name(%s):timeout(%s)", name, timeout))) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return -1, NotFoundError(name) + return -1, engerr.NotFoundError(name) } - dockerState, err := c.containerProxy.Wait(vc, timeout) + dockerState, err := c.containerProxy.Wait(ctx, vc, timeout) if err != nil { return -1, err } @@ -1382,18 +1135,18 @@ func (c *Container) ContainerWait(name string, timeout time.Duration) (int, erro // docker's container.monitorBackend // ContainerChanges returns a list of container fs changes -func (c *Container) ContainerChanges(name string) ([]docker.Change, error) { +func (c *ContainerBackend) ContainerChanges(name string) ([]docker.Change, error) { defer trace.End(trace.Begin(name)) op := trace.NewOperation(context.Background(), "ContainerChanges: %s", name) vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return nil, NotFoundError(name) + return nil, engerr.NotFoundError(name) } - r, err := c.containerProxy.GetContainerChanges(op, vc, false) + r, err := c.GetContainerChanges(op, vc, 
false) if err != nil { - return nil, InternalServerError(err.Error()) + return nil, engerr.InternalServerError(err.Error()) } changes := []docker.Change{} @@ -1409,7 +1162,7 @@ func (c *Container) ContainerChanges(name string) ([]docker.Change, error) { } if err != nil { - return []docker.Change{}, InternalServerError(err.Error()) + return []docker.Change{}, engerr.InternalServerError(err.Error()) } change := docker.Change{ @@ -1426,44 +1179,66 @@ func (c *Container) ContainerChanges(name string) ([]docker.Change, error) { case "C": change.Kind = docker.ChangeModify default: - return []docker.Change{}, InternalServerError("Invalid change type") + return []docker.Change{}, engerr.InternalServerError("Invalid change type") } changes = append(changes, change) } return changes, nil } +// GetContainerChanges returns container changes from portlayer. +// Set data to true will return file data, otherwise, only return file headers with change type. +func (c *ContainerBackend) GetContainerChanges(op trace.Operation, vc *viccontainer.VicContainer, data bool) (io.ReadCloser, error) { + host, err := sys.UUID() + if err != nil { + return nil, engerr.InternalServerError("Failed to determine host UUID") + } + + parent := vc.LayerID + spec := archive.FilterSpec{ + Inclusions: make(map[string]struct{}), + Exclusions: make(map[string]struct{}), + } + + r, err := archiveProxy.ArchiveExportReader(op, constants.ContainerStoreName, host, vc.ContainerID, parent, data, spec) + if err != nil { + return nil, engerr.InternalServerError(err.Error()) + } + + return r, nil +} + // ContainerInspect returns low-level information about a // container. Returns an error if the container cannot be found, or if // there is an error getting the data. -func (c *Container) ContainerInspect(name string, size bool, version string) (interface{}, error) { +func (c *ContainerBackend) ContainerInspect(name string, size bool, version string) (interface{}, error) { // Ignore version. 
We're supporting post-1.20 version. defer trace.End(trace.Begin(name)) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return nil, NotFoundError(name) + return nil, engerr.NotFoundError(name) } id := vc.ContainerID log.Debugf("Found %q in cache as %q", id, vc.ContainerID) - client := c.containerProxy.Client() + client := PortLayerClient() results, err := client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(id)) if err != nil { switch err := err.(type) { case *containers.GetContainerInfoNotFound: cache.ContainerCache().DeleteContainer(id) - return nil, NotFoundError(name) + return nil, engerr.NotFoundError(name) case *containers.GetContainerInfoInternalServerError: - return nil, InternalServerError(err.Payload.Message) + return nil, engerr.InternalServerError(err.Payload.Message) default: - return nil, InternalServerError(err.Error()) + return nil, engerr.InternalServerError(err.Error()) } } - inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, PortLayerName()) + inspectJSON, err := proxy.ContainerInfoToDockerContainerInspect(vc, results.Payload, PortLayerName()) if err != nil { log.Errorf("containerInfoToDockerContainerInspect failed with %s", err) return nil, err @@ -1476,13 +1251,13 @@ func (c *Container) ContainerInspect(name string, size bool, version string) (in // ContainerLogs hooks up a container's stdout and stderr streams // configured with the given struct. 
-func (c *Container) ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error { +func (c *ContainerBackend) ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error { defer trace.End(trace.Begin("")) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } name = vc.ContainerID @@ -1504,7 +1279,7 @@ func (c *Container) ContainerLogs(ctx context.Context, name string, config *back } // Make a call to our proxy to handle the remoting - err = c.containerProxy.StreamContainerLogs(ctx, name, outStream, started, config.Timestamps, config.Follow, since, tailLines) + err = c.streamProxy.StreamContainerLogs(ctx, name, outStream, started, config.Timestamps, config.Follow, since, tailLines) if err != nil { // Don't return an error encountered while streaming logs. // Once we've started streaming logs, the Docker client doesn't expect @@ -1517,13 +1292,13 @@ func (c *Container) ContainerLogs(ctx context.Context, name string, config *back // ContainerStats writes information about the container to the stream // given in the config object. 
-func (c *Container) ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error { +func (c *ContainerBackend) ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error { defer trace.End(trace.Begin(name)) // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } // get the configured CPUMhz for this VCH so that we can calculate docker CPU stats @@ -1532,7 +1307,7 @@ func (c *Container) ContainerStats(ctx context.Context, name string, config *bac // wrap error to provide a bit more detail sysErr := fmt.Errorf("unable to gather system CPUMhz for container(%s): %s", vc.ContainerID, err) log.Error(sysErr) - return InternalServerError(sysErr.Error()) + return engerr.InternalServerError(sysErr.Error()) } out := config.OutStream @@ -1556,14 +1331,14 @@ func (c *Container) ContainerStats(ctx context.Context, name string, config *bac // if we are not streaming then we need to get the container state if !config.Stream { - statsConfig.ContainerState, err = c.containerProxy.State(vc) + statsConfig.ContainerState, err = c.containerProxy.State(ctx, vc) if err != nil { - return InternalServerError(err.Error()) + return engerr.InternalServerError(err.Error()) } } - err = c.containerProxy.StreamContainerStats(ctx, statsConfig) + err = c.streamProxy.StreamContainerStats(ctx, statsConfig) if err != nil { log.Errorf("error while streaming container (%s) stats: %s", vc.ContainerID, err) } @@ -1575,12 +1350,12 @@ func (c *Container) ContainerStats(ctx context.Context, name string, config *bac // "-ef" if no args are given. An error is returned if the container // is not found, or is not running, or if there are any problems // running ps, or parsing the output. 
-func (c *Container) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { - return nil, fmt.Errorf("%s does not yet implement ContainerTop", ProductName()) +func (c *ContainerBackend) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + return nil, engerr.APINotSupportedMsg(ProductName(), "ContainerTop") } // Containers returns the list of containers to show given the user's filtering. -func (c *Container) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { +func (c *ContainerBackend) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { defer trace.End(trace.Begin(fmt.Sprintf("ListOptions %#v", config))) // validate filters for support and validity @@ -1590,7 +1365,7 @@ func (c *Container) Containers(config *types.ContainerListOptions) ([]*types.Con } // Get an API client to the portlayer - client := c.containerProxy.Client() + client := PortLayerClient() containme, err := client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&listContext.All)) if err != nil { @@ -1649,15 +1424,16 @@ payloadLoop: if dockerState.Running { // we only present port information in ps output when the container is running and // should be responsive at that address:port - ports = directPortInformation(t) + ports = network.DirectPortInformation(t) - ips, err := publicIPv4Addrs() + ips, err := network.PublicIPv4Addrs() if err != nil { log.Errorf("Could not get IP information for reporting port bindings: %s", err) // display port mappings without IP data if we cannot get it ips = []string{""} } - ports = append(ports, portForwardingInformation(t, ips)...) + c := cache.ContainerCache().GetContainer(t.ContainerConfig.ContainerID) + ports = append(ports, network.PortForwardingInformation(c, ips)...) 
} // verify that the repo:tag exists for the container -- if it doesn't then we should present the @@ -1698,51 +1474,51 @@ payloadLoop: return containers, nil } -func (c *Container) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { - return nil, fmt.Errorf("%s does not yet implement ContainersPrune", ProductName()) +func (c *ContainerBackend) ContainersPrune(pruneFilters filters.Args) (*types.ContainersPruneReport, error) { + return nil, engerr.APINotSupportedMsg(ProductName(), "ContainersPrune") } // docker's container.attachBackend // ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. -func (c *Container) ContainerAttach(name string, ca *backend.ContainerAttachConfig) error { +func (c *ContainerBackend) ContainerAttach(name string, ca *backend.ContainerAttachConfig) error { defer trace.End(trace.Begin(name)) operation := func() error { return c.containerAttach(name, ca) } - if err := retry.Do(operation, IsConflictError); err != nil { + if err := retry.Do(operation, engerr.IsConflictError); err != nil { return err } return nil } -func (c *Container) containerAttach(name string, ca *backend.ContainerAttachConfig) error { +func (c *ContainerBackend) containerAttach(name string, ca *backend.ContainerAttachConfig) error { // Look up the container name in the metadata cache to get long ID vc := cache.ContainerCache().GetContainer(name) if vc == nil { - return NotFoundError(name) + return engerr.NotFoundError(name) } id := vc.ContainerID - handle, err := c.Handle(id, name) + handle, err := c.containerProxy.Handle(ctx, id, name) if err != nil { return err } - handleprime, err := c.containerProxy.BindInteraction(handle, name, id) + handleprime, err := c.containerProxy.BindInteraction(ctx, handle, name, id) if err != nil { return err } - if err := c.containerProxy.CommitContainerHandle(handleprime, name, 0); err != nil { + if err := c.containerProxy.CommitContainerHandle(ctx, handleprime, name, 
0); err != nil { return err } stdin, stdout, stderr, err := ca.GetStreams() if err != nil { - return InternalServerError("Unable to get stdio streams for calling client") + return engerr.InternalServerError("Unable to get stdio streams for calling client") } defer stdin.Close() @@ -1763,16 +1539,16 @@ func (c *Container) containerAttach(name string, ca *backend.ContainerAttachConf ca.UseStderr = false } - ac := &AttachConfig{ + ac := &proxy.AttachConfig{ ID: id, ContainerAttachConfig: ca, UseTty: vc.Config.Tty, CloseStdin: vc.Config.StdinOnce, } - err = c.containerProxy.AttachStreams(context.Background(), ac, stdin, stdout, stderr) + err = c.streamProxy.AttachStreams(context.Background(), ac, stdin, stdout, stderr) if err != nil { - if _, ok := err.(DetachError); ok { + if _, ok := err.(engerr.DetachError); ok { log.Infof("Detach detected, tearing down connection") // fire detach event @@ -1793,7 +1569,7 @@ func (c *Container) containerAttach(name string, ca *backend.ContainerAttachConf // ContainerRename changes the name of a container, using the oldName // to find the container. An error is returned if newName is already // reserved. 
-func (c *Container) ContainerRename(oldName, newName string) error { +func (c *ContainerBackend) ContainerRename(oldName, newName string) error { defer trace.End(trace.Begin(newName)) if oldName == "" || newName == "" { @@ -1812,7 +1588,7 @@ func (c *Container) ContainerRename(oldName, newName string) error { vc := cache.ContainerCache().GetContainer(oldName) if vc == nil { log.Errorf("Container %s not found", oldName) - return NotFoundError(oldName) + return engerr.NotFoundError(oldName) } oldName = vc.Name @@ -1829,10 +1605,10 @@ func (c *Container) ContainerRename(oldName, newName string) error { } renameOp := func() error { - return c.containerProxy.Rename(vc, newName) + return c.containerProxy.Rename(ctx, vc, newName) } - if err := retry.Do(renameOp, IsConflictError); err != nil { + if err := retry.Do(renameOp, engerr.IsConflictError); err != nil { log.Errorf("Rename error: %s", err) cache.ContainerCache().ReleaseName(newName) return err @@ -1974,7 +1750,7 @@ func validateCreateConfig(config *types.ContainerCreateConfig) error { defer trace.End(trace.Begin("Container.validateCreateConfig")) if config.Config == nil { - return BadRequestError("invalid config") + return engerr.BadRequestError("invalid config") } if config.HostConfig == nil { @@ -2027,9 +1803,9 @@ func validateCreateConfig(config *types.ContainerCreateConfig) error { config.NetworkingConfig = &dnetwork.NetworkingConfig{} } else { if l := len(config.NetworkingConfig.EndpointsConfig); l > 1 { - return fmt.Errorf("NetworkMode error: Container can be connected to one network endpoint only") + return fmt.Errorf("NetworkMode error: Container can be connected to one vicnetwork endpoint only") } - // If NetworkConfig exists, set NetworkMode to the default endpoint network, assuming only one endpoint network as the default network during container create + // If NetworkConfig exists, set NetworkMode to the default endpoint vicnetwork, assuming only one endpoint vicnetwork as the default vicnetwork during 
container create for networkName := range config.NetworkingConfig.EndpointsConfig { config.HostConfig.NetworkMode = containertypes.NetworkMode(networkName) } @@ -2037,7 +1813,7 @@ func validateCreateConfig(config *types.ContainerCreateConfig) error { // validate port bindings var ips []string - if addrs, err := publicIPv4Addrs(); err != nil { + if addrs, err := network.PublicIPv4Addrs(); err != nil { log.Warnf("could not get address for public interface: %s", err) } else { ips = make([]string, len(addrs)) @@ -2058,14 +1834,14 @@ func validateCreateConfig(config *types.ContainerCreateConfig) error { } } if !found { - return InternalServerError("host IP for port bindings is only supported for 0.0.0.0 and the public interface IP address") + return engerr.InternalServerError("host IP for port bindings is only supported for 0.0.0.0 and the public interface IP address") } } // #nosec: Errors unhandled. start, end, _ := nat.ParsePortRangeToInt(pb.HostPort) if start != end { - return InternalServerError("host port ranges are not supported for port bindings") + return engerr.InternalServerError("host port ranges are not supported for port bindings") } } } @@ -2096,149 +1872,6 @@ func copyConfigOverrides(vc *viccontainer.VicContainer, config types.ContainerCr vc.HostConfig = config.HostConfig } -func publicIPv4Addrs() ([]string, error) { - l, err := netlink.LinkByName(publicIfaceName) - if err != nil { - return nil, fmt.Errorf("could not look up link from interface name %s: %s", publicIfaceName, err.Error()) - } - - addrs, err := netlink.AddrList(l, netlink.FAMILY_V4) - if err != nil { - return nil, fmt.Errorf("could not get addresses from public link: %s", err.Error()) - } - - ips := make([]string, len(addrs)) - for i := range addrs { - ips[i] = addrs[i].IP.String() - } - - return ips, nil -} - -func directPortInformation(t *models.ContainerInfo) []types.Port { - var resultPorts []types.Port - - for _, ne := range t.Endpoints { - trust, _ := 
executor.ParseTrustLevel(ne.Trust) - if !ne.Direct || trust == executor.Closed || trust == executor.Outbound || trust == executor.Peers { - // we don't publish port info for ports that are not directly accessible from outside of the VCH - continue - } - - ip := strings.SplitN(ne.Address, "/", 2)[0] - - // if it's an open network then inject an "all ports" entry - if trust == executor.Open { - resultPorts = append(resultPorts, types.Port{ - IP: ip, - PrivatePort: 0, - PublicPort: 0, - Type: "*", - }) - } - - for _, p := range ne.Ports { - port := types.Port{IP: ip} - - portsAndType := strings.SplitN(p, "/", 2) - port.Type = portsAndType[1] - - mapping := strings.Split(portsAndType[0], ":") - // if no mapping is supplied then there's only one and that's public. If there is a mapping then the first - // entry is the public - public, err := strconv.Atoi(mapping[0]) - if err != nil { - log.Errorf("Got an error trying to convert public port number \"%s\" to an int: %s", mapping[0], err) - continue - } - port.PublicPort = uint16(public) - - // If port is on container network then a different container could be forwarding the same port via the endpoint - // so must check for explicit ID match. If a match then it's definitely not accessed directly. 
- if containerByPort[mapping[0]] == t.ContainerConfig.ContainerID { - continue - } - - // did not find a way to have the client not render both ports so setting them the same even if there's not - // redirect occurring - port.PrivatePort = port.PublicPort - - // for open networks we don't bother listing direct ports - if len(mapping) == 1 { - if trust != executor.Open { - resultPorts = append(resultPorts, port) - } - continue - } - - private, err := strconv.Atoi(mapping[1]) - if err != nil { - log.Errorf("Got an error trying to convert private port number \"%s\" to an int: %s", mapping[1], err) - continue - } - port.PrivatePort = uint16(private) - resultPorts = append(resultPorts, port) - } - } - - return resultPorts -} - -// returns port bindings as a slice of Docker Ports for return to the client -// returns empty slice on error -func portForwardingInformation(t *models.ContainerInfo, ips []string) []types.Port { - cid := t.ContainerConfig.ContainerID - c := cache.ContainerCache().GetContainer(cid) - - if c == nil { - log.Errorf("Could not find container with ID %s", cid) - return nil - } - - portBindings := c.NATMap - var resultPorts []types.Port - - // create a port for each IP on the interface (usually only 1, but could be more) - // (works with both IPv4 and IPv6 addresses) - for _, ip := range ips { - port := types.Port{IP: ip} - - for portBindingPrivatePort, hostPortBindings := range portBindings { - proto, pnum := nat.SplitProtoPort(string(portBindingPrivatePort)) - portNum, err := strconv.Atoi(pnum) - if err != nil { - log.Warnf("Unable to convert private port %q to an int", pnum) - continue - } - port.PrivatePort = uint16(portNum) - port.Type = proto - - for i := 0; i < len(hostPortBindings); i++ { - // If port is on container network then a different container could be forwarding the same port via the endpoint - // so must check for explicit ID match. If no match, definitely not forwarded via endpoint. 
- if containerByPort[hostPortBindings[i].HostPort] != t.ContainerConfig.ContainerID { - continue - } - - newport := port - publicPort, err := strconv.Atoi(hostPortBindings[i].HostPort) - if err != nil { - log.Infof("Got an error trying to convert public port number to an int") - continue - } - - newport.PublicPort = uint16(publicPort) - // sanity check -- sometimes these come back as 0 when no binding actually exists - // that doesn't make sense, so in that case we don't want to report these bindings - if newport.PublicPort != 0 && newport.PrivatePort != 0 { - resultPorts = append(resultPorts, newport) - } - } - } - } - return resultPorts -} - //---------------------------------- // ContainerLogs() utility functions //---------------------------------- @@ -2248,7 +1881,7 @@ func portForwardingInformation(t *models.ContainerInfo, ips []string) []types.Po // // returns: // tail lines, since (in unix time), error -func (c *Container) validateContainerLogsConfig(vc *viccontainer.VicContainer, config *backend.ContainerLogsConfig) (int64, int64, error) { +func (c *ContainerBackend) validateContainerLogsConfig(vc *viccontainer.VicContainer, config *backend.ContainerLogsConfig) (int64, int64, error) { if !(config.ShowStdout || config.ShowStderr) { return 0, 0, fmt.Errorf("You must choose at least one stream") } diff --git a/lib/apiservers/engine/backends/container_proxy.go b/lib/apiservers/engine/backends/container_proxy.go deleted file mode 100644 index 376620a133..0000000000 --- a/lib/apiservers/engine/backends/container_proxy.go +++ /dev/null @@ -1,2265 +0,0 @@ -// Copyright 2016-2018 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backends - -//**** -// container_proxy.go -// -// Contains all code that touches the portlayer for container operations and all -// code that converts swagger based returns to docker personality backend structs. -// The goal is to make the backend code that implements the docker engine-api -// interfaces be as simple as possible and contain no swagger or portlayer code. -// -// Rule for code to be in here: -// 1. touches VIC portlayer -// 2. converts swagger to docker engine-api structs -// 3. errors MUST be docker engine-api compatible errors. DO NOT return arbitrary errors! 
-// - Do NOT return portlayer errors -// - Do NOT return fmt.Errorf() -// - Do NOT return errors.New() -// - Please USE the aliased docker error package 'derr' - -import ( - "context" - "fmt" - "io" - "net" - "net/http" - "os" - "strconv" - "strings" - "sync" - "syscall" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - - derr "github.com/docker/docker/api/errors" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - dnetwork "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/term" - "github.com/docker/go-connections/nat" - - "github.com/vmware/vic/lib/apiservers/engine/backends/cache" - viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" - "github.com/vmware/vic/lib/apiservers/engine/backends/convert" - epoint "github.com/vmware/vic/lib/apiservers/engine/backends/endpoint" - "github.com/vmware/vic/lib/apiservers/engine/backends/filter" - "github.com/vmware/vic/lib/apiservers/portlayer/client" - "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" - "github.com/vmware/vic/lib/apiservers/portlayer/client/interaction" - "github.com/vmware/vic/lib/apiservers/portlayer/client/logging" - "github.com/vmware/vic/lib/apiservers/portlayer/client/scopes" - "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" - "github.com/vmware/vic/lib/apiservers/portlayer/client/tasks" - "github.com/vmware/vic/lib/apiservers/portlayer/models" - "github.com/vmware/vic/lib/archive" - "github.com/vmware/vic/lib/constants" - "github.com/vmware/vic/lib/metadata" - "github.com/vmware/vic/pkg/trace" - "github.com/vmware/vic/pkg/vsphere/sys" -) - -// VicContainerProxy interface -type VicContainerProxy interface { - CreateContainerHandle(vc 
*viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) - CreateContainerTask(handle string, id string, config types.ContainerCreateConfig) (string, error) - CreateExecTask(handle string, config *types.ExecConfig) (string, string, error) - AddContainerToScope(handle string, config types.ContainerCreateConfig) (string, error) - AddVolumesToContainer(handle string, config types.ContainerCreateConfig) (string, error) - AddLoggingToContainer(handle string, config types.ContainerCreateConfig) (string, error) - AddInteractionToContainer(handle string, config types.ContainerCreateConfig) (string, error) - - BindInteraction(handle string, name string, id string) (string, error) - UnbindInteraction(handle string, name string, id string) (string, error) - - CommitContainerHandle(handle, containerID string, waitTime int32) error - AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error - StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error - StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error - - StatPath(op trace.Operation, sotre, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) - - Stop(vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error - State(vc *viccontainer.VicContainer) (*types.ContainerState, error) - Wait(vc *viccontainer.VicContainer, timeout time.Duration) (*types.ContainerState, error) - Signal(vc *viccontainer.VicContainer, sig uint64) error - Resize(id string, height, width int32) error - Rename(vc *viccontainer.VicContainer, newName string) error - Remove(vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error - - GetContainerChanges(op trace.Operation, vc *viccontainer.VicContainer, data bool) (io.ReadCloser, error) - - UnbindContainerFromNetwork(vc 
*viccontainer.VicContainer, handle string) (string, error) - - Handle(id, name string) (string, error) - Client() *client.PortLayer - exitCode(vc *viccontainer.VicContainer) (string, error) -} - -// ContainerProxy struct -type ContainerProxy struct { - client *client.PortLayer - portlayerAddr string - portlayerName string -} - -type volumeFields struct { - ID string - Dest string - Flags string -} - -// AttachConfig wraps backend.ContainerAttachConfig and adds other required fields -// Similar to https://github.com/docker/docker/blob/master/container/stream/attach.go -type AttachConfig struct { - *backend.ContainerAttachConfig - - // ID of the session - ID string - // Tells the attach copier that the stream's stdin is a TTY and to look for - // escape sequences in stdin to detach from the stream. - // When true the escape sequence is not passed to the underlying stream - UseTty bool - // CloseStdin signals that once done, stdin for the attached stream should be closed - // For example, this would close the attached container's stdin. 
- CloseStdin bool -} - -const ( - attachConnectTimeout time.Duration = 15 * time.Second //timeout for the connection - attachAttemptTimeout time.Duration = 60 * time.Second //timeout before we ditch an attach attempt - attachPLAttemptDiff time.Duration = 10 * time.Second - attachStdinInitString = "v1c#>" - swaggerSubstringEOF = "EOF" - forceLogType = "json-file" //Use in inspect to allow docker logs to work - ShortIDLen = 12 - archiveStreamBufSize = 64 * 1024 - - DriverArgFlagKey = "flags" - DriverArgContainerKey = "container" - DriverArgImageKey = "image" - - ContainerRunning = "running" - ContainerError = "error" - ContainerStopped = "stopped" - ContainerExited = "exited" - ContainerCreated = "created" -) - -// NewContainerProxy will create a new proxy -func NewContainerProxy(plClient *client.PortLayer, portlayerAddr string, portlayerName string) *ContainerProxy { - return &ContainerProxy{client: plClient, portlayerAddr: portlayerAddr, portlayerName: portlayerName} -} - -// Handle retrieves a handle to a VIC container. Handles should be treated as opaque strings. 
-// -// returns: -// (handle string, error) -func (c *ContainerProxy) Handle(id, name string) (string, error) { - if c.client == nil { - return "", InternalServerError("ContainerProxy.Handle failed to get a portlayer client") - } - - resp, err := c.client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(id)) - if err != nil { - switch err := err.(type) { - case *containers.GetNotFound: - cache.ContainerCache().DeleteContainer(id) - return "", NotFoundError(name) - case *containers.GetDefault: - return "", InternalServerError(err.Payload.Message) - default: - return "", InternalServerError(err.Error()) - } - } - return resp.Payload, nil -} - -func (c *ContainerProxy) Client() *client.PortLayer { - return c.client -} - -// CreateContainerHandle creates a new VIC container by calling the portlayer -// -// returns: -// (containerID, containerHandle, error) -func (c *ContainerProxy) CreateContainerHandle(vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) { - defer trace.End(trace.Begin(vc.ImageID)) - - if c.client == nil { - return "", "", InternalServerError("ContainerProxy.CreateContainerHandle failed to create a portlayer client") - } - - if vc.ImageID == "" { - return "", "", NotFoundError("No image specified") - } - - if vc.LayerID == "" { - return "", "", NotFoundError("No layer specified") - } - - // Call the Exec port layer to create the container - host, err := sys.UUID() - if err != nil { - return "", "", InternalServerError("ContainerProxy.CreateContainerHandle got unexpected error getting VCH UUID") - } - - plCreateParams := dockerContainerCreateParamsToPortlayer(config, vc, host) - createResults, err := c.client.Containers.Create(plCreateParams) - if err != nil { - if _, ok := err.(*containers.CreateNotFound); ok { - cerr := fmt.Errorf("No such image: %s", vc.ImageID) - log.Errorf("%s (%s)", cerr, err) - return "", "", NotFoundError(cerr.Error()) - } - - // If we get here, most likely something went wrong 
with the port layer API server - return "", "", InternalServerError(err.Error()) - } - - id := createResults.Payload.ID - h := createResults.Payload.Handle - - return id, h, nil -} - -// CreateContainerTask sets the primary command to run in the container -// -// returns: -// (containerHandle, error) -func (c *ContainerProxy) CreateContainerTask(handle, id string, config types.ContainerCreateConfig) (string, error) { - defer trace.End(trace.Begin("")) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.CreateContainerTask failed to create a portlayer client") - } - - plTaskParams := dockerContainerCreateParamsToTask(id, config) - plTaskParams.Config.Handle = handle - - responseJoin, err := c.client.Tasks.Join(plTaskParams) - if err != nil { - log.Errorf("Unable to join primary task to container: %+v", err) - return "", InternalServerError(err.Error()) - } - - handle, ok := responseJoin.Payload.Handle.(string) - if !ok { - return "", InternalServerError(fmt.Sprintf("Type assertion failed on handle from task join: %#+v", handle)) - } - - plBindParams := tasks.NewBindParamsWithContext(ctx).WithConfig(&models.TaskBindConfig{Handle: handle, ID: id}) - responseBind, err := c.client.Tasks.Bind(plBindParams) - if err != nil { - log.Errorf("Unable to bind primary task to container: %+v", err) - return "", InternalServerError(err.Error()) - } - - handle, ok = responseBind.Payload.Handle.(string) - if !ok { - return "", InternalServerError(fmt.Sprintf("Type assertion failed on handle from task bind %#+v", handle)) - } - - return handle, nil -} - -func (c *ContainerProxy) CreateExecTask(handle string, config *types.ExecConfig) (string, string, error) { - defer trace.End(trace.Begin("")) - - if c.client == nil { - return "", "", InternalServerError("ContainerProxy.CreateExecTask failed to create a portlayer client") - } - - joinconfig := &models.TaskJoinConfig{ - Handle: handle, - Path: config.Cmd[0], - Args: config.Cmd[1:], - Env: config.Env, - User: 
config.User, - Attach: config.AttachStdin || config.AttachStdout || config.AttachStderr, - OpenStdin: config.AttachStdin, - Tty: config.Tty, - } - - // call Join with JoinParams - joinparams := tasks.NewJoinParamsWithContext(ctx).WithConfig(joinconfig) - resp, err := c.client.Tasks.Join(joinparams) - if err != nil { - return "", "", InternalServerError(err.Error()) - } - eid := resp.Payload.ID - - handleprime, ok := resp.Payload.Handle.(string) - if !ok { - return "", "", InternalServerError(fmt.Sprintf("Type assertion failed on handle from task bind %#+v", handleprime)) - } - - return handleprime, eid, nil -} - -// AddContainerToScope adds a container, referenced by handle, to a scope. -// If an error is return, the returned handle should not be used. -// -// returns: -// modified handle -func (c *ContainerProxy) AddContainerToScope(handle string, config types.ContainerCreateConfig) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddContainerToScope failed to create a portlayer client") - } - - log.Debugf("Network Configuration Section - Container Create") - // configure networking - netConf := toModelsNetworkConfig(config) - if netConf != nil { - addContRes, err := c.client.Scopes.AddContainer(scopes.NewAddContainerParamsWithContext(ctx). - WithScope(netConf.NetworkName). 
- WithConfig(&models.ScopesAddContainerConfig{ - Handle: handle, - NetworkConfig: netConf, - })) - - if err != nil { - log.Errorf("ContainerProxy.AddContainerToScope: Scopes error: %s", err.Error()) - return handle, InternalServerError(err.Error()) - } - - defer func() { - if err == nil { - return - } - // roll back the AddContainer call - if _, err2 := c.client.Scopes.RemoveContainer(scopes.NewRemoveContainerParamsWithContext(ctx).WithHandle(handle).WithScope(netConf.NetworkName)); err2 != nil { - log.Warnf("could not roll back container add: %s", err2) - } - }() - - handle = addContRes.Payload - } - - return handle, nil -} - -// AddVolumesToContainer adds volumes to a container, referenced by handle. -// If an error is returned, the returned handle should not be used. -// -// returns: -// modified handle -func (c *ContainerProxy) AddVolumesToContainer(handle string, config types.ContainerCreateConfig) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddVolumesToContainer failed to create a portlayer client") - } - - // Volume Attachment Section - log.Debugf("ContainerProxy.AddVolumesToContainer - VolumeSection") - log.Debugf("Raw volume arguments: binds: %#v, volumes: %#v", config.HostConfig.Binds, config.Config.Volumes) - - // Collect all volume mappings. In a docker create/run, they - // can be anonymous (-v /dir) or specific (-v vol-name:/dir). 
- // anonymous volumes can also come from Image Metadata - - rawAnonVolumes := make([]string, 0, len(config.Config.Volumes)) - for k := range config.Config.Volumes { - rawAnonVolumes = append(rawAnonVolumes, k) - } - - volList, err := finalizeVolumeList(config.HostConfig.Binds, rawAnonVolumes) - if err != nil { - return handle, BadRequestError(err.Error()) - } - log.Infof("Finalized volume list: %#v", volList) - - if len(config.Config.Volumes) > 0 { - // override anonymous volume list with generated volume id - for _, vol := range volList { - if _, ok := config.Config.Volumes[vol.Dest]; ok { - delete(config.Config.Volumes, vol.Dest) - mount := getMountString(vol.ID, vol.Dest, vol.Flags) - config.Config.Volumes[mount] = struct{}{} - log.Debugf("Replace anonymous volume config %s with %s", vol.Dest, mount) - } - } - } - - // Create and join volumes. - for _, fields := range volList { - // We only set these here for volumes made on a docker create - volumeData := make(map[string]string) - volumeData[DriverArgFlagKey] = fields.Flags - volumeData[DriverArgContainerKey] = config.Name - volumeData[DriverArgImageKey] = config.Config.Image - - // NOTE: calling volumeCreate regardless of whether the volume is already - // present can be avoided by adding an extra optional param to VolumeJoin, - // which would then call volumeCreate if the volume does not exist. - vol := &Volume{} - _, err := vol.volumeCreate(fields.ID, "vsphere", volumeData, nil) - if err != nil { - switch err := err.(type) { - case *storage.CreateVolumeConflict: - // Implicitly ignore the error where a volume with the same name - // already exists. We can just join the said volume to the container. - log.Infof("a volume with the name %s already exists", fields.ID) - case *storage.CreateVolumeNotFound: - return handle, VolumeCreateNotFoundError(volumeStore(volumeData)) - default: - return handle, InternalServerError(err.Error()) - } - } else { - log.Infof("volumeCreate succeeded. 
Volume mount section ID: %s", fields.ID) - } - - flags := make(map[string]string) - //NOTE: for now we are passing the flags directly through. This is NOT SAFE and only a stop gap. - flags[constants.Mode] = fields.Flags - joinParams := storage.NewVolumeJoinParamsWithContext(ctx).WithJoinArgs(&models.VolumeJoinConfig{ - Flags: flags, - Handle: handle, - MountPath: fields.Dest, - }).WithName(fields.ID) - - res, err := c.client.Storage.VolumeJoin(joinParams) - if err != nil { - switch err := err.(type) { - case *storage.VolumeJoinInternalServerError: - return handle, InternalServerError(err.Payload.Message) - case *storage.VolumeJoinDefault: - return handle, InternalServerError(err.Payload.Message) - case *storage.VolumeJoinNotFound: - return handle, VolumeJoinNotFoundError(err.Payload.Message) - default: - return handle, InternalServerError(err.Error()) - } - } - - handle = res.Payload - } - - return handle, nil -} - -// AddLoggingToContainer adds logging capability to a container, referenced by handle. -// If an error is return, the returned handle should not be used. -// -// returns: -// modified handle -func (c *ContainerProxy) AddLoggingToContainer(handle string, config types.ContainerCreateConfig) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddLoggingToContainer failed to get the portlayer client") - } - - response, err := c.client.Logging.LoggingJoin(logging.NewLoggingJoinParamsWithContext(ctx). - WithConfig(&models.LoggingJoinConfig{ - Handle: handle, - })) - if err != nil { - return "", InternalServerError(err.Error()) - } - handle, ok := response.Payload.Handle.(string) - if !ok { - return "", InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) - } - - return handle, nil -} - -// AddInteractionToContainer adds interaction capabilities to a container, referenced by handle. -// If an error is return, the returned handle should not be used. 
-// -// returns: -// modified handle -func (c *ContainerProxy) AddInteractionToContainer(handle string, config types.ContainerCreateConfig) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddInteractionToContainer failed to get the portlayer client") - } - - response, err := c.client.Interaction.InteractionJoin(interaction.NewInteractionJoinParamsWithContext(ctx). - WithConfig(&models.InteractionJoinConfig{ - Handle: handle, - })) - if err != nil { - return "", InternalServerError(err.Error()) - } - handle, ok := response.Payload.Handle.(string) - if !ok { - return "", InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) - } - - return handle, nil -} - -// BindInteraction enables interaction capabilities -func (c *ContainerProxy) BindInteraction(handle string, name string, id string) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddInteractionToContainer failed to get the portlayer client") - } - - bind, err := c.client.Interaction.InteractionBind( - interaction.NewInteractionBindParamsWithContext(ctx). 
- WithConfig(&models.InteractionBindConfig{ - Handle: handle, - ID: id, - })) - if err != nil { - switch err := err.(type) { - case *interaction.InteractionBindInternalServerError: - return "", InternalServerError(err.Payload.Message) - default: - return "", InternalServerError(err.Error()) - } - } - handle, ok := bind.Payload.Handle.(string) - if !ok { - return "", InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) - } - return handle, nil -} - -// UnbindInteraction disables interaction capabilities -func (c *ContainerProxy) UnbindInteraction(handle string, name string, id string) (string, error) { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.AddInteractionToContainer failed to get the portlayer client") - } - - unbind, err := c.client.Interaction.InteractionUnbind( - interaction.NewInteractionUnbindParamsWithContext(ctx). - WithConfig(&models.InteractionUnbindConfig{ - Handle: handle, - ID: id, - })) - if err != nil { - return "", InternalServerError(err.Error()) - } - handle, ok := unbind.Payload.Handle.(string) - if !ok { - return "", InternalServerError("type assertion failed") - } - - return handle, nil -} - -// CommitContainerHandle commits any changes to container handle. 
-// -// Args: -// waitTime <= 0 means no wait time -func (c *ContainerProxy) CommitContainerHandle(handle, containerID string, waitTime int32) error { - defer trace.End(trace.Begin(handle)) - - if c.client == nil { - return InternalServerError("ContainerProxy.CommitContainerHandle failed to get a portlayer client") - } - - var commitParams *containers.CommitParams - if waitTime > 0 { - commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle).WithWaitTime(&waitTime) - } else { - commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle) - } - - _, err := c.client.Containers.Commit(commitParams) - if err != nil { - switch err := err.(type) { - case *containers.CommitNotFound: - return NotFoundError(containerID) - case *containers.CommitConflict: - return ConflictError(err.Error()) - case *containers.CommitDefault: - return InternalServerError(err.Payload.Message) - default: - return InternalServerError(err.Error()) - } - } - - return nil -} - -// StreamContainerLogs reads the log stream from the portlayer rest server and writes -// it directly to the io.Writer that is passed in. -func (c *ContainerProxy) StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error { - defer trace.End(trace.Begin("")) - - close(started) - - params := containers.NewGetContainerLogsParamsWithContext(ctx). - WithID(name). - WithFollow(&followLogs). - WithTimestamp(&showTimestamps). - WithSince(&since). - WithTaillines(&tailLines) - _, err := c.client.Containers.GetContainerLogs(params, out) - if err != nil { - switch err := err.(type) { - case *containers.GetContainerLogsNotFound: - return NotFoundError(name) - case *containers.GetContainerLogsInternalServerError: - return InternalServerError("Server error from the interaction port layer") - default: - //Check for EOF. 
Since the connection, transport, and data handling are - //encapsulated inside of Swagger, we can only detect EOF by checking the - //error string - if strings.Contains(err.Error(), swaggerSubstringEOF) { - return nil - } - return InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) - } - } - - return nil -} - -// StreamContainerStats will provide a stream of container stats written to the provided -// io.Writer. Prior to writing to the provided io.Writer there will be a transformation -// from the portLayer representation of stats to the docker format -func (c *ContainerProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error { - defer trace.End(trace.Begin(config.ContainerID)) - - // create a child context that we control - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - params := containers.NewGetContainerStatsParamsWithContext(ctx) - params.ID = config.ContainerID - params.Stream = config.Stream - - config.Ctx = ctx - config.Cancel = cancel - - // create our converter - containerConverter := convert.NewContainerStats(config) - // provide the writer for the portLayer and start listening for metrics - writer := containerConverter.Listen() - if writer == nil { - // problem with the listener - return InternalServerError(fmt.Sprintf("unable to gather container(%s) statistics", config.ContainerID)) - } - - _, err := c.client.Containers.GetContainerStats(params, writer) - if err != nil { - switch err := err.(type) { - case *containers.GetContainerStatsNotFound: - return NotFoundError(config.ContainerID) - case *containers.GetContainerStatsInternalServerError: - return InternalServerError("Server error from the interaction port layer") - default: - if ctx.Err() == context.Canceled { - return nil - } - //Check for EOF. 
Since the connection, transport, and data handling are - //encapsulated inside of Swagger, we can only detect EOF by checking the - //error string - if strings.Contains(err.Error(), swaggerSubstringEOF) { - return nil - } - return InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) - } - } - return nil -} - -// GetContainerChanges returns container changes from portlayer. -// Set data to true will return file data, otherwise, only return file headers with change type. -func (c *ContainerProxy) GetContainerChanges(op trace.Operation, vc *viccontainer.VicContainer, data bool) (io.ReadCloser, error) { - host, err := sys.UUID() - if err != nil { - return nil, InternalServerError("Failed to determine host UUID") - } - - parent := vc.LayerID - spec := archive.FilterSpec{ - Inclusions: map[string]struct{}{}, - Exclusions: map[string]struct{}{}, - } - - r, err := archiveProxy.ArchiveExportReader(op, constants.ContainerStoreName, host, vc.ContainerID, parent, data, spec) - if err != nil { - return nil, InternalServerError(err.Error()) - } - - return r, nil -} - -// StatPath requests the portlayer to stat the filesystem resource at the -// specified path in the container vc. -func (c *ContainerProxy) StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) { - defer trace.End(trace.Begin(deviceID)) - - statPathParams := storage. - NewStatPathParamsWithContext(op). - WithStore(store). 
- WithDeviceID(deviceID) - - spec, err := archive.EncodeFilterSpec(op, &filterSpec) - if err != nil { - op.Errorf(err.Error()) - return nil, InternalServerError(err.Error()) - } - statPathParams = statPathParams.WithFilterSpec(spec) - - statPathOk, err := c.client.Storage.StatPath(statPathParams) - if err != nil { - op.Errorf(err.Error()) - return nil, err - } - - stat := &types.ContainerPathStat{ - Name: statPathOk.Name, - Mode: os.FileMode(statPathOk.Mode), - Size: statPathOk.Size, - LinkTarget: statPathOk.LinkTarget, - } - - var modTime time.Time - if err := modTime.GobDecode([]byte(statPathOk.ModTime)); err != nil { - op.Debugf("error getting mod time from statpath: %s", err.Error()) - } else { - stat.Mtime = modTime - } - - return stat, nil -} - -// Stop will stop (shutdown) a VIC container. -// -// returns -// error -func (c *ContainerProxy) Stop(vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error { - defer trace.End(trace.Begin(vc.ContainerID)) - - if c.client == nil { - return InternalServerError("ContainerProxy.Stop failed to get a portlayer client") - } - - //retrieve client to portlayer - handle, err := c.Handle(vc.ContainerID, name) - if err != nil { - return err - } - - // we have a container on the PL side lets check the state before proceeding - // ignore the error since others will be checking below..this is an attempt to short circuit the op - // TODO: can be replaced with simple cache check once power events are propagated to persona - state, err := c.State(vc) - if err != nil && IsNotFoundError(err) { - cache.ContainerCache().DeleteContainer(vc.ContainerID) - return err - } - // attempt to stop container only if container state is not stopped, exited or created. - // we should allow user to stop and remove the container that is in unexpected status, e.g. 
starting, because of serial port connection issue - if state.Status == ContainerStopped || state.Status == ContainerExited || state.Status == ContainerCreated { - return nil - } - - if unbound { - handle, err = c.UnbindContainerFromNetwork(vc, handle) - if err != nil { - return err - } - - // unmap ports - if err = UnmapPorts(vc.ContainerID, vc); err != nil { - return err - } - } - - // change the state of the container - changeParams := containers.NewStateChangeParamsWithContext(ctx).WithHandle(handle).WithState("STOPPED") - stateChangeResponse, err := c.client.Containers.StateChange(changeParams) - if err != nil { - switch err := err.(type) { - case *containers.StateChangeNotFound: - cache.ContainerCache().DeleteContainer(vc.ContainerID) - return NotFoundError(name) - case *containers.StateChangeDefault: - return InternalServerError(err.Payload.Message) - default: - return InternalServerError(err.Error()) - } - } - - handle = stateChangeResponse.Payload - - // if no timeout in seconds provided then set to default of 10 - if seconds == nil { - s := 10 - seconds = &s - } - - err = c.CommitContainerHandle(handle, vc.ContainerID, int32(*seconds)) - if err != nil { - if IsNotFoundError(err) { - cache.ContainerCache().DeleteContainer(vc.ContainerID) - } - return err - } - - return nil -} - -// UnbindContainerFromNetwork unbinds a container from the networks that it connects to -func (c *ContainerProxy) UnbindContainerFromNetwork(vc *viccontainer.VicContainer, handle string) (string, error) { - defer trace.End(trace.Begin(vc.ContainerID)) - - unbindParams := scopes.NewUnbindContainerParamsWithContext(ctx).WithHandle(handle) - ub, err := c.client.Scopes.UnbindContainer(unbindParams) - if err != nil { - switch err := err.(type) { - case *scopes.UnbindContainerNotFound: - // ignore error - log.Warnf("Container %s not found by network unbind", vc.ContainerID) - case *scopes.UnbindContainerInternalServerError: - return "", InternalServerError(err.Payload.Message) - default: 
- return "", InternalServerError(err.Error()) - } - } - - return ub.Payload.Handle, nil -} - -// State returns container state -func (c *ContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) { - defer trace.End(trace.Begin("")) - - if c.client == nil { - return nil, InternalServerError("ContainerProxy.State failed to get a portlayer client") - } - - results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID)) - if err != nil { - switch err := err.(type) { - case *containers.GetContainerInfoNotFound: - return nil, NotFoundError(vc.Name) - case *containers.GetContainerInfoInternalServerError: - return nil, InternalServerError(err.Payload.Message) - default: - return nil, InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) - } - } - - inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, c.portlayerName) - if err != nil { - return nil, err - } - return inspectJSON.State, nil -} - -// exitCode returns container exitCode -func (c *ContainerProxy) exitCode(vc *viccontainer.VicContainer) (string, error) { - defer trace.End(trace.Begin("")) - - if c.client == nil { - return "", InternalServerError("ContainerProxy.exitCode failed to get a portlayer client") - } - - results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID)) - if err != nil { - switch err := err.(type) { - case *containers.GetContainerInfoNotFound: - return "", NotFoundError(vc.Name) - case *containers.GetContainerInfoInternalServerError: - return "", InternalServerError(err.Payload.Message) - default: - return "", InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) - } - } - // get the container state - dockerState := convert.State(results.Payload) - if dockerState == nil { - return "", InternalServerError("Unable to determine container 
state") - } - - return strconv.Itoa(dockerState.ExitCode), nil -} - -func (c *ContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) ( - *types.ContainerState, error) { - - defer trace.End(trace.Begin(vc.ContainerID)) - - if vc == nil { - return nil, InternalServerError("Wait bad arguments") - } - - // Get an API client to the portlayer - client := c.client - if client == nil { - return nil, InternalServerError("Wait failed to create a portlayer client") - } - - params := containers.NewContainerWaitParamsWithContext(ctx). - WithTimeout(int64(timeout.Seconds())). - WithID(vc.ContainerID) - results, err := client.Containers.ContainerWait(params) - if err != nil { - switch err := err.(type) { - case *containers.ContainerWaitNotFound: - // since the container wasn't found on the PL lets remove from the local - // cache - cache.ContainerCache().DeleteContainer(vc.ContainerID) - return nil, NotFoundError(vc.ContainerID) - case *containers.ContainerWaitInternalServerError: - return nil, InternalServerError(err.Payload.Message) - default: - return nil, InternalServerError(err.Error()) - } - } - - if results == nil || results.Payload == nil { - return nil, InternalServerError("Unexpected swagger error") - } - - dockerState := convert.State(results.Payload) - if dockerState == nil { - return nil, InternalServerError("Unable to determine container state") - } - return dockerState, nil -} - -func (c *ContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error { - defer trace.End(trace.Begin(vc.ContainerID)) - - if vc == nil { - return InternalServerError("Signal bad arguments") - } - - // Get an API client to the portlayer - client := c.client - if client == nil { - return InternalServerError("Signal failed to create a portlayer client") - } - - if state, err := c.State(vc); !state.Running && err == nil { - return fmt.Errorf("%s is not running", vc.ContainerID) - } - - // If Docker CLI sends sig == 0, we use sigkill - if sig == 0 { - sig = 
uint64(syscall.SIGKILL) - } - params := containers.NewContainerSignalParamsWithContext(ctx).WithID(vc.ContainerID).WithSignal(int64(sig)) - if _, err := client.Containers.ContainerSignal(params); err != nil { - switch err := err.(type) { - case *containers.ContainerSignalNotFound: - return NotFoundError(vc.ContainerID) - case *containers.ContainerSignalInternalServerError: - return InternalServerError(err.Payload.Message) - default: - return InternalServerError(err.Error()) - } - } - - if state, err := c.State(vc); !state.Running && err == nil { - // unmap ports - if err = UnmapPorts(vc.ContainerID, vc); err != nil { - return err - } - } - - return nil -} - -func (c *ContainerProxy) Resize(id string, height, width int32) error { - defer trace.End(trace.Begin(id)) - - if c.client == nil { - return derr.NewErrorWithStatusCode(fmt.Errorf("ContainerProxy failed to create a portlayer client"), - http.StatusInternalServerError) - } - - plResizeParam := interaction.NewContainerResizeParamsWithContext(ctx). - WithID(id). - WithHeight(height). - WithWidth(width) - - _, err := c.client.Interaction.ContainerResize(plResizeParam) - if err != nil { - if _, isa := err.(*interaction.ContainerResizeNotFound); isa { - return ResourceNotFoundError(id, "interaction connection") - } - - // If we get here, most likely something went wrong with the port layer API server - return InternalServerError(err.Error()) - } - - return nil -} - -// AttachStreams takes the the hijacked connections from the calling client and attaches -// them to the 3 streams from the portlayer's rest server. -// stdin, stdout, stderr are the hijacked connection -func (c *ContainerProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error { - // Cancel will close the child connections. 
- var wg, outWg sync.WaitGroup - errors := make(chan error, 3) - - var keys []byte - var err error - if ac.DetachKeys != "" { - keys, err = term.ToBytes(ac.DetachKeys) - if err != nil { - return fmt.Errorf("Invalid escape keys (%s) provided", ac.DetachKeys) - } - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if ac.UseStdin { - wg.Add(1) - } - - if ac.UseStdout { - wg.Add(1) - outWg.Add(1) - } - - if ac.UseStderr { - wg.Add(1) - outWg.Add(1) - } - - // cancel stdin if all output streams are complete - go func() { - outWg.Wait() - cancel() - }() - - EOForCanceled := func(err error) bool { - return err != nil && ctx.Err() != context.Canceled && !strings.HasSuffix(err.Error(), swaggerSubstringEOF) - } - - if ac.UseStdin { - go func() { - defer wg.Done() - err := copyStdIn(ctx, c.client, ac, stdin, keys) - if err != nil { - log.Errorf("container attach: stdin (%s): %s", ac.ID, err) - } else { - log.Infof("container attach: stdin (%s) done", ac.ID) - } - - if !ac.CloseStdin || ac.UseTty { - cancel() - } - - // Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/ - if EOForCanceled(err) { - errors <- err - } - }() - } - - if ac.UseStdout { - go func() { - defer outWg.Done() - defer wg.Done() - - err := copyStdOut(ctx, c.client, ac, stdout, attachAttemptTimeout) - if err != nil { - log.Errorf("container attach: stdout (%s): %s", ac.ID, err) - } else { - log.Infof("container attach: stdout (%s) done", ac.ID) - } - - // Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/ - if EOForCanceled(err) { - errors <- err - } - }() - } - - if ac.UseStderr { - go func() { - defer outWg.Done() - defer wg.Done() - - err := copyStdErr(ctx, c.client, ac, stderr) - if err != nil { - log.Errorf("container attach: stderr (%s): %s", ac.ID, err) - } else { - log.Infof("container attach: stderr (%s) done", ac.ID) - } - - // Check for EOF or canceled context. 
We can only detect EOF by checking the error string returned by swagger :/ - if EOForCanceled(err) { - errors <- err - } - }() - } - - // Wait for all stream copy to exit - wg.Wait() - - // close the channel so that we don't leak (if there is an error)/or get blocked (if there are no errors) - close(errors) - - log.Infof("cleaned up connections to %s. Checking errors", ac.ID) - for err := range errors { - if err != nil { - // check if we got DetachError - if _, ok := err.(DetachError); ok { - log.Infof("Detached from container detected") - return err - } - - // If we get here, most likely something went wrong with the port layer API server - // These errors originate within the go-swagger client itself. - // Go-swagger returns untyped errors to us if the error is not one that we define - // in the swagger spec. Even EOF. Therefore, we must scan the error string (if there - // is an error string in the untyped error) for the term EOF. - log.Errorf("container attach error: %s", err) - - return err - } - } - - log.Infof("No error found. Returning nil...") - return nil -} - -// Rename calls the portlayer's RenameContainerHandler to update the container name in the handle, -// and then commit the new name to vSphere -func (c *ContainerProxy) Rename(vc *viccontainer.VicContainer, newName string) error { - defer trace.End(trace.Begin(vc.ContainerID)) - - //retrieve client to portlayer - handle, err := c.Handle(vc.ContainerID, vc.Name) - if err != nil { - return InternalServerError(err.Error()) - } - - if c.client == nil { - return InternalServerError("ContainerProxy.Rename failed to create a portlayer client") - } - - // Call the rename functionality in the portlayer. 
- renameParams := containers.NewContainerRenameParamsWithContext(ctx).WithName(newName).WithHandle(handle) - result, err := c.client.Containers.ContainerRename(renameParams) - if err != nil { - switch err := err.(type) { - // Here we don't check the portlayer error type for *containers.ContainerRenameConflict since - // (1) we already check that in persona cache for ConflictError and - // (2) the container name in portlayer cache will be updated when committing the handle in the next step - case *containers.ContainerRenameNotFound: - return NotFoundError(vc.Name) - default: - return InternalServerError(err.Error()) - } - } - - h := result.Payload - - // commit handle - _, err = c.client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(h)) - if err != nil { - switch err := err.(type) { - case *containers.CommitNotFound: - return NotFoundError(err.Payload.Message) - case *containers.CommitConflict: - return ConflictError(err.Payload.Message) - default: - return InternalServerError(err.Error()) - } - } - - return nil -} - -// Remove calls the portlayer's ContainerRemove handler to remove the container and its -// anonymous volumes if the remove flag is set. -func (c *ContainerProxy) Remove(vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error { - if c.client == nil { - return InternalServerError("ContainerProxy.Remove failed to get a portlayer client") - } - - id := vc.ContainerID - _, err := c.client.Containers.ContainerRemove(containers.NewContainerRemoveParamsWithContext(ctx).WithID(id)) - if err != nil { - switch err := err.(type) { - case *containers.ContainerRemoveNotFound: - // Remove container from persona cache, but don't return error to the user. 
- cache.ContainerCache().DeleteContainer(id) - return nil - case *containers.ContainerRemoveDefault: - return InternalServerError(err.Payload.Message) - case *containers.ContainerRemoveConflict: - return derr.NewRequestConflictError(fmt.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")) - case *containers.ContainerRemoveInternalServerError: - if err.Payload == nil || err.Payload.Message == "" { - return InternalServerError(err.Error()) - } - return InternalServerError(err.Payload.Message) - default: - return InternalServerError(err.Error()) - } - } - - // Once the container is removed, remove anonymous volumes (vc.Config.Volumes) if - // the remove flag is set. - if config.RemoveVolume && len(vc.Config.Volumes) > 0 { - removeAnonContainerVols(c.client, id, vc) - } - - return nil -} - -//---------- -// Utility Functions -//---------- - -// removeAnonContainerVols removes anonymous volumes joined to a container. It is invoked -// once the said container has been removed. It fetches a list of volumes that are joined -// to at least one other container, and calls the portlayer to remove this container's -// anonymous volumes if they are dangling. Errors, if any, are only logged. -func removeAnonContainerVols(pl *client.PortLayer, cID string, vc *viccontainer.VicContainer) { - // NOTE: these strings come in the form of :: - volumes := vc.Config.Volumes - // NOTE: these strings come in the form of : - namedVolumes := vc.HostConfig.Binds - - // assemble a mask of volume paths before processing binds. MUST be paths, as we want to move to honoring the proper metadata in the "volumes" section in the future. 
- namedMaskList := make(map[string]struct{}, 0) - for _, entry := range namedVolumes { - fields := strings.SplitN(entry, ":", 2) - if len(fields) != 2 { - log.Errorf("Invalid entry in the HostConfig.Binds metadata section for container %s: %s", cID, entry) - continue - } - destPath := fields[1] - namedMaskList[destPath] = struct{}{} - } - - joinedVols, err := fetchJoinedVolumes() - if err != nil { - log.Errorf("Unable to obtain joined volumes from portlayer, skipping removal of anonymous volumes for %s: %s", cID, err.Error()) - return - } - - for vol := range volumes { - // Extract the volume ID from the full mount path, which is of form "id:mountpath:flags" - see getMountString(). - volFields := strings.SplitN(vol, ":", 3) - - // NOTE(mavery): this check will start to fail when we fix our metadata correctness issues - if len(volFields) != 3 { - log.Debugf("Invalid entry in the volumes metadata section for container %s: %s", cID, vol) - continue - } - volName := volFields[0] - volPath := volFields[1] - - _, isNamed := namedMaskList[volPath] - _, joined := joinedVols[volName] - if !joined && !isNamed { - _, err := pl.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(volName)) - if err != nil { - log.Debugf("Unable to remove anonymous volume %s in container %s: %s", volName, cID, err.Error()) - continue - } - log.Debugf("Successfully removed anonymous volume %s during remove operation against container(%s)", volName, cID) - } - } -} - -func dockerContainerCreateParamsToTask(id string, cc types.ContainerCreateConfig) *tasks.JoinParams { - config := &models.TaskJoinConfig{} - - var path string - var args []string - - // we explicitly specify the ID for the primary task so that it's the same as the containerID - config.ID = id - - // Expand cmd into entrypoint and args - cmd := strslice.StrSlice(cc.Config.Cmd) - if len(cc.Config.Entrypoint) != 0 { - path, args = cc.Config.Entrypoint[0], append(cc.Config.Entrypoint[1:], cmd...) 
- } else { - path, args = cmd[0], cmd[1:] - } - - // copy the path - config.Path = path - - // copy the args - config.Args = make([]string, len(args)) - copy(config.Args, args) - - // copy the env array - config.Env = make([]string, len(cc.Config.Env)) - copy(config.Env, cc.Config.Env) - - // working dir - config.WorkingDir = cc.Config.WorkingDir - - // user - config.User = cc.Config.User - - // attach. Always set to true otherwise we cannot attach later. - // this tells portlayer container is attachable. - config.Attach = true - - // openstdin - config.OpenStdin = cc.Config.OpenStdin - - // tty - config.Tty = cc.Config.Tty - - // container stop signal - config.StopSignal = cc.Config.StopSignal - - log.Debugf("dockerContainerCreateParamsToTask = %+v", config) - - return tasks.NewJoinParamsWithContext(ctx).WithConfig(config) -} - -func dockerContainerCreateParamsToPortlayer(cc types.ContainerCreateConfig, vc *viccontainer.VicContainer, imageStore string) *containers.CreateParams { - config := &models.ContainerCreateConfig{} - - config.NumCpus = cc.HostConfig.CPUCount - config.MemoryMB = cc.HostConfig.Memory - - // Layer/vmdk to use - config.Layer = vc.LayerID - - // Image ID - config.Image = vc.ImageID - - // Repo Requested - config.RepoName = cc.Config.Image - - //copy friendly name - config.Name = cc.Name - - // image store - config.ImageStore = &models.ImageStore{Name: imageStore} - - // network - config.NetworkDisabled = cc.Config.NetworkDisabled - - // Stuff the Docker labels into VIC container annotations - if len(cc.Config.Labels) > 0 { - convert.SetContainerAnnotation(config, convert.AnnotationKeyLabels, cc.Config.Labels) - } - // if autoremove then add to annotation - if cc.HostConfig.AutoRemove { - convert.SetContainerAnnotation(config, convert.AnnotationKeyAutoRemove, cc.HostConfig.AutoRemove) - } - - // hostname - config.Hostname = cc.Config.Hostname - // domainname - https://github.com/moby/moby/issues/27067 - config.Domainname = cc.Config.Domainname - 
- log.Debugf("dockerContainerCreateParamsToPortlayer = %+v", config) - - return containers.NewCreateParamsWithContext(ctx).WithCreateConfig(config) -} - -func toModelsNetworkConfig(cc types.ContainerCreateConfig) *models.NetworkConfig { - if cc.Config.NetworkDisabled { - return nil - } - - nc := &models.NetworkConfig{ - NetworkName: cc.HostConfig.NetworkMode.NetworkName(), - } - - // Docker supports link for bridge network and user defined network, we should handle that - if len(cc.HostConfig.Links) > 0 { - nc.Aliases = append(nc.Aliases, cc.HostConfig.Links...) - } - - if cc.NetworkingConfig != nil { - log.Debugf("EndpointsConfig: %#v", cc.NetworkingConfig) - - es, ok := cc.NetworkingConfig.EndpointsConfig[nc.NetworkName] - if ok { - if es.IPAMConfig != nil { - nc.Address = es.IPAMConfig.IPv4Address - } - - // Pass Links and Aliases to PL - nc.Aliases = append(nc.Aliases, epoint.Alias(es)...) - } - } - - for p := range cc.HostConfig.PortBindings { - nc.Ports = append(nc.Ports, fromPortbinding(p, cc.HostConfig.PortBindings[p])...) - } - - return nc -} - -// fromPortbinding translate Port/PortBinding pair to string array with format "hostPort:containerPort/protocol" or -// "containerPort/protocol" if hostPort is empty -// HostIP is ignored here, cause VCH ip address might change. Will query back real interface address in docker ps -func fromPortbinding(port nat.Port, binding []nat.PortBinding) []string { - var portMappings []string - if len(binding) == 0 { - portMappings = append(portMappings, string(port)) - return portMappings - } - - proto, privatePort := nat.SplitProtoPort(string(port)) - for _, bind := range binding { - var portMap string - if bind.HostPort != "" { - portMap = fmt.Sprintf("%s:%s/%s", bind.HostPort, privatePort, proto) - } else { - portMap = string(port) - } - portMappings = append(portMappings, portMap) - } - return portMappings -} - -// processVolumeParam is used to turn any call from docker create -v into a volumeFields object. 
-// The -v has 3 forms. -v , -v : and -// -v :: -func processVolumeParam(volString string) (volumeFields, error) { - volumeStrings := strings.Split(volString, ":") - fields := volumeFields{} - - // Error out if the intended volume is a directory on the client filesystem. - numVolParams := len(volumeStrings) - if numVolParams > 1 && strings.HasPrefix(volumeStrings[0], "/") { - return volumeFields{}, InvalidVolumeError{} - } - - // This switch determines which type of -v was invoked. - switch numVolParams { - case 1: - VolumeID, err := uuid.NewUUID() - if err != nil { - return fields, err - } - fields.ID = VolumeID.String() - fields.Dest = volumeStrings[0] - fields.Flags = "rw" - case 2: - fields.ID = volumeStrings[0] - fields.Dest = volumeStrings[1] - fields.Flags = "rw" - case 3: - fields.ID = volumeStrings[0] - fields.Dest = volumeStrings[1] - fields.Flags = volumeStrings[2] - default: - // NOTE: the docker cli should cover this case. This is here for posterity. - return volumeFields{}, InvalidBindError{volume: volString} - } - return fields, nil -} - -// processVolumeFields parses fields for volume mappings specified in a create/run -v. -// It returns a map of unique mountable volumes. This means that it removes dupes favoring -// specified volumes over anonymous volumes. 
-func processVolumeFields(volumes []string) (map[string]volumeFields, error) { - volumeFields := make(map[string]volumeFields) - - for _, v := range volumes { - fields, err := processVolumeParam(v) - log.Infof("Processed volume arguments: %#v", fields) - if err != nil { - return nil, err - } - volumeFields[fields.Dest] = fields - } - return volumeFields, nil -} - -func finalizeVolumeList(specifiedVolumes, anonymousVolumes []string) ([]volumeFields, error) { - log.Infof("Specified Volumes : %#v", specifiedVolumes) - processedVolumes, err := processVolumeFields(specifiedVolumes) - if err != nil { - return nil, err - } - - log.Infof("anonymous Volumes : %#v", anonymousVolumes) - processedAnonVolumes, err := processVolumeFields(anonymousVolumes) - if err != nil { - return nil, err - } - - //combine all volumes, specified volumes are taken over anonymous volumes - for k, v := range processedVolumes { - processedAnonVolumes[k] = v - } - - finalizedVolumes := make([]volumeFields, 0, len(processedAnonVolumes)) - for _, v := range processedAnonVolumes { - finalizedVolumes = append(finalizedVolumes, v) - } - return finalizedVolumes, nil -} - -//------------------------------------- -// Inspect Utility Functions -//------------------------------------- - -// ContainerInfoToDockerContainerInspect takes a ContainerInfo swagger-based struct -// returned from VIC's port layer and creates an engine-api based container inspect struct. 
-// There maybe other asset gathering if ContainerInfo does not have all the information -func ContainerInfoToDockerContainerInspect(vc *viccontainer.VicContainer, info *models.ContainerInfo, portlayerName string) (*types.ContainerJSON, error) { - if vc == nil || info == nil || info.ContainerConfig == nil { - return nil, NotFoundError(fmt.Sprintf("No such container: %s", vc.ContainerID)) - } - // get the docker state - containerState := convert.State(info) - - inspectJSON := &types.ContainerJSON{ - ContainerJSONBase: &types.ContainerJSONBase{ - State: containerState, - ResolvConfPath: "", - HostnamePath: "", - HostsPath: "", - Driver: portlayerName, - MountLabel: "", - ProcessLabel: "", - AppArmorProfile: "", - ExecIDs: vc.List(), - HostConfig: hostConfigFromContainerInfo(vc, info, portlayerName), - GraphDriver: types.GraphDriverData{Name: portlayerName}, - SizeRw: nil, - SizeRootFs: nil, - }, - Mounts: mountsFromContainer(vc), - Config: containerConfigFromContainerInfo(vc, info), - NetworkSettings: networkFromContainerInfo(vc, info), - } - - if inspectJSON.NetworkSettings != nil { - log.Debugf("Docker inspect - network settings = %#v", inspectJSON.NetworkSettings) - } else { - log.Debug("Docker inspect - network settings = nil") - } - - if info.ProcessConfig != nil { - inspectJSON.Path = info.ProcessConfig.ExecPath - if len(info.ProcessConfig.ExecArgs) > 0 { - // args[0] is the command and should not appear in the args list here - inspectJSON.Args = info.ProcessConfig.ExecArgs[1:] - } - } - - if info.ContainerConfig != nil { - // set the status to the inspect expected values - containerState.Status = filter.DockerState(info.ContainerConfig.State) - - // https://github.com/docker/docker/blob/master/container/state.go#L77 - if containerState.Status == ContainerStopped { - containerState.Status = ContainerExited - } - - inspectJSON.Image = info.ContainerConfig.ImageID - inspectJSON.LogPath = info.ContainerConfig.LogPath - inspectJSON.RestartCount = 
int(info.ContainerConfig.RestartCount) - inspectJSON.ID = info.ContainerConfig.ContainerID - inspectJSON.Created = time.Unix(0, info.ContainerConfig.CreateTime).Format(time.RFC3339Nano) - if len(info.ContainerConfig.Names) > 0 { - inspectJSON.Name = fmt.Sprintf("/%s", info.ContainerConfig.Names[0]) - } - } - - return inspectJSON, nil -} - -// hostConfigFromContainerInfo() gets the hostconfig that is passed to the backend during -// docker create and updates any needed info -func hostConfigFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo, portlayerName string) *container.HostConfig { - if vc == nil || vc.HostConfig == nil || info == nil { - return nil - } - - // Create a copy of the created container's hostconfig. This is passed in during - // container create - hostConfig := *vc.HostConfig - - // Resources don't really map well to VIC so we leave most of them empty. If we look - // at the struct in engine-api/types/container/host_config.go, Microsoft added - // additional attributes to the struct that are applicable to Windows containers. - // If understanding VIC's host resources are desirable, we should go down this - // same route. - // - // The values we fill out below is an abridged list of the original struct. - resourceConfig := container.Resources{ - // Applicable to all platforms - // CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. 
other containers) - // Memory int64 // Memory limit (in bytes) - - // // Applicable to UNIX platforms - // DiskQuota int64 // Disk limit (in bytes) - } - - hostConfig.VolumeDriver = portlayerName - hostConfig.Resources = resourceConfig - hostConfig.DNS = make([]string, 0) - - if len(info.Endpoints) > 0 { - for _, ep := range info.Endpoints { - for _, dns := range ep.Nameservers { - if dns != "" { - hostConfig.DNS = append(hostConfig.DNS, dns) - } - } - } - - hostConfig.NetworkMode = container.NetworkMode(info.Endpoints[0].Scope) - } - - hostConfig.PortBindings = portMapFromContainer(vc, info) - - // Set this to json-file to force the docker CLI to allow us to use docker logs - hostConfig.LogConfig.Type = forceLogType - - // get the autoremove annotation from the container annotations - convert.ContainerAnnotation(info.ContainerConfig.Annotations, convert.AnnotationKeyAutoRemove, &hostConfig.AutoRemove) - - return &hostConfig -} - -// mountsFromContainer derives []types.MountPoint (used in inspect) from the cached container -// data. -func mountsFromContainer(vc *viccontainer.VicContainer) []types.MountPoint { - if vc == nil { - return nil - } - - var mounts []types.MountPoint - - rawAnonVolumes := make([]string, 0, len(vc.Config.Volumes)) - for k := range vc.Config.Volumes { - rawAnonVolumes = append(rawAnonVolumes, k) - } - - volList, err := finalizeVolumeList(vc.HostConfig.Binds, rawAnonVolumes) - if err != nil { - return mounts - } - - for _, vol := range volList { - mountConfig := types.MountPoint{ - Type: mount.TypeVolume, - Driver: DefaultVolumeDriver, - Name: vol.ID, - Source: vol.ID, - Destination: vol.Dest, - RW: false, - Mode: vol.Flags, - } - - if strings.Contains(strings.ToLower(vol.Flags), "rw") { - mountConfig.RW = true - } - mounts = append(mounts, mountConfig) - } - - return mounts -} - -// containerConfigFromContainerInfo() returns a container.Config that has attributes -// overridden at create or start time. This is important. 
This function is called -// to help build the Container Inspect struct. That struct contains the original -// container config that is part of the image metadata AND the overridden container -// config. The user can override these via the remote API or the docker CLI. -func containerConfigFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo) *container.Config { - if vc == nil || vc.Config == nil || info == nil || info.ContainerConfig == nil || info.ProcessConfig == nil { - return nil - } - - // Copy the working copy of our container's config - container := *vc.Config - - if info.ContainerConfig.ContainerID != "" { - container.Hostname = stringid.TruncateID(info.ContainerConfig.ContainerID) // Hostname - } - if info.ContainerConfig.AttachStdin != nil { - container.AttachStdin = *info.ContainerConfig.AttachStdin // Attach the standard input, makes possible user interaction - } - if info.ContainerConfig.AttachStdout != nil { - container.AttachStdout = *info.ContainerConfig.AttachStdout // Attach the standard output - } - if info.ContainerConfig.AttachStderr != nil { - container.AttachStderr = *info.ContainerConfig.AttachStderr // Attach the standard error - } - if info.ContainerConfig.Tty != nil { - container.Tty = *info.ContainerConfig.Tty // Attach standard streams to a tty, including stdin if it is not closed. - } - if info.ContainerConfig.OpenStdin != nil { - container.OpenStdin = *info.ContainerConfig.OpenStdin - } - // They are not coming from PL so set them to true unconditionally - container.StdinOnce = true - - if info.ContainerConfig.RepoName != nil { - container.Image = *info.ContainerConfig.RepoName // Name of the image as it was passed by the operator (eg. 
could be symbolic) - } - - // Fill in information about the process - if info.ProcessConfig.Env != nil { - container.Env = info.ProcessConfig.Env // List of environment variable to set in the container - } - - if info.ProcessConfig.WorkingDir != nil { - container.WorkingDir = *info.ProcessConfig.WorkingDir // Current directory (PWD) in the command will be launched - } - - container.User = info.ProcessConfig.User - - // Fill in information about the container network - if info.Endpoints == nil { - container.NetworkDisabled = true - } else { - container.NetworkDisabled = false - container.MacAddress = "" - container.ExposedPorts = vc.Config.ExposedPorts - } - - // Get the original container config from the image's metadata in our image cache. - var imageConfig *metadata.ImageConfig - - if info.ContainerConfig.LayerID != "" { - // #nosec: Errors unhandled. - imageConfig, _ = cache.ImageCache().Get(info.ContainerConfig.LayerID) - } - - // Fill in the values with defaults from the original image's container config - // structure - if imageConfig != nil { - container.StopSignal = imageConfig.ContainerConfig.StopSignal // Signal to stop a container - - container.OnBuild = imageConfig.ContainerConfig.OnBuild // ONBUILD metadata that were defined on the image Dockerfile - } - - // Pull labels from the annotation - convert.ContainerAnnotation(info.ContainerConfig.Annotations, convert.AnnotationKeyLabels, &container.Labels) - return &container -} - -func networkFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo) *types.NetworkSettings { - networks := &types.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: "", - SandboxID: "", - HairpinMode: false, - LinkLocalIPv6Address: "", - LinkLocalIPv6PrefixLen: 0, - Ports: portMapFromContainer(vc, info), - SandboxKey: "", - SecondaryIPAddresses: nil, - SecondaryIPv6Addresses: nil, - }, - Networks: make(map[string]*dnetwork.EndpointSettings), - } - - shortCID := 
vc.ContainerID[0:ShortIDLen] - - // Fill in as much info from the endpoint struct inside of the ContainerInfo. - // The rest of the data must be obtained from the Scopes portlayer. - for _, ep := range info.Endpoints { - netEp := &dnetwork.EndpointSettings{ - IPAMConfig: nil, //Get from Scope PL - Links: nil, - Aliases: nil, - NetworkID: "", //Get from Scope PL - EndpointID: ep.ID, - Gateway: ep.Gateway, - IPAddress: "", - IPPrefixLen: 0, //Get from Scope PL - IPv6Gateway: "", //Get from Scope PL - GlobalIPv6Address: "", //Get from Scope PL - GlobalIPv6PrefixLen: 0, //Get from Scope PL - MacAddress: "", //Container endpoints currently do not have mac addr yet - } - - if ep.Address != "" { - ip, ipnet, err := net.ParseCIDR(ep.Address) - if err == nil { - netEp.IPAddress = ip.String() - netEp.IPPrefixLen, _ = ipnet.Mask.Size() - } - } - - if len(ep.Aliases) > 0 { - netEp.Aliases = make([]string, len(ep.Aliases)) - found := false - for i, alias := range ep.Aliases { - netEp.Aliases[i] = alias - if alias == shortCID { - found = true - } - } - - if !found { - netEp.Aliases = append(netEp.Aliases, vc.ContainerID[0:ShortIDLen]) - } - } - - networks.Networks[ep.Scope] = netEp - } - - return networks -} - -// portMapFromContainer constructs a docker portmap from the container's -// info as returned by the portlayer and adds nil entries for any exposed ports -// that are unmapped -func portMapFromContainer(vc *viccontainer.VicContainer, t *models.ContainerInfo) nat.PortMap { - var mappings nat.PortMap - - if t != nil { - mappings = addDirectEndpointsToPortMap(t.Endpoints, mappings) - } - if vc != nil && vc.Config != nil { - if vc.NATMap != nil { - // if there's a NAT map for the container then just use that for the indirect port set - mappings = mergePortMaps(vc.NATMap, mappings) - } else { - // if there's no NAT map then we use the backend data every time - mappings = addIndirectEndpointsToPortMap(t.Endpoints, mappings) - } - mappings = addExposedToPortMap(vc.Config, 
mappings) - } - - return mappings -} - -// mergePortMaps creates a new map containing the union of the two arguments -func mergePortMaps(map1, map2 nat.PortMap) nat.PortMap { - resultMap := make(map[nat.Port][]nat.PortBinding) - for k, v := range map1 { - resultMap[k] = v - } - - for k, v := range map2 { - vr := resultMap[k] - resultMap[k] = append(vr, v...) - } - - return resultMap -} - -// addIndirectEndpointToPortMap constructs a docker portmap from the container's info as returned by the portlayer for those ports that -// require NAT forward on the endpointVM. -// The portMap provided is modified and returned - the return value should always be used. -func addIndirectEndpointsToPortMap(endpoints []*models.EndpointConfig, portMap nat.PortMap) nat.PortMap { - if len(endpoints) == 0 { - return portMap - } - - // will contain a combined set of port mappings - if portMap == nil { - portMap = make(nat.PortMap) - } - - // add IP address into port spec to allow direct usage of data returned by calls such as docker port - var ip string - ips, _ := publicIPv4Addrs() - if len(ips) > 0 { - ip = ips[0] - } - - // Preserve the existing behaviour if we do not have an IP for some reason. - if ip == "" { - ip = "0.0.0.0" - } - - for _, ep := range endpoints { - if ep.Direct { - continue - } - - for _, port := range ep.Ports { - mappings, err := nat.ParsePortSpec(port) - if err != nil { - log.Error(err) - // just continue if we do have partial port data - } - - for i := range mappings { - p := mappings[i].Port - b := mappings[i].Binding - - if b.HostIP == "" { - b.HostIP = ip - } - - if mappings[i].Binding.HostPort == "" { - // leave this undefined for dynamic assignment - // TODO: for port stability over VCH restart we would expect to set the dynamically assigned port - // recorded in containerVM annotations here, so that the old host->port mapping is preserved. 
- } - - log.Debugf("Adding indirect mapping for port %v: %v (%s)", p, b, port) - - current, _ := portMap[p] - portMap[p] = append(current, b) - } - } - } - - return portMap -} - -// addDirectEndpointsToPortMap constructs a docker portmap from the container's info as returned by the portlayer for those -// ports exposed directly from the containerVM via container network -// The portMap provided is modified and returned - the return value should always be used. -func addDirectEndpointsToPortMap(endpoints []*models.EndpointConfig, portMap nat.PortMap) nat.PortMap { - if len(endpoints) == 0 { - return portMap - } - - if portMap == nil { - portMap = make(nat.PortMap) - } - - for _, ep := range endpoints { - if !ep.Direct { - continue - } - - // add IP address into the port spec to allow direct usage of data returned by calls such as docker port - var ip string - rawIP, _, _ := net.ParseCIDR(ep.Address) - if rawIP != nil { - ip = rawIP.String() - } - - if ip == "" { - ip = "0.0.0.0" - } - - for _, port := range ep.Ports { - mappings, err := nat.ParsePortSpec(port) - if err != nil { - log.Error(err) - // just continue if we do have partial port data - } - - for i := range mappings { - if mappings[i].Binding.HostIP == "" { - mappings[i].Binding.HostIP = ip - } - - if mappings[i].Binding.HostPort == "" { - // If there's no explicit host port and it's a direct endpoint, then - // mirror the actual port. It's a bit misleading but we're trying to - // pack extended function into an existing structure. - _, p := nat.SplitProtoPort(string(mappings[i].Port)) - mappings[i].Binding.HostPort = p - } - } - - for _, mapping := range mappings { - p := mapping.Port - current, _ := portMap[p] - portMap[p] = append(current, mapping.Binding) - } - } - } - - return portMap -} - -// addExposedToPortMap ensures that exposed ports are all present in the port map. -// This means nil entries for any exposed ports that are not mapped. 
-// The portMap provided is modified and returned - the return value should always be used. -func addExposedToPortMap(config *container.Config, portMap nat.PortMap) nat.PortMap { - if config == nil || len(config.ExposedPorts) == 0 { - return portMap - } - - if portMap == nil { - portMap = make(nat.PortMap) - } - - for p := range config.ExposedPorts { - if _, ok := portMap[p]; ok { - continue - } - - portMap[p] = nil - } - - return portMap -} - -func ContainerInfoToVicContainer(info models.ContainerInfo) *viccontainer.VicContainer { - vc := viccontainer.NewVicContainer() - - if info.ContainerConfig.ContainerID != "" { - vc.ContainerID = info.ContainerConfig.ContainerID - } - - log.Debugf("Convert container info to vic container: %s", vc.ContainerID) - - if len(info.ContainerConfig.Names) > 0 { - vc.Name = info.ContainerConfig.Names[0] - log.Debugf("Container %q", vc.Name) - } - - if info.ContainerConfig.LayerID != "" { - vc.LayerID = info.ContainerConfig.LayerID - } - - if info.ContainerConfig.ImageID != "" { - vc.ImageID = info.ContainerConfig.ImageID - } - - tempVC := viccontainer.NewVicContainer() - tempVC.HostConfig = &container.HostConfig{} - vc.Config = containerConfigFromContainerInfo(tempVC, &info) - vc.HostConfig = hostConfigFromContainerInfo(tempVC, &info, PortLayerName()) - - // FIXME: duplicate Config.Volumes and HostConfig.Binds here for can not derive them from persisted value right now. 
- // get volumes from volume config - vc.Config.Volumes = make(map[string]struct{}, len(info.VolumeConfig)) - vc.HostConfig.Binds = []string{} - for _, volume := range info.VolumeConfig { - mount := getMountString(volume.Name, volume.MountPoint, volume.Flags[constants.Mode]) - vc.Config.Volumes[mount] = struct{}{} - vc.HostConfig.Binds = append(vc.HostConfig.Binds, mount) - log.Debugf("add volume mount %s to config.volumes and hostconfig.binds", mount) - } - - vc.Config.Cmd = info.ProcessConfig.ExecArgs - - return vc -} - -// getMountString returns a colon-delimited string containing a volume's name/ID, mount -// point and flags. -func getMountString(mounts ...string) string { - return strings.Join(mounts, ":") -} - -//------------------------------------ -// ContainerAttach() Utility Functions -//------------------------------------ - -func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdin io.ReadCloser, keys []byte) error { - // Pipe for stdin so we can interject and watch the input streams for detach keys. - stdinReader, stdinWriter := io.Pipe() - defer stdinReader.Close() - - var detach bool - - done := make(chan struct{}) - go func() { - // make sure we get out of io.Copy if context is canceled - select { - case <-ctx.Done(): - // This will cause the transport to the API client to be shut down, so all output - // streams will get closed as well. - // See the closer in container_routes.go:postContainersAttach - - // We're closing this here to disrupt the io.Copy below - // TODO: seems like we should be providing an io.Copy impl with ctx argument that honors - // cancelation with the amount of code dedicated to working around it - - // TODO: I think this still leaves a race between closing of the API client transport and - // copying of the output streams, it's just likely the error will be dropped as the transport is - // closed when it occurs. - // We should move away from needing to close transports to interrupt reads. 
- stdin.Close() - case <-done: - } - }() - - go func() { - defer close(done) - defer stdinWriter.Close() - - // Copy the stdin from the CLI and write to a pipe. We need to do this so we can - // watch the stdin stream for the detach keys. - var err error - - // Write some init bytes into the pipe to force Swagger to make the initial - // call to the portlayer, prior to any user input in whatever attach client - // he/she is using. - log.Debugf("copyStdIn writing primer bytes") - stdinWriter.Write([]byte(attachStdinInitString)) - if ac.UseTty { - _, err = copyEscapable(stdinWriter, stdin, keys) - } else { - _, err = io.Copy(stdinWriter, stdin) - } - - if err != nil { - if _, ok := err.(DetachError); ok { - log.Infof("stdin detach detected") - detach = true - } else { - log.Errorf("stdin err: %s", err) - } - } - }() - - id := ac.ID - - // Swagger wants an io.reader so give it the reader pipe. Also, the swagger call - // to set the stdin is synchronous so we need to run in a goroutine - setStdinParams := interaction.NewContainerSetStdinParamsWithContext(ctx).WithID(id) - setStdinParams = setStdinParams.WithRawStream(stdinReader) - - _, err := pl.Interaction.ContainerSetStdin(setStdinParams) - <-done - - if ac.CloseStdin && !ac.UseTty { - // Close the stdin connection. Mimicing Docker's behavior. - log.Errorf("Attach stream has stdinOnce set. 
Closing the stdin.") - params := interaction.NewContainerCloseStdinParamsWithContext(ctx).WithID(id) - _, err := pl.Interaction.ContainerCloseStdin(params) - if err != nil { - log.Errorf("CloseStdin failed with %s", err) - } - } - - // ignore the portlayer error when it is DetachError as that is what we should return to the caller when we detach - if detach { - return DetachError{} - } - - return err -} - -func copyStdOut(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdout io.Writer, attemptTimeout time.Duration) error { - id := ac.ID - - //Calculate how much time to let portlayer attempt - plAttemptTimeout := attemptTimeout - attachPLAttemptDiff //assumes personality deadline longer than portlayer's deadline - plAttemptDeadline := time.Now().Add(plAttemptTimeout) - swaggerDeadline := strfmt.DateTime(plAttemptDeadline) - log.Debugf("* stdout portlayer deadline: %s", plAttemptDeadline.Format(time.UnixDate)) - log.Debugf("* stdout personality deadline: %s", time.Now().Add(attemptTimeout).Format(time.UnixDate)) - - log.Debugf("* stdout attach start %s", time.Now().Format(time.UnixDate)) - getStdoutParams := interaction.NewContainerGetStdoutParamsWithContext(ctx).WithID(id).WithDeadline(&swaggerDeadline) - _, err := pl.Interaction.ContainerGetStdout(getStdoutParams, stdout) - log.Debugf("* stdout attach end %s", time.Now().Format(time.UnixDate)) - if err != nil { - if _, ok := err.(*interaction.ContainerGetStdoutNotFound); ok { - return ResourceNotFoundError(id, "interaction connection") - } - - return InternalServerError(err.Error()) - } - - return nil -} - -func copyStdErr(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stderr io.Writer) error { - id := ac.ID - - getStderrParams := interaction.NewContainerGetStderrParamsWithContext(ctx).WithID(id) - _, err := pl.Interaction.ContainerGetStderr(getStderrParams, stderr) - if err != nil { - if _, ok := err.(*interaction.ContainerGetStderrNotFound); ok { - ResourceNotFoundError(id, 
"interaction connection") - } - - return InternalServerError(err.Error()) - } - - return nil -} - -// FIXME: Move this function to a pkg to show it's origination from Docker once -// we have ignore capabilities in our header-check.sh that checks for copyright -// header. -// Code c/c from io.Copy() modified by Docker to handle escape sequence -// Begin - -// DetachError is special error which returned in case of container detach. -type DetachError struct{} - -func (DetachError) Error() string { - return "detached from container" -} - -func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { - if len(keys) == 0 { - // Default keys : ctrl-p ctrl-q - keys = []byte{16, 17} - } - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - src.Close() - return 0, DetachError{} - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - // ---- End of docker - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -// End diff --git a/lib/apiservers/engine/backends/container_test.go b/lib/apiservers/engine/backends/container_test.go index 81f62dd577..5545aed59a 100644 --- a/lib/apiservers/engine/backends/container_test.go +++ b/lib/apiservers/engine/backends/container_test.go @@ -36,13 +36,13 @@ import ( "github.com/vmware/vic/lib/apiservers/engine/backends/cache" viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" "github.com/vmware/vic/lib/apiservers/engine/backends/convert" + 
"github.com/vmware/vic/lib/apiservers/engine/network" + "github.com/vmware/vic/lib/apiservers/engine/proxy" plclient "github.com/vmware/vic/lib/apiservers/portlayer/client" plscopes "github.com/vmware/vic/lib/apiservers/portlayer/client/scopes" plmodels "github.com/vmware/vic/lib/apiservers/portlayer/models" - "github.com/vmware/vic/lib/archive" "github.com/vmware/vic/lib/config/executor" "github.com/vmware/vic/lib/metadata" - "github.com/vmware/vic/pkg/trace" ) //*********** @@ -104,6 +104,12 @@ type MockContainerProxy struct { mockCommitData []CommitHandleMockData } +type MockStorageProxy struct { +} + +type MockStreamProxy struct { +} + const ( SUCCESS = 0 dummyContainerID = "abc123" @@ -137,6 +143,14 @@ func NewMockContainerProxy() *MockContainerProxy { } } +func NewMockStorageProxy() *MockStorageProxy { + return &MockStorageProxy{} +} + +func NewMockStreamProxy() *MockStreamProxy { + return &MockStreamProxy{} +} + func MockCreateHandleData() []CreateHandleMockData { createHandleTimeoutErr := runtime.NewAPIError("unknown error", "context deadline exceeded", http.StatusServiceUnavailable) @@ -214,11 +228,11 @@ func (m *MockContainerProxy) SetMockDataResponse(createHandleResp int, addToScop m.mockRespIndices[5] = commitContainerResp } -func (m *MockContainerProxy) Handle(id, name string) (string, error) { +func (m *MockContainerProxy) Handle(ctx context.Context, id, name string) (string, error) { return "", nil } -func (m *MockContainerProxy) CreateContainerHandle(vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) { +func (m *MockContainerProxy) CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) { respIdx := m.mockRespIndices[0] if respIdx >= len(m.mockCreateHandleData) { @@ -227,7 +241,7 @@ func (m *MockContainerProxy) CreateContainerHandle(vc *viccontainer.VicContainer return m.mockCreateHandleData[respIdx].retID, 
m.mockCreateHandleData[respIdx].retHandle, m.mockCreateHandleData[respIdx].retErr } -func (m *MockContainerProxy) CreateContainerTask(handle string, id string, config types.ContainerCreateConfig) (string, error) { +func (m *MockContainerProxy) CreateContainerTask(ctx context.Context, handle string, id string, config types.ContainerCreateConfig) (string, error) { respIdx := m.mockRespIndices[0] if respIdx >= len(m.mockCreateHandleData) { @@ -236,7 +250,7 @@ func (m *MockContainerProxy) CreateContainerTask(handle string, id string, confi return m.mockCreateHandleData[respIdx].retHandle, m.mockCreateHandleData[respIdx].retErr } -func (m *MockContainerProxy) AddContainerToScope(handle string, config types.ContainerCreateConfig) (string, error) { +func (m *MockContainerProxy) AddContainerToScope(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { respIdx := m.mockRespIndices[1] if respIdx >= len(m.mockAddToScopeData) { @@ -246,7 +260,7 @@ func (m *MockContainerProxy) AddContainerToScope(handle string, config types.Con return m.mockAddToScopeData[respIdx].retHandle, m.mockAddToScopeData[respIdx].retErr } -func (m *MockContainerProxy) AddVolumesToContainer(handle string, config types.ContainerCreateConfig) (string, error) { +func (m *MockContainerProxy) AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { respIdx := m.mockRespIndices[2] if respIdx >= len(m.mockAddVolumesData) { @@ -256,7 +270,7 @@ func (m *MockContainerProxy) AddVolumesToContainer(handle string, config types.C return m.mockAddVolumesData[respIdx].retHandle, m.mockAddVolumesData[respIdx].retErr } -func (m *MockContainerProxy) AddInteractionToContainer(handle string, config types.ContainerCreateConfig) (string, error) { +func (m *MockContainerProxy) AddInteractionToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { respIdx := m.mockRespIndices[3] if respIdx >= 
len(m.mockAddInteractionData) { @@ -266,7 +280,7 @@ func (m *MockContainerProxy) AddInteractionToContainer(handle string, config typ return m.mockAddInteractionData[respIdx].retHandle, m.mockAddInteractionData[respIdx].retErr } -func (m *MockContainerProxy) AddLoggingToContainer(handle string, config types.ContainerCreateConfig) (string, error) { +func (m *MockContainerProxy) AddLoggingToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { respIdx := m.mockRespIndices[4] if respIdx >= len(m.mockAddLoggingData) { @@ -276,19 +290,19 @@ func (m *MockContainerProxy) AddLoggingToContainer(handle string, config types.C return m.mockAddLoggingData[respIdx].retHandle, m.mockAddLoggingData[respIdx].retErr } -func (m *MockContainerProxy) BindInteraction(handle string, name string, id string) (string, error) { +func (m *MockContainerProxy) BindInteraction(ctx context.Context, handle string, name string, id string) (string, error) { return "", nil } -func (m *MockContainerProxy) CreateExecTask(handle string, config *types.ExecConfig) (string, string, error) { +func (m *MockContainerProxy) CreateExecTask(ctx context.Context, handle string, config *types.ExecConfig) (string, string, error) { return "", "", nil } -func (m *MockContainerProxy) UnbindInteraction(handle string, name string, id string) (string, error) { +func (m *MockContainerProxy) UnbindInteraction(ctx context.Context, handle string, name string, id string) (string, error) { return "", nil } -func (m *MockContainerProxy) CommitContainerHandle(handle, containerID string, waitTime int32) error { +func (m *MockContainerProxy) CommitContainerHandle(ctx context.Context, handle, containerID string, waitTime int32) error { respIdx := m.mockRespIndices[5] if respIdx >= len(m.mockCommitData) { @@ -302,34 +316,11 @@ func (m *MockContainerProxy) Client() *plclient.PortLayer { return nil } -func (m *MockContainerProxy) StreamContainerLogs(_ context.Context, name string, out 
io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error { - if name == "" { - return fmt.Errorf("sample error message") - } - - var lineCount int64 = 10 - - close(started) - - for i := int64(0); i < lineCount; i++ { - if !followLogs && i > tailLines { - break - } - if followLogs && i > tailLines { - time.Sleep(500 * time.Millisecond) - } - - fmt.Fprintf(out, "line %d\n", i) - } - +func (m *MockContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error { return nil } -func (m *MockContainerProxy) Stop(vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error { - return nil -} - -func (m *MockContainerProxy) State(vc *viccontainer.VicContainer) (*types.ContainerState, error) { +func (m *MockContainerProxy) State(ctx context.Context, vc *viccontainer.VicContainer) (*types.ContainerState, error) { // Assume container is running if container in cache. If we need other conditions // in the future, we can add it, but for now, just assume running. 
c := cache.ContainerCache().GetContainer(vc.ContainerID) @@ -344,28 +335,24 @@ func (m *MockContainerProxy) State(vc *viccontainer.VicContainer) (*types.Contai return state, nil } -func (m *MockContainerProxy) Wait(vc *viccontainer.VicContainer, timeout time.Duration) (*types.ContainerState, error) { +func (m *MockContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer, timeout time.Duration) (*types.ContainerState, error) { dockerState := &types.ContainerState{ExitCode: 0} return dockerState, nil } -func (m *MockContainerProxy) Signal(vc *viccontainer.VicContainer, sig uint64) error { - return nil -} - -func (m *MockContainerProxy) Resize(id string, height, width int32) error { +func (m *MockContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContainer, sig uint64) error { return nil } -func (m *MockContainerProxy) Rename(vc *viccontainer.VicContainer, newName string) error { +func (m *MockContainerProxy) Resize(ctx context.Context, id string, height, width int32) error { return nil } -func (m *MockContainerProxy) Remove(vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error { +func (m *MockContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContainer, newName string) error { return nil } -func (m *MockContainerProxy) AttachStreams(ctx context.Context, ac *AttachConfig, clStdin io.ReadCloser, clStdout, clStderr io.Writer) error { +func (m *MockContainerProxy) Remove(ctx context.Context, vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error { return nil } @@ -373,19 +360,11 @@ func (m *MockContainerProxy) StreamContainerStats(ctx context.Context, config *c return nil } -func (m *MockContainerProxy) StatPath(op trace.Operation, sotre, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) { - return nil, nil -} - -func (m *MockContainerProxy) GetContainerChanges(op trace.Operation, vc *viccontainer.VicContainer, data bool) (io.ReadCloser, error) { - return nil, nil -} - 
-func (m *MockContainerProxy) UnbindContainerFromNetwork(vc *viccontainer.VicContainer, handle string) (string, error) { +func (m *MockContainerProxy) UnbindContainerFromNetwork(ctx context.Context, vc *viccontainer.VicContainer, handle string) (string, error) { return "", nil } -func (m *MockContainerProxy) exitCode(vc *viccontainer.VicContainer) (string, error) { +func (m *MockContainerProxy) ExitCode(ctx context.Context, vc *viccontainer.VicContainer) (string, error) { return "", nil } @@ -449,6 +428,57 @@ func AddMockContainerToCache() { } } +func (s *MockStorageProxy) Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { + return nil, nil +} + +func (s *MockStorageProxy) VolumeList(ctx context.Context, filter string) ([]*plmodels.VolumeResponse, error) { + return nil, nil +} + +func (s *MockStorageProxy) VolumeInfo(ctx context.Context, name string) (*plmodels.VolumeResponse, error) { + return nil, nil +} + +func (s *MockStorageProxy) Remove(ctx context.Context, name string) error { + return nil +} + +func (s *MockStorageProxy) AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { + return "", nil +} + +func (sp *MockStreamProxy) AttachStreams(ctx context.Context, ac *proxy.AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error { + return nil +} + +func (sp *MockStreamProxy) StreamContainerLogs(_ context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error { + if name == "" { + return fmt.Errorf("sample error message") + } + + var lineCount int64 = 10 + + close(started) + + for i := int64(0); i < lineCount; i++ { + if !followLogs && i > tailLines { + break + } + if followLogs && i > tailLines { + time.Sleep(500 * time.Millisecond) + } + + fmt.Fprintf(out, "line %d\n", i) + } + + return nil +} + +func (sp *MockStreamProxy) StreamContainerStats(ctx 
context.Context, config *convert.ContainerStatsConfig) error { + return nil +} + //*********** // Tests //*********** @@ -459,7 +489,7 @@ func TestContainerCreateEmptyImageCache(t *testing.T) { mockContainerProxy := NewMockContainerProxy() // Create our personality Container backend - cb := &Container{ + cb := &ContainerBackend{ containerProxy: mockContainerProxy, } @@ -483,7 +513,7 @@ func TestCreateHandle(t *testing.T) { mockContainerProxy := NewMockContainerProxy() // Create our personality Container backend - cb := &Container{ + cb := &ContainerBackend{ containerProxy: mockContainerProxy, } @@ -528,7 +558,7 @@ func TestContainerAddToScope(t *testing.T) { mockContainerProxy := NewMockContainerProxy() // Create our personality Container backend - cb := &Container{ + cb := &ContainerBackend{ containerProxy: mockContainerProxy, } @@ -565,10 +595,12 @@ func TestContainerAddToScope(t *testing.T) { // possible input/outputs for committing the handle and calls vicbackends.ContainerCreate() func TestCommitHandle(t *testing.T) { mockContainerProxy := NewMockContainerProxy() + mockStorageProxy := NewMockStorageProxy() // Create our personality Container backend - cb := &Container{ + cb := &ContainerBackend{ containerProxy: mockContainerProxy, + storageProxy: mockStorageProxy, } AddMockImageToCache() @@ -603,11 +635,10 @@ func TestCommitHandle(t *testing.T) { // TestContainerLogs() tests the docker logs api when user asks for entire log func TestContainerLogs(t *testing.T) { - mockContainerProxy := NewMockContainerProxy() - // Create our personality Container backend - cb := &Container{ - containerProxy: mockContainerProxy, + cb := &ContainerBackend{ + containerProxy: NewMockContainerProxy(), + streamProxy: NewMockStreamProxy(), } // Prepopulate our image and container cache with dummy data @@ -738,20 +769,20 @@ func TestPortInformation(t *testing.T) { co.Name = "bar" cache.ContainerCache().AddContainer(co) - // unless there are entries in containerByPort we won't report 
them as bound - ports := portForwardingInformation(mockContainerInfo, ips) + // unless there are entries in vicnetwork.ContainerByPort we won't report them as bound + ports := network.PortForwardingInformation(co, ips) assert.Empty(t, ports, "There should be no bound IPs at this point for forwarding") // the current port binding should show up as a direct port - ports = directPortInformation(mockContainerInfo) + ports = network.DirectPortInformation(mockContainerInfo) assert.NotEmpty(t, ports, "There should be a direct port") - containerByPort["8000"] = containerID - ports = portForwardingInformation(mockContainerInfo, ips) + network.ContainerByPort["8000"] = containerID + ports = network.PortForwardingInformation(co, ips) assert.NotEmpty(t, ports, "There should be bound IPs") assert.Equal(t, 1, len(ports), "Expected 1 port binding, found %d", len(ports)) // now that this port presents as a forwarded port it should NOT present as a direct port - ports = directPortInformation(mockContainerInfo) + ports = network.DirectPortInformation(mockContainerInfo) assert.Empty(t, ports, "There should not be a direct port") port, _ = nat.NewPort("tcp", "80") @@ -763,8 +794,8 @@ func TestPortInformation(t *testing.T) { // forwarding of 00 should never happen, but this is allowing us to confirm that // it's kicked out by the function even if present in the map - containerByPort["00"] = containerID - ports = portForwardingInformation(mockContainerInfo, ips) + network.ContainerByPort["00"] = containerID + ports = network.PortForwardingInformation(co, ips) assert.NotEmpty(t, ports, "There should be 1 bound IP") assert.Equal(t, 1, len(ports), "Expected 1 port binding, found %d", len(ports)) @@ -774,8 +805,8 @@ func TestPortInformation(t *testing.T) { HostPort: "800", } portMap[port] = portBindings - containerByPort["800"] = containerID - ports = portForwardingInformation(mockContainerInfo, ips) + network.ContainerByPort["800"] = containerID + ports = 
network.PortForwardingInformation(co, ips) assert.Equal(t, 2, len(ports), "Expected 2 port binding, found %d", len(ports)) } @@ -799,7 +830,7 @@ func TestCreateConfigNetworkMode(t *testing.T) { assert.Equal(t, mockConfig.HostConfig.NetworkMode.NetworkName(), "net1", "expected NetworkMode is net1, found %s", mockConfig.HostConfig.NetworkMode) - // container connects to two network endpoints; check for NetworkMode error + // container connects to two vicnetwork endpoints; check for NetworkMode error mockConfig.NetworkingConfig.EndpointsConfig["net2"] = &dnetwork.EndpointSettings{} err := validateCreateConfig(&mockConfig) diff --git a/lib/apiservers/engine/backends/endpoint.go b/lib/apiservers/engine/backends/endpoint.go index d1f0bff038..e6610b960e 100644 --- a/lib/apiservers/engine/backends/endpoint.go +++ b/lib/apiservers/engine/backends/endpoint.go @@ -43,18 +43,18 @@ func (e *endpoint) Name() string { return e.ep.Name } -// Network returns the name of the network to which this endpoint is attached. +// Network returns the name of the vicnetwork to which this endpoint is attached. func (e *endpoint) Network() string { return e.ep.Scope } // Join joins the sandbox to the endpoint and populates into the sandbox -// the network resources allocated for the endpoint. +// the vicnetwork resources allocated for the endpoint. func (e *endpoint) Join(sandbox libnetwork.Sandbox, options ...libnetwork.EndpointOption) error { return notImplementedError } -// Leave detaches the network resources populated in the sandbox. +// Leave detaches the vicnetwork resources populated in the sandbox. func (e *endpoint) Leave(sandbox libnetwork.Sandbox, options ...libnetwork.EndpointOption) error { return notImplementedError } @@ -69,7 +69,7 @@ func (e *endpoint) DriverInfo() (map[string]interface{}, error) { return nil, notImplementedError } -// Delete and detaches this endpoint from the network. +// Delete and detaches this endpoint from the vicnetwork. 
func (e *endpoint) Delete(force bool) error { return notImplementedError } @@ -94,8 +94,8 @@ func (e *endpoint) GatewayIPv6() net.IP { return nil } -// StaticRoutes returns the list of static routes configured by the network -// driver when the container joins a network +// StaticRoutes returns the list of static routes configured by the vicnetwork +// driver when the container joins a vicnetwork func (e *endpoint) StaticRoutes() []*types.StaticRoute { return nil } diff --git a/lib/apiservers/engine/backends/eventmonitor.go b/lib/apiservers/engine/backends/eventmonitor.go index 19892894d2..b7381170b9 100644 --- a/lib/apiservers/engine/backends/eventmonitor.go +++ b/lib/apiservers/engine/backends/eventmonitor.go @@ -36,6 +36,9 @@ import ( eventtypes "github.com/docker/docker/api/types/events" "github.com/vmware/vic/lib/apiservers/engine/backends/cache" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/engine/network" + "github.com/vmware/vic/lib/apiservers/engine/proxy" "github.com/vmware/vic/lib/apiservers/portlayer/client/events" plevents "github.com/vmware/vic/lib/portlayer/event/events" "github.com/vmware/vic/pkg/trace" @@ -85,22 +88,22 @@ func (ep PlEventProxy) StreamEvents(ctx context.Context, out io.Writer) error { plClient := PortLayerClient() if plClient == nil { - return InternalServerError("eventproxy.StreamEvents failed to get a portlayer client") + return errors.InternalServerError("eventproxy.StreamEvents failed to get a portlayer client") } params := events.NewGetEventsParamsWithContext(ctx) if _, err := plClient.Events.GetEvents(params, out); err != nil { switch err := err.(type) { case *events.GetEventsInternalServerError: - return InternalServerError("Server error from the events port layer") + return errors.InternalServerError("Server error from the events port layer") default: //Check for EOF. 
Since the connection, transport, and data handling are //encapsulated inside of Swagger, we can only detect EOF by checking the //error string - if strings.Contains(err.Error(), swaggerSubstringEOF) { + if strings.Contains(err.Error(), proxy.SwaggerSubstringEOF) { return nil } - return InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + return errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) } } @@ -242,7 +245,7 @@ func (p DockerEventPublisher) PublishEvent(event plevents.BaseEvent) { go func() { attrs = make(map[string]string) // get the containerEngine - code, _ := NewContainerBackend().containerProxy.exitCode(vc) + code, _ := NewContainerBackend().containerProxy.ExitCode(context.Background(), vc) log.Infof("Sending die event for container(%s) with exitCode[%s] - eventID(%s)", containerShortID, code, event.ID) // if the docker client is unable to convert the code to an int the client will return 125 @@ -251,7 +254,7 @@ func (p DockerEventPublisher) PublishEvent(event plevents.BaseEvent) { EventService().Log(containerDieEvent, eventtypes.ContainerEventType, actor) // TODO: this really, really shouldn't be in the event publishing code - it's fine to have multiple consumers of events // and this should be registered as a callback by the logic responsible for the MapPorts portion. - if err := UnmapPorts(vc.ContainerID, vc); err != nil { + if err := network.UnmapPorts(vc.ContainerID, vc); err != nil { log.Errorf("Event Monitor failed to unmap ports for container(%s): %s - eventID(%s)", containerShortID, err, event.ID) } @@ -274,7 +277,7 @@ func (p DockerEventPublisher) PublishEvent(event plevents.BaseEvent) { // pop the destroy event... 
actor := CreateContainerEventActorWithAttributes(vc, attrs) EventService().Log(containerDestroyEvent, eventtypes.ContainerEventType, actor) - if err := UnmapPorts(vc.ContainerID, vc); err != nil { + if err := network.UnmapPorts(vc.ContainerID, vc); err != nil { log.Errorf("Event Monitor failed to unmap ports for container(%s): %s - eventID(%s)", containerShortID, err, event.ID) } // remove from the container cache... diff --git a/lib/apiservers/engine/backends/executor/SwarmBackend.go b/lib/apiservers/engine/backends/executor/SwarmBackend.go index 9ec6d2051b..21395cbca2 100644 --- a/lib/apiservers/engine/backends/executor/SwarmBackend.go +++ b/lib/apiservers/engine/backends/executor/SwarmBackend.go @@ -15,10 +15,11 @@ package executor import ( - "fmt" "io" "time" + "golang.org/x/net/context" + "github.com/docker/distribution" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" @@ -34,7 +35,8 @@ import ( "github.com/docker/libnetwork/cluster" networktypes "github.com/docker/libnetwork/types" "github.com/docker/swarmkit/agent/exec" - "golang.org/x/net/context" + + "github.com/vmware/vic/lib/apiservers/engine/errors" ) type SwarmBackend struct { @@ -138,7 +140,7 @@ func (b SwarmBackend) SetClusterProvider(provider cluster.Provider) { } func (b SwarmBackend) IsSwarmCompatible() error { - return fmt.Errorf("vSphere Integrated Containers Engine does not yet support Docker Swarm.") + return errors.SwarmNotSupportedError() } func (b SwarmBackend) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { diff --git a/lib/apiservers/engine/backends/image.go b/lib/apiservers/engine/backends/image.go index 65a009e6de..666e5577de 100644 --- a/lib/apiservers/engine/backends/image.go +++ b/lib/apiservers/engine/backends/image.go @@ -36,6 +36,7 @@ import ( "github.com/vmware/vic/lib/apiservers/engine/backends/cache" vicfilter "github.com/vmware/vic/lib/apiservers/engine/backends/filter" + 
"github.com/vmware/vic/lib/apiservers/engine/errors" "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" "github.com/vmware/vic/lib/imagec" "github.com/vmware/vic/lib/metadata" @@ -59,19 +60,19 @@ var unSupportedImageFilters = map[string]bool{ "dangling": false, } -type Image struct { +type ImageBackend struct { } -func NewImageBackend() *Image { - return &Image{} +func NewImageBackend() *ImageBackend { + return &ImageBackend{} } -func (i *Image) Exists(containerName string) bool { +func (i *ImageBackend) Exists(containerName string) bool { return false } // TODO fix the errors so the client doesnt print the generic POST or DELETE message -func (i *Image) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { +func (i *ImageBackend) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { defer trace.End(trace.Begin(imageRef)) var ( @@ -208,11 +209,11 @@ func (i *Image) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDe return deletedRes, err } -func (i *Image) ImageHistory(imageName string) ([]*types.ImageHistory, error) { - return nil, fmt.Errorf("%s does not yet implement image.History", ProductName()) +func (i *ImageBackend) ImageHistory(imageName string) ([]*types.ImageHistory, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "ImageHistory") } -func (i *Image) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { +func (i *ImageBackend) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { defer trace.End(trace.Begin(fmt.Sprintf("imageFilters: %#v", imageFilters))) // validate filters for accuracy and support @@ -263,7 +264,7 @@ imageLoop: // Docker Inspect. LookupImage looks up an image by name and returns it as an // ImageInspect structure. 
-func (i *Image) LookupImage(name string) (*types.ImageInspect, error) { +func (i *ImageBackend) LookupImage(name string) (*types.ImageInspect, error) { defer trace.End(trace.Begin("LookupImage (docker inspect)")) imageConfig, err := cache.ImageCache().Get(name) @@ -274,7 +275,7 @@ func (i *Image) LookupImage(name string) (*types.ImageInspect, error) { return imageConfigToDockerImageInspect(imageConfig, ProductName()), nil } -func (i *Image) TagImage(imageName, repository, tag string) error { +func (i *ImageBackend) TagImage(imageName, repository, tag string) error { img, err := cache.ImageCache().Get(imageName) if err != nil { return err @@ -302,23 +303,23 @@ func (i *Image) TagImage(imageName, repository, tag string) error { return nil } -func (i *Image) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { - return nil, fmt.Errorf("%s does not yet implement image.ImagesPrune", ProductName()) +func (i *ImageBackend) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "ImagesPrune") } -func (i *Image) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - return fmt.Errorf("%s does not yet implement image.LoadImage", ProductName()) +func (i *ImageBackend) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + return errors.APINotSupportedMsg(ProductName(), "LoadImage") } -func (i *Image) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - return fmt.Errorf("%s does not yet implement image.ImportImage", ProductName()) +func (i *ImageBackend) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { + return errors.APINotSupportedMsg(ProductName(), "ImportImage") } -func (i *Image) ExportImage(names []string, outStream io.Writer) error { - return fmt.Errorf("%s does not yet 
implement image.ExportImage", ProductName()) +func (i *ImageBackend) ExportImage(names []string, outStream io.Writer) error { + return errors.APINotSupportedMsg(ProductName(), "ExportImage") } -func (i *Image) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { +func (i *ImageBackend) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { defer trace.End(trace.Begin("")) log.Debugf("PullImage: image = %s, tag = %s, metaheaders = %+v\n", image, tag, metaHeaders) @@ -409,12 +410,12 @@ func (i *Image) PullImage(ctx context.Context, image, tag string, metaHeaders ma return nil } -func (i *Image) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - return fmt.Errorf("%s does not yet implement image.PushImage", ProductName()) +func (i *ImageBackend) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { + return errors.APINotSupportedMsg(ProductName(), "PushImage") } -func (i *Image) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) { - return nil, fmt.Errorf("%s does not yet implement image.SearchRegistryForImages", ProductName()) +func (i *ImageBackend) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "SearchRegistryForImages") } // Utility functions diff --git a/lib/apiservers/engine/backends/network.go b/lib/apiservers/engine/backends/network.go index 51b78d06cf..b2f468915b 100644 --- 
a/lib/apiservers/engine/backends/network.go +++ b/lib/apiservers/engine/backends/network.go @@ -35,24 +35,25 @@ import ( "github.com/vmware/vic/lib/apiservers/engine/backends/cache" "github.com/vmware/vic/lib/apiservers/engine/backends/convert" vicendpoint "github.com/vmware/vic/lib/apiservers/engine/backends/endpoint" + "github.com/vmware/vic/lib/apiservers/engine/errors" "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" "github.com/vmware/vic/lib/apiservers/portlayer/client/scopes" "github.com/vmware/vic/lib/apiservers/portlayer/models" "github.com/vmware/vic/pkg/retry" ) -type Network struct { +type NetworkBackend struct { } -func NewNetworkBackend() *Network { - return &Network{} +func NewNetworkBackend() *NetworkBackend { + return &NetworkBackend{} } -func (n *Network) NetworkControllerEnabled() bool { +func (n *NetworkBackend) NetworkControllerEnabled() bool { return false } -func (n *Network) FindNetwork(idName string) (libnetwork.Network, error) { +func (n *NetworkBackend) FindNetwork(idName string) (libnetwork.Network, error) { ok, err := PortLayerClient().Scopes.List(scopes.NewListParamsWithContext(ctx).WithIDName(idName)) if err != nil { switch err := err.(type) { @@ -67,10 +68,10 @@ func (n *Network) FindNetwork(idName string) (libnetwork.Network, error) { } } - return &network{cfg: ok.Payload[0]}, nil + return &vicnetwork{cfg: ok.Payload[0]}, nil } -func (n *Network) GetNetworkByName(idName string) (libnetwork.Network, error) { +func (n *NetworkBackend) GetNetworkByName(idName string) (libnetwork.Network, error) { ok, err := PortLayerClient().Scopes.List(scopes.NewListParamsWithContext(ctx).WithIDName(idName)) if err != nil { switch err := err.(type) { @@ -85,10 +86,10 @@ func (n *Network) GetNetworkByName(idName string) (libnetwork.Network, error) { } } - return &network{cfg: ok.Payload[0]}, nil + return &vicnetwork{cfg: ok.Payload[0]}, nil } -func (n *Network) GetNetworksByID(partialID string) []libnetwork.Network { +func (n 
*NetworkBackend) GetNetworksByID(partialID string) []libnetwork.Network { ok, err := PortLayerClient().Scopes.List(scopes.NewListParamsWithContext(ctx).WithIDName(partialID)) if err != nil { return nil @@ -96,13 +97,13 @@ func (n *Network) GetNetworksByID(partialID string) []libnetwork.Network { nets := make([]libnetwork.Network, len(ok.Payload)) for i, cfg := range ok.Payload { - nets[i] = &network{cfg: cfg} + nets[i] = &vicnetwork{cfg: cfg} } return nets } -func (n *Network) GetNetworks() []libnetwork.Network { +func (n *NetworkBackend) GetNetworks() []libnetwork.Network { ok, err := PortLayerClient().Scopes.ListAll(scopes.NewListAllParamsWithContext(ctx)) if err != nil { return nil @@ -110,14 +111,14 @@ func (n *Network) GetNetworks() []libnetwork.Network { nets := make([]libnetwork.Network, len(ok.Payload)) for i, cfg := range ok.Payload { - nets[i] = &network{cfg: cfg} + nets[i] = &vicnetwork{cfg: cfg} i++ } return nets } -func (n *Network) CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { +func (n *NetworkBackend) CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { if nc.IPAM != nil && len(nc.IPAM.Config) > 1 { return nil, fmt.Errorf("at most one ipam config supported") } @@ -165,7 +166,7 @@ func (n *Network) CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCr if err != nil { switch err := err.(type) { case *scopes.CreateScopeConflict: - return nil, derr.NewErrorWithStatusCode(fmt.Errorf("network %s already exists", nc.Name), http.StatusConflict) + return nil, derr.NewErrorWithStatusCode(fmt.Errorf("vicnetwork %s already exists", nc.Name), http.StatusConflict) case *scopes.CreateScopeDefault: return nil, derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError) @@ -190,7 +191,7 @@ func isCommitConflictError(err error) bool { return isConflictErr } -// connectContainerToNetwork performs portlayer operations to connect a container to a container 
network. +// connectContainerToNetwork performs portlayer operations to connect a container to a container vicnetwork. func connectContainerToNetwork(containerName, networkName string, endpointConfig *apinet.EndpointSettings) error { client := PortLayerClient() getRes, err := client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(containerName)) @@ -289,9 +290,9 @@ func connectContainerToNetwork(containerName, networkName string, endpointConfig return err } -// ConnectContainerToNetwork connects a container to a container network. It wraps the portlayer operations +// ConnectContainerToNetwork connects a container to a container vicnetwork. It wraps the portlayer operations // in a retry for when there's a conflict error received, such as one during a similar concurrent operation. -func (n *Network) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *apinet.EndpointSettings) error { +func (n *NetworkBackend) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *apinet.EndpointSettings) error { vc := cache.ContainerCache().GetContainer(containerName) if vc != nil { containerName = vc.ContainerID @@ -320,15 +321,15 @@ func (n *Network) ConnectContainerToNetwork(containerName, networkName string, e return nil } -func (n *Network) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { +func (n *NetworkBackend) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { vc := cache.ContainerCache().GetContainer(containerName) if vc != nil { containerName = vc.ContainerID } - return fmt.Errorf("%s does not yet implement network.DisconnectContainerFromNetwork", ProductName()) + return errors.APINotSupportedMsg(ProductName(), "DisconnectContainerFromNetwork") } -func (n *Network) DeleteNetwork(name string) error { +func (n *NetworkBackend) DeleteNetwork(name string) error { client := PortLayerClient() if _, err := 
client.Scopes.DeleteScope(scopes.NewDeleteScopeParamsWithContext(ctx).WithIDName(name)); err != nil { @@ -347,45 +348,45 @@ func (n *Network) DeleteNetwork(name string) error { return nil } -func (n *Network) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { - return nil, fmt.Errorf("%s does not yet implement NetworksPrune", ProductName()) +func (n *NetworkBackend) NetworksPrune(pruneFilters filters.Args) (*types.NetworksPruneReport, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "NetworksPrune") } -// network implements the libnetwork.Network and libnetwork.NetworkInfo interfaces -type network struct { +// vicnetwork implements the libnetwork.Network and libnetwork.NetworkInfo interfaces +type vicnetwork struct { sync.Mutex cfg *models.ScopeConfig } -// A user chosen name for this network. -func (n *network) Name() string { +// A user chosen name for this vicnetwork. +func (n *vicnetwork) Name() string { return n.cfg.Name } -// A system generated id for this network. -func (n *network) ID() string { +// A system generated id for this vicnetwork. +func (n *vicnetwork) ID() string { return n.cfg.ID } -// The type of network, which corresponds to its managing driver. -func (n *network) Type() string { +// The type of vicnetwork, which corresponds to its managing driver. +func (n *vicnetwork) Type() string { return n.cfg.ScopeType } -// Create a new endpoint to this network symbolically identified by the +// Create a new endpoint to this vicnetwork symbolically identified by the // specified unique name. The options parameter carry driver specific options. -func (n *network) CreateEndpoint(name string, options ...libnetwork.EndpointOption) (libnetwork.Endpoint, error) { +func (n *vicnetwork) CreateEndpoint(name string, options ...libnetwork.EndpointOption) (libnetwork.Endpoint, error) { return nil, fmt.Errorf("not implemented") } -// Delete the network. -func (n *network) Delete() error { +// Delete the vicnetwork. 
+func (n *vicnetwork) Delete() error { return fmt.Errorf("not implemented") } -// Endpoints returns the list of Endpoint(s) in this network. -func (n *network) Endpoints() []libnetwork.Endpoint { +// Endpoints returns the list of Endpoint(s) in this vicnetwork. +func (n *vicnetwork) Endpoints() []libnetwork.Endpoint { eps := make([]libnetwork.Endpoint, len(n.cfg.Endpoints)) for i, e := range n.cfg.Endpoints { eps[i] = &endpoint{ep: e, sc: n.cfg} @@ -395,7 +396,7 @@ func (n *network) Endpoints() []libnetwork.Endpoint { } // WalkEndpoints uses the provided function to walk the Endpoints -func (n *network) WalkEndpoints(walker libnetwork.EndpointWalker) { +func (n *vicnetwork) WalkEndpoints(walker libnetwork.EndpointWalker) { for _, e := range n.cfg.Endpoints { if walker(&endpoint{ep: e, sc: n.cfg}) { return @@ -404,7 +405,7 @@ func (n *network) WalkEndpoints(walker libnetwork.EndpointWalker) { } // EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned. -func (n *network) EndpointByName(name string) (libnetwork.Endpoint, error) { +func (n *vicnetwork) EndpointByName(name string) (libnetwork.Endpoint, error) { for _, e := range n.cfg.Endpoints { if e.Name == name { return &endpoint{ep: e, sc: n.cfg}, nil @@ -415,7 +416,7 @@ func (n *network) EndpointByName(name string) (libnetwork.Endpoint, error) { } // EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned. 
-func (n *network) EndpointByID(id string) (libnetwork.Endpoint, error) { +func (n *vicnetwork) EndpointByID(id string) (libnetwork.Endpoint, error) { for _, e := range n.cfg.Endpoints { if e.ID == id { return &endpoint{ep: e, sc: n.cfg}, nil @@ -425,12 +426,12 @@ func (n *network) EndpointByID(id string) (libnetwork.Endpoint, error) { return nil, fmt.Errorf("not found") } -// Return certain operational data belonging to this network -func (n *network) Info() libnetwork.NetworkInfo { +// Return certain operational data belonging to this vicnetwork +func (n *vicnetwork) Info() libnetwork.NetworkInfo { return n } -func (n *network) IpamConfig() (string, map[string]string, []*libnetwork.IpamConf, []*libnetwork.IpamConf) { +func (n *vicnetwork) IpamConfig() (string, map[string]string, []*libnetwork.IpamConf, []*libnetwork.IpamConf) { n.Lock() defer n.Unlock() @@ -455,7 +456,7 @@ func (n *network) IpamConfig() (string, map[string]string, []*libnetwork.IpamCon return "", make(map[string]string), confs, nil } -func (n *network) IpamInfo() ([]*libnetwork.IpamInfo, []*libnetwork.IpamInfo) { +func (n *vicnetwork) IpamInfo() ([]*libnetwork.IpamInfo, []*libnetwork.IpamInfo) { n.Lock() defer n.Unlock() @@ -485,27 +486,27 @@ func (n *network) IpamInfo() ([]*libnetwork.IpamInfo, []*libnetwork.IpamInfo) { return infos, nil } -func (n *network) DriverOptions() map[string]string { +func (n *vicnetwork) DriverOptions() map[string]string { return make(map[string]string) } -func (n *network) Scope() string { +func (n *vicnetwork) Scope() string { return "" } -func (n *network) IPv6Enabled() bool { +func (n *vicnetwork) IPv6Enabled() bool { return false } -func (n *network) Internal() bool { +func (n *vicnetwork) Internal() bool { n.Lock() defer n.Unlock() return n.cfg.Internal } -// Labels decodes and unmarshals the stored blob of network labels. -func (n *network) Labels() map[string]string { +// Labels decodes and unmarshals the stored blob of vicnetwork labels. 
+func (n *vicnetwork) Labels() map[string]string { n.Lock() defer n.Unlock() @@ -529,22 +530,22 @@ func (n *network) Labels() map[string]string { return labels } -func (n *network) Attachable() bool { +func (n *vicnetwork) Attachable() bool { return false //? } -func (n *network) Dynamic() bool { +func (n *vicnetwork) Dynamic() bool { return false //? } -func (n *network) Created() time.Time { +func (n *vicnetwork) Created() time.Time { return time.Now() } // Peers returns a slice of PeerInfo structures which has the information about the peer -// nodes participating in the same overlay network. This is currently the per-network +// nodes participating in the same overlay vicnetwork. This is currently the per-vicnetwork // gossip cluster. For non-dynamic overlay networks and bridge networks it returns an // empty slice -func (n *network) Peers() []networkdb.PeerInfo { +func (n *vicnetwork) Peers() []networkdb.PeerInfo { return nil } diff --git a/lib/apiservers/engine/backends/plugins.go b/lib/apiservers/engine/backends/plugins.go index b564f72707..7b4331987a 100644 --- a/lib/apiservers/engine/backends/plugins.go +++ b/lib/apiservers/engine/backends/plugins.go @@ -15,58 +15,59 @@ package backends import ( - "fmt" "io" "net/http" enginetypes "github.com/docker/docker/api/types" "github.com/docker/docker/reference" "golang.org/x/net/context" + + "github.com/vmware/vic/lib/apiservers/engine/errors" ) -type Plugin struct { +type PluginBackend struct { } -func NewPluginBackend() *Plugin { - return &Plugin{} +func NewPluginBackend() *PluginBackend { + return &PluginBackend{} } -func (p *Plugin) Disable(name string, config *enginetypes.PluginDisableConfig) error { - return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Enable(name string, config *enginetypes.PluginEnableConfig) error { 
- return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) List() ([]enginetypes.Plugin, error) { - return nil, fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) List() ([]enginetypes.Plugin, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Inspect(name string) (*enginetypes.Plugin, error) { - return nil, PluginNotFoundError(name) +func (p *PluginBackend) Inspect(name string) (*enginetypes.Plugin, error) { + return nil, errors.PluginNotFoundError(name) } -func (p *Plugin) Remove(name string, config *enginetypes.PluginRmConfig) error { - return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Remove(name string, config *enginetypes.PluginRmConfig) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Set(name string, args []string) error { - return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Set(name string, args []string) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) { - return nil, fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error { - return 
fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error { - return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } -func (p *Plugin) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error { - return fmt.Errorf("%s does not yet support plugins", ProductName()) +func (p *PluginBackend) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error { + return errors.APINotSupportedMsg(ProductName(), "plugins") } diff --git a/lib/apiservers/engine/backends/swarm.go b/lib/apiservers/engine/backends/swarm.go index 624b83e364..0396c7771e 100644 --- a/lib/apiservers/engine/backends/swarm.go +++ b/lib/apiservers/engine/backends/swarm.go @@ -15,114 +15,114 @@ package backends import ( - "fmt" - "golang.org/x/net/context" basictypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" types "github.com/docker/docker/api/types/swarm" + + "github.com/vmware/vic/lib/apiservers/engine/errors" ) -type Swarm struct { +type SwarmBackend struct { } -func NewSwarmBackend() *Swarm { - return &Swarm{} +func NewSwarmBackend() *SwarmBackend { + return &SwarmBackend{} } -func (s *Swarm) Init(req types.InitRequest) (string, error) { - return "", fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func 
(s *SwarmBackend) Init(req types.InitRequest) (string, error) { + return "", errors.SwarmNotSupportedError() } -func (s *Swarm) Join(req types.JoinRequest) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) Join(req types.JoinRequest) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) Leave(force bool) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) Leave(force bool) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) Inspect() (types.Swarm, error) { - return types.Swarm{}, SwarmNotSupportedError() +func (s *SwarmBackend) Inspect() (types.Swarm, error) { + return types.Swarm{}, errors.SwarmNotSupportedError() } -func (s *Swarm) Update(uint64, types.Spec, types.UpdateFlags) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) Update(uint64, types.Spec, types.UpdateFlags) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) GetUnlockKey() (string, error) { - return "", fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) GetUnlockKey() (string, error) { + return "", errors.SwarmNotSupportedError() } -func (s *Swarm) UnlockSwarm(req types.UnlockRequest) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) UnlockSwarm(req types.UnlockRequest) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) GetServices(basictypes.ServiceListOptions) ([]types.Service, error) { - return nil, SwarmNotSupportedError() +func (s *SwarmBackend) GetServices(basictypes.ServiceListOptions) ([]types.Service, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) GetService(string) (types.Service, error) { - return types.Service{}, SwarmNotSupportedError() +func (s *SwarmBackend) GetService(string) (types.Service, error) { + return types.Service{}, 
errors.SwarmNotSupportedError() } -func (s *Swarm) CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) { - return nil, fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error) { - return nil, fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) UpdateService(string, uint64, types.ServiceSpec, string, string) (*basictypes.ServiceUpdateResponse, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) RemoveService(string) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) RemoveService(string) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) GetNodes(basictypes.NodeListOptions) ([]types.Node, error) { - return nil, SwarmNotSupportedError() +func (s *SwarmBackend) GetNodes(basictypes.NodeListOptions) ([]types.Node, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) GetNode(string) (types.Node, error) { - return types.Node{}, SwarmNotSupportedError() +func (s *SwarmBackend) GetNode(string) (types.Node, error) { + return types.Node{}, errors.SwarmNotSupportedError() } -func (s *Swarm) UpdateNode(string, uint64, types.NodeSpec) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) UpdateNode(string, uint64, 
types.NodeSpec) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) RemoveNode(string, bool) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) RemoveNode(string, bool) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) GetTasks(basictypes.TaskListOptions) ([]types.Task, error) { - return nil, SwarmNotSupportedError() +func (s *SwarmBackend) GetTasks(basictypes.TaskListOptions) ([]types.Task, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) GetTask(string) (types.Task, error) { - return types.Task{}, SwarmNotSupportedError() +func (s *SwarmBackend) GetTask(string) (types.Task, error) { + return types.Task{}, errors.SwarmNotSupportedError() } -func (s *Swarm) GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) { - return nil, fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) { + return nil, errors.SwarmNotSupportedError() } -func (s *Swarm) CreateSecret(sp types.SecretSpec) (string, error) { - return "", fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) CreateSecret(sp types.SecretSpec) (string, error) { + return "", errors.SwarmNotSupportedError() } -func (s *Swarm) RemoveSecret(id string) error { - return fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) RemoveSecret(id string) error { + return errors.SwarmNotSupportedError() } -func (s *Swarm) GetSecret(id string) (types.Secret, error) { - return types.Secret{}, fmt.Errorf("%s does not yet support Docker Swarm", ProductName()) +func (s *SwarmBackend) GetSecret(id string) (types.Secret, error) { + return types.Secret{}, errors.SwarmNotSupportedError() } -func (s *Swarm) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { - return fmt.Errorf("%s does not yet support Docker Swarm", 
ProductName()) +func (s *SwarmBackend) UpdateSecret(id string, version uint64, spec types.SecretSpec) error { + return errors.SwarmNotSupportedError() } diff --git a/lib/apiservers/engine/backends/system.go b/lib/apiservers/engine/backends/system.go index 75f8fbf862..b2e639b846 100644 --- a/lib/apiservers/engine/backends/system.go +++ b/lib/apiservers/engine/backends/system.go @@ -18,12 +18,12 @@ package backends // system.go // // Rules for code to be in here: -// 1. No remote or swagger calls. Move those code to system_portlayer.go +// 1. No remote or swagger calls. Move those code to ../proxy/system_proxy.go // 2. Always return docker engine-api compatible errors. // - Do NOT return fmt.Errorf() // - Do NOT return errors.New() // - DO USE the aliased docker error package 'derr' -// - It is OK to return errors returned from functions in system_portlayer.go +// - It is OK to return errors returned from functions in system_proxy.go import ( "crypto/x509" @@ -31,6 +31,7 @@ import ( "net/url" "runtime" "strings" + "sync" "time" "golang.org/x/net/context" @@ -38,6 +39,8 @@ import ( log "github.com/Sirupsen/logrus" "github.com/vmware/vic/lib/apiservers/engine/backends/cache" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/engine/proxy" "github.com/vmware/vic/lib/apiservers/portlayer/client" "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" "github.com/vmware/vic/lib/imagec" @@ -54,8 +57,8 @@ import ( "github.com/docker/go-units" ) -type System struct { - systemProxy VicSystemProxy +type SystemBackend struct { + systemProxy proxy.VicSystemProxy } const ( @@ -75,21 +78,24 @@ const ( ) // var for use by other engine components -var systemBackend *System +var systemBackend *SystemBackend +var sysOnce sync.Once -func NewSystemBackend() *System { - systemBackend = &System{ - systemProxy: &SystemProxy{}, - } +func NewSystemBackend() *SystemBackend { + sysOnce.Do(func() { + systemBackend = &SystemBackend{ + 
systemProxy: proxy.NewSystemProxy(PortLayerClient()), + } + }) return systemBackend } -func (s *System) SystemInfo() (*types.Info, error) { +func (s *SystemBackend) SystemInfo() (*types.Info, error) { defer trace.End(trace.Begin("SystemInfo")) client := PortLayerClient() // Retrieve container status from port layer - running, paused, stopped, err := s.systemProxy.ContainerCount() + running, paused, stopped, err := s.systemProxy.ContainerCount(context.Background()) if err != nil { log.Infof("System.SytemInfo unable to get global status on containers: %s", err.Error()) } @@ -152,7 +158,7 @@ func (s *System) SystemInfo() (*types.Info, error) { NoProxy: "", } - // Add in network info from the VCH via guestinfo + // Add in vicnetwork info from the VCH via guestinfo for _, network := range cfg.ContainerNetworks { info.Plugins.Network = append(info.Plugins.Network, network.Name) } @@ -172,7 +178,7 @@ func (s *System) SystemInfo() (*types.Info, error) { // driver supplied by the Docker client and is equivalent to "vsphere" in // our implementation. 
if len(volumeStoreString) > 0 { - for driver := range supportedVolDrivers { + for driver := range proxy.SupportedVolDrivers { if driver != "local" { info.Plugins.Volume = append(info.Plugins.Volume, driver) } @@ -180,7 +186,7 @@ func (s *System) SystemInfo() (*types.Info, error) { } } - if s.systemProxy.PingPortlayer() { + if s.systemProxy.PingPortlayer(context.Background()) { status := [2]string{PortLayerName(), "RUNNING"} info.SystemStatus = append(info.SystemStatus, status) } else { @@ -189,7 +195,7 @@ func (s *System) SystemInfo() (*types.Info, error) { } // Add in vch information - vchInfo, err := s.systemProxy.VCHInfo() + vchInfo, err := s.systemProxy.VCHInfo(context.Background()) if err != nil || vchInfo == nil { log.Infof("System.SystemInfo unable to get vch info from port layer: %s", err.Error()) } else { @@ -253,7 +259,7 @@ func (s *System) SystemInfo() (*types.Info, error) { // layout for build time as per constants defined in https://golang.org/src/time/format.go const buildTimeLayout = "2006/01/02@15:04:05" -func (s *System) SystemVersion() types.Version { +func (s *SystemBackend) SystemVersion() types.Version { Arch := runtime.GOARCH BuildTime := version.BuildDate @@ -291,36 +297,38 @@ func (s *System) SystemVersion() types.Version { Version: Version, } + log.Infof("***** version = %#v", version) + return version } // SystemCPUMhzLimit will return the VCH configured Mhz limit -func (s *System) SystemCPUMhzLimit() (int64, error) { - vchInfo, err := s.systemProxy.VCHInfo() +func (s *SystemBackend) SystemCPUMhzLimit() (int64, error) { + vchInfo, err := s.systemProxy.VCHInfo(context.Background()) if err != nil || vchInfo == nil { return 0, err } return vchInfo.CPUMhz, nil } -func (s *System) SystemDiskUsage() (*types.DiskUsage, error) { - return nil, fmt.Errorf("%s does not yet implement SystemDiskUsage", ProductName()) +func (s *SystemBackend) SystemDiskUsage() (*types.DiskUsage, error) { + return nil, errors.APINotSupportedMsg(ProductName(), 
"SystemDiskUsage") } -func (s *System) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]eventtypes.Message, chan interface{}) { +func (s *SystemBackend) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]eventtypes.Message, chan interface{}) { defer trace.End(trace.Begin("")) ef := events.NewFilter(filter) return EventService().SubscribeTopic(since, until, ef) } -func (s *System) UnsubscribeFromEvents(listener chan interface{}) { +func (s *SystemBackend) UnsubscribeFromEvents(listener chan interface{}) { defer trace.End(trace.Begin("")) EventService().Evict(listener) } // AuthenticateToRegistry handles docker logins -func (s *System) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { +func (s *SystemBackend) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { defer trace.End(trace.Begin("")) // Only look at V2 registries diff --git a/lib/apiservers/engine/backends/volume.go b/lib/apiservers/engine/backends/volume.go index 6a4668cdd8..40cc8b7709 100644 --- a/lib/apiservers/engine/backends/volume.go +++ b/lib/apiservers/engine/backends/volume.go @@ -15,64 +15,33 @@ package backends import ( + "context" "encoding/json" "fmt" - "regexp" - "strconv" - "strings" + //"regexp" + //"strconv" + //"strings" + "sync" log "github.com/Sirupsen/logrus" - derr "github.com/docker/docker/api/errors" + //derr "github.com/docker/docker/api/errors" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" - "github.com/google/uuid" + //"github.com/docker/go-units" + //"github.com/google/uuid" vicfilter "github.com/vmware/vic/lib/apiservers/engine/backends/filter" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/engine/proxy" "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" - "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" + 
//"github.com/vmware/vic/lib/apiservers/portlayer/client/storage" "github.com/vmware/vic/lib/apiservers/portlayer/models" "github.com/vmware/vic/pkg/trace" ) -// NOTE: FIXME: These might be moved to a utility package once there are multiple personalities -const ( - OptsVolumeStoreKey string = "volumestore" - OptsCapacityKey string = "capacity" - dockerMetadataModelKey string = "DockerMetaData" - DefaultVolumeDriver string = "vsphere" -) - -// define a set (whitelist) of valid driver opts keys for command line argument validation -var validDriverOptsKeys = map[string]struct{}{ - OptsVolumeStoreKey: {}, - OptsCapacityKey: {}, - DriverArgFlagKey: {}, - DriverArgContainerKey: {}, - DriverArgImageKey: {}, -} - -// Volume drivers currently supported. "local" is the default driver supplied by the client -// and is equivalent to "vsphere" for our implementation. -var supportedVolDrivers = map[string]struct{}{ - "vsphere": {}, - "local": {}, -} - -//Validation pattern for Volume Names -var volumeNameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") - -func NewVolumeModel(volume *models.VolumeResponse, labels map[string]string) *types.Volume { - return &types.Volume{ - Driver: volume.Driver, - Name: volume.Name, - Labels: labels, - Mountpoint: volume.Label, - } -} - // Volume which defines the docker personalities view of a Volume -type Volume struct { +type VolumeBackend struct { + storageProxy proxy.VicStorageProxy } // acceptedVolumeFilters are volume filters that are supported by VIC @@ -83,44 +52,38 @@ var acceptedVolumeFilters = map[string]bool{ "label": true, } -var errPortlayerClient = fmt.Errorf("failed to get a portlayer client") +var volumeBackend *VolumeBackend +var volOnce sync.Once -func NewVolumeBackend() *Volume { - return &Volume{} +func NewVolumeBackend() *VolumeBackend { + volOnce.Do(func() { + volumeBackend = &VolumeBackend{ + storageProxy: proxy.NewStorageProxy(PortLayerClient()), + } + }) + return volumeBackend } // Volumes docker personality 
implementation for VIC -func (v *Volume) Volumes(filter string) ([]*types.Volume, []string, error) { - defer trace.End(trace.Begin("Volume.Volumes")) - var volumes []*types.Volume +func (v *VolumeBackend) Volumes(filter string) ([]*types.Volume, []string, error) { + defer trace.End(trace.Begin(filter)) - client := PortLayerClient() - if client == nil { - return nil, nil, VolumeInternalServerError(errPortlayerClient) - } + var volumes []*types.Volume - res, err := client.Storage.ListVolumes(storage.NewListVolumesParamsWithContext(ctx).WithFilterString(&filter)) + // Get volume list from the portlayer + volumeResponses, err := v.storageProxy.VolumeList(context.Background(), filter) if err != nil { - switch err := err.(type) { - case *storage.ListVolumesInternalServerError: - return nil, nil, VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Payload.Message)) - case *storage.ListVolumesDefault: - return nil, nil, VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Payload.Message)) - default: - return nil, nil, VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Error())) - } + return nil, nil, err } - volumeResponses := res.Payload - // Parse and validate filters volumeFilters, err := filters.FromParam(filter) if err != nil { - return nil, nil, VolumeInternalServerError(err) + return nil, nil, errors.VolumeInternalServerError(err) } volFilterContext, err := vicfilter.ValidateVolumeFilters(volumeFilters, acceptedVolumeFilters, nil) if err != nil { - return nil, nil, VolumeInternalServerError(err) + return nil, nil, errors.VolumeInternalServerError(err) } // joinedVolumes stores names of volumes that are joined to a container @@ -131,7 +94,7 @@ func (v *Volume) Volumes(filter string) ([]*types.Volume, []string, error) { // If the dangling filter is specified, gather required items beforehand joinedVolumes, err = fetchJoinedVolumes() if err != nil { - return nil, nil, 
VolumeInternalServerError(err) + return nil, nil, errors.VolumeInternalServerError(err) } } @@ -141,7 +104,7 @@ func (v *Volume) Volumes(filter string) ([]*types.Volume, []string, error) { volumeMetadata, err := extractDockerMetadata(vol.Metadata) if err != nil { - return nil, nil, VolumeInternalServerError(fmt.Errorf("error unmarshalling docker metadata: %s", err)) + return nil, nil, errors.VolumeInternalServerError(fmt.Errorf("error unmarshalling docker metadata: %s", err)) } // Set fields needed for filtering the output @@ -161,292 +124,109 @@ func (v *Volume) Volumes(filter string) ([]*types.Volume, []string, error) { return volumes, nil, nil } -// fetchJoinedVolumes obtains all containers from the portlayer and returns a map with all -// volumes that are joined to at least one container. -func fetchJoinedVolumes() (map[string]struct{}, error) { - conts, err := allContainers() - if err != nil { - return nil, VolumeInternalServerError(err) - } - - joinedVolumes := make(map[string]struct{}) - var s struct{} - for i := range conts { - for _, vol := range conts[i].VolumeConfig { - joinedVolumes[vol.Name] = s - } - } - - return joinedVolumes, nil -} - -// allContainers obtains all containers from the portlayer, akin to `docker ps -a`. 
-func allContainers() ([]*models.ContainerInfo, error) { - client := PortLayerClient() - if client == nil { - return nil, errPortlayerClient - } - - all := true - cons, err := client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all)) - if err != nil { - return nil, err - } - - return cons.Payload, nil -} - // VolumeInspect : docker personality implementation for VIC -func (v *Volume) VolumeInspect(name string) (*types.Volume, error) { +func (v *VolumeBackend) VolumeInspect(name string) (*types.Volume, error) { defer trace.End(trace.Begin(name)) - client := PortLayerClient() - if client == nil { - return nil, VolumeInternalServerError(errPortlayerClient) - } - - if name == "" { - return nil, nil - } - - param := storage.NewGetVolumeParamsWithContext(ctx).WithName(name) - res, err := client.Storage.GetVolume(param) + volInfo, err := v.storageProxy.VolumeInfo(context.Background(), name) if err != nil { - switch err := err.(type) { - case *storage.GetVolumeNotFound: - return nil, VolumeNotFoundError(name) - default: - return nil, VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Error())) - } + return nil, err } - volumeMetadata, err := extractDockerMetadata(res.Payload.Metadata) + volumeMetadata, err := extractDockerMetadata(volInfo.Metadata) if err != nil { - return nil, VolumeInternalServerError(fmt.Errorf("error unmarshalling docker metadata: %s", err)) + return nil, errors.VolumeInternalServerError(fmt.Errorf("error unmarshalling docker metadata: %s", err)) } - volume := NewVolumeModel(res.Payload, volumeMetadata.Labels) + volume := NewVolumeModel(volInfo, volumeMetadata.Labels) return volume, nil } -// volumeCreate issues a CreateVolume request to the portlayer -func (v *Volume) volumeCreate(name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { - defer trace.End(trace.Begin("")) - result := &types.Volume{} - - client := PortLayerClient() - if client == nil { - 
return nil, errPortlayerClient - } - - if name == "" { - name = uuid.New().String() - } - - // TODO: support having another driver besides vsphere. - // assign the values of the model to be passed to the portlayer handler - req, varErr := newVolumeCreateReq(name, driverName, volumeData, labels) - if varErr != nil { - return result, varErr - } - log.Infof("Finalized model for volume create request to portlayer: %#v", req) - - res, err := client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(ctx).WithVolumeRequest(req)) - if err != nil { - return result, err - } - result = NewVolumeModel(res.Payload, labels) - return result, nil -} - // VolumeCreate : docker personality implementation for VIC -func (v *Volume) VolumeCreate(name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { - defer trace.End(trace.Begin("Volume.VolumeCreate")) +func (v *VolumeBackend) VolumeCreate(name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { + defer trace.End(trace.Begin(name)) - result, err := v.volumeCreate(name, driverName, volumeData, labels) + result, err := v.storageProxy.Create(context.Background(), name, driverName, volumeData, labels) if err != nil { - switch err := err.(type) { - case *storage.CreateVolumeConflict: - return result, VolumeInternalServerError(fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name)) - - case *storage.CreateVolumeNotFound: - return result, VolumeInternalServerError(fmt.Errorf("No volume store named (%s) exists", volumeStore(volumeData))) - - case *storage.CreateVolumeInternalServerError: - // FIXME: right now this does not return an error model... 
- return result, VolumeInternalServerError(fmt.Errorf("%s", err.Error())) - - case *storage.CreateVolumeDefault: - return result, VolumeInternalServerError(fmt.Errorf("%s", err.Payload.Message)) - - default: - return result, VolumeInternalServerError(fmt.Errorf("%s", err)) - } + return nil, err } return result, nil } // VolumeRm : docker personality for VIC -func (v *Volume) VolumeRm(name string, force bool) error { +func (v *VolumeBackend) VolumeRm(name string, force bool) error { defer trace.End(trace.Begin(name)) - client := PortLayerClient() - if client == nil { - return VolumeInternalServerError(errPortlayerClient) - } - - _, err := client.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(name)) + err := v.storageProxy.Remove(context.Background(), name) if err != nil { - - switch err := err.(type) { - case *storage.RemoveVolumeNotFound: - return derr.NewRequestNotFoundError(fmt.Errorf("Get %s: no such volume", name)) - - case *storage.RemoveVolumeConflict: - return derr.NewRequestConflictError(fmt.Errorf(err.Payload.Message)) - - case *storage.RemoveVolumeInternalServerError: - return VolumeInternalServerError(fmt.Errorf("Server error from portlayer: %s", err.Payload.Message)) - default: - return VolumeInternalServerError(fmt.Errorf("Server error from portlayer: %s", err)) - } + return err } + return nil } -func (v *Volume) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { - return nil, fmt.Errorf("%s does not yet implement VolumesPrune", ProductName()) +func (v *VolumeBackend) VolumesPrune(pruneFilters filters.Args) (*types.VolumesPruneReport, error) { + return nil, errors.APINotSupportedMsg(ProductName(), "VolumesPrune") } -type volumeMetadata struct { - Driver string - DriverOpts map[string]string - Name string - Labels map[string]string - AttachHistory []string - Image string -} +//------------------------------------ +// Utility Functions +//------------------------------------ -func 
createVolumeMetadata(req *models.VolumeRequest, driverargs, labels map[string]string) (string, error) { - metadata := volumeMetadata{ - Driver: req.Driver, - DriverOpts: req.DriverArgs, - Name: req.Name, - Labels: labels, - AttachHistory: []string{driverargs[DriverArgContainerKey]}, - Image: driverargs[DriverArgImageKey], +func NewVolumeModel(volume *models.VolumeResponse, labels map[string]string) *types.Volume { + return &types.Volume{ + Driver: volume.Driver, + Name: volume.Name, + Labels: labels, + Mountpoint: volume.Label, } - result, err := json.Marshal(metadata) - return string(result), err } -// Unmarshal the docker metadata using the docker metadata key. The docker -// metadatakey. We stash the vals we know about in that map with that key. -func extractDockerMetadata(metadataMap map[string]string) (*volumeMetadata, error) { - v, ok := metadataMap[dockerMetadataModelKey] - if !ok { - return nil, fmt.Errorf("metadata %s missing", dockerMetadataModelKey) +// fetchJoinedVolumes obtains all containers from the portlayer and returns a map with all +// volumes that are joined to at least one container. 
+func fetchJoinedVolumes() (map[string]struct{}, error) { + conts, err := allContainers() + if err != nil { + return nil, errors.VolumeInternalServerError(err) } - result := &volumeMetadata{} - err := json.Unmarshal([]byte(v), result) - return result, err -} - -// Utility Functions - -func newVolumeCreateReq(name, driverName string, volumeData, labels map[string]string) (*models.VolumeRequest, error) { - if _, ok := supportedVolDrivers[driverName]; !ok { - return nil, fmt.Errorf("error looking up volume plugin %s: plugin not found", driverName) + joinedVolumes := make(map[string]struct{}) + var s struct{} + for i := range conts { + for _, vol := range conts[i].VolumeConfig { + joinedVolumes[vol.Name] = s + } } - if !volumeNameRegex.Match([]byte(name)) && name != "" { - return nil, fmt.Errorf("volume name %q includes invalid characters, only \"[a-zA-Z0-9][a-zA-Z0-9_.-]\" are allowed", name) - } + return joinedVolumes, nil +} - req := &models.VolumeRequest{ - Driver: driverName, - DriverArgs: volumeData, - Name: name, - Metadata: make(map[string]string), +// allContainers obtains all containers from the portlayer, akin to `docker ps -a`. +func allContainers() ([]*models.ContainerInfo, error) { + client := PortLayerClient() + if client == nil { + return nil, errors.NillPortlayerClientError("Volume Backend") } - metadata, err := createVolumeMetadata(req, volumeData, labels) + all := true + cons, err := client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all)) if err != nil { return nil, err } - req.Metadata[dockerMetadataModelKey] = metadata - - if err := validateDriverArgs(volumeData, req); err != nil { - return nil, fmt.Errorf("bad driver value - %s", err) - } - - return req, nil -} - -// volumeStore returns the value of the optional volume store param specified in the CLI. 
-func volumeStore(args map[string]string) string { - storeName, ok := args[OptsVolumeStoreKey] - if !ok { - return "default" - } - return storeName -} - -func normalizeDriverArgs(args map[string]string) error { - // normalize keys to lowercase & validate them - for k, val := range args { - lowercase := strings.ToLower(k) - - if _, ok := validDriverOptsKeys[lowercase]; !ok { - return fmt.Errorf("%s is not a supported option", k) - } - - if strings.Compare(lowercase, k) != 0 { - delete(args, k) - args[lowercase] = val - } - } - return nil + return cons.Payload, nil } -func validateDriverArgs(args map[string]string, req *models.VolumeRequest) error { - if err := normalizeDriverArgs(args); err != nil { - return err - } - - // volumestore name validation - req.Store = volumeStore(args) - - // capacity validation - capstr, ok := args[OptsCapacityKey] +// Unmarshal the docker metadata using the docker metadata key. The docker +// metadatakey. We stash the vals we know about in that map with that key. +func extractDockerMetadata(metadataMap map[string]string) (*proxy.VolumeMetadata, error) { + v, ok := metadataMap[proxy.DockerMetadataModelKey] if !ok { - req.Capacity = -1 - return nil - } - - //check if it is just a numerical value - capacity, err := strconv.ParseInt(capstr, 10, 64) - if err == nil { - //input has no units in this case. 
- if capacity < 1 { - return fmt.Errorf("Invalid size: %s", capstr) - } - req.Capacity = capacity - return nil - } - - capacity, err = units.FromHumanSize(capstr) - if err != nil { - return err + return nil, fmt.Errorf("metadata %s missing", proxy.DockerMetadataModelKey) } - if capacity < 1 { - return fmt.Errorf("Capacity value too large: %s", capstr) - } - - req.Capacity = int64(capacity) / int64(units.MB) - return nil + result := &proxy.VolumeMetadata{} + err := json.Unmarshal([]byte(v), result) + return result, err } diff --git a/lib/apiservers/engine/backends/volume_test.go b/lib/apiservers/engine/backends/volume_test.go index e98499c3cc..a28e47ed6c 100644 --- a/lib/apiservers/engine/backends/volume_test.go +++ b/lib/apiservers/engine/backends/volume_test.go @@ -20,124 +20,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/vmware/vic/lib/apiservers/portlayer/models" + "github.com/vmware/vic/lib/apiservers/engine/proxy" ) -func TestFillDockerVolume(t *testing.T) { - testResponse := &models.VolumeResponse{ - Driver: "vsphere", - Name: "Test Volume", - Label: "Test Label", - } - testLabels := make(map[string]string) - testLabels["TestMeta"] = "custom info about my volume" - - dockerVolume := NewVolumeModel(testResponse, testLabels) - - assert.Equal(t, "vsphere", dockerVolume.Driver) - assert.Equal(t, "Test Volume", dockerVolume.Name) - assert.Equal(t, "Test Label", dockerVolume.Mountpoint) - assert.Equal(t, "custom info about my volume", dockerVolume.Labels["TestMeta"]) -} - -func TestTranslatVolumeRequestModel(t *testing.T) { - testLabels := make(map[string]string) - testLabels["TestMeta"] = "custom info about my volume" - - testDriverArgs := make(map[string]string) - testDriverArgs["testarg"] = "important driver stuff" - testDriverArgs[OptsVolumeStoreKey] = "testStore" - testDriverArgs[OptsCapacityKey] = "12MB" - - testRequest, err := newVolumeCreateReq("testName", "vsphere", testDriverArgs, testLabels) - if !assert.Error(t, err) { - return - } - - 
delete(testDriverArgs, "testarg") - testRequest, err = newVolumeCreateReq("testName", "vsphere", testDriverArgs, testLabels) - if !assert.NoError(t, err) { - return - } - - assert.Equal(t, "testName", testRequest.Name) - assert.Equal(t, "", testRequest.DriverArgs["testarg"]) // unsupported keys should just be empty - assert.Equal(t, "testStore", testRequest.Store) - assert.Equal(t, "vsphere", testRequest.Driver) - assert.Equal(t, int64(12), testRequest.Capacity) - - testMetaDatabuf, err := createVolumeMetadata(testRequest, testDriverArgs, testLabels) - if !assert.NoError(t, err) { - return - } - - assert.Equal(t, testMetaDatabuf, testRequest.Metadata[dockerMetadataModelKey]) - assert.Nil(t, err) -} - -func TestCreateVolumeMetada(t *testing.T) { - testDriverOpts := make(map[string]string) - testDriverOpts["TestArg"] = "test" - testModel := models.VolumeRequest{ - Driver: "vsphere", - DriverArgs: testDriverOpts, - Name: "testModel", - } - testLabels := make(map[string]string) - testLabels["TestMeta"] = "custom info about my volume" - - testMetadataString, err := createVolumeMetadata(&testModel, testDriverOpts, testLabels) - if !assert.NoError(t, err) { - return - } - - volumeMetadata := volumeMetadata{} - json.Unmarshal([]byte(testMetadataString), &volumeMetadata) - - assert.Equal(t, testModel.Driver, volumeMetadata.Driver) - assert.Equal(t, testModel.Name, volumeMetadata.Name) - assert.Equal(t, testLabels["TestMeta"], volumeMetadata.Labels["TestMeta"]) - assert.Equal(t, testLabels["TestArg"], volumeMetadata.DriverOpts["testArg"]) -} - -func TestValidateDriverArgs(t *testing.T) { - testMap := make(map[string]string) - testStore := "Mystore" - testCap := "12MB" - testBadCap := "This is not valid!" 
- testModel := models.VolumeRequest{ - Driver: "vsphere", - DriverArgs: testMap, - Name: "testModel", - } - - err := validateDriverArgs(testMap, &testModel) - if !assert.Equal(t, "default", testModel.Store) || !assert.Equal(t, int64(-1), testModel.Capacity) || !assert.NoError(t, err) { - return - } - - testMap[OptsVolumeStoreKey] = testStore - testMap[OptsCapacityKey] = testCap - err = validateDriverArgs(testMap, &testModel) - if !assert.Equal(t, testStore, testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.NoError(t, err) { - return - } - - //This is a negative test case. We want an error - testMap[OptsCapacityKey] = testBadCap - err = validateDriverArgs(testMap, &testModel) - if !assert.Equal(t, testStore, testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.Error(t, err) { - return - } - - testMap[OptsCapacityKey] = testCap - delete(testMap, OptsVolumeStoreKey) - err = validateDriverArgs(testMap, &testModel) - if !assert.Equal(t, "default", testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.NoError(t, err) { - return - } -} - func TestExtractDockerMetadata(t *testing.T) { driver := "vsphere" volumeName := "testVolume" @@ -145,13 +30,13 @@ func TestExtractDockerMetadata(t *testing.T) { testCap := "512" testOptMap := make(map[string]string) - testOptMap[OptsVolumeStoreKey] = store - testOptMap[OptsCapacityKey] = testCap + testOptMap[proxy.OptsVolumeStoreKey] = store + testOptMap[proxy.OptsCapacityKey] = testCap testLabelMap := make(map[string]string) testLabelMap["someLabel"] = "this is a label" - metaDataBefore := volumeMetadata{ + metaDataBefore := proxy.VolumeMetadata{ Driver: driver, Name: volumeName, DriverOpts: testOptMap, @@ -164,31 +49,15 @@ func TestExtractDockerMetadata(t *testing.T) { } metadataMap := make(map[string]string) - metadataMap[dockerMetadataModelKey] = string(buf) + metadataMap[proxy.DockerMetadataModelKey] = string(buf) metadataAfter, err := 
extractDockerMetadata(metadataMap) if !assert.NoError(t, err) { return } - assert.Equal(t, metaDataBefore.DriverOpts[OptsCapacityKey], metadataAfter.DriverOpts[OptsCapacityKey]) - assert.Equal(t, metaDataBefore.DriverOpts[OptsVolumeStoreKey], metadataAfter.DriverOpts[OptsVolumeStoreKey]) + assert.Equal(t, metaDataBefore.DriverOpts[proxy.OptsCapacityKey], metadataAfter.DriverOpts[proxy.OptsCapacityKey]) + assert.Equal(t, metaDataBefore.DriverOpts[proxy.OptsVolumeStoreKey], metadataAfter.DriverOpts[proxy.OptsVolumeStoreKey]) assert.Equal(t, metaDataBefore.Labels["someLabel"], metadataAfter.Labels["someLabel"]) assert.Equal(t, metaDataBefore.Name, metadataAfter.Name) assert.Equal(t, metaDataBefore.Driver, metadataAfter.Driver) } - -func TestNormalizeDriverArgs(t *testing.T) { - testOptMap := make(map[string]string) - testOptMap["VOLUMESTORE"] = "foo" - testOptMap["CAPACITY"] = "bar" - - normalizeDriverArgs(testOptMap) - - assert.Equal(t, testOptMap["volumestore"], "foo") - assert.Equal(t, testOptMap["capacity"], "bar") - - testOptMap["bogus"] = "bogus" - - err := normalizeDriverArgs(testOptMap) - assert.Error(t, err, "expected: bogus is not a supported option") -} diff --git a/lib/apiservers/engine/constants/constants.go b/lib/apiservers/engine/constants/constants.go new file mode 100644 index 0000000000..927866648e --- /dev/null +++ b/lib/apiservers/engine/constants/constants.go @@ -0,0 +1,19 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package constants + +const ( + DefaultVolumeDriver = "vsphere" +) diff --git a/lib/apiservers/engine/backends/errors.go b/lib/apiservers/engine/errors/errors.go similarity index 83% rename from lib/apiservers/engine/backends/errors.go rename to lib/apiservers/engine/errors/errors.go index a457be7720..ed0dedac33 100644 --- a/lib/apiservers/engine/backends/errors.go +++ b/lib/apiservers/engine/errors/errors.go @@ -1,4 +1,4 @@ -// Copyright 2016-2017 VMware, Inc. All Rights Reserved. +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package backends +package errors import ( "fmt" @@ -31,16 +31,24 @@ type InvalidVolumeError struct { } func (e InvalidVolumeError) Error() string { - return fmt.Sprintf("%s does not support mounting directories as a data volume.", ProductName()) + return fmt.Sprintf("mounting directories as a data volume is not supported.") } // InvalidBindError is returned when create/run -v has more params than allowed. type InvalidBindError struct { - volume string + Volume string } func (e InvalidBindError) Error() string { - return fmt.Sprintf("volume bind input is invalid: -v %s", e.volume) + return fmt.Sprintf("volume bind input is invalid: -v %s", e.Volume) +} + +func APINotSupportedMsg(product, method string) error { + return fmt.Errorf("%s does not yet implement %s", product, method) +} + +func NillPortlayerClientError(caller string) error { + return derr.NewErrorWithStatusCode(fmt.Errorf("%s failed to get a portlayer client", caller), http.StatusInternalServerError) } // VolumeJoinNotFoundError returns a 404 docker error for a volume join request. 
@@ -63,10 +71,14 @@ func VolumeInternalServerError(err error) error { return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError) } -func ResourceNotFoundError(cid, res string) error { +func ContainerResourceNotFoundError(cid, res string) error { return derr.NewRequestNotFoundError(fmt.Errorf("No such %s for container: %s", res, cid)) } +func ResourceNotFoundError(res string) error { + return derr.NewRequestNotFoundError(fmt.Errorf("No such %s", res)) +} + // NotFoundError returns a 404 docker error when a container is not found. func NotFoundError(msg string) error { return derr.NewRequestNotFoundError(fmt.Errorf("No such container: %s", msg)) @@ -104,7 +116,7 @@ func PluginNotFoundError(name string) error { } func SwarmNotSupportedError() error { - return derr.NewErrorWithStatusCode(fmt.Errorf("%s does not yet support Docker Swarm", ProductName()), http.StatusNotFound) + return derr.NewErrorWithStatusCode(fmt.Errorf("Docker Swarm is not yet supported"), http.StatusNotFound) } func StreamFormatNotRecognized() error { @@ -148,3 +160,9 @@ func IsResourceInUse(err error) bool { return false } + +type DetachError struct{} + +func (DetachError) Error() string { + return "detached from container" +} diff --git a/lib/apiservers/engine/network/utils.go b/lib/apiservers/engine/network/utils.go new file mode 100644 index 0000000000..5f27d13a17 --- /dev/null +++ b/lib/apiservers/engine/network/utils.go @@ -0,0 +1,614 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package network + +import ( + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + + log "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/portallocator" + "github.com/vishvananda/netlink" + + viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" + "github.com/vmware/vic/lib/apiservers/engine/backends/portmap" + "github.com/vmware/vic/lib/apiservers/portlayer/models" + "github.com/vmware/vic/lib/config/executor" +) + +const ( + bridgeIfaceName = "bridge" +) + +var ( + publicIfaceName = "public" + + portMapper portmap.PortMapper + + // bridge-to-bridge rules, indexed by mapped port; + // this map is used to delete the rule once + // the container stops or is removed + btbRules map[string][]string + + cbpLock sync.Mutex + ContainerByPort map[string]string // port:containerID +) + +func init() { + portMapper = portmap.NewPortMapper() + btbRules = make(map[string][]string) + ContainerByPort = make(map[string]string) + + l, err := netlink.LinkByName(publicIfaceName) + if l == nil { + l, err = netlink.LinkByAlias(publicIfaceName) + if err != nil { + log.Errorf("interface %s not found", publicIfaceName) + return + } + } + + // don't use interface alias for iptables rules + publicIfaceName = l.Attrs().Name +} + +// requestHostPort finds a free port on the host +func requestHostPort(proto string) (int, error) { + pa := portallocator.Get() + return pa.RequestPortInRange(nil, proto, 0, 0) +} + +type portMapping struct { + intHostPort int + strHostPort string + portProto nat.Port +} + +// unrollPortMap processes config for mapping/unmapping ports e.g. 
from hostconfig.PortBindings +func unrollPortMap(portMap nat.PortMap) ([]*portMapping, error) { + var portMaps []*portMapping + for i, pb := range portMap { + + proto, port := nat.SplitProtoPort(string(i)) + nport, err := nat.NewPort(proto, port) + if err != nil { + return nil, err + } + + // iterate over all the ports in pb []nat.PortBinding + for i := range pb { + var hostPort int + var hPort string + if pb[i].HostPort == "" { + // use a random port since no host port is specified + hostPort, err = requestHostPort(proto) + if err != nil { + log.Errorf("could not find available port on host") + return nil, err + } + log.Infof("using port %d on the host for port mapping", hostPort) + + // update the hostconfig + pb[i].HostPort = strconv.Itoa(hostPort) + + } else { + hostPort, err = strconv.Atoi(pb[i].HostPort) + if err != nil { + return nil, err + } + } + hPort = strconv.Itoa(hostPort) + portMaps = append(portMaps, &portMapping{ + intHostPort: hostPort, + strHostPort: hPort, + portProto: nport, + }) + } + } + return portMaps, nil +} + +// MapPorts maps ports defined in bridge endpoint for containerID +func MapPorts(vc *viccontainer.VicContainer, endpoint *models.EndpointConfig, containerID string) error { + if endpoint == nil { + return fmt.Errorf("invalid endpoint") + } + + var containerIP net.IP + containerIP = net.ParseIP(endpoint.Address) + if containerIP == nil { + return fmt.Errorf("invalid endpoint address %s", endpoint.Address) + } + + portMap := addIndirectEndpointsToPortMap([]*models.EndpointConfig{endpoint}, nil) + log.Debugf("Mapping ports of %q on endpoint %s: %v", containerID, endpoint.Name, portMap) + if len(portMap) == 0 { + return nil + } + + mappings, err := unrollPortMap(portMap) + if err != nil { + return err + } + + // cannot occur direct under the lock below because unmap ports take a lock. 
+ defer func() { + if err != nil { + // if we didn't succeed then make sure we clean up + UnmapPorts(containerID, vc) + } + }() + + cbpLock.Lock() + defer cbpLock.Unlock() + vc.NATMap = portMap + + for _, p := range mappings { + // update mapped ports + if ContainerByPort[p.strHostPort] == containerID { + log.Debugf("Skipping mapping for already mapped port %s for %s", p.strHostPort, containerID) + continue + } + + if err = portMapper.MapPort(nil, p.intHostPort, p.portProto.Proto(), containerIP.String(), p.portProto.Int(), publicIfaceName, bridgeIfaceName); err != nil { + return err + } + + // bridge-to-bridge pin hole for traffic from containers for exposed port + if err = interBridgeTraffic(portmap.Map, p.strHostPort, p.portProto.Proto(), containerIP.String(), p.portProto.Port()); err != nil { + return err + } + + // update mapped ports + ContainerByPort[p.strHostPort] = containerID + log.Debugf("mapped port %s for container %s", p.strHostPort, containerID) + } + return nil +} + +// UnmapPorts unmaps ports defined in hostconfig if it's mapped for this container +func UnmapPorts(id string, vc *viccontainer.VicContainer) error { + portMap := vc.NATMap + log.Debugf("UnmapPorts for %s: %v", vc.ContainerID, portMap) + + if len(portMap) == 0 { + return nil + } + + mappings, err := unrollPortMap(vc.NATMap) + if err != nil { + return err + } + + cbpLock.Lock() + defer cbpLock.Unlock() + vc.NATMap = nil + + for _, p := range mappings { + // check if we should actually unmap based on current mappings + mappedID, mapped := ContainerByPort[p.strHostPort] + if !mapped { + log.Debugf("skipping already unmapped %s", p.strHostPort) + continue + } + if mappedID != id { + log.Debugf("port is mapped for container %s, not %s, skipping", mappedID, id) + continue + } + + if err = portMapper.UnmapPort(nil, p.intHostPort, p.portProto.Proto(), p.portProto.Int(), publicIfaceName, bridgeIfaceName); err != nil { + log.Warnf("failed to unmap port %s: %s", p.strHostPort, err) + continue + } + 
+ // bridge-to-bridge pin hole for traffic from containers for exposed port + if err = interBridgeTraffic(portmap.Unmap, p.strHostPort, "", "", ""); err != nil { + log.Warnf("failed to undo bridge-to-bridge pinhole %s: %s", p.strHostPort, err) + continue + } + + // update mapped ports + delete(ContainerByPort, p.strHostPort) + log.Debugf("unmapped port %s", p.strHostPort) + } + return nil +} + +// interBridgeTraffic enables traffic for exposed port from one bridge network to another +func interBridgeTraffic(op portmap.Operation, hostPort, proto, containerAddr, containerPort string) error { + switch op { + case portmap.Map: + switch proto { + case "udp", "tcp": + default: + return fmt.Errorf("unknown protocol: %s", proto) + } + + // rule to allow connections from bridge interface for the + // specific mapped port. has to inserted at the top of the + // chain rather than appended to supersede bridge-to-bridge + // traffic blocking + baseArgs := []string{"-t", string(iptables.Filter), + "-i", bridgeIfaceName, + "-o", bridgeIfaceName, + "-p", proto, + "-d", containerAddr, + "--dport", containerPort, + "-j", "ACCEPT", + } + + args := append([]string{string(iptables.Insert), "VIC", "1"}, baseArgs...) + if _, err := iptables.Raw(args...); err != nil && !os.IsExist(err) { + return err + } + + btbRules[hostPort] = baseArgs + case portmap.Unmap: + if args, ok := btbRules[hostPort]; ok { + args = append([]string{string(iptables.Delete), "VIC"}, args...) 
+ if _, err := iptables.Raw(args...); err != nil && !os.IsNotExist(err) { + return err + } + + delete(btbRules, hostPort) + } + } + + return nil +} + +func PublicIPv4Addrs() ([]string, error) { + l, err := netlink.LinkByName(publicIfaceName) + if err != nil { + return nil, fmt.Errorf("could not look up link from interface name %s: %s", publicIfaceName, err.Error()) + } + + addrs, err := netlink.AddrList(l, netlink.FAMILY_V4) + if err != nil { + return nil, fmt.Errorf("could not get addresses from public link: %s", err.Error()) + } + + ips := make([]string, len(addrs)) + for i := range addrs { + ips[i] = addrs[i].IP.String() + } + + return ips, nil +} + +// portMapFromContainer constructs a docker portmap from the container's +// info as returned by the portlayer and adds nil entries for any exposed ports +// that are unmapped +func PortMapFromContainer(vc *viccontainer.VicContainer, t *models.ContainerInfo) nat.PortMap { + var mappings nat.PortMap + + if t != nil { + mappings = addDirectEndpointsToPortMap(t.Endpoints, mappings) + } + if vc != nil && vc.Config != nil { + if vc.NATMap != nil { + // if there's a NAT map for the container then just use that for the indirect port set + mappings = mergePortMaps(vc.NATMap, mappings) + } else { + // if there's no NAT map then we use the backend data every time + mappings = addIndirectEndpointsToPortMap(t.Endpoints, mappings) + } + mappings = addExposedToPortMap(vc.Config, mappings) + } + + return mappings +} + +func ContainerWithPort(hostPort string) (string, bool) { + cbpLock.Lock() + mappedCtr, mapped := ContainerByPort[hostPort] + cbpLock.Unlock() + + return mappedCtr, mapped +} + +// mergePortMaps creates a new map containing the union of the two arguments +func mergePortMaps(map1, map2 nat.PortMap) nat.PortMap { + resultMap := make(map[nat.Port][]nat.PortBinding) + for k, v := range map1 { + resultMap[k] = v + } + + for k, v := range map2 { + vr := resultMap[k] + resultMap[k] = append(vr, v...) 
+ } + + return resultMap +} + +// addIndirectEndpointToPortMap constructs a docker portmap from the container's info as returned by the portlayer for those ports that +// require NAT forward on the endpointVM. +// The portMap provided is modified and returned - the return value should always be used. +func addIndirectEndpointsToPortMap(endpoints []*models.EndpointConfig, portMap nat.PortMap) nat.PortMap { + if len(endpoints) == 0 { + return portMap + } + + // will contain a combined set of port mappings + if portMap == nil { + portMap = make(nat.PortMap) + } + + // add IP address into port spec to allow direct usage of data returned by calls such as docker port + var ip string + ips, _ := PublicIPv4Addrs() + if len(ips) > 0 { + ip = ips[0] + } + + // Preserve the existing behaviour if we do not have an IP for some reason. + if ip == "" { + ip = "0.0.0.0" + } + + for _, ep := range endpoints { + if ep.Direct { + continue + } + + for _, port := range ep.Ports { + mappings, err := nat.ParsePortSpec(port) + if err != nil { + log.Error(err) + // just continue if we do have partial port data + } + + for i := range mappings { + p := mappings[i].Port + b := mappings[i].Binding + + if b.HostIP == "" { + b.HostIP = ip + } + + if mappings[i].Binding.HostPort == "" { + // leave this undefined for dynamic assignment + // TODO: for port stability over VCH restart we would expect to set the dynamically assigned port + // recorded in containerVM annotations here, so that the old host->port mapping is preserved. + } + + log.Debugf("Adding indirect mapping for port %v: %v (%s)", p, b, port) + + current, _ := portMap[p] + portMap[p] = append(current, b) + } + } + } + + return portMap +} + +// addDirectEndpointsToPortMap constructs a docker portmap from the container's info as returned by the portlayer for those +// ports exposed directly from the containerVM via container network +// The portMap provided is modified and returned - the return value should always be used. 
+func addDirectEndpointsToPortMap(endpoints []*models.EndpointConfig, portMap nat.PortMap) nat.PortMap { + if len(endpoints) == 0 { + return portMap + } + + if portMap == nil { + portMap = make(nat.PortMap) + } + + for _, ep := range endpoints { + if !ep.Direct { + continue + } + + // add IP address into the port spec to allow direct usage of data returned by calls such as docker port + var ip string + rawIP, _, _ := net.ParseCIDR(ep.Address) + if rawIP != nil { + ip = rawIP.String() + } + + if ip == "" { + ip = "0.0.0.0" + } + + for _, port := range ep.Ports { + mappings, err := nat.ParsePortSpec(port) + if err != nil { + log.Error(err) + // just continue if we do have partial port data + } + + for i := range mappings { + if mappings[i].Binding.HostIP == "" { + mappings[i].Binding.HostIP = ip + } + + if mappings[i].Binding.HostPort == "" { + // If there's no explicit host port and it's a direct endpoint, then + // mirror the actual port. It's a bit misleading but we're trying to + // pack extended function into an existing structure. + _, p := nat.SplitProtoPort(string(mappings[i].Port)) + mappings[i].Binding.HostPort = p + } + } + + for _, mapping := range mappings { + p := mapping.Port + current, _ := portMap[p] + portMap[p] = append(current, mapping.Binding) + } + } + } + + return portMap +} + +// addExposedToPortMap ensures that exposed ports are all present in the port map. +// This means nil entries for any exposed ports that are not mapped. +// The portMap provided is modified and returned - the return value should always be used. 
+func addExposedToPortMap(config *container.Config, portMap nat.PortMap) nat.PortMap { + if config == nil || len(config.ExposedPorts) == 0 { + return portMap + } + + if portMap == nil { + portMap = make(nat.PortMap) + } + + for p := range config.ExposedPorts { + if _, ok := portMap[p]; ok { + continue + } + + portMap[p] = nil + } + + return portMap +} + +func DirectPortInformation(t *models.ContainerInfo) []types.Port { + var resultPorts []types.Port + + for _, ne := range t.Endpoints { + trust, _ := executor.ParseTrustLevel(ne.Trust) + if !ne.Direct || trust == executor.Closed || trust == executor.Outbound || trust == executor.Peers { + // we don't publish port info for ports that are not directly accessible from outside of the VCH + continue + } + + ip := strings.SplitN(ne.Address, "/", 2)[0] + + // if it's an open network then inject an "all ports" entry + if trust == executor.Open { + resultPorts = append(resultPorts, types.Port{ + IP: ip, + PrivatePort: 0, + PublicPort: 0, + Type: "*", + }) + } + + for _, p := range ne.Ports { + port := types.Port{IP: ip} + + portsAndType := strings.SplitN(p, "/", 2) + port.Type = portsAndType[1] + + mapping := strings.Split(portsAndType[0], ":") + // if no mapping is supplied then there's only one and that's public. If there is a mapping then the first + // entry is the public + public, err := strconv.Atoi(mapping[0]) + if err != nil { + log.Errorf("Got an error trying to convert public port number \"%s\" to an int: %s", mapping[0], err) + continue + } + port.PublicPort = uint16(public) + + // If port is on container network then a different container could be forwarding the same port via the endpoint + // so must check for explicit ID match. If a match then it's definitely not accessed directly. 
+ if ContainerByPort[mapping[0]] == t.ContainerConfig.ContainerID { + continue + } + + // did not find a way to have the client not render both ports so setting them the same even if there's not + // redirect occurring + port.PrivatePort = port.PublicPort + + // for open networks we don't bother listing direct ports + if len(mapping) == 1 { + if trust != executor.Open { + resultPorts = append(resultPorts, port) + } + continue + } + + private, err := strconv.Atoi(mapping[1]) + if err != nil { + log.Errorf("Got an error trying to convert private port number \"%s\" to an int: %s", mapping[1], err) + continue + } + port.PrivatePort = uint16(private) + resultPorts = append(resultPorts, port) + } + } + + return resultPorts +} + +// returns port bindings as a slice of Docker Ports for return to the client +// returns empty slice on error +//func PortForwardingInformation(t *models.ContainerInfo, ips []string) []types.Port { +func PortForwardingInformation(vc *viccontainer.VicContainer, ips []string) []types.Port { + //cid := t.ContainerConfig.ContainerID + //c := cache.ContainerCache().GetContainer(cid) + + if vc == nil { + log.Errorf("Could not find container with ID %s", vc.ContainerID) + return nil + } + + portBindings := vc.NATMap + var resultPorts []types.Port + + // create a port for each IP on the interface (usually only 1, but could be more) + // (works with both IPv4 and IPv6 addresses) + for _, ip := range ips { + port := types.Port{IP: ip} + + for portBindingPrivatePort, hostPortBindings := range portBindings { + proto, pnum := nat.SplitProtoPort(string(portBindingPrivatePort)) + portNum, err := strconv.Atoi(pnum) + if err != nil { + log.Warnf("Unable to convert private port %q to an int", pnum) + continue + } + port.PrivatePort = uint16(portNum) + port.Type = proto + + for i := 0; i < len(hostPortBindings); i++ { + // If port is on container network then a different container could be forwarding the same port via the endpoint + // so must check for explicit ID 
match. If no match, definitely not forwarded via endpoint. + //if ContainerByPort[hostPortBindings[i].HostPort] != t.ContainerConfig.ContainerID { + if ContainerByPort[hostPortBindings[i].HostPort] != vc.ContainerID { + continue + } + + newport := port + publicPort, err := strconv.Atoi(hostPortBindings[i].HostPort) + if err != nil { + log.Infof("Got an error trying to convert public port number to an int") + continue + } + + newport.PublicPort = uint16(publicPort) + // sanity check -- sometimes these come back as 0 when no binding actually exists + // that doesn't make sense, so in that case we don't want to report these bindings + if newport.PublicPort != 0 && newport.PrivatePort != 0 { + resultPorts = append(resultPorts, newport) + } + } + } + } + return resultPorts +} diff --git a/lib/apiservers/engine/proxy/archive.go b/lib/apiservers/engine/proxy/archive_proxy.go similarity index 66% rename from lib/apiservers/engine/proxy/archive.go rename to lib/apiservers/engine/proxy/archive_proxy.go index bc3cbc1891..af12981b0f 100644 --- a/lib/apiservers/engine/proxy/archive.go +++ b/lib/apiservers/engine/proxy/archive_proxy.go @@ -19,18 +19,24 @@ import ( "encoding/json" "fmt" "io" + "os" "strings" "sync" + "time" + "github.com/vmware/vic/lib/apiservers/engine/errors" "github.com/vmware/vic/lib/apiservers/portlayer/client" "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" - vicarchive "github.com/vmware/vic/lib/archive" + "github.com/vmware/vic/lib/archive" "github.com/vmware/vic/pkg/trace" + + "github.com/docker/docker/api/types" ) type VicArchiveProxy interface { - ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec vicarchive.FilterSpec) (io.ReadCloser, error) - ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec vicarchive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) + ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, 
ancestor string, data bool, filterSpec archive.FilterSpec) (io.ReadCloser, error) + ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) + StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) } //------------------------------------ @@ -41,15 +47,25 @@ type ArchiveProxy struct { client *client.PortLayer } +var archiveProxy *ArchiveProxy + func NewArchiveProxy(client *client.PortLayer) VicArchiveProxy { return &ArchiveProxy{client: client} } +func GetArchiveProxy() VicArchiveProxy { + return archiveProxy +} + // ArchiveExportReader streams a tar archive from the portlayer. Once the stream is complete, // an io.Reader is returned and the caller can use that reader to parse the data. -func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec vicarchive.FilterSpec) (io.ReadCloser, error) { +func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorStore, deviceID, ancestor string, data bool, filterSpec archive.FilterSpec) (io.ReadCloser, error) { defer trace.End(trace.Begin(deviceID)) + if a.client == nil { + return nil, errors.NillPortlayerClientError("ArchiveProxy") + } + if store == "" || deviceID == "" { return nil, fmt.Errorf("ArchiveExportReader called with either empty store or deviceID") } @@ -93,22 +109,22 @@ func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorSt op.Errorf("Error from ExportArchive: %s", err.Error()) switch err := err.(type) { case *storage.ExportArchiveInternalServerError: - plErr := InternalServerError(fmt.Sprintf("Server error from archive reader for device %s", deviceID)) + plErr := errors.InternalServerError(fmt.Sprintf("Server error from archive reader for device %s", deviceID)) op.Errorf(plErr.Error()) pipeWriter.CloseWithError(plErr) case 
*storage.ExportArchiveLocked: - plErr := ResourceLockedError(fmt.Sprintf("Resource locked for device %s", deviceID)) + plErr := errors.ResourceLockedError(fmt.Sprintf("Resource locked for device %s", deviceID)) op.Errorf(plErr.Error()) pipeWriter.CloseWithError(plErr) case *storage.ExportArchiveUnprocessableEntity: - plErr := InternalServerError("failed to process given path") + plErr := errors.InternalServerError("failed to process given path") op.Errorf(plErr.Error()) pipeWriter.CloseWithError(plErr) default: //Check for EOF. Since the connection, transport, and data handling are //encapsulated inside of Swagger, we can only detect EOF by checking the //error string - if strings.Contains(err.Error(), swaggerSubstringEOF) { + if strings.Contains(err.Error(), SwaggerSubstringEOF) { op.Debugf("swagger error %s", err.Error()) pipeWriter.Close() } else { @@ -125,9 +141,13 @@ func (a *ArchiveProxy) ArchiveExportReader(op trace.Operation, store, ancestorSt // ArchiveImportWriter initializes a write stream for a path. This is usually called // for getting a writer during docker cp TO container. 
-func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec vicarchive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) { +func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec, wg *sync.WaitGroup, errchan chan error) (io.WriteCloser, error) { defer trace.End(trace.Begin(deviceID)) + if a.client == nil { + return nil, errors.NillPortlayerClientError("ArchiveProxy") + } + if store == "" || deviceID == "" { return nil, fmt.Errorf("ArchiveImportWriter called with either empty store or deviceID") } @@ -171,23 +191,23 @@ func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID s if err != nil { switch err := err.(type) { case *storage.ImportArchiveInternalServerError: - plErr = InternalServerError(fmt.Sprintf("error writing files to device %s", deviceID)) + plErr = errors.InternalServerError(fmt.Sprintf("error writing files to device %s", deviceID)) op.Errorf(plErr.Error()) pipeReader.CloseWithError(plErr) case *storage.ImportArchiveLocked: - plErr = ResourceLockedError(fmt.Sprintf("resource locked for device %s", deviceID)) + plErr = errors.ResourceLockedError(fmt.Sprintf("resource locked for device %s", deviceID)) op.Errorf(plErr.Error()) pipeReader.CloseWithError(plErr) case *storage.ImportArchiveNotFound: - plErr = ResourceNotFoundError("file or directory") + plErr = errors.ResourceNotFoundError("file or directory") op.Errorf(plErr.Error()) pipeReader.CloseWithError(plErr) case *storage.ImportArchiveUnprocessableEntity: - plErr = InternalServerError("failed to process given path") + plErr = errors.InternalServerError("failed to process given path") op.Errorf(plErr.Error()) pipeReader.CloseWithError(plErr) case *storage.ImportArchiveConflict: - plErr = InternalServerError("unexpected copy failure may result in truncated copy, please try again") + plErr = errors.InternalServerError("unexpected copy failure may result in 
truncated copy, please try again") op.Errorf(plErr.Error()) pipeReader.CloseWithError(plErr) default: @@ -195,7 +215,7 @@ func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID s //encapsulated inside of Swagger, we can only detect EOF by checking the //error string plErr = err - if strings.Contains(err.Error(), swaggerSubstringEOF) { + if strings.Contains(err.Error(), SwaggerSubstringEOF) { op.Error(err) pipeReader.Close() } else { @@ -209,3 +229,47 @@ func (a *ArchiveProxy) ArchiveImportWriter(op trace.Operation, store, deviceID s return pipeWriter, nil } + +// StatPath requests the portlayer to stat the filesystem resource at the +// specified path in the container vc. +func (a *ArchiveProxy) StatPath(op trace.Operation, store, deviceID string, filterSpec archive.FilterSpec) (*types.ContainerPathStat, error) { + defer trace.End(trace.Begin(deviceID)) + + if a.client == nil { + return nil, errors.NillPortlayerClientError("ArchiveProxy") + } + + statPathParams := storage. + NewStatPathParamsWithContext(op). + WithStore(store). 
+ WithDeviceID(deviceID) + + spec, err := archive.EncodeFilterSpec(op, &filterSpec) + if err != nil { + op.Errorf(err.Error()) + return nil, errors.InternalServerError(err.Error()) + } + statPathParams = statPathParams.WithFilterSpec(spec) + + statPathOk, err := a.client.Storage.StatPath(statPathParams) + if err != nil { + op.Errorf(err.Error()) + return nil, err + } + + stat := &types.ContainerPathStat{ + Name: statPathOk.Name, + Mode: os.FileMode(statPathOk.Mode), + Size: statPathOk.Size, + LinkTarget: statPathOk.LinkTarget, + } + + var modTime time.Time + if err := modTime.GobDecode([]byte(statPathOk.ModTime)); err != nil { + op.Debugf("error getting mod time from statpath: %s", err.Error()) + } else { + stat.Mtime = modTime + } + + return stat, nil +} diff --git a/lib/apiservers/engine/proxy/client.go b/lib/apiservers/engine/proxy/client.go new file mode 100644 index 0000000000..38920477dd --- /dev/null +++ b/lib/apiservers/engine/proxy/client.go @@ -0,0 +1,34 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proxy + +import ( + "github.com/go-openapi/runtime" + rc "github.com/go-openapi/runtime/client" + + apiclient "github.com/vmware/vic/lib/apiservers/portlayer/client" +) + +func NewPortLayerClient(portLayerAddr string) *apiclient.PortLayer { + t := rc.New(portLayerAddr, "/", []string{"http"}) + t.Consumers["application/x-tar"] = runtime.ByteStreamConsumer() + t.Consumers["application/octet-stream"] = runtime.ByteStreamConsumer() + t.Producers["application/x-tar"] = runtime.ByteStreamProducer() + t.Producers["application/octet-stream"] = runtime.ByteStreamProducer() + + portLayerClient := apiclient.New(t, nil) + + return portLayerClient +} diff --git a/lib/apiservers/engine/proxy/common.go b/lib/apiservers/engine/proxy/common.go index 326870697c..23bce818c7 100644 --- a/lib/apiservers/engine/proxy/common.go +++ b/lib/apiservers/engine/proxy/common.go @@ -15,5 +15,5 @@ package proxy const ( - swaggerSubstringEOF = "EOF" + SwaggerSubstringEOF = "EOF" ) diff --git a/lib/apiservers/engine/proxy/container_proxy.go b/lib/apiservers/engine/proxy/container_proxy.go new file mode 100644 index 0000000000..5f82fbbd5e --- /dev/null +++ b/lib/apiservers/engine/proxy/container_proxy.go @@ -0,0 +1,1337 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proxy + +//**** +// container_proxy.go +// +// Contains all code that touches the portlayer for container operations and all +// code that converts swagger based returns to docker personality backend structs. +// The goal is to make the backend code that implements the docker engine-api +// interfaces be as simple as possible and contain no swagger or portlayer code. +// +// Rule for code to be in here: +// 1. touches VIC portlayer +// 2. converts swagger to docker engine-api structs +// 3. errors MUST be docker engine-api compatible errors. DO NOT return arbitrary errors! +// - Do NOT return portlayer errors +// - Do NOT return fmt.Errorf() +// - Do NOT return errors.New() +// - Please USE the aliased docker error package 'derr' + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "syscall" + "time" + + log "github.com/Sirupsen/logrus" + + derr "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + dnetwork "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-connections/nat" + + "github.com/vmware/vic/lib/apiservers/engine/backends/cache" + viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" + "github.com/vmware/vic/lib/apiservers/engine/backends/convert" + epoint "github.com/vmware/vic/lib/apiservers/engine/backends/endpoint" + "github.com/vmware/vic/lib/apiservers/engine/backends/filter" + engconstants "github.com/vmware/vic/lib/apiservers/engine/constants" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/engine/network" + "github.com/vmware/vic/lib/apiservers/portlayer/client" + "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" + "github.com/vmware/vic/lib/apiservers/portlayer/client/interaction" + 
"github.com/vmware/vic/lib/apiservers/portlayer/client/logging" + "github.com/vmware/vic/lib/apiservers/portlayer/client/scopes" + "github.com/vmware/vic/lib/apiservers/portlayer/client/tasks" + "github.com/vmware/vic/lib/apiservers/portlayer/models" + "github.com/vmware/vic/lib/constants" + "github.com/vmware/vic/lib/metadata" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/sys" +) + +// VicContainerProxy interface +type VicContainerProxy interface { + CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) + CreateContainerTask(ctx context.Context, handle string, id string, config types.ContainerCreateConfig) (string, error) + CreateExecTask(ctx context.Context, handle string, config *types.ExecConfig) (string, string, error) + AddContainerToScope(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) + AddLoggingToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) + AddInteractionToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) + + BindInteraction(ctx context.Context, handle string, name string, id string) (string, error) + UnbindInteraction(ctx context.Context, handle string, name string, id string) (string, error) + UnbindContainerFromNetwork(ctx context.Context, vc *viccontainer.VicContainer, handle string) (string, error) + CommitContainerHandle(ctx context.Context, handle, containerID string, waitTime int32) error + + Handle(ctx context.Context, id, name string) (string, error) + + Stop(ctx context.Context, vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error + State(ctx context.Context, vc *viccontainer.VicContainer) (*types.ContainerState, error) + Wait(ctx context.Context, vc *viccontainer.VicContainer, timeout time.Duration) (*types.ContainerState, error) + Signal(ctx context.Context, vc 
*viccontainer.VicContainer, sig uint64) error + Resize(ctx context.Context, id string, height, width int32) error + Rename(ctx context.Context, vc *viccontainer.VicContainer, newName string) error + Remove(ctx context.Context, vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error + + ExitCode(ctx context.Context, vc *viccontainer.VicContainer) (string, error) +} + +// ContainerProxy struct +type ContainerProxy struct { + client *client.PortLayer + portlayerAddr string + portlayerName string +} + +const ( + forceLogType = "json-file" //Use in inspect to allow docker logs to work + ShortIDLen = 12 + + ContainerRunning = "running" + ContainerError = "error" + ContainerStopped = "stopped" + ContainerExited = "exited" + ContainerCreated = "created" +) + +// NewContainerProxy will create a new proxy +func NewContainerProxy(plClient *client.PortLayer, portlayerAddr string, portlayerName string) *ContainerProxy { + return &ContainerProxy{client: plClient, portlayerAddr: portlayerAddr, portlayerName: portlayerName} +} + +// Handle retrieves a handle to a VIC container. Handles should be treated as opaque strings. 
+// +// returns: +// (handle string, error) +func (c *ContainerProxy) Handle(ctx context.Context, id, name string) (string, error) { + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + resp, err := c.client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(id)) + if err != nil { + switch err := err.(type) { + case *containers.GetNotFound: + cache.ContainerCache().DeleteContainer(id) + return "", errors.NotFoundError(name) + case *containers.GetDefault: + return "", errors.InternalServerError(err.Payload.Message) + default: + return "", errors.InternalServerError(err.Error()) + } + } + return resp.Payload, nil +} + +// CreateContainerHandle creates a new VIC container by calling the portlayer +// +// returns: +// (containerID, containerHandle, error) +func (c *ContainerProxy) CreateContainerHandle(ctx context.Context, vc *viccontainer.VicContainer, config types.ContainerCreateConfig) (string, string, error) { + defer trace.End(trace.Begin(vc.ImageID)) + + if c.client == nil { + return "", "", errors.NillPortlayerClientError("ContainerProxy") + } + + if vc.ImageID == "" { + return "", "", errors.NotFoundError("No image specified") + } + + if vc.LayerID == "" { + return "", "", errors.NotFoundError("No layer specified") + } + + // Call the Exec port layer to create the container + host, err := sys.UUID() + if err != nil { + return "", "", errors.InternalServerError("ContainerProxy.CreateContainerHandle got unexpected error getting VCH UUID") + } + + plCreateParams := dockerContainerCreateParamsToPortlayer(ctx, config, vc, host) + createResults, err := c.client.Containers.Create(plCreateParams) + if err != nil { + if _, ok := err.(*containers.CreateNotFound); ok { + cerr := fmt.Errorf("No such image: %s", vc.ImageID) + log.Errorf("%s (%s)", cerr, err) + return "", "", errors.NotFoundError(cerr.Error()) + } + + // If we get here, most likely something went wrong with the port layer API server + return "", "", 
errors.InternalServerError(err.Error()) + } + + id := createResults.Payload.ID + h := createResults.Payload.Handle + + return id, h, nil +} + +// CreateContainerTask sets the primary command to run in the container +// +// returns: +// (containerHandle, error) +func (c *ContainerProxy) CreateContainerTask(ctx context.Context, handle, id string, config types.ContainerCreateConfig) (string, error) { + defer trace.End(trace.Begin("")) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + plTaskParams := dockerContainerCreateParamsToTask(ctx, id, config) + plTaskParams.Config.Handle = handle + + responseJoin, err := c.client.Tasks.Join(plTaskParams) + if err != nil { + log.Errorf("Unable to join primary task to container: %+v", err) + return "", errors.InternalServerError(err.Error()) + } + + handle, ok := responseJoin.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed on handle from task join: %#+v", handle)) + } + + plBindParams := tasks.NewBindParamsWithContext(ctx).WithConfig(&models.TaskBindConfig{Handle: handle, ID: id}) + responseBind, err := c.client.Tasks.Bind(plBindParams) + if err != nil { + log.Errorf("Unable to bind primary task to container: %+v", err) + return "", errors.InternalServerError(err.Error()) + } + + handle, ok = responseBind.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed on handle from task bind %#+v", handle)) + } + + return handle, nil +} + +func (c *ContainerProxy) CreateExecTask(ctx context.Context, handle string, config *types.ExecConfig) (string, string, error) { + defer trace.End(trace.Begin("")) + + if c.client == nil { + return "", "", errors.NillPortlayerClientError("ContainerProxy") + } + + joinconfig := &models.TaskJoinConfig{ + Handle: handle, + Path: config.Cmd[0], + Args: config.Cmd[1:], + Env: config.Env, + User: config.User, + Attach: config.AttachStdin || 
config.AttachStdout || config.AttachStderr, + OpenStdin: config.AttachStdin, + Tty: config.Tty, + } + + // call Join with JoinParams + joinparams := tasks.NewJoinParamsWithContext(ctx).WithConfig(joinconfig) + resp, err := c.client.Tasks.Join(joinparams) + if err != nil { + return "", "", errors.InternalServerError(err.Error()) + } + eid := resp.Payload.ID + + handleprime, ok := resp.Payload.Handle.(string) + if !ok { + return "", "", errors.InternalServerError(fmt.Sprintf("Type assertion failed on handle from task bind %#+v", handleprime)) + } + + return handleprime, eid, nil +} + +// AddContainerToScope adds a container, referenced by handle, to a scope. +// If an error is return, the returned handle should not be used. +// +// returns: +// modified handle +func (c *ContainerProxy) AddContainerToScope(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + log.Debugf("Network Configuration Section - Container Create") + // configure network + netConf := toModelsNetworkConfig(config) + if netConf != nil { + addContRes, err := c.client.Scopes.AddContainer(scopes.NewAddContainerParamsWithContext(ctx). + WithScope(netConf.NetworkName). 
+ WithConfig(&models.ScopesAddContainerConfig{ + Handle: handle, + NetworkConfig: netConf, + })) + + if err != nil { + log.Errorf("ContainerProxy.AddContainerToScope: Scopes error: %s", err.Error()) + return handle, errors.InternalServerError(err.Error()) + } + + defer func() { + if err == nil { + return + } + // roll back the AddContainer call + if _, err2 := c.client.Scopes.RemoveContainer(scopes.NewRemoveContainerParamsWithContext(ctx).WithHandle(handle).WithScope(netConf.NetworkName)); err2 != nil { + log.Warnf("could not roll back container add: %s", err2) + } + }() + + handle = addContRes.Payload + } + + return handle, nil +} + +// AddLoggingToContainer adds logging capability to a container, referenced by handle. +// If an error is return, the returned handle should not be used. +// +// returns: +// modified handle +func (c *ContainerProxy) AddLoggingToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + response, err := c.client.Logging.LoggingJoin(logging.NewLoggingJoinParamsWithContext(ctx). + WithConfig(&models.LoggingJoinConfig{ + Handle: handle, + })) + if err != nil { + return "", errors.InternalServerError(err.Error()) + } + handle, ok := response.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) + } + + return handle, nil +} + +// AddInteractionToContainer adds interaction capabilities to a container, referenced by handle. +// If an error is return, the returned handle should not be used. 
+// +// returns: +// modified handle +func (c *ContainerProxy) AddInteractionToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + response, err := c.client.Interaction.InteractionJoin(interaction.NewInteractionJoinParamsWithContext(ctx). + WithConfig(&models.InteractionJoinConfig{ + Handle: handle, + })) + if err != nil { + return "", errors.InternalServerError(err.Error()) + } + handle, ok := response.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) + } + + return handle, nil +} + +// BindInteraction enables interaction capabilities +func (c *ContainerProxy) BindInteraction(ctx context.Context, handle string, name string, id string) (string, error) { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + bind, err := c.client.Interaction.InteractionBind( + interaction.NewInteractionBindParamsWithContext(ctx). 
+ WithConfig(&models.InteractionBindConfig{ + Handle: handle, + ID: id, + })) + if err != nil { + switch err := err.(type) { + case *interaction.InteractionBindInternalServerError: + return "", errors.InternalServerError(err.Payload.Message) + default: + return "", errors.InternalServerError(err.Error()) + } + } + handle, ok := bind.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", handle)) + } + return handle, nil +} + +// UnbindInteraction disables interaction capabilities +func (c *ContainerProxy) UnbindInteraction(ctx context.Context, handle string, name string, id string) (string, error) { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + unbind, err := c.client.Interaction.InteractionUnbind( + interaction.NewInteractionUnbindParamsWithContext(ctx). + WithConfig(&models.InteractionUnbindConfig{ + Handle: handle, + ID: id, + })) + if err != nil { + return "", errors.InternalServerError(err.Error()) + } + handle, ok := unbind.Payload.Handle.(string) + if !ok { + return "", errors.InternalServerError("type assertion failed") + } + + return handle, nil +} + +// CommitContainerHandle commits any changes to container handle. 
+// +// Args: +// waitTime <= 0 means no wait time +func (c *ContainerProxy) CommitContainerHandle(ctx context.Context, handle, containerID string, waitTime int32) error { + defer trace.End(trace.Begin(handle)) + + if c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + var commitParams *containers.CommitParams + if waitTime > 0 { + commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle).WithWaitTime(&waitTime) + } else { + commitParams = containers.NewCommitParamsWithContext(ctx).WithHandle(handle) + } + + _, err := c.client.Containers.Commit(commitParams) + if err != nil { + switch err := err.(type) { + case *containers.CommitNotFound: + return errors.NotFoundError(containerID) + case *containers.CommitConflict: + return errors.ConflictError(err.Error()) + case *containers.CommitDefault: + return errors.InternalServerError(err.Payload.Message) + default: + return errors.InternalServerError(err.Error()) + } + } + + return nil +} + +// Stop will stop (shutdown) a VIC container. +// +// returns +// error +func (c *ContainerProxy) Stop(ctx context.Context, vc *viccontainer.VicContainer, name string, seconds *int, unbound bool) error { + defer trace.End(trace.Begin(vc.ContainerID)) + + if c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + //retrieve client to portlayer + handle, err := c.Handle(context.TODO(), vc.ContainerID, name) + if err != nil { + return err + } + + // we have a container on the PL side lets check the state before proceeding + // ignore the error since others will be checking below..this is an attempt to short circuit the op + // TODO: can be replaced with simple cache check once power events are propagated to persona + state, err := c.State(ctx, vc) + if err != nil && errors.IsNotFoundError(err) { + cache.ContainerCache().DeleteContainer(vc.ContainerID) + return err + } + // attempt to stop container only if container state is not stopped, exited or created. 
+ // we should allow user to stop and remove the container that is in unexpected status, e.g. starting, because of serial port connection issue + if state.Status == ContainerStopped || state.Status == ContainerExited || state.Status == ContainerCreated { + return nil + } + + if unbound { + handle, err = c.UnbindContainerFromNetwork(ctx, vc, handle) + if err != nil { + return err + } + + // unmap ports + if err = network.UnmapPorts(vc.ContainerID, vc); err != nil { + return err + } + } + + // change the state of the container + changeParams := containers.NewStateChangeParamsWithContext(ctx).WithHandle(handle).WithState("STOPPED") + stateChangeResponse, err := c.client.Containers.StateChange(changeParams) + if err != nil { + switch err := err.(type) { + case *containers.StateChangeNotFound: + cache.ContainerCache().DeleteContainer(vc.ContainerID) + return errors.NotFoundError(name) + case *containers.StateChangeDefault: + return errors.InternalServerError(err.Payload.Message) + default: + return errors.InternalServerError(err.Error()) + } + } + + handle = stateChangeResponse.Payload + + // if no timeout in seconds provided then set to default of 10 + if seconds == nil { + s := 10 + seconds = &s + } + + err = c.CommitContainerHandle(ctx, handle, vc.ContainerID, int32(*seconds)) + if err != nil { + if errors.IsNotFoundError(err) { + cache.ContainerCache().DeleteContainer(vc.ContainerID) + } + return err + } + + return nil +} + +// UnbindContainerFromNetwork unbinds a container from the networks that it connects to +func (c *ContainerProxy) UnbindContainerFromNetwork(ctx context.Context, vc *viccontainer.VicContainer, handle string) (string, error) { + defer trace.End(trace.Begin(vc.ContainerID)) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + unbindParams := scopes.NewUnbindContainerParamsWithContext(ctx).WithHandle(handle) + ub, err := c.client.Scopes.UnbindContainer(unbindParams) + if err != nil { + switch err := 
err.(type) { + case *scopes.UnbindContainerNotFound: + // ignore error + log.Warnf("Container %s not found by network unbind", vc.ContainerID) + case *scopes.UnbindContainerInternalServerError: + return "", errors.InternalServerError(err.Payload.Message) + default: + return "", errors.InternalServerError(err.Error()) + } + } + + return ub.Payload.Handle, nil +} + +// State returns container state +func (c *ContainerProxy) State(ctx context.Context, vc *viccontainer.VicContainer) (*types.ContainerState, error) { + defer trace.End(trace.Begin("")) + + if c.client == nil { + return nil, errors.NillPortlayerClientError("ContainerProxy") + } + + results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID)) + if err != nil { + switch err := err.(type) { + case *containers.GetContainerInfoNotFound: + return nil, errors.NotFoundError(vc.Name) + case *containers.GetContainerInfoInternalServerError: + return nil, errors.InternalServerError(err.Payload.Message) + default: + return nil, errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + } + } + + inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, c.portlayerName) + if err != nil { + return nil, err + } + return inspectJSON.State, nil +} + +// ExitCode returns container exitCode +func (c *ContainerProxy) ExitCode(ctx context.Context, vc *viccontainer.VicContainer) (string, error) { + defer trace.End(trace.Begin("")) + + if c.client == nil { + return "", errors.NillPortlayerClientError("ContainerProxy") + } + + results, err := c.client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(vc.ContainerID)) + if err != nil { + switch err := err.(type) { + case *containers.GetContainerInfoNotFound: + return "", errors.NotFoundError(vc.Name) + case *containers.GetContainerInfoInternalServerError: + return "", errors.InternalServerError(err.Payload.Message) 
+ default: + return "", errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + } + } + // get the container state + dockerState := convert.State(results.Payload) + if dockerState == nil { + return "", errors.InternalServerError("Unable to determine container state") + } + + return strconv.Itoa(dockerState.ExitCode), nil +} + +func (c *ContainerProxy) Wait(ctx context.Context, vc *viccontainer.VicContainer, timeout time.Duration) ( + *types.ContainerState, error) { + + defer trace.End(trace.Begin(vc.ContainerID)) + + if vc == nil { + return nil, errors.InternalServerError("Wait bad arguments") + } + + // Get an API client to the portlayer + if c.client == nil { + return nil, errors.NillPortlayerClientError("ContainerProxy") + } + + params := containers.NewContainerWaitParamsWithContext(ctx). + WithTimeout(int64(timeout.Seconds())). + WithID(vc.ContainerID) + results, err := c.client.Containers.ContainerWait(params) + if err != nil { + switch err := err.(type) { + case *containers.ContainerWaitNotFound: + // since the container wasn't found on the PL lets remove from the local + // cache + cache.ContainerCache().DeleteContainer(vc.ContainerID) + return nil, errors.NotFoundError(vc.ContainerID) + case *containers.ContainerWaitInternalServerError: + return nil, errors.InternalServerError(err.Payload.Message) + default: + return nil, errors.InternalServerError(err.Error()) + } + } + + if results == nil || results.Payload == nil { + return nil, errors.InternalServerError("Unexpected swagger error") + } + + dockerState := convert.State(results.Payload) + if dockerState == nil { + return nil, errors.InternalServerError("Unable to determine container state") + } + return dockerState, nil +} + +func (c *ContainerProxy) Signal(ctx context.Context, vc *viccontainer.VicContainer, sig uint64) error { + defer trace.End(trace.Begin(vc.ContainerID)) + + if vc == nil { + return errors.InternalServerError("Signal bad arguments") + } + + if 
c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + if state, err := c.State(ctx, vc); !state.Running && err == nil { + return fmt.Errorf("%s is not running", vc.ContainerID) + } + + // If Docker CLI sends sig == 0, we use sigkill + if sig == 0 { + sig = uint64(syscall.SIGKILL) + } + params := containers.NewContainerSignalParamsWithContext(ctx).WithID(vc.ContainerID).WithSignal(int64(sig)) + if _, err := c.client.Containers.ContainerSignal(params); err != nil { + switch err := err.(type) { + case *containers.ContainerSignalNotFound: + return errors.NotFoundError(vc.ContainerID) + case *containers.ContainerSignalInternalServerError: + return errors.InternalServerError(err.Payload.Message) + default: + return errors.InternalServerError(err.Error()) + } + } + + if state, err := c.State(ctx, vc); !state.Running && err == nil { + // unmap ports + if err = network.UnmapPorts(vc.ContainerID, vc); err != nil { + return err + } + } + + return nil +} + +func (c *ContainerProxy) Resize(ctx context.Context, id string, height, width int32) error { + defer trace.End(trace.Begin(id)) + + if c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + plResizeParam := interaction.NewContainerResizeParamsWithContext(ctx). + WithID(id). + WithHeight(height). 
+ WithWidth(width) + + _, err := c.client.Interaction.ContainerResize(plResizeParam) + if err != nil { + if _, isa := err.(*interaction.ContainerResizeNotFound); isa { + return errors.ContainerResourceNotFoundError(id, "interaction connection") + } + + // If we get here, most likely something went wrong with the port layer API server + return errors.InternalServerError(err.Error()) + } + + return nil +} + +// Rename calls the portlayer's RenameContainerHandler to update the container name in the handle, +// and then commit the new name to vSphere +func (c *ContainerProxy) Rename(ctx context.Context, vc *viccontainer.VicContainer, newName string) error { + defer trace.End(trace.Begin(vc.ContainerID)) + + //retrieve client to portlayer + handle, err := c.Handle(context.TODO(), vc.ContainerID, vc.Name) + if err != nil { + return err + } + + if c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + // Call the rename functionality in the portlayer. + renameParams := containers.NewContainerRenameParamsWithContext(ctx).WithName(newName).WithHandle(handle) + result, err := c.client.Containers.ContainerRename(renameParams) + if err != nil { + switch err := err.(type) { + // Here we don't check the portlayer error type for *containers.ContainerRenameConflict since + // (1) we already check that in persona cache for ConflictError and + // (2) the container name in portlayer cache will be updated when committing the handle in the next step + case *containers.ContainerRenameNotFound: + return errors.NotFoundError(vc.Name) + default: + return errors.InternalServerError(err.Error()) + } + } + + h := result.Payload + + // commit handle + _, err = c.client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(h)) + if err != nil { + switch err := err.(type) { + case *containers.CommitNotFound: + return errors.NotFoundError(err.Payload.Message) + case *containers.CommitConflict: + return errors.ConflictError(err.Payload.Message) + 
default: + return errors.InternalServerError(err.Error()) + } + } + + return nil +} + +// Remove calls the portlayer's ContainerRemove handler to remove the container and its +// anonymous volumes if the remove flag is set. +func (c *ContainerProxy) Remove(ctx context.Context, vc *viccontainer.VicContainer, config *types.ContainerRmConfig) error { + defer trace.End(trace.Begin(vc.ContainerID)) + + if c.client == nil { + return errors.NillPortlayerClientError("ContainerProxy") + } + + id := vc.ContainerID + _, err := c.client.Containers.ContainerRemove(containers.NewContainerRemoveParamsWithContext(ctx).WithID(id)) + if err != nil { + switch err := err.(type) { + case *containers.ContainerRemoveNotFound: + // Remove container from persona cache, but don't return error to the user. + cache.ContainerCache().DeleteContainer(id) + return nil + case *containers.ContainerRemoveDefault: + return errors.InternalServerError(err.Payload.Message) + case *containers.ContainerRemoveConflict: + return derr.NewRequestConflictError(fmt.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f")) + case *containers.ContainerRemoveInternalServerError: + if err.Payload == nil || err.Payload.Message == "" { + return errors.InternalServerError(err.Error()) + } + return errors.InternalServerError(err.Payload.Message) + default: + return errors.InternalServerError(err.Error()) + } + } + + // Once the container is removed, remove anonymous volumes (vc.Config.Volumes) if + // the remove flag is set. 
+ if config.RemoveVolume && len(vc.Config.Volumes) > 0 { + RemoveAnonContainerVols(ctx, c.client, id, vc) + } + + return nil +} + +//---------- +// Utility Functions +//---------- + +func dockerContainerCreateParamsToTask(ctx context.Context, id string, cc types.ContainerCreateConfig) *tasks.JoinParams { + config := &models.TaskJoinConfig{} + + var path string + var args []string + + // we explicitly specify the ID for the primary task so that it's the same as the containerID + config.ID = id + + // Expand cmd into entrypoint and args + cmd := strslice.StrSlice(cc.Config.Cmd) + if len(cc.Config.Entrypoint) != 0 { + path, args = cc.Config.Entrypoint[0], append(cc.Config.Entrypoint[1:], cmd...) + } else { + path, args = cmd[0], cmd[1:] + } + + // copy the path + config.Path = path + + // copy the args + config.Args = make([]string, len(args)) + copy(config.Args, args) + + // copy the env array + config.Env = make([]string, len(cc.Config.Env)) + copy(config.Env, cc.Config.Env) + + // working dir + config.WorkingDir = cc.Config.WorkingDir + + // user + config.User = cc.Config.User + + // attach. Always set to true otherwise we cannot attach later. + // this tells portlayer container is attachable. 
+ config.Attach = true + + // openstdin + config.OpenStdin = cc.Config.OpenStdin + + // tty + config.Tty = cc.Config.Tty + + // container stop signal + config.StopSignal = cc.Config.StopSignal + + log.Debugf("dockerContainerCreateParamsToTask = %+v", config) + + return tasks.NewJoinParamsWithContext(ctx).WithConfig(config) +} + +func dockerContainerCreateParamsToPortlayer(ctx context.Context, cc types.ContainerCreateConfig, vc *viccontainer.VicContainer, imageStore string) *containers.CreateParams { + config := &models.ContainerCreateConfig{} + + config.NumCpus = cc.HostConfig.CPUCount + config.MemoryMB = cc.HostConfig.Memory + + // Layer/vmdk to use + config.Layer = vc.LayerID + + // Image ID + config.Image = vc.ImageID + + // Repo Requested + config.RepoName = cc.Config.Image + + //copy friendly name + config.Name = cc.Name + + // image store + config.ImageStore = &models.ImageStore{Name: imageStore} + + // network + config.NetworkDisabled = cc.Config.NetworkDisabled + + // Stuff the Docker labels into VIC container annotations + if len(cc.Config.Labels) > 0 { + convert.SetContainerAnnotation(config, convert.AnnotationKeyLabels, cc.Config.Labels) + } + // if autoremove then add to annotation + if cc.HostConfig.AutoRemove { + convert.SetContainerAnnotation(config, convert.AnnotationKeyAutoRemove, cc.HostConfig.AutoRemove) + } + + // hostname + config.Hostname = cc.Config.Hostname + // domainname - https://github.com/moby/moby/issues/27067 + config.Domainname = cc.Config.Domainname + + log.Debugf("dockerContainerCreateParamsToPortlayer = %+v", config) + + return containers.NewCreateParamsWithContext(ctx).WithCreateConfig(config) +} + +func toModelsNetworkConfig(cc types.ContainerCreateConfig) *models.NetworkConfig { + if cc.Config.NetworkDisabled { + return nil + } + + nc := &models.NetworkConfig{ + NetworkName: cc.HostConfig.NetworkMode.NetworkName(), + } + + // Docker supports link for bridge network and user defined network, we should handle that + if 
len(cc.HostConfig.Links) > 0 { + nc.Aliases = append(nc.Aliases, cc.HostConfig.Links...) + } + + if cc.NetworkingConfig != nil { + log.Debugf("EndpointsConfig: %#v", cc.NetworkingConfig) + + es, ok := cc.NetworkingConfig.EndpointsConfig[nc.NetworkName] + if ok { + if es.IPAMConfig != nil { + nc.Address = es.IPAMConfig.IPv4Address + } + + // Pass Links and Aliases to PL + nc.Aliases = append(nc.Aliases, epoint.Alias(es)...) + } + } + + for p := range cc.HostConfig.PortBindings { + nc.Ports = append(nc.Ports, fromPortbinding(p, cc.HostConfig.PortBindings[p])...) + } + + return nc +} + +// fromPortbinding translate Port/PortBinding pair to string array with format "hostPort:containerPort/protocol" or +// "containerPort/protocol" if hostPort is empty +// HostIP is ignored here, cause VCH ip address might change. Will query back real interface address in docker ps +func fromPortbinding(port nat.Port, binding []nat.PortBinding) []string { + var portMappings []string + if len(binding) == 0 { + portMappings = append(portMappings, string(port)) + return portMappings + } + + proto, privatePort := nat.SplitProtoPort(string(port)) + for _, bind := range binding { + var portMap string + if bind.HostPort != "" { + portMap = fmt.Sprintf("%s:%s/%s", bind.HostPort, privatePort, proto) + } else { + portMap = string(port) + } + portMappings = append(portMappings, portMap) + } + return portMappings +} + +//------------------------------------- +// Inspect Utility Functions +//------------------------------------- + +// ContainerInfoToDockerContainerInspect takes a ContainerInfo swagger-based struct +// returned from VIC's port layer and creates an engine-api based container inspect struct. 
+// There maybe other asset gathering if ContainerInfo does not have all the information +func ContainerInfoToDockerContainerInspect(vc *viccontainer.VicContainer, info *models.ContainerInfo, portlayerName string) (*types.ContainerJSON, error) { + if vc == nil || info == nil || info.ContainerConfig == nil { + return nil, errors.NotFoundError(fmt.Sprintf("No such container: %s", vc.ContainerID)) + } + // get the docker state + containerState := convert.State(info) + + inspectJSON := &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + State: containerState, + ResolvConfPath: "", + HostnamePath: "", + HostsPath: "", + Driver: portlayerName, + MountLabel: "", + ProcessLabel: "", + AppArmorProfile: "", + ExecIDs: vc.List(), + HostConfig: hostConfigFromContainerInfo(vc, info, portlayerName), + GraphDriver: types.GraphDriverData{Name: portlayerName}, + SizeRw: nil, + SizeRootFs: nil, + }, + Mounts: MountsFromContainer(vc), + Config: containerConfigFromContainerInfo(vc, info), + NetworkSettings: networkFromContainerInfo(vc, info), + } + + if inspectJSON.NetworkSettings != nil { + log.Debugf("Docker inspect - network settings = %#v", inspectJSON.NetworkSettings) + } else { + log.Debug("Docker inspect - network settings = nil") + } + + if info.ProcessConfig != nil { + inspectJSON.Path = info.ProcessConfig.ExecPath + if len(info.ProcessConfig.ExecArgs) > 0 { + // args[0] is the command and should not appear in the args list here + inspectJSON.Args = info.ProcessConfig.ExecArgs[1:] + } + } + + if info.ContainerConfig != nil { + // set the status to the inspect expected values + containerState.Status = filter.DockerState(info.ContainerConfig.State) + + // https://github.com/docker/docker/blob/master/container/state.go#L77 + if containerState.Status == ContainerStopped { + containerState.Status = ContainerExited + } + + inspectJSON.Image = info.ContainerConfig.ImageID + inspectJSON.LogPath = info.ContainerConfig.LogPath + inspectJSON.RestartCount = 
int(info.ContainerConfig.RestartCount) + inspectJSON.ID = info.ContainerConfig.ContainerID + inspectJSON.Created = time.Unix(0, info.ContainerConfig.CreateTime).Format(time.RFC3339Nano) + if len(info.ContainerConfig.Names) > 0 { + inspectJSON.Name = fmt.Sprintf("/%s", info.ContainerConfig.Names[0]) + } + } + + return inspectJSON, nil +} + +// hostConfigFromContainerInfo() gets the hostconfig that is passed to the backend during +// docker create and updates any needed info +func hostConfigFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo, portlayerName string) *container.HostConfig { + if vc == nil || vc.HostConfig == nil || info == nil { + return nil + } + + // Create a copy of the created container's hostconfig. This is passed in during + // container create + hostConfig := *vc.HostConfig + + // Resources don't really map well to VIC so we leave most of them empty. If we look + // at the struct in engine-api/types/container/host_config.go, Microsoft added + // additional attributes to the struct that are applicable to Windows containers. + // If understanding VIC's host resources are desirable, we should go down this + // same route. + // + // The values we fill out below is an abridged list of the original struct. + resourceConfig := container.Resources{ + // Applicable to all platforms + // CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. 
other containers) + // Memory int64 // Memory limit (in bytes) + + // // Applicable to UNIX platforms + // DiskQuota int64 // Disk limit (in bytes) + } + + hostConfig.VolumeDriver = portlayerName + hostConfig.Resources = resourceConfig + hostConfig.DNS = make([]string, 0) + + if len(info.Endpoints) > 0 { + for _, ep := range info.Endpoints { + for _, dns := range ep.Nameservers { + if dns != "" { + hostConfig.DNS = append(hostConfig.DNS, dns) + } + } + } + + hostConfig.NetworkMode = container.NetworkMode(info.Endpoints[0].Scope) + } + + hostConfig.PortBindings = network.PortMapFromContainer(vc, info) + + // Set this to json-file to force the docker CLI to allow us to use docker logs + hostConfig.LogConfig.Type = forceLogType + + // get the autoremove annotation from the container annotations + convert.ContainerAnnotation(info.ContainerConfig.Annotations, convert.AnnotationKeyAutoRemove, &hostConfig.AutoRemove) + + return &hostConfig +} + +// mountsFromContainer derives []types.MountPoint (used in inspect) from the cached container +// data. +func MountsFromContainer(vc *viccontainer.VicContainer) []types.MountPoint { + if vc == nil { + return nil + } + + var mounts []types.MountPoint + + rawAnonVolumes := make([]string, 0, len(vc.Config.Volumes)) + for k := range vc.Config.Volumes { + rawAnonVolumes = append(rawAnonVolumes, k) + } + + volList, err := finalizeVolumeList(vc.HostConfig.Binds, rawAnonVolumes) + if err != nil { + return mounts + } + + for _, vol := range volList { + mountConfig := types.MountPoint{ + Type: mount.TypeVolume, + Driver: engconstants.DefaultVolumeDriver, + Name: vol.ID, + Source: vol.ID, + Destination: vol.Dest, + RW: false, + Mode: vol.Flags, + } + + if strings.Contains(strings.ToLower(vol.Flags), "rw") { + mountConfig.RW = true + } + mounts = append(mounts, mountConfig) + } + + return mounts +} + +// containerConfigFromContainerInfo() returns a container.Config that has attributes +// overridden at create or start time. This is important. 
This function is called +// to help build the Container Inspect struct. That struct contains the original +// container config that is part of the image metadata AND the overridden container +// config. The user can override these via the remote API or the docker CLI. +func containerConfigFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo) *container.Config { + if vc == nil || vc.Config == nil || info == nil || info.ContainerConfig == nil || info.ProcessConfig == nil { + return nil + } + + // Copy the working copy of our container's config + container := *vc.Config + + if info.ContainerConfig.ContainerID != "" { + container.Hostname = stringid.TruncateID(info.ContainerConfig.ContainerID) // Hostname + } + if info.ContainerConfig.AttachStdin != nil { + container.AttachStdin = *info.ContainerConfig.AttachStdin // Attach the standard input, makes possible user interaction + } + if info.ContainerConfig.AttachStdout != nil { + container.AttachStdout = *info.ContainerConfig.AttachStdout // Attach the standard output + } + if info.ContainerConfig.AttachStderr != nil { + container.AttachStderr = *info.ContainerConfig.AttachStderr // Attach the standard error + } + if info.ContainerConfig.Tty != nil { + container.Tty = *info.ContainerConfig.Tty // Attach standard streams to a tty, including stdin if it is not closed. + } + if info.ContainerConfig.OpenStdin != nil { + container.OpenStdin = *info.ContainerConfig.OpenStdin + } + // They are not coming from PL so set them to true unconditionally + container.StdinOnce = true + + if info.ContainerConfig.RepoName != nil { + container.Image = *info.ContainerConfig.RepoName // Name of the image as it was passed by the operator (eg. 
could be symbolic) + } + + // Fill in information about the process + if info.ProcessConfig.Env != nil { + container.Env = info.ProcessConfig.Env // List of environment variable to set in the container + } + + if info.ProcessConfig.WorkingDir != nil { + container.WorkingDir = *info.ProcessConfig.WorkingDir // Current directory (PWD) in the command will be launched + } + + container.User = info.ProcessConfig.User + + // Fill in information about the container network + if info.Endpoints == nil { + container.NetworkDisabled = true + } else { + container.NetworkDisabled = false + container.MacAddress = "" + container.ExposedPorts = vc.Config.ExposedPorts + } + + // Get the original container config from the image's metadata in our image cache. + var imageConfig *metadata.ImageConfig + + if info.ContainerConfig.LayerID != "" { + // #nosec: Errors unhandled. + imageConfig, _ = cache.ImageCache().Get(info.ContainerConfig.LayerID) + } + + // Fill in the values with defaults from the original image's container config + // structure + if imageConfig != nil { + container.StopSignal = imageConfig.ContainerConfig.StopSignal // Signal to stop a container + + container.OnBuild = imageConfig.ContainerConfig.OnBuild // ONBUILD metadata that were defined on the image Dockerfile + } + + // Pull labels from the annotation + convert.ContainerAnnotation(info.ContainerConfig.Annotations, convert.AnnotationKeyLabels, &container.Labels) + return &container +} + +func networkFromContainerInfo(vc *viccontainer.VicContainer, info *models.ContainerInfo) *types.NetworkSettings { + networks := &types.NetworkSettings{ + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: "", + SandboxID: "", + HairpinMode: false, + LinkLocalIPv6Address: "", + LinkLocalIPv6PrefixLen: 0, + Ports: network.PortMapFromContainer(vc, info), + SandboxKey: "", + SecondaryIPAddresses: nil, + SecondaryIPv6Addresses: nil, + }, + Networks: make(map[string]*dnetwork.EndpointSettings), + } + + shortCID := 
vc.ContainerID[0:ShortIDLen] + + // Fill in as much info from the endpoint struct inside of the ContainerInfo. + // The rest of the data must be obtained from the Scopes portlayer. + for _, ep := range info.Endpoints { + netEp := &dnetwork.EndpointSettings{ + IPAMConfig: nil, //Get from Scope PL + Links: nil, + Aliases: nil, + NetworkID: "", //Get from Scope PL + EndpointID: ep.ID, + Gateway: ep.Gateway, + IPAddress: "", + IPPrefixLen: 0, //Get from Scope PL + IPv6Gateway: "", //Get from Scope PL + GlobalIPv6Address: "", //Get from Scope PL + GlobalIPv6PrefixLen: 0, //Get from Scope PL + MacAddress: "", //Container endpoints currently do not have mac addr yet + } + + if ep.Address != "" { + ip, ipnet, err := net.ParseCIDR(ep.Address) + if err == nil { + netEp.IPAddress = ip.String() + netEp.IPPrefixLen, _ = ipnet.Mask.Size() + } + } + + if len(ep.Aliases) > 0 { + netEp.Aliases = make([]string, len(ep.Aliases)) + found := false + for i, alias := range ep.Aliases { + netEp.Aliases[i] = alias + if alias == shortCID { + found = true + } + } + + if !found { + netEp.Aliases = append(netEp.Aliases, vc.ContainerID[0:ShortIDLen]) + } + } + + networks.Networks[ep.Scope] = netEp + } + + return networks +} + +// addExposedToPortMap ensures that exposed ports are all present in the port map. +// This means nil entries for any exposed ports that are not mapped. +// The portMap provided is modified and returned - the return value should always be used. 
+func addExposedToPortMap(config *container.Config, portMap nat.PortMap) nat.PortMap { + if config == nil || len(config.ExposedPorts) == 0 { + return portMap + } + + if portMap == nil { + portMap = make(nat.PortMap) + } + + for p := range config.ExposedPorts { + if _, ok := portMap[p]; ok { + continue + } + + portMap[p] = nil + } + + return portMap +} + +func ContainerInfoToVicContainer(info models.ContainerInfo, portlayerName string) *viccontainer.VicContainer { + vc := viccontainer.NewVicContainer() + + if info.ContainerConfig.ContainerID != "" { + vc.ContainerID = info.ContainerConfig.ContainerID + } + + log.Debugf("Convert container info to vic container: %s", vc.ContainerID) + + if len(info.ContainerConfig.Names) > 0 { + vc.Name = info.ContainerConfig.Names[0] + log.Debugf("Container %q", vc.Name) + } + + if info.ContainerConfig.LayerID != "" { + vc.LayerID = info.ContainerConfig.LayerID + } + + if info.ContainerConfig.ImageID != "" { + vc.ImageID = info.ContainerConfig.ImageID + } + + tempVC := viccontainer.NewVicContainer() + tempVC.HostConfig = &container.HostConfig{} + vc.Config = containerConfigFromContainerInfo(tempVC, &info) + vc.HostConfig = hostConfigFromContainerInfo(tempVC, &info, portlayerName) + + // FIXME: duplicate Config.Volumes and HostConfig.Binds here for can not derive them from persisted value right now. 
+ // get volumes from volume config + vc.Config.Volumes = make(map[string]struct{}, len(info.VolumeConfig)) + vc.HostConfig.Binds = []string{} + for _, volume := range info.VolumeConfig { + mount := getMountString(volume.Name, volume.MountPoint, volume.Flags[constants.Mode]) + vc.Config.Volumes[mount] = struct{}{} + vc.HostConfig.Binds = append(vc.HostConfig.Binds, mount) + log.Debugf("add volume mount %s to config.volumes and hostconfig.binds", mount) + } + + vc.Config.Cmd = info.ProcessConfig.ExecArgs + + return vc +} diff --git a/lib/apiservers/engine/backends/container_proxy_test.go b/lib/apiservers/engine/proxy/container_proxy_test.go similarity index 99% rename from lib/apiservers/engine/backends/container_proxy_test.go rename to lib/apiservers/engine/proxy/container_proxy_test.go index 02d203881b..c853c65bea 100644 --- a/lib/apiservers/engine/backends/container_proxy_test.go +++ b/lib/apiservers/engine/proxy/container_proxy_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package backends +package proxy import ( "testing" diff --git a/lib/apiservers/engine/proxy/errors.go b/lib/apiservers/engine/proxy/errors.go deleted file mode 100644 index 9fb6cb7b04..0000000000 --- a/lib/apiservers/engine/proxy/errors.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package proxy - -import ( - "fmt" - "net/http" - - derr "github.com/docker/docker/api/errors" -) - -// InternalServerError returns a 500 docker error on a portlayer error. -func InternalServerError(msg string) error { - return derr.NewErrorWithStatusCode(fmt.Errorf("Server error from portlayer: %s", msg), http.StatusInternalServerError) -} - -// ResourceLockedError returns a 423 http status -func ResourceLockedError(msg string) error { - return derr.NewErrorWithStatusCode(fmt.Errorf("Resource locked: %s", msg), http.StatusLocked) -} - -// ResourceNotFoundError returns a 404 http status -func ResourceNotFoundError(msg string) error { - return derr.NewErrorWithStatusCode(fmt.Errorf("No such %s", msg), http.StatusNotFound) -} diff --git a/lib/apiservers/engine/proxy/storage_proxy.go b/lib/apiservers/engine/proxy/storage_proxy.go new file mode 100644 index 0000000000..78ae07d834 --- /dev/null +++ b/lib/apiservers/engine/proxy/storage_proxy.go @@ -0,0 +1,626 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proxy + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/google/uuid" + + derr "github.com/docker/docker/api/errors" + "github.com/docker/docker/api/types" + "github.com/docker/go-units" + + viccontainer "github.com/vmware/vic/lib/apiservers/engine/backends/container" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/portlayer/client" + "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" + "github.com/vmware/vic/lib/apiservers/portlayer/client/storage" + "github.com/vmware/vic/lib/apiservers/portlayer/models" + "github.com/vmware/vic/lib/constants" + "github.com/vmware/vic/pkg/trace" +) + +type VicStorageProxy interface { + Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) + VolumeList(ctx context.Context, filter string) ([]*models.VolumeResponse, error) + VolumeInfo(ctx context.Context, name string) (*models.VolumeResponse, error) + Remove(ctx context.Context, name string) error + + AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) +} + +type StorageProxy struct { + client *client.PortLayer +} + +type volumeFields struct { + ID string + Dest string + Flags string +} + +type VolumeMetadata struct { + Driver string + DriverOpts map[string]string + Name string + Labels map[string]string + AttachHistory []string + Image string +} + +const ( + DriverArgFlagKey = "flags" + DriverArgContainerKey = "container" + DriverArgImageKey = "image" + + OptsVolumeStoreKey string = "volumestore" + OptsCapacityKey string = "capacity" + DockerMetadataModelKey string = "DockerMetaData" +) + +// define a set (whitelist) of valid driver opts keys for command line argument validation +var validDriverOptsKeys = map[string]struct{}{ + OptsVolumeStoreKey: {}, + OptsCapacityKey: {}, + DriverArgFlagKey: {}, + 
DriverArgContainerKey: {}, + DriverArgImageKey: {}, +} + +// Volume drivers currently supported. "local" is the default driver supplied by the client +// and is equivalent to "vsphere" for our implementation. +var SupportedVolDrivers = map[string]struct{}{ + "vsphere": {}, + "local": {}, +} + +//Validation pattern for Volume Names +var volumeNameRegex = regexp.MustCompile("^[a-zA-Z0-9][a-zA-Z0-9_.-]*$") + +func NewStorageProxy(client *client.PortLayer) VicStorageProxy { + if client == nil { + return nil + } + + return &StorageProxy{client: client} +} + +func (s *StorageProxy) Create(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { + defer trace.End(trace.Begin("")) + + if s.client == nil { + return nil, errors.NillPortlayerClientError("StorageProxy") + } + + result, err := s.volumeCreate(ctx, name, driverName, volumeData, labels) + if err != nil { + switch err := err.(type) { + case *storage.CreateVolumeConflict: + return result, errors.VolumeInternalServerError(fmt.Errorf("A volume named %s already exists. Choose a different volume name.", name)) + case *storage.CreateVolumeNotFound: + return result, errors.VolumeInternalServerError(fmt.Errorf("No volume store named (%s) exists", volumeStore(volumeData))) + case *storage.CreateVolumeInternalServerError: + // FIXME: right now this does not return an error model... 
+ return result, errors.VolumeInternalServerError(fmt.Errorf("%s", err.Error())) + case *storage.CreateVolumeDefault: + return result, errors.VolumeInternalServerError(fmt.Errorf("%s", err.Payload.Message)) + default: + return result, errors.VolumeInternalServerError(fmt.Errorf("%s", err)) + } + } + + return result, nil +} + +// volumeCreate issues a CreateVolume request to the portlayer +func (s *StorageProxy) volumeCreate(ctx context.Context, name, driverName string, volumeData, labels map[string]string) (*types.Volume, error) { + defer trace.End(trace.Begin("")) + result := &types.Volume{} + + if s.client == nil { + return nil, errors.NillPortlayerClientError("StorageProxy") + } + + if name == "" { + name = uuid.New().String() + } + + // TODO: support having another driver besides vsphere. + // assign the values of the model to be passed to the portlayer handler + req, varErr := newVolumeCreateReq(name, driverName, volumeData, labels) + if varErr != nil { + return result, varErr + } + log.Infof("Finalized model for volume create request to portlayer: %#v", req) + + res, err := s.client.Storage.CreateVolume(storage.NewCreateVolumeParamsWithContext(ctx).WithVolumeRequest(req)) + if err != nil { + return result, err + } + + return NewVolumeModel(res.Payload, labels), nil +} + +func (s *StorageProxy) VolumeList(ctx context.Context, filter string) ([]*models.VolumeResponse, error) { + defer trace.End(trace.Begin("")) + + if s.client == nil { + return nil, errors.NillPortlayerClientError("StorageProxy") + } + + res, err := s.client.Storage.ListVolumes(storage.NewListVolumesParamsWithContext(ctx).WithFilterString(&filter)) + if err != nil { + switch err := err.(type) { + case *storage.ListVolumesInternalServerError: + return nil, errors.VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Payload.Message)) + case *storage.ListVolumesDefault: + return nil, errors.VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", 
err.Payload.Message)) + default: + return nil, errors.VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Error())) + } + } + + return res.Payload, nil +} + +func (s *StorageProxy) VolumeInfo(ctx context.Context, name string) (*models.VolumeResponse, error) { + defer trace.End(trace.Begin(name)) + + if name == "" { + return nil, nil + } + + if s.client == nil { + return nil, errors.NillPortlayerClientError("StorageProxy") + } + + param := storage.NewGetVolumeParamsWithContext(ctx).WithName(name) + res, err := s.client.Storage.GetVolume(param) + if err != nil { + switch err := err.(type) { + case *storage.GetVolumeNotFound: + return nil, errors.VolumeNotFoundError(name) + default: + return nil, errors.VolumeInternalServerError(fmt.Errorf("error from portlayer server: %s", err.Error())) + } + } + + return res.Payload, nil +} + +func (s *StorageProxy) Remove(ctx context.Context, name string) error { + defer trace.End(trace.Begin(name)) + + if s.client == nil { + return errors.NillPortlayerClientError("StorageProxy") + } + + _, err := s.client.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(name)) + if err != nil { + switch err := err.(type) { + case *storage.RemoveVolumeNotFound: + return derr.NewRequestNotFoundError(fmt.Errorf("Get %s: no such volume", name)) + case *storage.RemoveVolumeConflict: + return derr.NewRequestConflictError(fmt.Errorf(err.Payload.Message)) + case *storage.RemoveVolumeInternalServerError: + return errors.VolumeInternalServerError(fmt.Errorf("Server error from portlayer: %s", err.Payload.Message)) + default: + return errors.VolumeInternalServerError(fmt.Errorf("Server error from portlayer: %s", err)) + } + } + + return nil +} + +// AddVolumesToContainer adds volumes to a container, referenced by handle. +// If an error is returned, the returned handle should not be used. 
+// +// returns: +// modified handle +func (s *StorageProxy) AddVolumesToContainer(ctx context.Context, handle string, config types.ContainerCreateConfig) (string, error) { + defer trace.End(trace.Begin(handle)) + + if s.client == nil { + return "", errors.NillPortlayerClientError("StorageProxy") + } + + // Volume Attachment Section + log.Debugf("ContainerProxy.AddVolumesToContainer - VolumeSection") + log.Debugf("Raw volume arguments: binds: %#v, volumes: %#v", config.HostConfig.Binds, config.Config.Volumes) + + // Collect all volume mappings. In a docker create/run, they + // can be anonymous (-v /dir) or specific (-v vol-name:/dir). + // anonymous volumes can also come from Image Metadata + + rawAnonVolumes := make([]string, 0, len(config.Config.Volumes)) + for k := range config.Config.Volumes { + rawAnonVolumes = append(rawAnonVolumes, k) + } + + volList, err := finalizeVolumeList(config.HostConfig.Binds, rawAnonVolumes) + if err != nil { + return handle, errors.BadRequestError(err.Error()) + } + log.Infof("Finalized volume list: %#v", volList) + + if len(config.Config.Volumes) > 0 { + // override anonymous volume list with generated volume id + for _, vol := range volList { + if _, ok := config.Config.Volumes[vol.Dest]; ok { + delete(config.Config.Volumes, vol.Dest) + mount := getMountString(vol.ID, vol.Dest, vol.Flags) + config.Config.Volumes[mount] = struct{}{} + log.Debugf("Replace anonymous volume config %s with %s", vol.Dest, mount) + } + } + } + + // Create and join volumes. 
+ for _, fields := range volList { + // We only set these here for volumes made on a docker create + volumeData := make(map[string]string) + volumeData[DriverArgFlagKey] = fields.Flags + volumeData[DriverArgContainerKey] = config.Name + volumeData[DriverArgImageKey] = config.Config.Image + + // NOTE: calling volumeCreate regardless of whether the volume is already + // present can be avoided by adding an extra optional param to VolumeJoin, + // which would then call volumeCreate if the volume does not exist. + _, err := s.volumeCreate(ctx, fields.ID, "vsphere", volumeData, nil) + if err != nil { + switch err := err.(type) { + case *storage.CreateVolumeConflict: + // Implicitly ignore the error where a volume with the same name + // already exists. We can just join the said volume to the container. + log.Infof("a volume with the name %s already exists", fields.ID) + case *storage.CreateVolumeNotFound: + return handle, errors.VolumeCreateNotFoundError(volumeStore(volumeData)) + default: + return handle, errors.InternalServerError(err.Error()) + } + } else { + log.Infof("volumeCreate succeeded. Volume mount section ID: %s", fields.ID) + } + + flags := make(map[string]string) + //NOTE: for now we are passing the flags directly through. This is NOT SAFE and only a stop gap. 
+ flags[constants.Mode] = fields.Flags + joinParams := storage.NewVolumeJoinParamsWithContext(ctx).WithJoinArgs(&models.VolumeJoinConfig{ + Flags: flags, + Handle: handle, + MountPath: fields.Dest, + }).WithName(fields.ID) + + res, err := s.client.Storage.VolumeJoin(joinParams) + if err != nil { + switch err := err.(type) { + case *storage.VolumeJoinInternalServerError: + return handle, errors.InternalServerError(err.Payload.Message) + case *storage.VolumeJoinDefault: + return handle, errors.InternalServerError(err.Payload.Message) + case *storage.VolumeJoinNotFound: + return handle, errors.VolumeJoinNotFoundError(err.Payload.Message) + default: + return handle, errors.InternalServerError(err.Error()) + } + } + + handle = res.Payload + } + + return handle, nil +} + +// allContainers obtains all containers from the portlayer, akin to `docker ps -a`. +func (s *StorageProxy) allContainers(ctx context.Context) ([]*models.ContainerInfo, error) { + if s.client == nil { + return nil, errors.NillPortlayerClientError("StorageProxy") + } + + all := true + cons, err := s.client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all)) + if err != nil { + return nil, err + } + + return cons.Payload, nil +} + +// fetchJoinedVolumes obtains all containers from the portlayer and returns a map with all +// volumes that are joined to at least one container. 
+func (s *StorageProxy) fetchJoinedVolumes(ctx context.Context) (map[string]struct{}, error) { + conts, err := s.allContainers(ctx) + if err != nil { + return nil, errors.VolumeInternalServerError(err) + } + + joinedVolumes := make(map[string]struct{}) + var v struct{} + for i := range conts { + for _, vol := range conts[i].VolumeConfig { + joinedVolumes[vol.Name] = v + } + } + + return joinedVolumes, nil +} + +//------------------------------------ +// Utility Functions +//------------------------------------ + +func NewVolumeModel(volume *models.VolumeResponse, labels map[string]string) *types.Volume { + return &types.Volume{ + Driver: volume.Driver, + Name: volume.Name, + Labels: labels, + Mountpoint: volume.Label, + } +} + +// volumeStore returns the value of the optional volume store param specified in the CLI. +func volumeStore(args map[string]string) string { + storeName, ok := args[OptsVolumeStoreKey] + if !ok { + return "default" + } + return storeName +} + +// getMountString returns a colon-delimited string containing a volume's name/ID, mount +// point and flags. +func getMountString(mounts ...string) string { + return strings.Join(mounts, ":") +} + +func createVolumeMetadata(req *models.VolumeRequest, driverargs, labels map[string]string) (string, error) { + metadata := VolumeMetadata{ + Driver: req.Driver, + DriverOpts: req.DriverArgs, + Name: req.Name, + Labels: labels, + AttachHistory: []string{driverargs[DriverArgContainerKey]}, + Image: driverargs[DriverArgImageKey], + } + result, err := json.Marshal(metadata) + return string(result), err +} + +// RemoveAnonContainerVols removes anonymous volumes joined to a container. It is invoked +// once the said container has been removed. It fetches a list of volumes that are joined +// to at least one other container, and calls the portlayer to remove this container's +// anonymous volumes if they are dangling. Errors, if any, are only logged. 
+func RemoveAnonContainerVols(ctx context.Context, pl *client.PortLayer, cID string, vc *viccontainer.VicContainer) { + // NOTE: these strings come in the form of :: + volumes := vc.Config.Volumes + // NOTE: these strings come in the form of : + namedVolumes := vc.HostConfig.Binds + + // assemble a mask of volume paths before processing binds. MUST be paths, as we want to move to honoring the proper metadata in the "volumes" section in the future. + namedMaskList := make(map[string]struct{}, 0) + for _, entry := range namedVolumes { + fields := strings.SplitN(entry, ":", 2) + if len(fields) != 2 { + log.Errorf("Invalid entry in the HostConfig.Binds metadata section for container %s: %s", cID, entry) + continue + } + destPath := fields[1] + namedMaskList[destPath] = struct{}{} + } + + proxy := StorageProxy{client: pl} + joinedVols, err := proxy.fetchJoinedVolumes(ctx) + if err != nil { + log.Errorf("Unable to obtain joined volumes from portlayer, skipping removal of anonymous volumes for %s: %s", cID, err.Error()) + return + } + + for vol := range volumes { + // Extract the volume ID from the full mount path, which is of form "id:mountpath:flags" - see getMountString(). 
+ volFields := strings.SplitN(vol, ":", 3) + + // NOTE(mavery): this check will start to fail when we fix our metadata correctness issues + if len(volFields) != 3 { + log.Debugf("Invalid entry in the volumes metadata section for container %s: %s", cID, vol) + continue + } + volName := volFields[0] + volPath := volFields[1] + + _, isNamed := namedMaskList[volPath] + _, joined := joinedVols[volName] + if !joined && !isNamed { + _, err := pl.Storage.RemoveVolume(storage.NewRemoveVolumeParamsWithContext(ctx).WithName(volName)) + if err != nil { + log.Debugf("Unable to remove anonymous volume %s in container %s: %s", volName, cID, err.Error()) + continue + } + log.Debugf("Successfully removed anonymous volume %s during remove operation against container(%s)", volName, cID) + } + } +} + +// processVolumeParam is used to turn any call from docker create -v into a volumeFields object. +// The -v has 3 forms. -v , -v : and +// -v :: +func processVolumeParam(volString string) (volumeFields, error) { + volumeStrings := strings.Split(volString, ":") + fields := volumeFields{} + + // Error out if the intended volume is a directory on the client filesystem. + numVolParams := len(volumeStrings) + if numVolParams > 1 && strings.HasPrefix(volumeStrings[0], "/") { + return volumeFields{}, errors.InvalidVolumeError{} + } + + // This switch determines which type of -v was invoked. + switch numVolParams { + case 1: + VolumeID, err := uuid.NewUUID() + if err != nil { + return fields, err + } + fields.ID = VolumeID.String() + fields.Dest = volumeStrings[0] + fields.Flags = "rw" + case 2: + fields.ID = volumeStrings[0] + fields.Dest = volumeStrings[1] + fields.Flags = "rw" + case 3: + fields.ID = volumeStrings[0] + fields.Dest = volumeStrings[1] + fields.Flags = volumeStrings[2] + default: + // NOTE: the docker cli should cover this case. This is here for posterity. 
+ return volumeFields{}, errors.InvalidBindError{Volume: volString} + } + return fields, nil +} + +// processVolumeFields parses fields for volume mappings specified in a create/run -v. +// It returns a map of unique mountable volumes. This means that it removes dupes favoring +// specified volumes over anonymous volumes. +func processVolumeFields(volumes []string) (map[string]volumeFields, error) { + volumeFields := make(map[string]volumeFields) + + for _, v := range volumes { + fields, err := processVolumeParam(v) + log.Infof("Processed volume arguments: %#v", fields) + if err != nil { + return nil, err + } + volumeFields[fields.Dest] = fields + } + return volumeFields, nil +} + +func finalizeVolumeList(specifiedVolumes, anonymousVolumes []string) ([]volumeFields, error) { + log.Infof("Specified Volumes : %#v", specifiedVolumes) + processedVolumes, err := processVolumeFields(specifiedVolumes) + if err != nil { + return nil, err + } + + log.Infof("anonymous Volumes : %#v", anonymousVolumes) + processedAnonVolumes, err := processVolumeFields(anonymousVolumes) + if err != nil { + return nil, err + } + + //combine all volumes, specified volumes are taken over anonymous volumes + for k, v := range processedVolumes { + processedAnonVolumes[k] = v + } + + finalizedVolumes := make([]volumeFields, 0, len(processedAnonVolumes)) + for _, v := range processedAnonVolumes { + finalizedVolumes = append(finalizedVolumes, v) + } + return finalizedVolumes, nil +} + +func newVolumeCreateReq(name, driverName string, volumeData, labels map[string]string) (*models.VolumeRequest, error) { + if _, ok := SupportedVolDrivers[driverName]; !ok { + return nil, fmt.Errorf("error looking up volume plugin %s: plugin not found", driverName) + } + + if !volumeNameRegex.Match([]byte(name)) && name != "" { + return nil, fmt.Errorf("volume name %q includes invalid characters, only \"[a-zA-Z0-9][a-zA-Z0-9_.-]\" are allowed", name) + } + + req := &models.VolumeRequest{ + Driver: driverName, + 
DriverArgs: volumeData, + Name: name, + Metadata: make(map[string]string), + } + + metadata, err := createVolumeMetadata(req, volumeData, labels) + if err != nil { + return nil, err + } + + req.Metadata[DockerMetadataModelKey] = metadata + + if err := validateDriverArgs(volumeData, req); err != nil { + return nil, fmt.Errorf("bad driver value - %s", err) + } + + return req, nil +} + +func validateDriverArgs(args map[string]string, req *models.VolumeRequest) error { + if err := normalizeDriverArgs(args); err != nil { + return err + } + + // volumestore name validation + req.Store = volumeStore(args) + + // capacity validation + capstr, ok := args[OptsCapacityKey] + if !ok { + req.Capacity = -1 + return nil + } + + //check if it is just a numerical value + capacity, err := strconv.ParseInt(capstr, 10, 64) + if err == nil { + //input has no units in this case. + if capacity < 1 { + return fmt.Errorf("Invalid size: %s", capstr) + } + req.Capacity = capacity + return nil + } + + capacity, err = units.FromHumanSize(capstr) + if err != nil { + return err + } + + if capacity < 1 { + return fmt.Errorf("Capacity value too large: %s", capstr) + } + + req.Capacity = int64(capacity) / int64(units.MB) + return nil +} + +func normalizeDriverArgs(args map[string]string) error { + // normalize keys to lowercase & validate them + for k, val := range args { + lowercase := strings.ToLower(k) + + if _, ok := validDriverOptsKeys[lowercase]; !ok { + return fmt.Errorf("%s is not a supported option", k) + } + + if strings.Compare(lowercase, k) != 0 { + delete(args, k) + args[lowercase] = val + } + } + return nil +} diff --git a/lib/apiservers/engine/proxy/storage_proxy_test.go b/lib/apiservers/engine/proxy/storage_proxy_test.go new file mode 100644 index 0000000000..310b2987d9 --- /dev/null +++ b/lib/apiservers/engine/proxy/storage_proxy_test.go @@ -0,0 +1,129 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware/vic/lib/apiservers/portlayer/models" +) + +func TestFillDockerVolume(t *testing.T) { + testResponse := &models.VolumeResponse{ + Driver: "vsphere", + Name: "Test Volume", + Label: "Test Label", + } + testLabels := make(map[string]string) + testLabels["TestMeta"] = "custom info about my volume" + + dockerVolume := NewVolumeModel(testResponse, testLabels) + + assert.Equal(t, "vsphere", dockerVolume.Driver) + assert.Equal(t, "Test Volume", dockerVolume.Name) + assert.Equal(t, "Test Label", dockerVolume.Mountpoint) + assert.Equal(t, "custom info about my volume", dockerVolume.Labels["TestMeta"]) +} + +func TestTranslatVolumeRequestModel(t *testing.T) { + testLabels := make(map[string]string) + testLabels["TestMeta"] = "custom info about my volume" + + testDriverArgs := make(map[string]string) + testDriverArgs["testarg"] = "important driver stuff" + testDriverArgs[OptsVolumeStoreKey] = "testStore" + testDriverArgs[OptsCapacityKey] = "12MB" + + testRequest, err := newVolumeCreateReq("testName", "vsphere", testDriverArgs, testLabels) + if !assert.Error(t, err) { + return + } + + delete(testDriverArgs, "testarg") + testRequest, err = newVolumeCreateReq("testName", "vsphere", testDriverArgs, testLabels) + if !assert.NoError(t, err) { + return + } + + assert.Equal(t, "testName", testRequest.Name) + assert.Equal(t, 
"", testRequest.DriverArgs["testarg"]) // unsupported keys should just be empty + assert.Equal(t, "testStore", testRequest.Store) + assert.Equal(t, "vsphere", testRequest.Driver) + assert.Equal(t, int64(12), testRequest.Capacity) + + testMetaDatabuf, err := createVolumeMetadata(testRequest, testDriverArgs, testLabels) + if !assert.NoError(t, err) { + return + } + + assert.Equal(t, testMetaDatabuf, testRequest.Metadata[DockerMetadataModelKey]) + assert.Nil(t, err) +} + +func TestValidateDriverArgs(t *testing.T) { + testMap := make(map[string]string) + testStore := "Mystore" + testCap := "12MB" + testBadCap := "This is not valid!" + testModel := models.VolumeRequest{ + Driver: "vsphere", + DriverArgs: testMap, + Name: "testModel", + } + + err := validateDriverArgs(testMap, &testModel) + if !assert.Equal(t, "default", testModel.Store) || !assert.Equal(t, int64(-1), testModel.Capacity) || !assert.NoError(t, err) { + return + } + + testMap[OptsVolumeStoreKey] = testStore + testMap[OptsCapacityKey] = testCap + err = validateDriverArgs(testMap, &testModel) + if !assert.Equal(t, testStore, testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.NoError(t, err) { + return + } + + //This is a negative test case. 
We want an error + testMap[OptsCapacityKey] = testBadCap + err = validateDriverArgs(testMap, &testModel) + if !assert.Equal(t, testStore, testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.Error(t, err) { + return + } + + testMap[OptsCapacityKey] = testCap + delete(testMap, OptsVolumeStoreKey) + err = validateDriverArgs(testMap, &testModel) + if !assert.Equal(t, "default", testModel.Store) || !assert.Equal(t, int64(12), testModel.Capacity) || !assert.NoError(t, err) { + return + } +} + +func TestNormalizeDriverArgs(t *testing.T) { + testOptMap := make(map[string]string) + testOptMap["VOLUMESTORE"] = "foo" + testOptMap["CAPACITY"] = "bar" + + normalizeDriverArgs(testOptMap) + + assert.Equal(t, testOptMap["volumestore"], "foo") + assert.Equal(t, testOptMap["capacity"], "bar") + + testOptMap["bogus"] = "bogus" + + err := normalizeDriverArgs(testOptMap) + assert.Error(t, err, "expected: bogus is not a supported option") +} diff --git a/lib/apiservers/engine/proxy/stream_proxy.go b/lib/apiservers/engine/proxy/stream_proxy.go new file mode 100644 index 0000000000..12c12df8e0 --- /dev/null +++ b/lib/apiservers/engine/proxy/stream_proxy.go @@ -0,0 +1,495 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package proxy + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/go-openapi/strfmt" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/pkg/term" + + "github.com/vmware/vic/lib/apiservers/engine/backends/convert" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/portlayer/client" + "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" + "github.com/vmware/vic/lib/apiservers/portlayer/client/interaction" + "github.com/vmware/vic/pkg/trace" +) + +type VicStreamProxy interface { + AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error + StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error + StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error +} + +type StreamProxy struct { + client *client.PortLayer +} + +const ( + attachConnectTimeout time.Duration = 15 * time.Second //timeout for the connection + attachAttemptTimeout time.Duration = 60 * time.Second //timeout before we ditch an attach attempt + attachPLAttemptDiff time.Duration = 10 * time.Second + attachStdinInitString = "v1c#>" + archiveStreamBufSize = 64 * 1024 +) + +// AttachConfig wraps backend.ContainerAttachConfig and adds other required fields +// Similar to https://github.com/docker/docker/blob/master/container/stream/attach.go +type AttachConfig struct { + *backend.ContainerAttachConfig + + // ID of the session + ID string + // Tells the attach copier that the stream's stdin is a TTY and to look for + // escape sequences in stdin to detach from the stream. 
+ // When true the escape sequence is not passed to the underlying stream + UseTty bool + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool +} + +func NewStreamProxy(client *client.PortLayer) VicStreamProxy { + return &StreamProxy{client: client} +} + +// AttachStreams takes the the hijacked connections from the calling client and attaches +// them to the 3 streams from the portlayer's rest server. +// stdin, stdout, stderr are the hijacked connection +func (s *StreamProxy) AttachStreams(ctx context.Context, ac *AttachConfig, stdin io.ReadCloser, stdout, stderr io.Writer) error { + // Cancel will close the child connections. + var wg, outWg sync.WaitGroup + + if s.client == nil { + return errors.NillPortlayerClientError("StreamProxy") + } + + errChan := make(chan error, 3) + + var keys []byte + var err error + if ac.DetachKeys != "" { + keys, err = term.ToBytes(ac.DetachKeys) + if err != nil { + return fmt.Errorf("Invalid escape keys (%s) provided", ac.DetachKeys) + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if ac.UseStdin { + wg.Add(1) + } + + if ac.UseStdout { + wg.Add(1) + outWg.Add(1) + } + + if ac.UseStderr { + wg.Add(1) + outWg.Add(1) + } + + // cancel stdin if all output streams are complete + go func() { + outWg.Wait() + cancel() + }() + + EOForCanceled := func(err error) bool { + return err != nil && ctx.Err() != context.Canceled && !strings.HasSuffix(err.Error(), SwaggerSubstringEOF) + } + + if ac.UseStdin { + go func() { + defer wg.Done() + err := copyStdIn(ctx, s.client, ac, stdin, keys) + if err != nil { + log.Errorf("container attach: stdin (%s): %s", ac.ID, err) + } else { + log.Infof("container attach: stdin (%s) done", ac.ID) + } + + if !ac.CloseStdin || ac.UseTty { + cancel() + } + + // Check for EOF or canceled context. 
We can only detect EOF by checking the error string returned by swagger :/ + if EOForCanceled(err) { + errChan <- err + } + }() + } + + if ac.UseStdout { + go func() { + defer outWg.Done() + defer wg.Done() + + err := copyStdOut(ctx, s.client, ac, stdout, attachAttemptTimeout) + if err != nil { + log.Errorf("container attach: stdout (%s): %s", ac.ID, err) + } else { + log.Infof("container attach: stdout (%s) done", ac.ID) + } + + // Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/ + if EOForCanceled(err) { + errChan <- err + } + }() + } + + if ac.UseStderr { + go func() { + defer outWg.Done() + defer wg.Done() + + err := copyStdErr(ctx, s.client, ac, stderr) + if err != nil { + log.Errorf("container attach: stderr (%s): %s", ac.ID, err) + } else { + log.Infof("container attach: stderr (%s) done", ac.ID) + } + + // Check for EOF or canceled context. We can only detect EOF by checking the error string returned by swagger :/ + if EOForCanceled(err) { + errChan <- err + } + }() + } + + // Wait for all stream copy to exit + wg.Wait() + + // close the channel so that we don't leak (if there is an error)/or get blocked (if there are no errors) + close(errChan) + + log.Infof("cleaned up connections to %s. Checking errors", ac.ID) + for err := range errChan { + if err != nil { + // check if we got DetachError + if _, ok := err.(errors.DetachError); ok { + log.Infof("Detached from container detected") + return err + } + + // If we get here, most likely something went wrong with the port layer API server + // These errors originate within the go-swagger client itself. + // Go-swagger returns untyped errors to us if the error is not one that we define + // in the swagger spec. Even EOF. Therefore, we must scan the error string (if there + // is an error string in the untyped error) for the term EOF. + log.Errorf("container attach error: %s", err) + + return err + } + } + + log.Infof("No error found. 
Returning nil...") + return nil +} + +// StreamContainerLogs reads the log stream from the portlayer rest server and writes +// it directly to the io.Writer that is passed in. +func (s *StreamProxy) StreamContainerLogs(ctx context.Context, name string, out io.Writer, started chan struct{}, showTimestamps bool, followLogs bool, since int64, tailLines int64) error { + defer trace.End(trace.Begin("")) + + if s.client == nil { + return errors.NillPortlayerClientError("StreamProxy") + } + + close(started) + + params := containers.NewGetContainerLogsParamsWithContext(ctx). + WithID(name). + WithFollow(&followLogs). + WithTimestamp(&showTimestamps). + WithSince(&since). + WithTaillines(&tailLines) + _, err := s.client.Containers.GetContainerLogs(params, out) + if err != nil { + switch err := err.(type) { + case *containers.GetContainerLogsNotFound: + return errors.NotFoundError(name) + case *containers.GetContainerLogsInternalServerError: + return errors.InternalServerError("Server error from the interaction port layer") + default: + //Check for EOF. Since the connection, transport, and data handling are + //encapsulated inside of Swagger, we can only detect EOF by checking the + //error string + if strings.Contains(err.Error(), SwaggerSubstringEOF) { + return nil + } + return errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + } + } + + return nil +} + +// StreamContainerStats will provide a stream of container stats written to the provided +// io.Writer. 
Prior to writing to the provided io.Writer there will be a transformation +// from the portLayer representation of stats to the docker format +func (s *StreamProxy) StreamContainerStats(ctx context.Context, config *convert.ContainerStatsConfig) error { + defer trace.End(trace.Begin(config.ContainerID)) + + if s.client == nil { + return errors.NillPortlayerClientError("StreamProxy") + } + + // create a child context that we control + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + params := containers.NewGetContainerStatsParamsWithContext(ctx) + params.ID = config.ContainerID + params.Stream = config.Stream + + config.Ctx = ctx + config.Cancel = cancel + + // create our converter + containerConverter := convert.NewContainerStats(config) + // provide the writer for the portLayer and start listening for metrics + writer := containerConverter.Listen() + if writer == nil { + // problem with the listener + return errors.InternalServerError(fmt.Sprintf("unable to gather container(%s) statistics", config.ContainerID)) + } + + _, err := s.client.Containers.GetContainerStats(params, writer) + if err != nil { + switch err := err.(type) { + case *containers.GetContainerStatsNotFound: + return errors.NotFoundError(config.ContainerID) + case *containers.GetContainerStatsInternalServerError: + return errors.InternalServerError("Server error from the interaction port layer") + default: + if ctx.Err() == context.Canceled { + return nil + } + //Check for EOF. 
Since the connection, transport, and data handling are + //encapsulated inside of Swagger, we can only detect EOF by checking the + //error string + if strings.Contains(err.Error(), SwaggerSubstringEOF) { + return nil + } + return errors.InternalServerError(fmt.Sprintf("Unknown error from the interaction port layer: %s", err)) + } + } + return nil +} + +//------------------------------------ +// ContainerAttach() Utility Functions +//------------------------------------ + +func copyStdIn(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdin io.ReadCloser, keys []byte) error { + // Pipe for stdin so we can interject and watch the input streams for detach keys. + stdinReader, stdinWriter := io.Pipe() + defer stdinReader.Close() + + var detach bool + + done := make(chan struct{}) + go func() { + // make sure we get out of io.Copy if context is canceled + select { + case <-ctx.Done(): + // This will cause the transport to the API client to be shut down, so all output + // streams will get closed as well. + // See the closer in container_routes.go:postContainersAttach + + // We're closing this here to disrupt the io.Copy below + // TODO: seems like we should be providing an io.Copy impl with ctx argument that honors + // cancelation with the amount of code dedicated to working around it + + // TODO: I think this still leaves a race between closing of the API client transport and + // copying of the output streams, it's just likely the error will be dropped as the transport is + // closed when it occurs. + // We should move away from needing to close transports to interrupt reads. + stdin.Close() + case <-done: + } + }() + + go func() { + defer close(done) + defer stdinWriter.Close() + + // Copy the stdin from the CLI and write to a pipe. We need to do this so we can + // watch the stdin stream for the detach keys. 
+ var err error + + // Write some init bytes into the pipe to force Swagger to make the initial + // call to the portlayer, prior to any user input in whatever attach client + // he/she is using. + log.Debugf("copyStdIn writing primer bytes") + stdinWriter.Write([]byte(attachStdinInitString)) + if ac.UseTty { + _, err = copyEscapable(stdinWriter, stdin, keys) + } else { + _, err = io.Copy(stdinWriter, stdin) + } + + if err != nil { + if _, ok := err.(errors.DetachError); ok { + log.Infof("stdin detach detected") + detach = true + } else { + log.Errorf("stdin err: %s", err) + } + } + }() + + id := ac.ID + + // Swagger wants an io.reader so give it the reader pipe. Also, the swagger call + // to set the stdin is synchronous so we need to run in a goroutine + setStdinParams := interaction.NewContainerSetStdinParamsWithContext(ctx).WithID(id) + setStdinParams = setStdinParams.WithRawStream(stdinReader) + + _, err := pl.Interaction.ContainerSetStdin(setStdinParams) + <-done + + if ac.CloseStdin && !ac.UseTty { + // Close the stdin connection. Mimicing Docker's behavior. + log.Errorf("Attach stream has stdinOnce set. 
Closing the stdin.") + params := interaction.NewContainerCloseStdinParamsWithContext(ctx).WithID(id) + _, err := pl.Interaction.ContainerCloseStdin(params) + if err != nil { + log.Errorf("CloseStdin failed with %s", err) + } + } + + // ignore the portlayer error when it is DetachError as that is what we should return to the caller when we detach + if detach { + return errors.DetachError{} + } + + return err +} + +func copyStdOut(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stdout io.Writer, attemptTimeout time.Duration) error { + id := ac.ID + + //Calculate how much time to let portlayer attempt + plAttemptTimeout := attemptTimeout - attachPLAttemptDiff //assumes personality deadline longer than portlayer's deadline + plAttemptDeadline := time.Now().Add(plAttemptTimeout) + swaggerDeadline := strfmt.DateTime(plAttemptDeadline) + log.Debugf("* stdout portlayer deadline: %s", plAttemptDeadline.Format(time.UnixDate)) + log.Debugf("* stdout personality deadline: %s", time.Now().Add(attemptTimeout).Format(time.UnixDate)) + + log.Debugf("* stdout attach start %s", time.Now().Format(time.UnixDate)) + getStdoutParams := interaction.NewContainerGetStdoutParamsWithContext(ctx).WithID(id).WithDeadline(&swaggerDeadline) + _, err := pl.Interaction.ContainerGetStdout(getStdoutParams, stdout) + log.Debugf("* stdout attach end %s", time.Now().Format(time.UnixDate)) + if err != nil { + if _, ok := err.(*interaction.ContainerGetStdoutNotFound); ok { + return errors.ContainerResourceNotFoundError(id, "interaction connection") + } + + return errors.InternalServerError(err.Error()) + } + + return nil +} + +func copyStdErr(ctx context.Context, pl *client.PortLayer, ac *AttachConfig, stderr io.Writer) error { + id := ac.ID + + getStderrParams := interaction.NewContainerGetStderrParamsWithContext(ctx).WithID(id) + _, err := pl.Interaction.ContainerGetStderr(getStderrParams, stderr) + if err != nil { + if _, ok := err.(*interaction.ContainerGetStderrNotFound); ok { + 
errors.ContainerResourceNotFoundError(id, "interaction connection") + } + + return errors.InternalServerError(err.Error()) + } + + return nil +} + +// FIXME: Move this function to a pkg to show it's origination from Docker once +// we have ignore capabilities in our header-check.sh that checks for copyright +// header. +// Code c/c from io.Copy() modified by Docker to handle escape sequence +// Begin + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + // Default keys : ctrl-p ctrl-q + keys = []byte{16, 17} + } + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + preservBuf := []byte{} + for i, key := range keys { + preservBuf = append(preservBuf, buf[0:nr]...) + if nr != 1 || buf[0] != key { + break + } + if i == len(keys)-1 { + src.Close() + return 0, errors.DetachError{} + } + nr, er = src.Read(buf) + } + var nw int + var ew error + if len(preservBuf) > 0 { + nw, ew = dst.Write(preservBuf) + nr = len(preservBuf) + } else { + // ---- End of docker + nw, ew = dst.Write(buf[0:nr]) + } + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/lib/apiservers/engine/backends/system_portlayer.go b/lib/apiservers/engine/proxy/system_proxy.go similarity index 64% rename from lib/apiservers/engine/backends/system_portlayer.go rename to lib/apiservers/engine/proxy/system_proxy.go index d4c6438fe9..bdd569957a 100644 --- a/lib/apiservers/engine/backends/system_portlayer.go +++ b/lib/apiservers/engine/proxy/system_proxy.go @@ -1,4 +1,4 @@ -// Copyright 2016 VMware, Inc. All Rights Reserved. +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -package backends +package proxy //**** -// system_portlayer.go +// system_proxy.go // // Contains all code that touches the portlayer for system operations and all // code that converts swagger based returns to docker personality backend structs. @@ -35,9 +35,13 @@ import ( "fmt" "net/http" + "golang.org/x/net/context" + log "github.com/Sirupsen/logrus" derr "github.com/docker/docker/api/errors" + "github.com/vmware/vic/lib/apiservers/engine/errors" + "github.com/vmware/vic/lib/apiservers/portlayer/client" "github.com/vmware/vic/lib/apiservers/portlayer/client/containers" "github.com/vmware/vic/lib/apiservers/portlayer/client/misc" "github.com/vmware/vic/lib/apiservers/portlayer/models" @@ -45,50 +49,53 @@ import ( ) type VicSystemProxy interface { - PingPortlayer() bool - ContainerCount() (int, int, int, error) - VCHInfo() (*models.VCHInfo, error) + PingPortlayer(ctx context.Context) bool + ContainerCount(ctx context.Context) (int, int, int, error) + VCHInfo(ctx context.Context) (*models.VCHInfo, error) +} + +type SystemProxy struct { + client *client.PortLayer } -type SystemProxy struct{} +func NewSystemProxy(client *client.PortLayer) VicSystemProxy { + if client == nil { + return nil + } + + return &SystemProxy{client: client} +} -func (s *SystemProxy) PingPortlayer() bool { - defer trace.End(trace.Begin("PingPortlayer")) +func (s *SystemProxy) PingPortlayer(ctx context.Context) bool { + defer trace.End(trace.Begin("")) - plClient := PortLayerClient() - if plClient == nil { + if s.client == nil { + log.Errorf("Portlayer client is invalid") return false } - if plClient != nil { - pingParams := misc.NewPingParamsWithContext(ctx) - _, err := plClient.Misc.Ping(pingParams) - if err != nil { - log.Info("Ping 
to portlayer failed") - return false - } - return true + pingParams := misc.NewPingParamsWithContext(ctx) + _, err := s.client.Misc.Ping(pingParams) + if err != nil { + log.Info("Ping to portlayer failed") + return false } - - log.Errorf("Portlayer client is invalid") - return false + return true } // Use the Portlayer's support for docker ps to get the container count // return order: running, paused, stopped counts -func (s *SystemProxy) ContainerCount() (int, int, int, error) { - defer trace.End(trace.Begin("ContainerCount")) +func (s *SystemProxy) ContainerCount(ctx context.Context) (int, int, int, error) { + defer trace.End(trace.Begin("")) var running, paused, stopped int - plClient := PortLayerClient() - if plClient == nil { - return 0, 0, 0, derr.NewErrorWithStatusCode(fmt.Errorf("ContainerCount failed to create a portlayer client"), - http.StatusInternalServerError) + if s.client == nil { + return 0, 0, 0, errors.NillPortlayerClientError("SystemProxy") } all := true - containList, err := plClient.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all)) + containList, err := s.client.Containers.GetContainerList(containers.NewGetContainerListParamsWithContext(ctx).WithAll(&all)) if err != nil { return 0, 0, 0, derr.NewErrorWithStatusCode(fmt.Errorf("Failed to get container list: %s", err), http.StatusInternalServerError) } @@ -105,17 +112,15 @@ func (s *SystemProxy) ContainerCount() (int, int, int, error) { return running, paused, stopped, nil } -func (s *SystemProxy) VCHInfo() (*models.VCHInfo, error) { - defer trace.End(trace.Begin("VCHInfo")) +func (s *SystemProxy) VCHInfo(ctx context.Context) (*models.VCHInfo, error) { + defer trace.End(trace.Begin("")) - plClient := PortLayerClient() - if plClient == nil { - return nil, derr.NewErrorWithStatusCode(fmt.Errorf("VCHInfo failed to create a portlayer client"), - http.StatusInternalServerError) + if s.client == nil { + return nil, 
errors.NillPortlayerClientError("SystemProxy") } params := misc.NewGetVCHInfoParamsWithContext(ctx) - resp, err := plClient.Misc.GetVCHInfo(params) + resp, err := s.client.Misc.GetVCHInfo(params) if err != nil { //There are no custom error for this operation. If we get back an error, it's //unknown. diff --git a/lib/constants/constants.go b/lib/constants/constants.go index cbe4117110..b81ec1c3ac 100644 --- a/lib/constants/constants.go +++ b/lib/constants/constants.go @@ -29,6 +29,9 @@ const ( ManagementHostName = "management.localhost" ClientHostName = "client.localhost" + DefaultPersonaPort = 2375 + DefaultPersonaSSLPort = 2376 + // DebugPortLayerPort defines the portlayer port while debug level is greater than 2 DebugPortLayerPort = 2380 @@ -75,6 +78,10 @@ const ( // Scratch layer ID ScratchLayerID = "scratch" + + // Directory for all logging in the VCH + DefaultLogDir = "/var/log/vic" + ) func DefaultAltVCHGuestName() string { diff --git a/lib/install/management/create.go b/lib/install/management/create.go index 6f88ccd213..a85866d4ff 100644 --- a/lib/install/management/create.go +++ b/lib/install/management/create.go @@ -18,7 +18,7 @@ import ( "context" "fmt" "path" - "sync" + "path/filepath" "time" "github.com/vmware/govmomi/object" @@ -26,6 +26,7 @@ import ( "github.com/vmware/vic/lib/install/data" "github.com/vmware/vic/lib/install/opsuser" "github.com/vmware/vic/lib/install/vchlog" + "github.com/vmware/vic/lib/progresslog" "github.com/vmware/vic/pkg/errors" "github.com/vmware/vic/pkg/retry" "github.com/vmware/vic/pkg/trace" @@ -120,98 +121,85 @@ func (d *Dispatcher) uploadImages(files map[string]string) error { // upload the images d.op.Info("Uploading images for container") - results := make(chan error, len(files)) - var wg sync.WaitGroup + // Build retry config + backoffConf := retry.NewBackoffConfig() + backoffConf.InitialInterval = uploadInitialInterval + backoffConf.MaxInterval = uploadMaxInterval + backoffConf.MaxElapsedTime = uploadMaxElapsedTime for 
key, image := range files { - - wg.Add(1) - go func(key string, image string) { - finalMessage := "" - d.op.Infof("\t%q", image) - - // upload function that is passed to retry - operationForRetry := func() error { - // attempt to delete the iso image first in case of failed upload - dc := d.session.Datacenter - fm := d.session.Datastore.NewFileManager(dc, false) - ds := d.session.Datastore - - isoTargetPath := path.Join(d.vmPathName, key) - // check iso first - _, err := ds.Stat(d.op, isoTargetPath) - if err != nil { - switch err.(type) { - case object.DatastoreNoSuchFileError: - // if not found, do nothing - default: - // otherwise force delete - d.op.Debugf("target delete path = %s", isoTargetPath) - err := fm.Delete(d.op, isoTargetPath) - if err != nil { - d.op.Debugf("Failed to delete image (%s) with error (%s)", image, err.Error()) - return err - } + baseName := filepath.Base(image) + finalMessage := "" + // upload function that is passed to retry + isoTargetPath := path.Join(d.vmPathName, key) + + operationForRetry := func() error { + // attempt to delete the iso image first in case of failed upload + dc := d.session.Datacenter + fm := d.session.Datastore.NewFileManager(dc, false) + ds := d.session.Datastore + + // check iso first + d.op.Debugf("Checking if file already exists: %s", isoTargetPath) + _, err := ds.Stat(d.op, isoTargetPath) + if err != nil { + switch err.(type) { + case object.DatastoreNoSuchFileError: + d.op.Debug("File not found. Nothing to do.") + case object.DatastoreNoSuchDirectoryError: + d.op.Debug("Directory not found. 
Nothing to do.") + default: + d.op.Debugf("ISO file already exists, deleting: %s", isoTargetPath) + err := fm.Delete(d.op, isoTargetPath) + if err != nil { + d.op.Debugf("Failed to delete image (%s) with error (%s)", image, err.Error()) + return err } } - - return d.session.Datastore.UploadFile(d.op, image, path.Join(d.vmPathName, key), nil) } - // counter for retry decider - retryCount := uploadRetryLimit + d.op.Infof("Uploading %s as %s", baseName, key) - // decider for our retry, will retry the upload uploadRetryLimit times - uploadRetryDecider := func(err error) bool { - if err == nil { - return false - } + ul := progresslog.NewUploadLogger(d.op.Infof, baseName, time.Second*3) + // need to wait since UploadLogger is asynchronous. + defer ul.Wait() - retryCount-- - if retryCount < 0 { - d.op.Warnf("Attempted upload a total of %d times without success, Upload process failed.", uploadRetryLimit) - return false - } - d.op.Warnf("failed an attempt to upload isos with err (%s), %d retries remain", err.Error(), retryCount) - return true - } + return d.session.Datastore.UploadFile(d.op, image, path.Join(d.vmPathName, key), + progresslog.UploadParams(ul)) + } - // Build retry config - backoffConf := retry.NewBackoffConfig() - backoffConf.InitialInterval = uploadInitialInterval - backoffConf.MaxInterval = uploadMaxInterval - backoffConf.MaxElapsedTime = uploadMaxElapsedTime - - uploadErr := retry.DoWithConfig(operationForRetry, uploadRetryDecider, backoffConf) - if uploadErr != nil { - finalMessage = fmt.Sprintf("\t\tUpload failed for %q: %s\n", image, uploadErr) - if d.force { - finalMessage = fmt.Sprintf("%s\t\tContinuing despite failures (due to --force option)\n", finalMessage) - finalMessage = fmt.Sprintf("%s\t\tNote: The VCH will not function without %q...", finalMessage, image) - results <- errors.New(finalMessage) - } else { - results <- errors.New(finalMessage) - } + // counter for retry decider + retryCount := uploadRetryLimit + + // decider for our retry, will 
retry the upload uploadRetryLimit times + uploadRetryDecider := func(err error) bool { + if err == nil { + return false } - wg.Done() - }(key, image) - } - wg.Wait() - close(results) + retryCount-- + if retryCount < 0 { + d.op.Warnf("Attempted upload a total of %d times without success, Upload process failed.", uploadRetryLimit) + return false + } + d.op.Warnf("Failed an attempt to upload isos with err (%s), %d retries remain", err.Error(), retryCount) + return true + } - uploadFailed := false - for err := range results { - if err != nil { - d.op.Error(err) - uploadFailed = true + uploadErr := retry.DoWithConfig(operationForRetry, uploadRetryDecider, backoffConf) + if uploadErr != nil { + finalMessage = fmt.Sprintf("\t\tUpload failed for %q: %s\n", image, uploadErr) + if d.force { + finalMessage = fmt.Sprintf("%s\t\tContinuing despite failures (due to --force option)\n", finalMessage) + finalMessage = fmt.Sprintf("%s\t\tNote: The VCH will not function without %q...", finalMessage, image) + } + d.op.Error(finalMessage) + return errors.New("Failed to upload iso images.") } - } - if uploadFailed { - return errors.New("Failed to upload iso images successfully.") } return nil + } // cleanupAfterCreationFailed cleans up the dangling resource pool for the failed VCH and any bridge network if there is any. diff --git a/lib/install/management/store_files.go b/lib/install/management/store_files.go index 8f5c860890..2046fb7ab6 100644 --- a/lib/install/management/store_files.go +++ b/lib/install/management/store_files.go @@ -158,23 +158,26 @@ func (d *Dispatcher) isVSAN(ds *object.Datastore) bool { func (d *Dispatcher) deleteFilesIteratively(m *object.DatastoreFileManager, ds *object.Datastore, dsPath string) error { defer trace.End(trace.Begin(dsPath, d.op)) - // Get sorted result to make sure children files listed ahead of folder. 
Then we can empty folder before delete it - // This function specifically designed for vSan, as vSan sometimes will throw error to delete folder is the folder is not empty - res, err := d.getSortedChildren(ds, dsPath) - if err != nil { - if !types.IsFileNotFound(err) { - err = errors.Errorf("Failed to browse sub folders %q: %s", dsPath, err) - return err + if d.isVSAN(ds) { + // Get sorted result to make sure child files are listed ahead of their parent folder so we empty the folder before deleting it. + // This behaviour is specifically for vSan, as vSan sometimes throws an error when deleting a folder that is not empty. + res, err := d.getSortedChildren(ds, dsPath) + if err != nil { + if !types.IsFileNotFound(err) { + err = errors.Errorf("Failed to browse sub folders %q: %s", dsPath, err) + return err + } + d.op.Debugf("Folder %q is not found", dsPath) + return nil } - d.op.Debugf("Folder %q is not found", dsPath) - return nil - } - for _, path := range res { - if err = d.deleteVMFSFiles(m, ds, path); err != nil { - return err + for _, path := range res { + if err = d.deleteVMFSFiles(m, ds, path); err != nil { + return err + } } } + return d.deleteVMFSFiles(m, ds, dsPath) } diff --git a/lib/install/opsuser/opsuser.go b/lib/install/opsuser/opsuser.go index d8b7bcc3af..12f014f3a3 100644 --- a/lib/install/opsuser/opsuser.go +++ b/lib/install/opsuser/opsuser.go @@ -217,6 +217,10 @@ func (mgr *RBACManager) collectDatastores(ctx context.Context, finder *find.Find } volumeLocations := make([]url.URL, 0, len(mgr.configSpec.Storage.VolumeLocations)) for _, volumeLocation := range mgr.configSpec.Storage.VolumeLocations { + // Only apply changes to datastores managed by vSphere + if volumeLocation.Scheme != "ds" { + continue + } volumeLocations = append(volumeLocations, *volumeLocation) } if err = mgr.findDatastores(ctx, finder, volumeLocations, dsNameToRef); err != nil { diff --git a/lib/install/validate/validator_test_sim_util.go 
b/lib/install/validate/validator_test_sim_util.go index c793912782..65e8fb7f3e 100644 --- a/lib/install/validate/validator_test_sim_util.go +++ b/lib/install/validate/validator_test_sim_util.go @@ -91,6 +91,17 @@ var testInputConfigVPX = data.Data{ RawQuery: "", Fragment: "", }, + "nfs": { + Scheme: "nfs", + Opaque: "", + User: (*url.Userinfo)(nil), + Host: "nfs-host", + Path: "vic-volumes:nas", + RawPath: "", + ForceQuery: false, + RawQuery: "", + Fragment: "", + }, }, BridgeNetworkName: "DC0_DVPG0", ClientNetwork: data.NetworkConfig{ diff --git a/lib/portlayer/network/context.go b/lib/portlayer/network/context.go index a220e78f2b..a19a3cd233 100644 --- a/lib/portlayer/network/context.go +++ b/lib/portlayer/network/context.go @@ -42,7 +42,7 @@ const ( DefaultBridgeName = "bridge" ) -// Context denotes a networking context that represents a set of scopes, endpoints, +// Context denotes a network context that represents a set of scopes, endpoints, // and containers. Each context has its own separate IPAM. type Context struct { sync.Mutex diff --git a/lib/portlayer/storage/volume_cache.go b/lib/portlayer/storage/volume_cache.go index 0d299cc22b..95037eb190 100644 --- a/lib/portlayer/storage/volume_cache.go +++ b/lib/portlayer/storage/volume_cache.go @@ -1,4 +1,4 @@ -// Copyright 2016-2017 VMware, Inc. All Rights Reserved. +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ func (v *VolumeLookupCache) GetVolumeStore(op trace.Operation, storeName string) return u, nil } -// AddStore adds a volumestore by name. The url returned is the service url to the volume store. +// AddStore adds a volumestore by name. The url returned is the service url to the volume store. 
func (v *VolumeLookupCache) AddStore(op trace.Operation, storeName string, vs VolumeStorer) (*url.URL, error) { v.vlcLock.Lock() defer v.vlcLock.Unlock() @@ -70,12 +70,13 @@ func (v *VolumeLookupCache) AddStore(op trace.Operation, storeName string, vs Vo return nil, err } - if _, ok := v.volumeStores[u.String()]; ok { - return nil, fmt.Errorf("volumestore (%s) already added", u.String()) + storeURLStr := u.String() + if _, ok := v.volumeStores[storeURLStr]; ok { + return nil, fmt.Errorf("volumestore (%s) already added", storeURLStr) } - v.volumeStores[u.String()] = vs - return u, v.rebuildCache(op) + v.volumeStores[storeURLStr] = vs + return u, v.addVolumesToCache(op, storeURLStr, vs) } func (v *VolumeLookupCache) volumeStore(store *url.URL) (VolumeStorer, error) { @@ -94,7 +95,6 @@ func (v *VolumeLookupCache) volumeStore(store *url.URL) (VolumeStorer, error) { // VolumeStoresList returns a list of volume store names func (v *VolumeLookupCache) VolumeStoresList(op trace.Operation) ([]string, error) { - v.vlcLock.RLock() defer v.vlcLock.RUnlock() @@ -201,21 +201,19 @@ func (v *VolumeLookupCache) VolumesList(op trace.Operation) ([]*Volume, error) { return l, nil } -// goto the volume store and repopulate the cache. -func (v *VolumeLookupCache) rebuildCache(op trace.Operation) error { - op.Infof("Refreshing volume cache.") +// addVolumesToCache finds the volumes in the input volume store and adds them to the cache. +func (v *VolumeLookupCache) addVolumesToCache(op trace.Operation, storeURLStr string, vs VolumeStorer) error { + op.Infof("Adding volumes in volume store %s to volume cache", storeURLStr) - for _, vs := range v.volumeStores { - vols, err := vs.VolumesList(op) - if err != nil { - return err - } + vols, err := vs.VolumesList(op) + if err != nil { + return err + } - for _, vol := range vols { - log.Infof("Volumestore: Found vol %s on store %s.", vol.ID, vol.Store) - // Add it to the cache. 
- v.vlc[vol.ID] = *vol - } + for _, vol := range vols { + log.Infof("Volumestore: Found vol %s on store %s", vol.ID, vol.Store) + // Add it to the cache. + v.vlc[vol.ID] = *vol } return nil diff --git a/lib/portlayer/storage/volume_cache_test.go b/lib/portlayer/storage/volume_cache_test.go index 5a8af665b6..87533d81be 100644 --- a/lib/portlayer/storage/volume_cache_test.go +++ b/lib/portlayer/storage/volume_cache_test.go @@ -1,4 +1,4 @@ -// Copyright 2016-2017 VMware, Inc. All Rights Reserved. +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -95,7 +95,7 @@ func (m *MockVolumeStore) VolumeDestroy(op trace.Operation, vol *Volume) error { return nil } -// Lists all volumes on the given volume store` +// VolumesList lists all volumes on the given volume store. func (m *MockVolumeStore) VolumesList(op trace.Operation) ([]*Volume, error) { var i int list := make([]*Volume, len(m.db)) @@ -214,6 +214,52 @@ func TestVolumeCreateGetListAndDelete(t *testing.T) { } } +// createVolumes is a test helper that creates a set of num volumes on the input volume cache and volume store. 
+func createVolumes(t *testing.T, op trace.Operation, v *VolumeLookupCache, storeURL *url.URL, num int) map[string]*Volume { + vols := make(map[string]*Volume) + for i := 1; i <= num; i++ { + id := fmt.Sprintf("ID-%d", i) + + // Write to the datastore + vol, err := v.VolumeCreate(op, id, storeURL, 0, nil) + if !assert.NoError(t, err) || !assert.NotNil(t, vol) { + return nil + } + + vols[id] = vol + } + + return vols +} + +func TestAddVolumesToCache(t *testing.T) { + mvs1 := NewMockVolumeStore() + op := trace.NewOperation(context.Background(), "test") + v := NewVolumeLookupCache(op) + + storeURL, err := util.VolumeStoreNameToURL("testStore") + assert.NotNil(t, storeURL) + storeURLStr := storeURL.String() + v.volumeStores[storeURLStr] = mvs1 + + // Create 50 volumes on the volume store. + vols := createVolumes(t, op, v, storeURL, 50) + + // Clear the volume map after it has been filled during volume creation. + v.vlc = make(map[string]Volume) + + err = v.addVolumesToCache(op, storeURLStr, mvs1) + assert.Nil(t, err) + + // Check that the volume map is intact again in the cache. + for _, expectedVol := range vols { + vol, err := v.VolumeGet(op, expectedVol.ID) + if !assert.NoError(t, err) || !assert.Equal(t, expectedVol, vol) { + return + } + } +} + // Create 2 store caches but use the same backing datastore. Create images // with the first cache, then get the image with the second. This simulates // restart since the second cache is empty and has to go to the backing store. @@ -227,26 +273,15 @@ func TestVolumeCacheRestart(t *testing.T) { return } - // Create a set of volumes - inVols := make(map[string]*Volume) - for i := 1; i < 50; i++ { - id := fmt.Sprintf("ID-%d", i) - - // Write to the datastore - vol, err := firstCache.VolumeCreate(op, id, storeURL, 0, nil) - if !assert.NoError(t, err) || !assert.NotNil(t, vol) { - return - } - - inVols[id] = vol - } + // Create a set of 50 volumes. 
+ inVols := createVolumes(t, op, firstCache, storeURL, 50) secondCache := NewVolumeLookupCache(op) if !assert.NotNil(t, secondCache) { return } - _, err = secondCache.AddStore(op, "testStore", mvs) + storeURL, err = secondCache.AddStore(op, "testStore", mvs) if !assert.NoError(t, err) || !assert.NotNil(t, storeURL) { return } diff --git a/lib/progresslog/progresslog.go b/lib/progresslog/progresslog.go new file mode 100644 index 0000000000..1f38b7ce0e --- /dev/null +++ b/lib/progresslog/progresslog.go @@ -0,0 +1,100 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package progresslog + +import ( + "sync" + "time" + + "github.com/vmware/govmomi/vim25/progress" + "github.com/vmware/govmomi/vim25/soap" +) + +// UploadParams uses default upload settings as initial input and set UploadLogger as a logger. +func UploadParams(ul *UploadLogger) *soap.Upload { + params := soap.DefaultUpload + params.Progress = ul + return ¶ms +} + +// UploadLogger is used to track upload progress to ESXi/VC of a specific file. +type UploadLogger struct { + wg sync.WaitGroup + filename string + interval time.Duration + logTo func(format string, args ...interface{}) +} + +// NewUploadLogger returns a new instance of UploadLogger. User can provide a logger interface +// that will be used to dump output of the current upload status. 
+func NewUploadLogger(logTo func(format string, args ...interface{}), + filename string, progressInterval time.Duration) *UploadLogger { + + return &UploadLogger{ + logTo: logTo, + filename: filename, + interval: progressInterval, + } +} + +// Sink returns a channel that receives current upload progress status. +// Channel has to be closed by the caller. +func (ul *UploadLogger) Sink() chan<- progress.Report { + ul.wg.Add(1) + ch := make(chan progress.Report) + fmtStr := "Uploading %s. Progress: %.2f%%" + + go func() { + var curProgress float32 + var lastProgress float32 + ul.logTo(fmtStr, ul.filename, curProgress) + + mu := sync.Mutex{} + ticker := time.NewTicker(ul.interval) + + // Print progress every ul.interval seconds. + go func() { + for range ticker.C { + mu.Lock() + lastProgress = curProgress + mu.Unlock() + ul.logTo(fmtStr, ul.filename, lastProgress) + } + }() + + for v := range ch { + mu.Lock() + curProgress = v.Percentage() + mu.Unlock() + } + + ticker.Stop() + + if lastProgress != curProgress { + ul.logTo(fmtStr, ul.filename, curProgress) + } + + if curProgress == 100.0 { + ul.logTo("Uploading of %s has been completed", ul.filename) + } + ul.wg.Done() + }() + return ch +} + +// Wait waits for Sink to complete its work, due to its async nature logging messages may be not sequential. +func (ul *UploadLogger) Wait() { + ul.wg.Wait() +} diff --git a/lib/progresslog/progresslog_test.go b/lib/progresslog/progresslog_test.go new file mode 100644 index 0000000000..2826ecf79c --- /dev/null +++ b/lib/progresslog/progresslog_test.go @@ -0,0 +1,93 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package progresslog + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/vmware/govmomi/vim25/progress" +) + +type ProgressResults struct { + percentage float32 +} + +func (pr *ProgressResults) Percentage() float32 { + return pr.percentage +} + +func (pr *ProgressResults) Detail() string { + return "" +} + +func (pr *ProgressResults) Error() error { + return nil +} + +var _ progress.Report = &ProgressResults{} + +func TestNewUploadLoggerComplete(t *testing.T) { + var logs []string + logTo := func(format string, args ...interface{}) { + logs = append(logs, fmt.Sprintf(format, args...)) + } + pl := NewUploadLogger(logTo, "unittest", time.Millisecond*10) + progressChan := pl.Sink() + for i := 0; i <= 10; i++ { + res := &ProgressResults{percentage: float32(i * 10)} + progressChan <- res + time.Sleep(time.Duration(time.Millisecond * 5)) + } + close(progressChan) + pl.Wait() + + if assert.True(t, len(logs) > 3) { + last := len(logs) - 1 + assert.Contains(t, logs[0], "unittest") + assert.Contains(t, logs[0], "0.00%") + assert.Contains(t, logs[1], ".00%") + assert.Contains(t, logs[last-1], "100.00%") + assert.Contains(t, logs[last], "complete") + } +} + +func TestNewUploadLoggerNotComplete(t *testing.T) { + var logs []string + logTo := func(format string, args ...interface{}) { + logs = append(logs, fmt.Sprintf(format, args...)) + } + pl := NewUploadLogger(logTo, "unittest", time.Millisecond*10) + progressChan := pl.Sink() + for i := 0; i < 10; i++ { + res := &ProgressResults{percentage: float32(i * 10)} + 
progressChan <- res + time.Sleep(time.Duration(time.Millisecond * 5)) + } + close(progressChan) + pl.Wait() + + if assert.True(t, len(logs) > 3) { + last := len(logs) - 1 + assert.Contains(t, logs[0], "unittest") + assert.Contains(t, logs[0], "0.00%") + assert.Contains(t, logs[1], ".00%") + assert.NotContains(t, logs[last], "100.00%") + assert.NotContains(t, logs[last], "complete") + } +} diff --git a/pkg/log/log.go b/pkg/log/log.go index 5a6faffdef..beff78b2a5 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -95,3 +95,7 @@ func CreateSyslogHook(cfg *LoggingConfig) (logrus.Hook, error) { } return hook, err } + +func (l *LoggingConfig) SetLogLevel(level uint8) { + l.Level = logrus.Level(level) +} \ No newline at end of file diff --git a/pkg/trace/trace.go b/pkg/trace/trace.go index 9ee87b0ae8..26aa509cb7 100644 --- a/pkg/trace/trace.go +++ b/pkg/trace/trace.go @@ -77,6 +77,12 @@ func InitLogger(cfg *log.LoggingConfig) error { return err } +func SetLoggerLevel(level uint8) { + if Logger != nil { + Logger.Level = logrus.Level(level) + } +} + // begin a trace from this stack frame less the skip. 
func newTrace(msg string, skip int, opID string) *Message { pc, _, line, ok := runtime.Caller(skip) diff --git a/pkg/vsphere/rbac/rbac.go b/pkg/vsphere/rbac/rbac.go index dc0bb69692..de192e6bce 100644 --- a/pkg/vsphere/rbac/rbac.go +++ b/pkg/vsphere/rbac/rbac.go @@ -146,11 +146,12 @@ func (am *AuthzManager) PrincipalBelongsToGroup(ctx context.Context, group strin } results, err := methods.RetrieveUserGroups(ctx, am.client, &req) + // This is to work around a bug in vSphere, when AD is added to // the identity source list, the API returns Object Not Found, // In this case, we ignore the error and return false (BUG: 2037706) - if err != nil && isNotFoundError(ctx, err) { - op.Debugf("Received Not Found Error from PrincipalBelongsToGroup(), could not verify user %s is not a member of the Administrators group", am.Principal) + if err != nil && (isNotSupportedError(ctx, err) || isNotFoundError(ctx, err)) { + op.Debugf("Received Error (%s) from PrincipalBelongsToGroup(), could not verify user %s is not a member of the Administrators group", err.Error(), am.Principal) op.Warnf("If ops-user (%s) belongs to the Administrators group, permissions on some resources might have been restricted", am.Principal) return false, nil } @@ -393,6 +394,20 @@ func (am *AuthzManager) getRoleName(resource *Resource) string { } } +func isNotSupportedError(ctx context.Context, err error) bool { + op := trace.FromContext(ctx, "isNotSupportedError") + + if soap.IsSoapFault(err) { + vimFault := soap.ToSoapFault(err).VimFault() + op.Debugf("Error type: %s", reflect.TypeOf(vimFault)) + + _, ok := soap.ToSoapFault(err).VimFault().(types.NotSupported) + return ok + } + + return false +} + func isNotFoundError(ctx context.Context, err error) bool { op := trace.FromContext(ctx, "isNotFoundError") diff --git a/pkg/vsphere/tags/rest_client.go b/pkg/vsphere/tags/rest_client.go index b73e5ba0e5..b1d12dfb79 100644 --- a/pkg/vsphere/tags/rest_client.go +++ b/pkg/vsphere/tags/rest_client.go @@ -173,6 +173,10 
@@ func (c *RestClient) Login(ctx context.Context) error { if err != nil { return errors.Wrap(err, "login failed") } + if c.user != nil { + password, _ := c.user.Password() + request.SetBasicAuth(c.user.Username(), password) + } resp, err := c.HTTP.Do(request) if err != nil { return errors.Wrap(err, "login failed") @@ -194,15 +198,5 @@ func (c *RestClient) Login(ctx context.Context) error { } func (c *RestClient) newRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, c.endpoint.String()+urlStr, body) - if err != nil { - return nil, err - } - - if c.user != nil { - password, _ := c.user.Password() - req.SetBasicAuth(c.user.Username(), password) - } - - return req, nil + return http.NewRequest(method, c.endpoint.String()+urlStr, body) } diff --git a/tests/README.md b/tests/README.md index 3dc12f58ab..a28517b29e 100644 --- a/tests/README.md +++ b/tests/README.md @@ -56,16 +56,16 @@ Use ./local-integration-test.sh * Execute Drone from the project root directory: - Drone will run based on `.drone.local.yml` - defaults should be fine, edit as needed + Drone will run based on `.drone.local.yml` - defaults should be fine, edit as needed. Set secrets as env variables: * To run only the regression tests: ``` - drone exec --repo.trusted --secrets-file "test.secrets" .drone.local.yml + drone exec .drone.local.yml ``` * To run the full suite: ``` - drone exec --repo.trusted --repo.branch=master --repo.fullname="vmware/vic" --secrets-file "test.secrets" .drone.local.yml + drone exec --repo-name "vmware/vic" .drone.local.yml ``` ## Test a specific .robot file @@ -78,6 +78,33 @@ Use ./local-integration-test.sh ./tests/robot.sh tests/test-cases/Group6-VIC-Machine/6-04-Create-Basic.robot ``` +## Run Docker command tests via makefile target + +There exists a makefile target for developers to run the docker command tests locally (not in CI environment) against a pre-deployed VCH. 
This is a fast way for contributors to test their potential code changes, against the CI tests locally, before pushing the commit. There is another benefit gained from using the makefile target, the way it is setup, logs from the run are written out to vic/ci-results, even if the tests fail. The method described above, to run the tests locally with drone, has the weakness that a failure in the test can sometimes result in no written logs to help debug the failure. + +There are a few requirements before using this makefile target. + +1. A VCH must be pre-deployed before calling this makefile target +2. The makefile target relies on a script that looks for a few more secrets variables. When running the script directly, these secrets variables may be passed into the script via commandline arguments, environment variables, or via a secrets file. When running the makefile target via make, the secrets must be defined in environment variables. + +To run these tests using the makefile target, + +``` +make local-ci-test + +SECRETS_FILE=test.secrets.esx make local-ci-test + +DOCKER_TEST=Group1-Docker-Commands/1-01-Docker-Info.robot make local-ci-test +``` +In the above example, the first command assumes all environment variables are defined. The second command defines one environment variable, SECRETS_FILE, before calling the make target. This allows calling the make target with all the necessary secrets variables defined in the secrets file instead of in environment variables. The third command defines a specific test to run using the environment variable, DOCKER_TEST. + +Currently, only the Group1 tests are setup to use an existing VCH so this makefile target only works on the group 1 tests. + +It is also possible to run the docker command tests, without using make, by calling the internal script itself. The script is located at "infra/scripts/local-ci.sh". As stated above, the script also allows command line arguments to be passed directly into the script. 
+ +A helpful tip is to create different secrets files for different environments. For instance, test.secrets.esx and test.secrets.vc for an ESX host and VC cluster, respectively. + + ## Find the documentation for each of the tests here: * [Automated Test Suite Documentation](test-cases/TestGroups.md) diff --git a/tests/ci-env.sh b/tests/ci-env.sh index b0fdb04519..b25dd5cbeb 100644 --- a/tests/ci-env.sh +++ b/tests/ci-env.sh @@ -15,20 +15,9 @@ set -e -if [ ! -f /ci/test_env ]; then - echo "'/ci/test_env' file not found! Make sure its available on host and mounted."; -fi - -# update file permissions -chmod 777 /ci/test_env - -# source env variables from volume mounted test_env file -. /ci/test_env - -# check if DRONE_MACHINE is set -# TODO: use DRONE_MACHINE from 0.8 drone version -if [ -z "${DRONE_MACHINE}" ]; then - echo "WARN: DRONE_MACHINE is not set"; +# check if DRONE_HOSTNAME is available +if [ -z "${DRONE_HOSTNAME}" ]; then + echo "WARN: DRONE_HOSTNAME is not set"; else - echo "DRONE_MACHINE is set to '$DRONE_MACHINE'"; + echo "DRONE_HOSTNAME is set to '$DRONE_HOSTNAME'"; fi diff --git a/tests/concurrent/README.md b/tests/concurrent/README.md new file mode 100644 index 0000000000..fcf5a56ab0 --- /dev/null +++ b/tests/concurrent/README.md @@ -0,0 +1,17 @@ +# Concurrent + +Concurrent is a simple tool to create/start/stop/destroy container vms. It doesn't use persona or portlayer but entertains the same code. + +# Usage + +Requires a VCH to be present and also requires busybox image to be in the image store. + +``` +# VIC_MAX_IN_FLIGHT=32 ./concurrent -service "username:password@VC_OR_ESXI" -datacenter DATACENTER -datastore DATASTORE -resource-pool RP -cluster CLUSTER -vch VCH -concurrency 256 -memory-mb 64 +Concurrent testing... 
+ +Creating 100% [===================================================================================================================================] 9s +Destroying 100% [=================================================================================================================================] 25s + +``` + diff --git a/tests/concurrent/concurrent.go b/tests/concurrent/concurrent.go new file mode 100644 index 0000000000..022ecd8dec --- /dev/null +++ b/tests/concurrent/concurrent.go @@ -0,0 +1,385 @@ +// Copyright 2017 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "math/rand" + "net/url" + "path" + "strings" + "sync" + "time" + + "github.com/sethgrid/multibar" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/task" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/vic/lib/guest" + "github.com/vmware/vic/lib/spec" + "github.com/vmware/vic/pkg/version" + "github.com/vmware/vic/pkg/vsphere/datastore" + "github.com/vmware/vic/pkg/vsphere/session" + "github.com/vmware/vic/pkg/vsphere/tasks" + "github.com/vmware/vic/pkg/vsphere/vm" +) + +const ( + // concurrency + DefaultConcurrency = 16 + + // busybox leaf image name + parent = "c33a7c3535692e8ca015bebc3a01f7a72b14cc013918c09c40d808efe1505c62" + + DefaultService = "root:password@somehost" + + DefaultResourcePool = "/dc1/host/cluster1/Resources" + DefaultCluster = "/dc1/host/cluster1" + DefaultDatacenter = "/dc1" + + DefaultDatastore = "vsanDatastore" + + DefaultVCH = "ZzZ" + + DefaultMemoryMB = 512 +) + +type Config struct { + Service string + + Concurrency int + + Start bool + RP bool + + Datacenter string + Cluster string + ResourcePool string + + Datastore string + + MemoryMB int64 + + VCH string +} + +var ( + config = Config{} +) + +func init() { + flag.StringVar(&config.Service, "service", DefaultService, "Service") + + flag.IntVar(&config.Concurrency, "concurrency", DefaultConcurrency, "Concurrency") + + flag.Int64Var(&config.MemoryMB, "memory-mb", DefaultMemoryMB, "Memory") + + flag.StringVar(&config.Datacenter, "datacenter", DefaultDatacenter, "DataCenter") + flag.StringVar(&config.Cluster, "cluster", DefaultCluster, "Cluster") + flag.StringVar(&config.ResourcePool, "resource-pool", DefaultResourcePool, "ResourcePool") + + flag.StringVar(&config.Datastore, "datastore", DefaultDatastore, "Datastore") + + flag.StringVar(&config.VCH, "vch", DefaultVCH, "VCH") + + flag.BoolVar(&config.Start, "start", false, "Start/Stop") + 
flag.BoolVar(&config.RP, "rp", false, "force 2 Resource Pool") + + flag.Parse() + + rand.Seed(time.Now().UnixNano()) +} + +func IsNotFoundError(err error) bool { + if soap.IsSoapFault(err) { + fault := soap.ToSoapFault(err).VimFault() + if _, ok := fault.(types.ManagedObjectNotFound); ok { + return true + } + } + return false +} + +func IsConcurrentAccessError(err error) bool { + if soap.IsSoapFault(err) { + fault := soap.ToSoapFault(err).VimFault() + if _, ok := fault.(types.ConcurrentAccess); ok { + return true + } + // sometimes we get this wrong type with correct error + return soap.ToSoapFault(err).String == "vim.fault.ConcurrentAccess" + } + return false +} + +func main() { + ctx := context.Background() + + // session + c := &session.Config{ + Service: config.Service, + Insecure: true, + Keepalive: 30 * time.Minute, + DatacenterPath: config.Datacenter, + ClusterPath: config.Cluster, + PoolPath: config.ResourcePool, + DatastorePath: config.Datastore, + UserAgent: version.UserAgent("vic-engine"), + } + + s, err := session.NewSession(c).Connect(ctx) + if err != nil { + log.Panic(err) + } + defer s.Logout(ctx) + + if s, err = s.Populate(ctx); err != nil { + log.Panic(err) + } + + helper := datastore.NewHelperFromSession(ctx, s) + p, err := datastore.PathFromString(fmt.Sprintf("[%s] %s/", config.Datastore, config.VCH)) + if err != nil { + log.Panic(err) + } + helper.RootURL = *p + + var vapp *object.VirtualApp + var pool *object.ResourcePool + if s.IsVC() && !config.RP { + // vapp + vapp, err = s.Finder.VirtualApp(ctx, fmt.Sprintf("%s/%s", config.ResourcePool, config.VCH)) + if err != nil { + log.Panic(err) + } + } else { + // pool + pool, err = s.Finder.ResourcePool(ctx, config.ResourcePool) + if err != nil { + log.Panic(err) + } + } + + // image store path + image, err := url.Parse(config.VCH) + if err != nil { + log.Panic(err) + } + + // VIC//images + rres, err := helper.LsDirs(ctx, "VIC") + if err != nil { + log.Panic(err) + } + r := 
rres.HostDatastoreBrowserSearchResults + + STORE := "" + for i := range r { + if strings.Contains(r[i].File[0].GetFileInfo().Path, "-") { + STORE = r[i].File[0].GetFileInfo().Path + break + } + } + + // VCH/*-bootstrap.iso + res, err := helper.Ls(ctx, "") + if err != nil { + log.Panic(err) + } + ISO := "" + for i := range res.File { + if strings.HasSuffix(res.File[i].GetFileInfo().Path, "-bootstrap.iso") { + ISO = res.File[i].GetFileInfo().Path + break + } + } + if ISO == "" { + log.Panic("Failed to find ISO file") + } + + // bars + progressBars, err := multibar.New() + if err != nil { + log.Panic(err) + } + progressBars.Printf("\nConcurrent testing...\n\n") + + create := progressBars.MakeBar(config.Concurrency-1, fmt.Sprintf("%16s", "Creating")) + start := func(progress int) {} + stop := func(progress int) {} + if config.Start { + start = progressBars.MakeBar(config.Concurrency-1, fmt.Sprintf("%16s", "Starting")) + stop = progressBars.MakeBar(config.Concurrency-1, fmt.Sprintf("%16s", "Stopping")) + } + destroy := progressBars.MakeBar(config.Concurrency-1, fmt.Sprintf("%16s", "Destroying")) + + for i := range progressBars.Bars { + progressBars.Bars[i].ShowTimeElapsed = false + } + + go progressBars.Listen() + + var mu sync.Mutex + var vms []*vm.VirtualMachine + + wrap := func(f func(i int) error, p multibar.ProgressFunc) { + var wg sync.WaitGroup + + errs := make(chan error, config.Concurrency) + for i := 0; i < config.Concurrency; i++ { + wg.Add(1) + + go func(i int) { + defer wg.Done() + + errs <- f(i) + }(i) + } + + go func() { + wg.Wait() + close(errs) + }() + + idx := 0 + for err := range errs { + if err != nil { + progressBars.Printf("ERROR: %s", err) + } + p(idx) + idx++ + } + } + + createFunc := func(i int) error { + name := fmt.Sprintf("%d-vm", i) + + specconfig := &spec.VirtualMachineConfigSpecConfig{ + NumCPUs: 1, + MemoryMB: config.MemoryMB, + + ID: name, + Name: name, + VMFullName: name, + + ParentImageID: parent, + BootMediaPath: fmt.Sprintf("[%s] 
%s/%s", config.Datastore, config.VCH, ISO), + VMPathName: fmt.Sprintf("[%s]", config.Datastore), + + ImageStoreName: STORE, + ImageStorePath: image, + } + + // Create a linux guest + linux, err := guest.NewLinuxGuest(ctx, s, specconfig) + if err != nil { + return err + } + h := linux.Spec().Spec() + + var res *types.TaskInfo + if s.IsVC() && !config.RP { + res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { + return vapp.CreateChildVM(ctx, *h, nil) + }) + if err != nil { + return err + + } + } else { + res, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { + return s.VMFolder.CreateVM(ctx, *h, pool, nil) + }) + if err != nil { + return err + } + } + mu.Lock() + vms = append(vms, vm.NewVirtualMachine(ctx, s, res.Result.(types.ManagedObjectReference))) + mu.Unlock() + + return nil + } + wrap(createFunc, create) + + if config.Start { + startFunc := func(i int) error { + _, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { + return vms[i].PowerOn(ctx) + }) + return err + } + wrap(startFunc, start) + + stopFunc := func(i int) error { + _, err := tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { + return vms[i].PowerOff(ctx) + }) + return err + } + wrap(stopFunc, stop) + } + + destroyFunc := func(i int) error { + v, err := vms[i].VMPathName(ctx) + if err != nil { + return err + } + + concurrent := false + // if DeleteExceptDisks succeeds on VC, it leaves the VM orphan so we need to call Unregister + // if DeleteExceptDisks succeeds on ESXi, no further action needed + // if DeleteExceptDisks fails, we should call Unregister and only return an error if that fails too + // Unregister sometimes can fail with ManagedObjectNotFound so we ignore it + _, err = tasks.WaitForResult(ctx, func(ctx context.Context) (tasks.Task, error) { + return vms[i].DeleteExceptDisks(ctx) + }) + if err != nil { + switch f := err.(type) { + case task.Error: + switch f.Fault().(type) { + 
case *types.ConcurrentAccess: + log.Printf("DeleteExceptDisks failed for %d with ConcurrentAccess error. Ignoring it", i) + concurrent = true + } + // err but not concurrent + if !concurrent { + return err + } + } + } + if concurrent && vms[i].IsVC() { + if err := vms[i].Unregister(ctx); err != nil { + if !IsNotFoundError(err) && !IsConcurrentAccessError(err) { + return err + } + } + } + + fm := s.Datastore.NewFileManager(s.Datacenter, true) + // remove from datastore + if err := fm.Delete(ctx, path.Dir(v)); err != nil { + return err + } + + return nil + } + wrap(destroyFunc, destroy) +} diff --git a/tests/integration-test.sh b/tests/integration-test.sh index ac19d35dec..a9290abdf6 100755 --- a/tests/integration-test.sh +++ b/tests/integration-test.sh @@ -20,8 +20,17 @@ set +x dpkg -l > package.list - +set -x buildinfo=$(drone build info vmware/vic $DRONE_BUILD_NUMBER) +prNumber=$(drone build info --format "{{ .Ref }}" vmware/vic $DRONE_BUILD_NUMBER | cut -f 3 -d'/') +set +x +prBody=$(curl https://api.github.com/repos/vmware/vic/pulls/$prNumber?access_token=$GITHUB_AUTOMATION_API_KEY | jq -r ".body") + +if (echo $prBody | grep -q "\[fast fail\]"); then + export FAST_FAILURE=1 +else + export FAST_FAILURE=0 +fi if [[ $DRONE_BRANCH == "master" || $DRONE_BRANCH == "releases/"* ]] && [[ $DRONE_REPO == "vmware/vic" ]] && [[ $DRONE_BUILD_EVENT == "push" ]]; then echo "Running full CI for $DRONE_BUILD_EVENT on $DRONE_BRANCH" @@ -29,12 +38,12 @@ if [[ $DRONE_BRANCH == "master" || $DRONE_BRANCH == "releases/"* ]] && [[ $DRONE elif [[ $DRONE_BRANCH == *"refs/tags"* ]] && [[ $DRONE_REPO == "vmware/vic" ]] && [[ $DRONE_BUILD_EVENT == "tag" ]]; then echo "Running only Group11-Upgrade and 7-01-Regression for $DRONE_BUILD_EVENT on $DRONE_BRANCH" pybot --removekeywords TAG:secret --suite Group11-Upgrade --suite 7-01-Regression tests/test-cases -elif grep -q "\[full ci\]" <(drone build info vmware/vic $DRONE_BUILD_NUMBER); then +elif (echo $prBody | grep -q "\[full ci\]"); then echo 
"Running full CI as per commit message" pybot --removekeywords TAG:secret --exclude skip tests/test-cases -elif (echo $buildinfo | grep -q "\[specific ci="); then +elif (echo $prBody | grep -q "\[specific ci="); then echo "Running specific CI as per commit message" - buildtype=$(echo $buildinfo | grep "\[specific ci=") + buildtype=$(echo $prBody | grep "\[specific ci=") testsuite=$(echo $buildtype | awk -F"\[specific ci=" '{sub(/\].*/,"",$2);print $2}') pybot --removekeywords TAG:secret --suite $testsuite --suite 7-01-Regression tests/test-cases else @@ -47,7 +56,7 @@ rc="$?" timestamp=$(date +%s) outfile="integration_logs_"$DRONE_BUILD_NUMBER"_"$DRONE_COMMIT".zip" -zip -9 -j $outfile output.xml log.html report.html package.list *container-logs*.zip *.log /var/log/vic-machine-server/vic-machine-server.log +zip -9 -j $outfile output.xml log.html report.html package.list *container-logs*.zip *.log /var/log/vic-machine-server/vic-machine-server.log *.debug # GC credentials keyfile="/root/vic-ci-logs.key" diff --git a/tests/local-integration-test.sh b/tests/local-integration-test.sh index 7291d7c81a..ea8104ed27 100755 --- a/tests/local-integration-test.sh +++ b/tests/local-integration-test.sh @@ -78,7 +78,7 @@ pipeline: # dont clone submodules recursive: false vic-integration-test-on-pr: - image: gcr.io/eminent-nation-87317/vic-integration-test:1.42 + image: gcr.io/eminent-nation-87317/vic-integration-test:1.44 pull: true environment: GITHUB_AUTOMATION_API_KEY: $GITHUB_TOKEN @@ -99,4 +99,4 @@ pipeline: - $cmd ${tests:-tests/test-cases} CONFIG -drone exec --privileged --local $tmpYml +drone exec --local $tmpYml diff --git a/tests/longevity-tests/Dockerfile.6.5 b/tests/longevity-tests/Dockerfile.6.5 index 3c5a7535b1..195cf77413 100644 --- a/tests/longevity-tests/Dockerfile.6.5 +++ b/tests/longevity-tests/Dockerfile.6.5 @@ -13,6 +13,11 @@ # limitations under the License FROM longevity-base -ENV GOVC_URL=blinky.eng.vmware.com -ENV TEST_URL_ARRAY=blinky.eng.vmware.com -ENV 
STATIC_VCH_OPTIONS="--insecure-registry=vic-executor1.vcna.io --public-network-ip 10.17.109.7/24 --public-network-gateway 10.17.109.253 --dns-server 10.118.81.1" +ENV GOVC_URL=10.133.146.15 +ENV TEST_URL_ARRAY=10.133.146.15 +ENV GOVC_DATASTORE=CPBU_2TB_01 +ENV TEST_DATASTORE=CPBU_2TB_01 +ENV BRIDGE_NETWORK=vxw-dvs-3444-virtualwire-1-sid-5000-VCH-LS-1 +ENV PUBLIC_NETWORK=DPortGroup +ENV TEST_DATACENTER=/Datacenter +ENV STATIC_VCH_OPTIONS="--insecure-registry=vic-executor1.vcna.io --public-network-ip 10.197.37.210/23 --public-network-gateway 10.197.37.253 --dns-server 10.142.7.21" diff --git a/tests/longevity-tests/Dockerfile.foundation b/tests/longevity-tests/Dockerfile.foundation index 6ded0614df..753a95591a 100644 --- a/tests/longevity-tests/Dockerfile.foundation +++ b/tests/longevity-tests/Dockerfile.foundation @@ -12,18 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License -FROM gcr.io/eminent-nation-87317/vic-integration-test:1.42 +FROM gcr.io/eminent-nation-87317/vic-integration-test:1.44 RUN mkdir -p /go/src/github.com/vmware/vic COPY secrets /go/src/github.com/vmware/vic ENV LONGEVITY=1 ENV GOVC_INSECURE=1 ENV GOVC_USERNAME=administrator@vsphere.local -ENV GOVC_DATASTORE=vsanDatastore -ENV TEST_DATASTORE=vsanDatastore ENV TEST_USERNAME=administrator@vsphere.local ENV TEST_TIMEOUT=3m -ENV TEST_RESOURCE=cls +ENV TEST_RESOURCE=Cluster ENV DOMAIN= -ENV BRIDGE_NETWORK=bridge -ENV PUBLIC_NETWORK=vm-network ENV DOCKER_API_VERSION=1.23 diff --git a/tests/manual-test-cases/Group19-ROBO/19-1-ROBO-SKU.md b/tests/manual-test-cases/Group19-ROBO/19-1-ROBO-SKU.md new file mode 100644 index 0000000000..97483ae121 --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/19-1-ROBO-SKU.md @@ -0,0 +1,24 @@ +Test 19-1 - ROBO SKU +======= + +# Purpose: +To verify that VIC works properly when a VCH is installed in a remote office branch office (ROBO) version of vSphere. + +# References: +1. 
[vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) + +# Environment: +This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation + +# Test Steps: +1. Deploy a new vCenter with stand alone hosts +2. Add the Enterprise license to the vCenter appliance +3. Assign the ROBO SKU license to each of the hosts within the vCenter +4. Install a VCH onto a particular multi-host cluster in the vCenter +5. Run a variety of docker operations on the VCH, including the regression test suite + +# Expected Outcome: +* All test steps should complete without error + +# Possible Problems: +None diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-19-ROBO-SKU.robot b/tests/manual-test-cases/Group19-ROBO/19-1-ROBO-SKU.robot similarity index 100% rename from tests/manual-test-cases/Group5-Functional-Tests/5-19-ROBO-SKU.robot rename to tests/manual-test-cases/Group19-ROBO/19-1-ROBO-SKU.robot diff --git a/tests/manual-test-cases/Group19-ROBO/19-2-ROBO-Container-Limit.md b/tests/manual-test-cases/Group19-ROBO/19-2-ROBO-Container-Limit.md new file mode 100644 index 0000000000..281a3dce6f --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/19-2-ROBO-Container-Limit.md @@ -0,0 +1,61 @@ +Test 19-2 - ROBO - Container VM Limit +======= + +# Purpose: +To verify that the total container VM limit feature works as expected in a vSphere ROBO Advanced environment. + +# References: +1. [vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) +2. [Limit total allowed containerVMs per VCH](https://github.com/vmware/vic/issues/7273) +3. [vic-machine inspect to report configured containerVM limit](https://github.com/vmware/vic/issues/7284) + +# Environment: +This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation. This test should be executed in the following topologies and should have vSAN enabled. 
+* 1 vCenter host with 3 clusters, where 1 cluster has 1 ESXi host and the other 2 clusters have 3 ESXi hosts each +* 2 vCenter hosts connected with ELM, where each vCenter host has a cluster/host/datacenter topology that emulates a customer environment (exact topology TBD) + +See https://confluence.eng.vmware.com/display/CNA/VIC+ROBO for more details. + +# Test Steps: +1. Deploy a ROBO Advanced vCenter testbed for both environments above +2. Install a VCH on a particular cluster in vCenter with a container VM limit of `y` +3. Use vic-machine inspect to verify the set container VM limit +4. Visit the VCH Admin page and verify the container VM limit is displayed in the VCH Info section +5. Create and run `y` (long-running) containers with the VCH +6. Create another (long-running) container so as to have `y+1` total containers, but only `y` running containers +7. Attempt to run the container created in Step 6 +8. Delete one of the containers created in Step 5 +9. Start the container created in Step 6 +10. Create (don't run) `x` (`x` < `y`) long-running containers to have a total of `y + x` containers +11. From the `y` already-running containers, assemble a list of `x` containers (using `docker ps -q` for example) +12. Concurrently start the containers in Step 10 and concurrently delete the containers in Step 11 +13. Check the number of running containers with `docker ps -q` +14. Use vic-machine configure to increase the container VM limit (new limit = `z`) +15. Use vic-machine inspect to verify the new container VM limit +16. Visit the VCH Admin page and verify the container VM limit is displayed in the VCH Info section +17. Create and run more containers and verify that up to a total of `z` containers can be run +18. Use vic-machine configure to set the limit to lower than the current number of containers running +19. Attempt to run more containers +20. Delete/stop some containers so the current container VM count is lower than the set limit +21. 
Attempt to create/run more containers until the set limit +22. Delete the VCH + +# Expected Outcome: +* Steps 1 and 2 should succeed +* Step 3's output should indicate the limit set in Step 2 +* Steps 4 and 5 should succeed +* Step 6 should succeed since the container limit applies to running containers +* Step 7 should fail since the container limit applies to running containers +* Steps 8-11 should succeed +* In Step 12, depending on the order in which operations are processed, a container should fail to start if it breaches the running container limit +* In Step 13, the number of running containers should be `<= y`, the current running container limit +* Step 14 should succeed +* Step 15's output should indicate the limit set in Step 14 +* Step 16 should show the new container VM limit +* Step 17 should succeed +* Step 18 should succeed - exact behavior of existing running containers is TBD +* Step 19 should fail and should receive an error upon attempting to start "surplus" container VMs (exact behavior of existing running containers TBD) +* Steps 20-22 should succeed + +# Possible Problems: +None diff --git a/tests/manual-test-cases/Group19-ROBO/19-3-ROBO-VM-Placement.md b/tests/manual-test-cases/Group19-ROBO/19-3-ROBO-VM-Placement.md new file mode 100644 index 0000000000..5c3aec93cf --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/19-3-ROBO-VM-Placement.md @@ -0,0 +1,43 @@ +Test 19-3 - ROBO - VM Placement +======= + +# Purpose: +To verify that the VM placement feature specified works as expected in a vSphere ROBO Advanced environment without DRS. +The current placement strategy is to avoid bad host selection, instead of selecting the "best" possible host. + +# References: +1. [vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) +2. 
[VM Placement without DRS](https://github.com/vmware/vic/issues/7282) + +# Environment: +This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation. This test should be executed in the following topologies and should have vSAN enabled. +* 1 vCenter host with 3 clusters, where 1 cluster has 1 ESXi host and the other 2 clusters have 3 ESXi hosts each +* 2 vCenter hosts connected with ELM, where each vCenter host has a cluster/host/datacenter topology that emulates a customer environment (exact topology TBD) + +In addition, this test should be run in multi-ESX-host and single-ESX-host cluster topologies. + +See https://confluence.eng.vmware.com/display/CNA/VIC+ROBO for more details. + +# Test Steps: +1. Deploy a ROBO Advanced vCenter testbed for both environments above +2. Install a VCH on a particular cluster on vCenter - see note in [Environment](#environment) +3. Deploy containers that will consume resources predictably (e.g. the `progrium/stress` image) +4. Measure cluster metrics and gather resource consumption +5. Create and run regular containers such as `busybox` +6. Create and run enough containers to consume all available cluster resources +7. Attempt to create and run more containers +8. Delete some containers +9. Create and run a few containers +10. Delete the VCH + +# Expected Outcome: +* Step 1 should succeed +* Step 2 should succeed and the VCH should be placed on a host that satisfies the license and other feature requirements +* Steps 3-4 should succeed and containers should be placed on ESX hosts in the cluster according to the criteria defined in point 2 of [References](#references) +* Step 5 should succeed and containers should be placed on ESX hosts in the cluster that have available resources according to the criteria defined in point 2 of [References](#references). 
In the multi-host cluster environment, the cluster resource utilization level should be as expected given containerVM sizes, cluster capacity and placement logic. +* Step 6 should succeed +* Step 7 should fail since the available resources are exhausted +* Steps 8-10 should succeed + +# Possible Problems: +None diff --git a/tests/manual-test-cases/Group19-ROBO/19-4-ROBO-License-Features.md b/tests/manual-test-cases/Group19-ROBO/19-4-ROBO-License-Features.md new file mode 100644 index 0000000000..8dbe15dc05 --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/19-4-ROBO-License-Features.md @@ -0,0 +1,32 @@ +Test 19-4 - ROBO License Features +======= + +# Purpose: +To verify that the license and feature checks required for a ROBO Advanced environment are displayed and updated on VCH Admin. + +# References: +1. [vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) +2. [Provide License and Feature Check](https://github.com/vmware/vic/issues/7277) +3. [vic-admin to report on license and feature compliance](https://github.com/vmware/vic/issues/7276) + +# Environment: +This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation. This test should be executed in the following topologies and should have vSAN enabled. +* 1 vCenter host with 3 clusters, where 1 cluster has 1 ESXi host and the other 2 clusters have 3 ESXi hosts each +* 2 vCenter hosts connected with ELM, where each vCenter host has a cluster/host/datacenter topology that emulates a customer environment (exact topology TBD) + +See https://confluence.eng.vmware.com/display/CNA/VIC+ROBO for more details. + +# Test Steps: +1. Deploy a ROBO Advanced vCenter testbed for both environments above +2. Install a VCH on vCenter +3. Visit the VCH Admin page and verify that the License and Feature Status sections show that required license and features are present +4. 
Assign a more restrictive license such as ROBO Standard or Standard that does not have the required features (VDS, VSPC) to vCenter +5. Assign the above license to each of the hosts within the vCenter cluster +6. Refresh the VCH Admin page and verify that the License and Feature Status sections show that required license and features are not present +7. Delete the VCH + +# Expected Outcome: +* All test steps should complete without error + +# Possible Problems: +None diff --git a/tests/manual-test-cases/Group19-ROBO/19-5-ROBO-Vcenter-Connectivity.md b/tests/manual-test-cases/Group19-ROBO/19-5-ROBO-Vcenter-Connectivity.md new file mode 100644 index 0000000000..186c7ca8fa --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/19-5-ROBO-Vcenter-Connectivity.md @@ -0,0 +1,41 @@ +Test 19-5 - ROBO vCenter Connectivity +======= + +# Purpose: +To verify that the applications deployed in containerVMs in a ROBO Advanced environment are functional when the ESXi(s) hosting the containerVMs are disconnected from the vSphere host. This test exercises the WAN connectivity and resiliency support for a ROBO environment that could represent a customer's cluster topology. + +# References: +1. [vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) + +# Environment: +This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation. This test should be executed in the following topologies and should have vSAN enabled. +* 1 vCenter host with 3 clusters, where 1 cluster has 1 ESXi host and the other 2 clusters have 3 ESXi hosts each +* 2 vCenter hosts connected with ELM, where each vCenter host has a cluster/host/datacenter topology that emulates a customer environment (exact topology TBD) + +See https://confluence.eng.vmware.com/display/CNA/VIC+ROBO for more details. + +# Test Steps: +1. Deploy a ROBO Advanced vCenter testbed for both environments above +2. 
Deploy the VIC appliance OVA on vCenter for testing VIC Product as well +3. Once the OVA is powered on and initialized, populate Harbor with some images +4. Install a VCH on a cluster in vCenter +5. Log in to the Admiral UI +6. Add the VCH to the default project in Admiral +7. Using Admiral, deploy some containers through the VCH +8. Create and start some container services such as nginx, wordpress or a database +9. Run a multi-container application exercising network links with docker-compose +10. To simulate a WAN link outage, _abruptly_ disconnect each ESX host in the cluster from vCenter (possibly by changing firewall rules) +11. Verify that the containers/services/applications started in Steps 7-9 are still alive and responding +12. Pull an image from Harbor +13. Create/start a container +14. Re-connect all hosts in the cluster to vCenter +15. Create/start a container +16. Delete the VCH + +# Expected Outcome: +* Steps 1-12 should succeed +* Step 13 should fail since the vCenter host is disconnected from the VCH's host +* Steps 14-16 should succeed + +# Possible Problems: +None diff --git a/tests/manual-test-cases/Group19-ROBO/TestCases.md b/tests/manual-test-cases/Group19-ROBO/TestCases.md new file mode 100644 index 0000000000..fbad16cbbc --- /dev/null +++ b/tests/manual-test-cases/Group19-ROBO/TestCases.md @@ -0,0 +1,8 @@ +Group 19 - ROBO +======= + +* [Test 19-1 - ROBO-SKU](19-1-ROBO-SKU.md) +* [Test 19-2 - ROBO-Container-Limit](19-2-ROBO-Container-Limit.md) +* [Test 19-3 - ROBO-VM-Placement](19-3-ROBO-VM-Placement.md) +* [Test 19-4 - ROBO-License-Features](19-4-ROBO-License-Features.md) +* [Test 19-5 - ROBO-vCenter-Connectivity](19-5-ROBO-Vcenter-Connectivity.md) diff --git a/tests/manual-test-cases/Group21-Registries/21-1-Whitelist.robot b/tests/manual-test-cases/Group21-Registries/21-1-Whitelist.robot index 974530ba3c..801ae7807e 100644 --- a/tests/manual-test-cases/Group21-Registries/21-1-Whitelist.robot +++ 
b/tests/manual-test-cases/Group21-Registries/21-1-Whitelist.robot @@ -16,7 +16,7 @@ Documentation Test 21-01 - Whitelist Resource ../../resources/Util.robot Resource ../../resources/Harbor-Util.robot -Suite Setup Setup Harbor +Suite Setup Wait Until Keyword Succeeds 10x 10m Setup Harbor Suite Teardown Nimbus Cleanup ${list} ${false} Test Teardown Run Keyword If Test Failed Cleanup VIC Appliance On Test Server @@ -39,7 +39,7 @@ Simple ESXi Setup Remove Environment Variable PUBLIC_NETWORK Setup Harbor - Wait Until Keyword Succeeds 10x 10m Simple ESXi Setup + Simple ESXi Setup # Install a Harbor server with HTTPS a Harbor server with HTTP Install Harbor To Test Server protocol=https name=harbor-https diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-19-ROBO-SKU.md b/tests/manual-test-cases/Group5-Functional-Tests/5-19-ROBO-SKU.md deleted file mode 100644 index e86d507568..0000000000 --- a/tests/manual-test-cases/Group5-Functional-Tests/5-19-ROBO-SKU.md +++ /dev/null @@ -1,24 +0,0 @@ -Test 5-19 - ROBO SKU -======= - -# Purpose: -To verify that VIC works properly when a VCH is installed in a remote office branch office (ROBO) version of vSphere - -# References: -[1 - vSphere Remote Office and Branch Office](http://www.vmware.com/products/vsphere/remote-office-branch-office.html) - -# Environment: -This test requires access to VMware Nimbus cluster for dynamic ESXi and vCenter creation - -# Test Steps: -1. Deploy a new vCenter with stand alone hosts -2. Add the ROBO SKU license to the vCenter appliance -3. Assign the ROBO SKU license to each of the hosts within the vCenter -4. Install the VIC appliance onto one of the hosts in the vCenter -5. 
Run a variety of docker operation on the VCH - -# Expected Outcome: -All test steps should complete without error - -# Possible Problems: -None diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-24-Non-vSphere-Local-Cluster.robot b/tests/manual-test-cases/Group5-Functional-Tests/5-24-Non-vSphere-Local-Cluster.robot index e5b521b58c..98e71e1bb1 100644 --- a/tests/manual-test-cases/Group5-Functional-Tests/5-24-Non-vSphere-Local-Cluster.robot +++ b/tests/manual-test-cases/Group5-Functional-Tests/5-24-Non-vSphere-Local-Cluster.robot @@ -34,7 +34,9 @@ Non vSphere Local Cluster Install Setup Set Suite Variable @{list} @{esx_names}[0] @{esx_names}[1] @{esx_names}[2] %{NIMBUS_USER}-${vc} # Finish vCenter deploy - ${output}= Wait For Process ${pid} + ${output}= Wait For Process ${pid} timeout=70 minutes on_timeout=terminate + Log ${output.stdout} + Log ${output.stderr} Should Contain ${output.stdout} Overall Status: Succeeded Open Connection %{NIMBUS_GW} diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-25-OPS-User-Grant.robot b/tests/manual-test-cases/Group5-Functional-Tests/5-25-OPS-User-Grant.robot index 0449d31e9d..15060a5e05 100644 --- a/tests/manual-test-cases/Group5-Functional-Tests/5-25-OPS-User-Grant.robot +++ b/tests/manual-test-cases/Group5-Functional-Tests/5-25-OPS-User-Grant.robot @@ -17,8 +17,15 @@ Documentation Test 5-25 - OPS-User-Grant Resource ../../resources/Util.robot Suite Setup Wait Until Keyword Succeeds 10x 10m Ops User Create Suite Teardown Run Keyword And Ignore Error Nimbus Cleanup ${list} +Test Teardown Run Keyword If Test Failed Gather VC Logs *** Keywords *** + +Gather VC Logs + Log To Console Collecting VC logs .. 
+ Run Keyword And Ignore Error Gather Logs From ESX Server + Log To Console VC logs collected + Ops User Create [Timeout] 110 minutes Run Keyword And Ignore Error Nimbus Cleanup ${list} ${false} diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-26-Static-IP-Address.robot b/tests/manual-test-cases/Group5-Functional-Tests/5-26-Static-IP-Address.robot index 0f3d5adaba..c0269433b1 100644 --- a/tests/manual-test-cases/Group5-Functional-Tests/5-26-Static-IP-Address.robot +++ b/tests/manual-test-cases/Group5-Functional-Tests/5-26-Static-IP-Address.robot @@ -21,23 +21,49 @@ Suite Teardown Run Keyword And Ignore Error Nimbus Cleanup ${list} *** Keywords *** Static IP Address Create [Timeout] 110 minutes + Log To Console Starting Static IP Address test... Set Suite Variable ${NIMBUS_LOCATION} NIMBUS_LOCATION=wdc Run Keyword And Ignore Error Nimbus Cleanup ${list} ${false} + ${name}= Evaluate 'vic-5-26-' + str(random.randint(1000,9999)) modules=random + ${out}= Deploy Nimbus Testbed %{NIMBUS_USER} %{NIMBUS_PASSWORD} --noSupportBundles --plugin testng --vcvaBuild ${VC_VERSION} --esxBuild ${ESX_VERSION} --testbedName vic-simple-cluster --testbedSpecRubyFile /dbc/pa-dbc1111/mhagen/nimbus-testbeds/testbeds/vic-simple-cluster.rb --runName ${name} + Open Connection %{NIMBUS_GW} Wait Until Keyword Succeeds 10 min 30 sec Login %{NIMBUS_USER} %{NIMBUS_PASSWORD} - ${esx1} ${esx2} ${esx3} ${vc} ${esx1-ip} ${esx2-ip} ${esx3-ip} ${vc-ip}= Create a Simple VC Cluster dc1 cls - Log To Console Finished Creating Cluster ${vc} - Set Suite Variable @{list} ${esx1} ${esx2} ${esx3} %{NIMBUS_USER}-${vc} + ${vc-ip}= Get IP ${name}.vc.0 + ${pod}= Fetch POD ${name}.vc.0 + Set Suite Variable ${NIMBUS_POD} ${pod} + Close Connection + + Set Suite Variable @{list} %{NIMBUS_USER}-${name}.esx.0 %{NIMBUS_USER}-${name}.esx.1 %{NIMBUS_USER}-${name}.esx.2 %{NIMBUS_USER}-${name}.nfs.0 %{NIMBUS_USER}-${name}.vc.0 + Log To Console Finished Creating Cluster ${name} # Need to suggest which 
subnet/gateway to install the static IP address worker into ${pre} ${post}= Split String From Right ${vc-ip} . 1 ${out}= Get Static IP Address --gateway ${pre}.253 Set Suite Variable ${static} ${out} - Append To List ${list} %{STATIC_WORKER_NAME} + Append To List ${list} %{STATIC_WORKER_NAME} + + Log To Console Set environment variables up for GOVC + Set Environment Variable GOVC_URL ${vc-ip} + Set Environment Variable GOVC_USERNAME Administrator@vsphere.local + Set Environment Variable GOVC_PASSWORD Admin\!23 + + Log To Console Deploy VIC to the VC cluster + Set Environment Variable TEST_URL_ARRAY ${vc-ip} + Set Environment Variable TEST_USERNAME Administrator@vsphere.local + Set Environment Variable TEST_PASSWORD Admin\!23 + Set Environment Variable BRIDGE_NETWORK bridge + Set Environment Variable PUBLIC_NETWORK vm-network + Remove Environment Variable TEST_DATACENTER + Set Environment Variable TEST_DATASTORE nfs0-1 + Set Environment Variable TEST_RESOURCE cls + Set Environment Variable TEST_TIMEOUT 15m *** Test Cases *** Test Log To Console \nStarting test... 
+ Custom Testbed Keepalive /dbc/pa-dbc1111/mhagen + Install VIC Appliance To Test Server additional-args=--public-network-ip &{static}[ip]/&{static}[netmask] --public-network-gateway &{static}[gateway] --dns-server 10.170.16.48 Run Regression Tests \ No newline at end of file diff --git a/tests/manual-test-cases/Group5-Functional-Tests/5-3-Enhanced-Linked-Mode.robot b/tests/manual-test-cases/Group5-Functional-Tests/5-3-Enhanced-Linked-Mode.robot index 55201116dd..013724a4bb 100644 --- a/tests/manual-test-cases/Group5-Functional-Tests/5-3-Enhanced-Linked-Mode.robot +++ b/tests/manual-test-cases/Group5-Functional-Tests/5-3-Enhanced-Linked-Mode.robot @@ -62,7 +62,7 @@ Enhanced Link Mode Setup ${esx6-ip}= Get From List ${esx-ips} 2 # Finish test bed deploy - ${output}= Wait For Process ${pid} + ${output}= Wait For Process ${pid} timeout=70 minutes on_timeout=terminate Log ${output.stdout} Log ${output.stderr} Should Be Equal As Integers ${output.rc} 0 @@ -99,7 +99,6 @@ Enhanced Link Mode Setup Set Environment Variable GOVC_URL ${vc1-ip} Set Environment Variable GOVC_USERNAME administrator@vsphere.local Set Environment Variable GOVC_PASSWORD Admin!23 - ${license}= Run govc license.ls # First VC cluster diff --git a/tests/manual-test-cases/TestGroups.md b/tests/manual-test-cases/TestGroups.md index 256c71a0c1..c8ca3ceeb2 100644 --- a/tests/manual-test-cases/TestGroups.md +++ b/tests/manual-test-cases/TestGroups.md @@ -20,9 +20,11 @@ VIC Manual Test Suite - [Group 18 - VIC UI](Group18-VIC-UI/TestCases.md) - +[Group 19 - ROBO](Group19-ROBO/TestCases.md) +- [Group 20 - Security](Group20-Security/TestCases.md) - [Group 21 - Whitelist](Group21-Registries/TestCases.md) - [Group 23 - Future Tests](Group23-Future-Tests/TestCases.md) -- \ No newline at end of file +- diff --git a/tests/nightly/upload-logs.sh b/tests/nightly/upload-logs.sh index 40083befa0..889fb032ca 100755 --- a/tests/nightly/upload-logs.sh +++ b/tests/nightly/upload-logs.sh @@ -24,9 +24,9 @@ 
outfile="vic_nightly_logs_"$1".zip" echo $outfile if [ -d "60" ]; then - /usr/bin/zip -9 -r $outfile 60 *.zip *.log *.debug + /usr/bin/zip -9 -r $outfile 60 *.zip *.log *.debug *.tgz elif [ -d "65" ]; then - /usr/bin/zip -9 -r $outfile 65 *.zip *.log *.debug + /usr/bin/zip -9 -r $outfile 65 *.zip *.log *.debug *.tgz else echo "No output directories to upload!" exit 1 diff --git a/tests/pass-rate.sh b/tests/pass-rate.sh index d4ab97c07e..3fe0665a3a 100755 --- a/tests/pass-rate.sh +++ b/tests/pass-rate.sh @@ -13,15 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -curl -s https://api.github.com/repos/vmware/vic/commits?access_token=$GITHUB_AUTOMATION_API_KEY | jq -r ".[].sha" | xargs -n1 -I{} curl -s https://api.github.com/repos/vmware/vic/statuses/{}?access_token=$GITHUB_AUTOMATION_API_KEY | jq -r ".[-0].state" > status.out +commits=$(curl -s https://api.github.com/repos/vmware/vic/commits?access_token=$GITHUB_AUTOMATION_API_KEY | jq -r ' map(.sha) | join(",")') +curl -s https://api.github.com/repos/vmware/vic/statuses/{$commits}?access_token=$GITHUB_AUTOMATION_API_KEY | jq '.[] | select((.context == "continuous-integration/vic/push") and (.state != "pending")) | "\(.target_url): \(.state)"' | tee status.out failures=$(cat status.out | grep -c failure) -echo "Number of failed merges to master in the last 30 merges: $failures" successes=$(cat status.out | grep -c success) -echo "Number of successful merges to master in the last 30 merges: $successes" let total=$successes+$failures passrate=$(bc -l <<< "scale=2;100 * ($successes / $total)") -echo "Current CI passrate: $passrate" -curl --max-time 10 --retry 3 -s -d "payload={'channel': '#vic-bots', 'text': 'Current CI passrate: $passrate%'}" "$SLACK_URL" +echo "Number of failed merges to master in the last $total merges: $failures" +echo "Number of successful merges to master in the last $total merges: $successes" + +echo "Current vmware/vic CI passrate: 
$passrate" +curl --max-time 10 --retry 3 -s -d "payload={'channel': '#vic-bots', 'text': 'Current vmware/vic CI passrate: $passrate%'}" "$SLACK_URL" diff --git a/tests/resources/Github-Util.robot b/tests/resources/Github-Util.robot index ae154d691a..8ca6208edc 100644 --- a/tests/resources/Github-Util.robot +++ b/tests/resources/Github-Util.robot @@ -34,4 +34,14 @@ Post Comment To Github Issue \ ${status} ${result}= Run Keyword And Ignore Error Post https://api.github.com/repos/vmware/vic/issues/${num}/comments?access_token\=%{GITHUB_AUTOMATION_API_KEY} data={"body": "${comment}"} \ Exit For Loop If '${status}' \ Sleep 1 - Should Be Equal ${result.status_code} ${201} \ No newline at end of file + Should Be Equal ${result.status_code} ${201} + +Check VMware Organization Membership + [Arguments] ${username} + [Tags] secret + :FOR ${idx} IN RANGE 0 5 + \ ${status} ${result}= Run Keyword And Ignore Error Get https://api.github.com/orgs/vmware/members/${username}?access_token\=%{GITHUB_AUTOMATION_API_KEY} + \ Exit For Loop If '${status}' + \ Sleep 1 + ${isMember}= Run Keyword And Return Status Should Be Equal ${result.status_code} ${204} + [Return] ${isMember} diff --git a/tests/resources/Harbor-Util.robot b/tests/resources/Harbor-Util.robot index a2c7390ec9..d9e7d38b74 100644 --- a/tests/resources/Harbor-Util.robot +++ b/tests/resources/Harbor-Util.robot @@ -26,9 +26,14 @@ ${harbor_cert} getcert ${ova_harbor_admin_password} harbor-admin-passwd *** Keywords *** -Install Harbor To Test Server +Secret Install Harbor To Test Server [Tags] secret - [Arguments] ${name}=harbor ${protocol}=http ${verify}=off ${db_password}=%{TEST_PASSWORD} ${user}=%{TEST_USERNAME} ${password}=%{TEST_PASSWORD} ${host}=%{TEST_URL_ARRAY} ${datastore}=%{TEST_DATASTORE} ${network}=VM Network + [Arguments] ${name} ${protocol} ${verify} ${host} ${datastore} ${network} + ${out}= Run ovftool --noSSLVerify --acceptAllEulas --datastore=${datastore} --name=${name} --net:"Network 1"='${network}' 
--diskMode=thin --powerOn --X:waitForIp --X:injectOvfEnv --X:enableHiddenProperties --prop:root_pwd=%{TEST_PASSWORD} --prop:harbor_admin_password=%{TEST_PASSWORD} --prop:db_password=%{TEST_PASSWORD} --prop:auth_mode=db_auth --prop:verify_remote_cert=${verify} --prop:protocol=${protocol} ${HARBOR_VERSION}.ova 'vi://%{TEST_USERNAME}:%{TEST_PASSWORD}@${host}' + [Return] ${out} + +Install Harbor To Test Server + [Arguments] ${name}=harbor ${protocol}=http ${verify}=off ${host}=%{TEST_URL_ARRAY} ${datastore}=%{TEST_DATASTORE} ${network}=VM Network Log To Console \nFetching harbor ova... ${status} ${message}= Run Keyword And Ignore Error OperatingSystem.File Should Exist ${HARBOR_VERSION}.ova ${out}= Run Keyword If '${status}' == 'FAIL' Run wget https://github.com/vmware/harbor/releases/download/${HARBOR_SHORT_VERSION}/${HARBOR_VERSION}.ova @@ -42,7 +47,8 @@ Install Harbor To Test Server ${rc} ${output}= Run Keyword If '%{HOST_TYPE}' == 'VC' Set Suite Variable ${host} @{URLs}[${IDX}]%{TEST_DATACENTER}/host/%{TEST_RESOURCE} Log To Console \nDeploying ova... 
- ${out}= Run ovftool --noSSLVerify --acceptAllEulas --datastore=${datastore} --name=${name} --net:"Network 1"='${network}' --diskMode=thin --powerOn --X:waitForIp --X:injectOvfEnv --X:enableHiddenProperties --prop:root_pwd=${password} --prop:harbor_admin_password=${password} --prop:db_password=${db_password} --prop:auth_mode=db_auth --prop:verify_remote_cert=${verify} --prop:protocol=${protocol} ${HARBOR_VERSION}.ova 'vi://${user}:${password}@${host}' + ${out}= Secret Install Harbor To Test Server ${name} ${protocol} ${verify} ${host} ${datastore} ${network} + Log ${out} Should Contain ${out} Received IP address: Should Not Contain ${out} None diff --git a/tests/resources/Nimbus-Util.robot b/tests/resources/Nimbus-Util.robot index e7ab6f7d66..a83842aede 100644 --- a/tests/resources/Nimbus-Util.robot +++ b/tests/resources/Nimbus-Util.robot @@ -444,17 +444,24 @@ Create Static IP Worker Open Connection %{NIMBUS_GW} Wait Until Keyword Succeeds 10 min 30 sec Login %{NIMBUS_USER} %{NIMBUS_PASSWORD} Log To Console Create a new static ip address worker... 
- ${out}= Execute Command ${NIMBUS_LOCATION} nimbus-ctl --silentObjectNotFoundError kill '%{NIMBUS_USER}-static-worker' && ${NIMBUS_LOCATION} nimbus-worker-deploy --enableStaticIpService static-worker ${suggested-gateway} + ${name}= Evaluate 'static-worker-' + str(random.randint(1000,9999)) + str(time.clock()) modules=random,time + Log To Console \nDeploying static ip worker: ${name} + ${out}= Execute Command ${NIMBUS_LOCATION} nimbus-ctl --silentObjectNotFoundError kill '%{NIMBUS_USER}-static-worker' && ${NIMBUS_LOCATION} nimbus-worker-deploy --nimbus ${NIMBUS_POD} --enableStaticIpService ${name} Should Contain ${out} "deploy_status": "success" - Set Environment Variable STATIC_WORKER_NAME %{NIMBUS_USER}-static-worker - ${ip}= Get IP static-worker + + ${pod}= Fetch POD ${name} + Run Keyword If '${pod}' != '${NIMBUS_POD}' Kill Nimbus Server %{NIMBUS_USER} %{NIMBUS_PASSWORD} %{NIMBUS_USER}-${name} + Run Keyword If '${pod}' != '${NIMBUS_POD}' Fail Nimbus pod suggestion failed + + Set Environment Variable STATIC_WORKER_NAME %{NIMBUS_USER}-${name} + ${ip}= Get IP ${name} Set Environment Variable STATIC_WORKER_IP ${ip} Close Connection Get Static IP Address [Arguments] ${suggested-gateway}=${EMPTY} ${status} ${message}= Run Keyword And Ignore Error Environment Variable Should Be Set STATIC_WORKER_IP - Run Keyword If '${status}' == 'FAIL' Create Static IP Worker ${suggested-gateway} + Run Keyword If '${status}' == 'FAIL' Wait Until Keyword Succeeds 10x 10s Create Static IP Worker Log To Console Curl a new static ip address from the created worker... 
${out}= Run curl -s http://%{STATIC_WORKER_IP}:4827/nsips diff --git a/tests/resources/VCH-Util.robot b/tests/resources/VCH-Util.robot index 2096d9e432..66dfce4f89 100644 --- a/tests/resources/VCH-Util.robot +++ b/tests/resources/VCH-Util.robot @@ -26,8 +26,8 @@ Set Test Environment Variables Run Keyword If '${status}' == 'FAIL' Set Environment Variable PUBLIC_NETWORK 'VM Network' ${status} ${message}= Run Keyword And Ignore Error Environment Variable Should Be Set TEST_DATACENTER Run Keyword If '${status}' == 'FAIL' Set Environment Variable TEST_DATACENTER ${SPACE} - ${status} ${message}= Run Keyword And Ignore Error Environment Variable Should Be Set DRONE_MACHINE - Run Keyword If '${status}' == 'FAIL' Set Environment Variable DRONE_MACHINE 'local' + ${status} ${message}= Run Keyword And Ignore Error Environment Variable Should Be Set DRONE_HOSTNAME + Run Keyword If '${status}' == 'FAIL' Set Environment Variable DRONE_HOSTNAME 'local' @{URLs}= Split String %{TEST_URL_ARRAY} ${len}= Get Length ${URLs} @@ -42,14 +42,15 @@ Set Test Environment Variables Should Be Equal As Integers ${rc} 0 Set Environment Variable TEST_THUMBPRINT ${thumbprint} Log To Console \nTEST_URL=%{TEST_URL} - Log To Console \nDRONE_MACHINE=%{DRONE_MACHINE} + Log To Console \nDRONE_HOSTNAME=%{DRONE_HOSTNAME} ${worker_date}= Run date Log To Console \nWorker_Date=${worker_date} - ${server_date}= Run govc host.date.info - Log To Console \nTest_Server_Date=\n${server_date}\n - + ${rc} ${host}= Run And Return Rc And Output govc ls host Should Be Equal As Integers ${rc} 0 + ${out}= Run govc ls -t HostSystem ${host} | xargs -I% -n1 govc host.date.info -host\=% | grep 'date and time' + Log To Console \nTest_Server_Dates=\n${out}\n + ${status} ${message}= Run Keyword And Ignore Error Environment Variable Should Be Set TEST_RESOURCE Run Keyword If '${status}' == 'FAIL' Set Environment Variable TEST_RESOURCE ${host}/Resources Set Environment Variable GOVC_RESOURCE_POOL %{TEST_RESOURCE} @@ -83,6 +84,7 @@ 
Set Test Environment Variables Set Test VCH Name ${name}= Evaluate 'VCH-%{DRONE_BUILD_NUMBER}-' + str(random.randint(1000,9999)) modules=random Set Environment Variable VCH-NAME ${name} + Log Set VCH-NAME as ${name} Set List Of Env Variables [Arguments] ${vars} @@ -128,7 +130,9 @@ Get Docker Params ${ip}= Strip String @{hostParts}[0] ${port}= Strip String @{hostParts}[1] Set Environment Variable VCH-IP ${ip} + Log Set VCH-IP as ${ip} Set Environment Variable VCH-PORT ${port} + Log Set VCH-PORT as ${port} :FOR ${index} ${item} IN ENUMERATE @{output} \ ${status} ${message}= Run Keyword And Ignore Error Should Contain ${item} http @@ -143,6 +147,7 @@ Get Docker Params ... ELSE Split String From Right ${ext-ip} ${SPACE} 1 ${ext-ip}= Strip String ${ext-ip} Set Environment Variable EXT-IP ${ext-ip} + Log Set EXT-IP as ${ext-ip} ${status}= Run Keyword And Return Status Should Match Regexp ${line} msg\="([^"]*)" @@ -272,6 +277,7 @@ Install VIC Appliance To Test Server [Arguments] ${vic-machine}=bin/vic-machine-linux ${appliance-iso}=bin/appliance.iso ${bootstrap-iso}=bin/bootstrap.iso ${certs}=${true} ${vol}=default ${cleanup}=${true} ${debug}=1 ${additional-args}=${EMPTY} Set Test Environment Variables ${output}= Install VIC Appliance To Test Server With Current Environment Variables ${vic-machine} ${appliance-iso} ${bootstrap-iso} ${certs} ${vol} ${cleanup} ${debug} ${additional-args} + Log ${output} [Return] ${output} Install VIC Appliance To Test Server With Current Environment Variables @@ -300,11 +306,11 @@ Install VIC Appliance To Test Server With Current Environment Variables Run VIC Machine Command [Tags] secret [Arguments] ${vic-machine} ${appliance-iso} ${bootstrap-iso} ${certs} ${vol} ${debug} ${additional-args} - ${output}= Run Keyword If ${certs} Run ${vic-machine} create --debug ${debug} --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} 
--appliance-iso=${appliance-iso} --bootstrap-iso=${bootstrap-iso} --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --insecure-registry harbor.ci.drone.local --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:${vol} --container-network=%{PUBLIC_NETWORK}:public ${vicmachinetls} ${additional-args} + ${output}= Run Keyword If ${certs} Run ${vic-machine} create --debug ${debug} --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=${appliance-iso} --bootstrap-iso=${bootstrap-iso} --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --insecure-registry wdc-harbor-ci.eng.vmware.com --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:${vol} --container-network=%{PUBLIC_NETWORK}:public ${vicmachinetls} ${additional-args} Run Keyword If ${certs} Should Contain ${output} Installer completed successfully Return From Keyword If ${certs} ${output} - ${output}= Run Keyword Unless ${certs} Run ${vic-machine} create --debug ${debug} --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=${appliance-iso} --bootstrap-iso=${bootstrap-iso} --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --insecure-registry harbor.ci.drone.local --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:${vol} --container-network=%{PUBLIC_NETWORK}:public --no-tlsverify ${additional-args} + ${output}= Run Keyword Unless ${certs} Run ${vic-machine} create --debug ${debug} --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} 
--thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=${appliance-iso} --bootstrap-iso=${bootstrap-iso} --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --insecure-registry wdc-harbor-ci.eng.vmware.com --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:${vol} --container-network=%{PUBLIC_NETWORK}:public --no-tlsverify ${additional-args} Run Keyword Unless ${certs} Should Contain ${output} Installer completed successfully [Return] ${output} @@ -323,6 +329,7 @@ Run Secret VIC Machine Inspect Command Run VIC Machine Delete Command ${rc} ${output}= Run Secret VIC Machine Delete Command %{VCH-NAME} + Log ${output} Wait Until Keyword Succeeds 6x 5s Check Delete Success %{VCH-NAME} Should Be Equal As Integers ${rc} 0 Should Contain ${output} Completed successfully @@ -332,11 +339,13 @@ Run VIC Machine Delete Command Run VIC Machine Inspect Command [Arguments] ${name}=%{VCH-NAME} ${rc} ${output}= Run Secret VIC Machine Inspect Command ${name} + Log ${output} Get Docker Params ${output} ${true} Inspect VCH [Arguments] ${expected} ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux inspect --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --compute-resource=%{TEST_RESOURCE} + Log ${output} Should Be Equal As Integers ${rc} 0 Should Contain ${output} ${expected} @@ -353,6 +362,7 @@ VCH Docker Info Check UpdateInProgress [Arguments] ${expected} ${rc} ${output}= Run And Return Rc And Output govc vm.info -e %{VCH-NAME} | grep UpdateInProgress + Log ${output} Should Be Equal As Integers ${rc} 0 Should Contain ${output} ${expected} @@ -369,9 +379,12 @@ Gather Logs From Test Server Run Keyword And Continue On Failure Run zip %{VCH-NAME}-certs -r %{VCH-NAME} Secret Curl Container Logs ${name-suffix} ${host}= 
Get VM Host Name %{VCH-NAME} + Log ${host} ${out}= Run govc datastore.download -host ${host} %{VCH-NAME}/vmware.log %{VCH-NAME}-vmware${name-suffix}.log + Log ${out} Should Contain ${out} OK ${out}= Run govc datastore.download -host ${host} %{VCH-NAME}/tether.debug %{VCH-NAME}-tether${name-suffix}.debug + Log ${out} Should Contain ${out} OK Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc logs -log=vmkernel -n=10000 > vmkernel${name-suffix}.log @@ -382,6 +395,8 @@ Secret Curl Container Logs Log ${out} ${out}= Run curl -k -b vic-admin-cookies %{VIC-ADMIN}/container-logs.zip -o ${SUITE NAME}-%{VCH-NAME}-container-logs${name-suffix}.zip Log ${out} + ${out}= Run curl -k -b vic-admin-cookies %{VIC-ADMIN}/logs/port-layer.log + Should Not Contain ${out} SIGSEGV: segmentation violation Remove File vic-admin-cookies Check For The Proper Log Files @@ -426,6 +441,7 @@ Cleanup VIC Appliance On Test Server Return From Keyword If ${exclude} Log To Console Deleting the VCH appliance %{VCH-NAME} ${output}= Run VIC Machine Delete Command + Log ${output} Run Keyword And Ignore Error Cleanup VCH Bridge Network %{VCH-NAME} Run Keyword And Ignore Error Run govc datastore.rm %{VCH-NAME}-VOL [Return] ${output} @@ -439,13 +455,16 @@ Cleanup VCH Bridge Network Add VC Distributed Portgroup [Arguments] ${dvs} ${pg} ${out}= Run govc dvs.portgroup.add -nports 12 -dc=%{TEST_DATACENTER} -dvs=${dvs} ${pg} + Log ${out} Remove VC Distributed Portgroup [Arguments] ${pg} ${out}= Run govc object.destroy %{TEST_DATACENTER}/network/${pg} + Log ${out} Cleanup Datastore On Test Server ${out}= Run govc datastore.ls + Log ${out} ${exceptions}= Get Environment Variable VM_EXCEPTIONS ${EMPTY} ${items}= Split To Lines ${out} :FOR ${item} IN @{items} @@ -465,6 +484,7 @@ Cleanup Datastore On Test Server Cleanup Dangling VMs On Test Server ${out}= Run govc ls vm + Log ${out} ${exceptions}= Get Environment Variable VM_EXCEPTIONS ${EMPTY} ${vms}= Split To Lines ${out} :FOR ${vm} IN @{vms} @@ -484,6 +504,7 @@ 
Cleanup Dangling VMs On Test Server Cleanup Dangling Resource Pools On Test Server ${out}= Run govc ls host/*/Resources/* + Log ${out} ${exceptions}= Get Environment Variable VM_EXCEPTIONS ${EMPTY} ${pools}= Split To Lines ${out} :FOR ${pool} IN @{pools} @@ -503,6 +524,7 @@ Cleanup Dangling Resource Pools On Test Server Cleanup Dangling Networks On Test Server ${out}= Run govc ls network + Log ${out} ${exceptions}= Get Environment Variable VM_EXCEPTIONS ${EMPTY} ${nets}= Split To Lines ${out} :FOR ${net} IN @{nets} @@ -521,6 +543,7 @@ Cleanup Dangling Networks On Test Server Cleanup Dangling vSwitches On Test Server ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.vswitch.info | grep VCH + Log ${out} ${exceptions}= Get Environment Variable VM_EXCEPTIONS ${EMPTY} ${nets}= Split To Lines ${out} :FOR ${net} IN @{nets} @@ -582,6 +605,7 @@ Get VCH ID \ Continue For Loop If '${name}' != '${vch-name}' \ ${vch-id}= Strip String @{vch}[0] \ Log To Console \nVCH ID: ${vch-id} + \ Log VCH ID ${vch-id} \ Return From Keyword ${vch-id} # VCH upgrade helpers @@ -604,6 +628,7 @@ Clean up VIC Appliance And Local Binary Upgrade Log To Console \nUpgrading VCH... ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux upgrade --debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --force=true --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} + Log ${output} Should Contain ${output} Completed successfully Should Not Contain ${output} Rolling back upgrade Should Be Equal As Integers ${rc} 0 @@ -612,6 +637,7 @@ Upgrade with ID Log To Console \nUpgrading VCH using vch ID... 
${vch-id}= Get VCH ID %{VCH-NAME} ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux upgrade --debug 1 --id=${vch-id} --target=%{TEST_URL}%{TEST_DATACENTER} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --force=true --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} + Log ${output} Should Contain ${output} Completed successfully Should Not Contain ${output} Rolling back upgrade Should Be Equal As Integers ${rc} 0 @@ -620,6 +646,7 @@ Check Upgraded Version ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux version @{vers}= Split String ${output} ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux inspect --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --compute-resource=%{TEST_RESOURCE} + Log ${output} Should Contain ${output} Completed successfully Should Contain ${output} @{vers}[2] Should Not Contain ${output} %{INITIAL-VERSION} @@ -631,6 +658,7 @@ Check Original Version ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux version @{vers}= Split String ${output} ${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux inspect --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --compute-resource=%{TEST_RESOURCE} + Log ${output} Should Contain ${output} Completed successfully Should Contain ${output} %{INITIAL-VERSION} Should Be Equal As Integers ${rc} 0 @@ -640,6 +668,7 @@ Check Original Version Rollback Log To Console \nTesting rollback... 
${rc} ${output}= Run And Return Rc And Output bin/vic-machine-linux upgrade --debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --force=true --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --rollback + Log ${output} Should Contain ${output} Completed successfully Should Be Equal As Integers ${rc} 0 @@ -647,5 +676,6 @@ Enable VCH SSH [Arguments] ${vic-machine}=bin/vic-machine-linux ${rootpw}=%{TEST_PASSWORD} ${target}=%{TEST_URL}%{TEST_DATACENTER} ${password}=%{TEST_PASSWORD} ${thumbprint}=%{TEST_THUMBPRINT} ${name}=%{VCH-NAME} ${user}=%{TEST_USERNAME} ${resource}=%{TEST_RESOURCE} Log To Console \nEnable SSH on vch... ${rc} ${output}= Run And Return Rc And Output ${vic-machine} debug --rootpw ${rootpw} --target ${target} --password ${password} --thumbprint ${thumbprint} --name ${name} --user ${user} --compute-resource ${resource} --enable-ssh + Log ${output} Should Be Equal As Integers ${rc} 0 Should Contain ${output} Completed successfully diff --git a/tests/resources/dynamic-vars.py b/tests/resources/dynamic-vars.py index 2211dd5f8f..59f3046a18 100644 --- a/tests/resources/dynamic-vars.py +++ b/tests/resources/dynamic-vars.py @@ -31,7 +31,7 @@ def getEnvironment(): return TestEnvironment.LOCAL def getName(image): - return {TestEnvironment.DRONE: 'harbor.ci.drone.local/library/{}'.format(image), + return {TestEnvironment.DRONE: 'wdc-harbor-ci.eng.vmware.com/default-project/{}'.format(image), TestEnvironment.LONGEVITY: 'vic-executor1.vcna.io/library/{}'.format(image), TestEnvironment.LOCAL: image}[getEnvironment()] diff --git a/tests/resources/nimbus-testbeds/vic-simple-cluster.rb b/tests/resources/nimbus-testbeds/vic-simple-cluster.rb new file mode 100644 index 0000000000..cf649b18fc --- /dev/null +++ b/tests/resources/nimbus-testbeds/vic-simple-cluster.rb @@ -0,0 +1,116 @@ +oneGB = 1 * 1000 * 1000 # in KB + +$testbed = Proc.new do + { + "name" => "vic-simple-cluster", + "version" => 
3, + "esx" => (0..2).map do | idx | + { + "name" => "esx.#{idx}", + "vc" => "vc.0", + "style" => "fullInstall", + "desiredPassword" => "e2eFunctionalTest", + "disks" => [ 30 * oneGB, 30 * oneGB, 30 * oneGB], + "nics" => 2, + "mountNfs" => ["nfs.0"], + "clusterName" => "cls", + } + end, + + "nfs" => [ + { + "name" => "nfs.0", + "type" => "NFS41" + } + ], + + "vcs" => [ + { + "name" => "vc.0", + "type" => "vcva", + "dcName" => "dc1", + "clusters" => [{"name" => "cls", "vsan" => false, "enableDrs" => true, "enableHA" => true}], + "addHosts" => "allInSameCluster", + } + ], + + "postBoot" => Proc.new do |runId, testbedSpec, vmList, catApi, logDir| + esxList = vmList['esx'] + esxList.each do |host| + host.ssh do |ssh| + ssh.exec!("esxcli network firewall set -e false") + end + end + vc = vmList['vc'][0] + vim = VIM.connect vc.rbvmomiConnectSpec + datacenters = vim.serviceInstance.content.rootFolder.childEntity.grep(RbVmomi::VIM::Datacenter) + raise "Couldn't find a Datacenter precreated" if datacenters.length == 0 + datacenter = datacenters.first + Log.info "Found a datacenter successfully in the system, name: #{datacenter.name}" + clusters = datacenter.hostFolder.children + raise "Couldn't find a cluster precreated" if clusters.length == 0 + cluster = clusters.first + Log.info "Found a cluster successfully in the system, name: #{cluster.name}" + + dvs = datacenter.networkFolder.CreateDVS_Task( + :spec => { + :configSpec => { + :name => "test-ds" + }, + } + ).wait_for_completion + Log.info "Vds DSwitch created" + + dvpg1 = dvs.AddDVPortgroup_Task( + :spec => [ + { + :name => "management", + :type => :earlyBinding, + :numPorts => 12, + } + ] + ).wait_for_completion + Log.info "management DPG created" + + dvpg2 = dvs.AddDVPortgroup_Task( + :spec => [ + { + :name => "vm-network", + :type => :earlyBinding, + :numPorts => 12, + } + ] + ).wait_for_completion + Log.info "vm-network DPG created" + + dvpg3 = dvs.AddDVPortgroup_Task( + :spec => [ + { + :name => "bridge", + :type 
=> :earlyBinding, + :numPorts => 12, + } + ] + ).wait_for_completion + Log.info "bridge DPG created" + + Log.info "Add hosts to the DVS" + onecluster_pnic_spec = [ VIM::DistributedVirtualSwitchHostMemberPnicSpec({:pnicDevice => 'vmnic1'}) ] + dvs_config = VIM::DVSConfigSpec({ + :configVersion => dvs.config.configVersion, + :host => cluster.host.map do |host| + { + :operation => :add, + :host => host, + :backing => VIM::DistributedVirtualSwitchHostMemberPnicBacking({ + :pnicSpec => onecluster_pnic_spec + }) + } + end + }) + dvs.ReconfigureDvs_Task(:spec => dvs_config).wait_for_completion + Log.info "Hosts added to DVS successfully" + end + } +end + diff --git a/tests/test-cases/Group0-Bugs/7137.robot b/tests/test-cases/Group0-Bugs/7137.robot index c5aad14bd6..5759db0db6 100644 --- a/tests/test-cases/Group0-Bugs/7137.robot +++ b/tests/test-cases/Group0-Bugs/7137.robot @@ -33,7 +33,7 @@ Check for die events when forcing update via state refresh Should Be Equal As Integers ${rc} 0 # tight loop on inspect - this will force an inline state refresh - Run end=$(($(date +%s) + 6));while [ $(date +%s) -lt $end ]; do docker %{VCH-PARAMS} inspect ${id} >/dev/null; done + Run end=$(($(date +%s) + 24));while [ $(date +%s) -lt $end ]; do docker %{VCH-PARAMS} inspect ${id} >/dev/null; done ${rc} ${until}= Run And Return Rc And Output docker %{VCH-PARAMS} info --format '{{json .SystemTime}}' Should Be Equal As Integers ${rc} 0 diff --git a/tests/test-cases/Group1-Docker-Commands/1-02-Docker-Pull.robot b/tests/test-cases/Group1-Docker-Commands/1-02-Docker-Pull.robot index b61ca23f54..a3036d767c 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-02-Docker-Pull.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-02-Docker-Pull.robot @@ -89,7 +89,7 @@ Pull the same image concurrently \ Log ${res.stdout} \ Log ${res.stderr} \ Should Be Equal As Integers ${res.rc} 0 - \ Should Contain ${res.stdout} Downloaded newer image for library/redis:latest + \ Should Contain ${res.stdout} 
Downloaded newer image for default-project/redis:latest Pull two images that share layers concurrently ${pid1}= Start Process docker %{VCH-PARAMS} pull golang:1.7 shell=True diff --git a/tests/test-cases/Group1-Docker-Commands/1-04-Docker-Create.robot b/tests/test-cases/Group1-Docker-Commands/1-04-Docker-Create.robot index 4270ea3456..84a3932cc0 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-04-Docker-Create.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-04-Docker-Create.robot @@ -46,18 +46,20 @@ Create with anonymous volume Should Not Contain ${output} Error Create with named volume + Run docker %{VCH-PARAMS} volume rm test-named-vol ${disk-size}= Run docker %{VCH-PARAMS} logs $(docker %{VCH-PARAMS} start $(docker %{VCH-PARAMS} create -v test-named-vol:/testdir ${busybox} /bin/df -Ph) && sleep 10) | grep by-label | awk '{print $2}' Should Contain ${disk-size} 975.9M Create with a directory as a volume ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} create -v /dir:/dir ${busybox} Should Be Equal As Integers ${rc} 1 - Should Contain ${output} Error response from daemon: Bad request error from portlayer: vSphere Integrated Containers does not support mounting directories as a data volume. + Should Contain ${output} Error response from daemon: Bad request error from portlayer: mounting directories as a data volume is not supported. 
Create with complex volume topology - overriding image volume with named volume # Verify that only anonymous volumes are removed when superseding an image volume with a named volume ${suffix}= Evaluate '%{DRONE_BUILD_NUMBER}-' + str(random.randint(1000,9999)) modules=random Set Test Variable ${namedImageVol} non-anonymous-image-volume-${suffix} + ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} volume rm ${namedImageVol} ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} volume create --name ${namedImageVol} Should Be Equal As Integers ${rc} 0 Set Test Variable ${imageVolumeContainer} I-Have-Two-Anonymous-Volumes-${suffix} @@ -176,7 +178,10 @@ Create a container with custom amount of memory in Bytes Should Contain ${output} 2048MB Create a container using rest api call without HostConfig in the form data - ${output}= Run curl -sk --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem -H "Content-Type: application/json" -d '{"Image": "${busybox}", "Cmd": ["ping", "127.0.0.1"], "NetworkMode": "bridge"}' https://%{VCH-IP}:2376/containers/create + ${status}= Run Keyword And Return Status Environment Variable Should Be Set DOCKER_CERT_PATH + ${certs}= Set Variable If ${status} --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem ${EMPTY} + + ${output}= Run curl -sk ${certs} -H "Content-Type: application/json" -d '{"Image": "${busybox}", "Cmd": ["ping", "127.0.0.1"], "NetworkMode": "bridge"}' https://%{VCH-IP}:%{VCH-PORT}/containers/create Log ${output} Should contain ${output} "Warnings":null diff --git a/tests/test-cases/Group1-Docker-Commands/1-12-Docker-RMI.robot b/tests/test-cases/Group1-Docker-Commands/1-12-Docker-RMI.robot index 9100409406..70c6d383d0 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-12-Docker-RMI.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-12-Docker-RMI.robot @@ -29,7 +29,7 @@ Basic docker pull, restart, and remove image # Gather logs before rebooting Run Keyword And 
Continue On Failure Gather Logs From Test Server -before-reboot-1 Reboot VM %{VCH-NAME} - Wait For VCH Initialization 20x 5 seconds + Wait For VCH Initialization 30x 10 seconds ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} images Should Be Equal As Integers ${rc} 0 @@ -93,7 +93,7 @@ Remove images by short and long ID after VCH restart # Gather logs before rebooting Run Keyword And Continue On Failure Gather Logs From Test Server -before-reboot-2 Reboot VM %{VCH-NAME} - Wait For VCH Initialization 20x 5 seconds + Wait For VCH Initialization 30x 10 seconds # Remove image by short ID ${rc} ${busybox-shortID}= Run And Return Rc And Output docker %{VCH-PARAMS} images -q ${busybox} diff --git a/tests/test-cases/Group1-Docker-Commands/1-19-Docker-Volume-Create.robot b/tests/test-cases/Group1-Docker-Commands/1-19-Docker-Volume-Create.robot index d4d28157d2..f9b34a1223 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-19-Docker-Volume-Create.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-19-Docker-Volume-Create.robot @@ -177,32 +177,34 @@ Docker volume create with possibly invalid name Should Be Equal As Strings ${output} Error response from daemon: volume name "test???" 
includes invalid characters, only "[a-zA-Z0-9][a-zA-Z0-9_.-]" are allowed Docker volume verify anonymous volume contains base image files - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-anon-1 jakedsouza/group-1-19-docker-verify-volume-files:1.0 ls /etc/example - Should Be Equal As Integers ${rc} 0 - Should Contain ${output} thisshouldexist - Should Contain ${output} testfile.txt - - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-anon-2 jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /etc/example/testfile.txt - Should Be Equal As Integers ${rc} 0 - Should Contain ${output} TestFile - -Docker volume verify named volume contains base image files - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-named-1 -v test15:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /etc/example/testfile.txt - Should Be Equal As Integers ${rc} 0 - Should Contain ${output} TestFile + ${status}= Get State Of Github Issue 7365 + Run Keyword If '${status}' == 'closed' Fail Test 1-19-Docker-Volume-Create.robot needs to be updated now that Issue #7365 has been resolved +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-anon-1 jakedsouza/group-1-19-docker-verify-volume-files:1.0 ls /etc/example +# Should Be Equal As Integers ${rc} 0 +# Should Contain ${output} thisshouldexist +# Should Contain ${output} testfile.txt + +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-anon-2 jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /etc/example/testfile.txt +# Should Be Equal As Integers ${rc} 0 +# Should Contain ${output} TestFile + +#Docker volume verify named volume contains base image files +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-named-1 -v test15:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat 
/etc/example/testfile.txt +# Should Be Equal As Integers ${rc} 0 +# Should Contain ${output} TestFile # Verify file is copied to volumeA - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-named-2 -v test15:/mnt/test15 jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /mnt/test15/testfile.txt - Should Be Equal As Integers ${rc} 0 - Should Contain ${output} TestFile +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run --name verify-named-2 -v test15:/mnt/test15 jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /mnt/test15/testfile.txt +# Should Be Equal As Integers ${rc} 0 +# Should Contain ${output} TestFile -Docker volume verify files are not copied again in a non empty volume - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run -v test16:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 sh -c "echo test16modified >> /etc/example/testfile.txt" - Should Be Equal As Integers ${rc} 0 +#Docker volume verify files are not copied again in a non empty volume +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run -v test16:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 sh -c "echo test16modified >> /etc/example/testfile.txt" +# Should Be Equal As Integers ${rc} 0 # Verify modified file remains - ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run -v test16:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /etc/example/testfile.txt - Should Be Equal As Integers ${rc} 0 - Should Contain ${output} test16modified +# ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run -v test16:/etc/example jakedsouza/group-1-19-docker-verify-volume-files:1.0 cat /etc/example/testfile.txt +# Should Be Equal As Integers ${rc} 0 +# Should Contain ${output} test16modified Docker volume conflict in new container ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} volume create diff 
--git a/tests/test-cases/Group1-Docker-Commands/1-23-Docker-Inspect.robot b/tests/test-cases/Group1-Docker-Commands/1-23-Docker-Inspect.robot index ee5184b1ee..19de785c4a 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-23-Docker-Inspect.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-23-Docker-Inspect.robot @@ -45,7 +45,7 @@ Docker inspect image specifying incorrect type ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} inspect --type=container ${busybox} Should Be Equal As Integers ${rc} 1 ${out}= Run Keyword If '${busybox}' == 'busybox' Should Contain ${output} Error: No such container: busybox - ${out}= Run Keyword Unless '${busybox}' == 'busybox' Should Contain ${output} Error: No such container: harbor.ci.drone.local/library/busybox + ${out}= Run Keyword Unless '${busybox}' == 'busybox' Should Contain ${output} Error: No such container: wdc-harbor-ci.eng.vmware.com/default-project/busybox Simple docker inspect of container ${rc} ${container}= Run And Return Rc And Output docker %{VCH-PARAMS} create ${busybox} @@ -154,4 +154,4 @@ Docker inspect container status Should Be Equal As Integers ${rc} 0 # keyword at top of file ${stopped}= Get container inspect status ${container} - Should Contain ${stopped} exited \ No newline at end of file + Should Contain ${stopped} exited diff --git a/tests/test-cases/Group1-Docker-Commands/1-28-Docker-Secret.robot b/tests/test-cases/Group1-Docker-Commands/1-28-Docker-Secret.robot index 130f6fecc2..0060c7ab61 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-28-Docker-Secret.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-28-Docker-Secret.robot @@ -26,21 +26,21 @@ ${fake-secret} test Docker secret ls ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} secret ls Should Be Equal As Integers ${rc} 1 - Should Contain ${output} vSphere Integrated Containers does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker secret create Run 
echo '${fake-secret}' > secret.file ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} secret create mysecret ./secret.file Should Be Equal As Integers ${rc} 1 - Should Contain ${output} vSphere Integrated Containers does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker secret inspect ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} secret inspect my_secret Should Be Equal As Integers ${rc} 1 - Should Contain ${output} vSphere Integrated Containers does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker secret rm ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} secret rm my_secret Should Be Equal As Integers ${rc} 1 - Should Contain ${output} vSphere Integrated Containers does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported diff --git a/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.md b/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.md index f149d5a74f..d8f02e7e8c 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.md +++ b/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.md @@ -21,7 +21,7 @@ This test requires that a vSphere server is running and available 8. 
Issue docker node inspect # Expected Outcome: -* Step 2-6 should result in an error that contains does not yet support Docker Swarm +* Step 2-6 should result in an error that contains Docker Swarm is not yet supported * Step 7-8 should result in an error that contains No such node # Possible Problems: diff --git a/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.robot b/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.robot index b64d1fe59e..2480f42bc1 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-31-Docker-Node.robot @@ -28,7 +28,7 @@ Docker node demote Docker node ls ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} node ls Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker node promote ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} node promote self @@ -38,7 +38,7 @@ Docker node promote Docker node rm ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} node rm self Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker node update ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} node update self diff --git a/tests/test-cases/Group1-Docker-Commands/1-32-Docker-Plugin.robot b/tests/test-cases/Group1-Docker-Commands/1-32-Docker-Plugin.robot index 3f70d9af8d..3367c0fdb9 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-32-Docker-Plugin.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-32-Docker-Plugin.robot @@ -23,23 +23,23 @@ Test Timeout 20 minutes Docker plugin install ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin install vieux/sshfs Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + 
Should Contain ${output} does not yet implement plugins Docker plugin create Run echo '{}' > config.json ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin create test-plugin . Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin enable ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin enable test-plugin Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin disable ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin disable test-plugin Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin inspect ${status}= Get State Of Github Issue 4464 @@ -48,19 +48,19 @@ Docker plugin inspect Docker plugin ls ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin ls Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin push ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin push test-plugin Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin rm ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin rm test-plugin Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should Contain ${output} does not yet implement plugins Docker plugin set ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} plugin set test-plugin test-data Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support plugins + Should 
Contain ${output} does not yet implement plugins diff --git a/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.md b/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.md index a4106dc25c..93fd88f2b7 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.md +++ b/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.md @@ -22,7 +22,7 @@ This test requires that a vSphere server is running and available 9. Issue docker service logs # Expected Outcome: -* Step 2-8 should result in an error that contains does not yet support Docker Swarm +* Step 2-8 should result in an error that contains Docker Swarm is not yet supported * Step 9 should result in an error that contains only supported with experimental daemon # Possible Problems: diff --git a/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.robot b/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.robot index 10a9fc467c..daeac81267 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-33-Docker-Service.robot @@ -23,12 +23,12 @@ Test Timeout 20 minutes Docker service create ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} service create test-service Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker service ls ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} service ls Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker service ps ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} service ps test-service @@ -38,7 +38,7 @@ Docker service ps Docker serivce rm ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} service rm test-service Should Be Equal As Integers ${rc} 1 - Should 
Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker service scale ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} service scale test-service=3 diff --git a/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.md b/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.md index 63d983683f..e8a7d091d2 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.md +++ b/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.md @@ -19,7 +19,7 @@ This test requires that a vSphere server is running and available 6. Issue docker stack services # Expected Outcome: -* Step 2-6 should result in an error that contains does not yet support Docker Swarm +* Step 2-6 should result in an error that contains Docker Swarm is not yet supported # Possible Problems: None \ No newline at end of file diff --git a/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.robot b/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.robot index 72bdf9a698..a09e144257 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-34-Docker-Stack.robot @@ -25,24 +25,24 @@ Test Timeout 20 minutes #Should Be Equal As Integers ${rc} 0 #${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} stack deploy -c #./docker-compose-stack.yml proxy #Should Be Equal As Integers ${rc} 1 - #Should Contain ${output} does not yet support Docker Swarm + #Should Contain ${output} Docker Swarm is not yet supported Docker stack ls ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} stack ls Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker stack ps ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} stack ps test-stack Should Be Equal As Integers ${rc} 1 - Should Contain 
${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker stack rm ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} stack rm test-stack Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker stack services ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} stack services test-stack Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported diff --git a/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.md b/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.md index 85f6ca8bf3..e471307031 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.md +++ b/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.md @@ -21,7 +21,7 @@ This test requires that a vSphere server is running and available 8. 
Issue docker swarm update # Expected Outcome: -* Step 2-8 should result in an error that contains does not yet support Docker Swarm +* Step 2-8 should result in an error that contains Docker Swarm is not yet supported # Possible Problems: None \ No newline at end of file diff --git a/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.robot b/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.robot index aa4f20c8de..3923ef3990 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-35-Docker-Swarm.robot @@ -23,33 +23,33 @@ Test Timeout 20 minutes Docker swarm init ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm init Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker swarm join ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm join 127.0.0.1:2375 Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker swarm join-token ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm join-token worker Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm join-token manager Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker swarm leave ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm leave Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker swarm unlock-key ${rc} ${output}= 
Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm unlock-key Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported Docker swarm update ${rc} ${output}= Run And Return Rc And Output docker1.13 %{VCH-PARAMS} swarm update --autolock Should Be Equal As Integers ${rc} 1 - Should Contain ${output} does not yet support Docker Swarm + Should Contain ${output} Docker Swarm is not yet supported diff --git a/tests/test-cases/Group1-Docker-Commands/1-39-Docker-Stats.robot b/tests/test-cases/Group1-Docker-Commands/1-39-Docker-Stats.robot index a8cc823cbe..90d005488b 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-39-Docker-Stats.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-39-Docker-Stats.robot @@ -88,7 +88,9 @@ Stats No Stream All Containers Should Contain ${output} ${stop} Stats API Memory Validation - ${rc} ${apiMem}= Run And Return Rc And Output curl -sk --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false | jq -r .memory_stats.usage + ${status}= Run Keyword And Return Status Environment Variable Should Be Set DOCKER_CERT_PATH + ${certs}= Set Variable If ${status} --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem ${EMPTY} + ${rc} ${apiMem}= Run And Return Rc And Output curl -sk ${certs} -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false | jq -r .memory_stats.usage Should Be Equal As Integers ${rc} 0 ${stress}= Get Container ShortID %{STRESSED} ${vmomiMemory}= Get Average Active Memory %{VM-PATH} @@ -99,7 +101,9 @@ Stats API Memory Validation Should Be True ${diff} < 1000 Stats API CPU Validation - ${rc} ${apiCPU}= Run And Return Rc And Output curl -sk --cert 
%{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false + ${status}= Run Keyword And Return Status Environment Variable Should Be Set DOCKER_CERT_PATH + ${certs}= Set Variable If ${status} --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem ${EMPTY} + ${rc} ${apiCPU}= Run And Return Rc And Output curl -sk ${certs} -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false Should Be Equal As Integers ${rc} 0 Should Contain ${apiCPU} cpu_stats Should Contain ${apiCPU} cpu_usage @@ -117,7 +121,9 @@ Stats No Stream Specific Stopped Container Should Contain ${output} ${stop} Stats API Disk and Network Validation - ${rc} ${api}= Run And Return Rc And Output curl -sk --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false + ${status}= Run Keyword And Return Status Environment Variable Should Be Set DOCKER_CERT_PATH + ${certs}= Set Variable If ${status} --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem ${EMPTY} + ${rc} ${api}= Run And Return Rc And Output curl -sk ${certs} -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://%{VCH-IP}:%{VCH-PORT}/containers/%{STRESSED}/stats?stream=false Should Be Equal As Integers ${rc} 0 Should Contain ${api} ethernet Should Contain ${api} Read diff --git a/tests/test-cases/Group1-Docker-Commands/1-41-Docker-Commit.robot b/tests/test-cases/Group1-Docker-Commands/1-41-Docker-Commit.robot index 5e1ba40853..b52d105f5b 100644 --- a/tests/test-cases/Group1-Docker-Commands/1-41-Docker-Commit.robot +++ b/tests/test-cases/Group1-Docker-Commands/1-41-Docker-Commit.robot @@ -33,7 +33,7 @@ 
Commit nano to image Should Be Equal As Integers ${rc} 0 ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run debian-nano whereis nano Should Be Equal As Integers ${rc} 0 - Should Contain ${output} /usr/bin/nano + Should Contain ${output} /bin/nano Commit env variable to image ${rc} ${output}= Run And Return Rc And Output docker %{VCH-PARAMS} run -d --name commit2 ${debian} tail -f /dev/null diff --git a/tests/test-cases/Group10-VCH-Restart/10-01-VCH-Restart.robot b/tests/test-cases/Group10-VCH-Restart/10-01-VCH-Restart.robot index 63fd345e2f..ee0e51be16 100644 --- a/tests/test-cases/Group10-VCH-Restart/10-01-VCH-Restart.robot +++ b/tests/test-cases/Group10-VCH-Restart/10-01-VCH-Restart.robot @@ -155,6 +155,8 @@ Container on Open Network And Port Forwarding Persist After Reboot Log To Console Create Port Groups For Container network ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.portgroup.add -vswitch vSwitchLAN open-net + ${out}= Run Keyword If '%{HOST_TYPE}' == 'VC' Add VC Distributed Portgroup test-ds open-net + Log ${out} Install VIC Appliance To Test Server additional-args=--container-network=open-net --container-network-firewall=open-net:open @@ -197,6 +199,10 @@ Container on Open Network And Port Forwarding Persist After Reboot Should Not Be Equal As Integers ${rc1} 0 Should Not Be Equal As Integers ${rc2} 0 + Log To Console Cleanup Port Groups For Container network + ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.portgroup.remove open-net + ${out}= Run Keyword If '%{HOST_TYPE}' == 'VC' Remove VC Distributed Portgroup open-net + Log ${out} Create VCH attach disk and reboot ${rc}= Run And Return Rc govc vm.disk.create -vm=%{VCH-NAME} -name=%{VCH-NAME}/deleteme -size "16M" diff --git a/tests/test-cases/Group12-VCH-BC/12-01-Delete.robot b/tests/test-cases/Group12-VCH-BC/12-01-Delete.robot index 3a58fda11d..46a5dfb02c 100644 --- a/tests/test-cases/Group12-VCH-BC/12-01-Delete.robot +++ 
b/tests/test-cases/Group12-VCH-BC/12-01-Delete.robot @@ -55,9 +55,7 @@ Delete VCH with new vic-machine Should Contain ${ret} is different than installer version # Delete with force - ${ret}= Run bin/vic-machine-linux delete --target %{TEST_URL} --user %{TEST_USERNAME} --password=%{TEST_PASSWORD} --compute-resource=%{TEST_RESOURCE} --name %{VCH-NAME} --force - Should Contain ${ret} Completed successfully - Should Not Contain ${ret} delete failed + Run VIC Machine Delete Command # Check VM is removed ${ret}= Run govc vm.info -json=true ${containerName}-* diff --git a/tests/test-cases/Group23-VIC-Machine-Service/23-08-VCH-Delete.robot b/tests/test-cases/Group23-VIC-Machine-Service/23-08-VCH-Delete.robot index 16cd024850..80c9f55e9d 100644 --- a/tests/test-cases/Group23-VIC-Machine-Service/23-08-VCH-Delete.robot +++ b/tests/test-cases/Group23-VIC-Machine-Service/23-08-VCH-Delete.robot @@ -13,7 +13,7 @@ # limitations under the License *** Settings *** -Documentation Test 23-03 - VCH Create +Documentation Test 23-08 - VCH Delete Resource ../../resources/Util.robot Resource ../../resources/Group23-VIC-Machine-Service-Util.robot Suite Setup Start VIC Machine Server @@ -35,7 +35,7 @@ Install And Prepare VIC Appliance Pull Busybox Re-Install And Prepare VIC Appliance - Install VIC Appliance To Test Server With Current Environment Variables + Install VIC Appliance To Test Server With Current Environment Variables cleanup=${false} Pull Busybox Install And Prepare VIC Appliance With Volume Stores @@ -46,7 +46,7 @@ Install And Prepare VIC Appliance With Volume Stores Re-Install And Prepare VIC Appliance With Volume Stores Re-Install And Prepare VIC Appliance With Volume Stores - Install VIC Appliance To Test Server With Current Environment Variables additional-args=--volume-store=%{TEST_DATASTORE}/${VOLUME_STORE_PATH}:${VOLUME_STORE_NAME} + Install VIC Appliance To Test Server With Current Environment Variables 
additional-args=--volume-store=%{TEST_DATASTORE}/${VOLUME_STORE_PATH}:${VOLUME_STORE_NAME} cleanup=${false} Pull Busybox @@ -293,7 +293,7 @@ Delete the correct VCH ${two}= Get VCH ID %{VCH-NAME} - Should Not Be Equal As Integers ${one} ${two} + Should Not Be Equal ${one} ${two} # This will fail when run outside of drone because "Install VIC Appliance To Test Server" # will delete "dangling" VCHs - which means any associated with a drone job id that isn't running @@ -468,13 +468,6 @@ Delete VCH and delete powered on container Verify Container Not Exists ${POWERED_ON_CONTAINER_NAME} Verify Container Not Exists ${POWERED_OFF_CONTAINER_NAME} - # should this delete volume stores? - # if it should then we should check they're gone, if it shouldn't we should check they're not - # if not then we should clean up volume stores in teardown - - # No VCH to delete - [Teardown] NONE - Delete VCH and powered off containers and volumes [Setup] Install And Prepare VIC Appliance With Volume Stores ${id}= Get VCH ID %{VCH-NAME} @@ -603,7 +596,7 @@ Delete VCH and powered off container and preserve volumes Output Should Not Contain Error Verify Container Exists ${OFF_NV_NVS_CONTAINER_NAME} - [Teardown] Cleanup VIC Appliance On Test Server + [Teardown] Run Keywords Cleanup VIC Appliance On Test Server Cleanup Datastore On Test Server Delete VCH and powered on container but preserve volume @@ -644,7 +637,7 @@ Delete VCH and powered on container but preserve volume Verify Container Exists ${ON_NV_DVS_CONTAINER_NAME} - [Teardown] Cleanup VIC Appliance On Test Server + [Teardown] Run Keywords Cleanup VIC Appliance On Test Server Cleanup Datastore On Test Server Delete VCH and preserve powered on container and volumes @@ -669,7 +662,7 @@ Delete VCH and preserve powered on container and volumes Verify Volume Store Exists %{VCH-NAME}-VOL Verify Volume Exists %{VCH-NAME}-VOL ${ON_NV_DVS_VOLUME_NAME} - [Teardown] Cleanup VIC Appliance On Test Server + [Teardown] Run Keywords Cleanup VIC 
Appliance On Test Server Cleanup Datastore On Test Server Delete VCH and preserve powered on container and fail to delete volumes @@ -694,4 +687,4 @@ Delete VCH and preserve powered on container and fail to delete volumes Verify Volume Store Exists %{VCH-NAME}-VOL Verify Volume Exists %{VCH-NAME}-VOL ${ON_NV_DVS_VOLUME_NAME} - [Teardown] Cleanup VIC Appliance On Test Server + [Teardown] Run Keywords Cleanup VIC Appliance On Test Server Cleanup Datastore On Test Server diff --git a/tests/test-cases/Group6-VIC-Machine/6-04-Create-Basic.robot b/tests/test-cases/Group6-VIC-Machine/6-04-Create-Basic.robot index 22231807e9..05190c737c 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-04-Create-Basic.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-04-Create-Basic.robot @@ -49,7 +49,7 @@ Create VCH - custom base disk Run Keyword And Ignore Error Cleanup Datastore On Test Server # Deploy vic-machine with debug enabled to attempt to cache #7047 - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --debug=1 --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --base-image-size=6GB ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --debug=1 --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --base-image-size=6GB ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -79,7 +79,7 @@ Create VCH - target URL Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On 
Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -92,7 +92,7 @@ Create VCH - operations user Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --ops-user=%{TEST_USERNAME} --ops-password=%{TEST_PASSWORD} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --ops-user=%{TEST_USERNAME} --ops-password=%{TEST_PASSWORD} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -107,7 +107,7 @@ Create VCH - specified datacenter Run Keyword And Ignore Error Cleanup Dangling VMs On 
Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --compute-resource=%{TEST_DATACENTER} + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --compute-resource=%{TEST_DATACENTER} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -121,10 +121,10 @@ Create VCH - defaults Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Run Keyword If '%{HOST_TYPE}' == 'VC' Should Contain ${output} Installer completed successfully Run Keyword If '%{HOST_TYPE}' == 'VC' Get Docker Params ${output} ${true} - ${output}= 
Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Run Keyword If '%{HOST_TYPE}' == 'ESXi' Should Contain ${output} Installer completed successfully Run Keyword If '%{HOST_TYPE}' == 'ESXi' Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -137,7 +137,7 @@ Create VCH - full params Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:default ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} 
--volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:default ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -154,7 +154,7 @@ Create VCH - using environment variables Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:default ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} --volume-store=%{TEST_DATASTORE}/%{VCH-NAME}-VOL:default ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -171,7 +171,7 @@ Create VCH - custom image store directory Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store %{TEST_DATASTORE}/vic-machine-test-images --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true 
--bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store %{TEST_DATASTORE}/vic-machine-test-images --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} @@ -261,7 +261,7 @@ Creation log file uploaded to datastore Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --appliance-iso=bin/appliance.iso --bootstrap-iso=bin/bootstrap.iso --password=%{TEST_PASSWORD} --force=true --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --compute-resource=%{TEST_RESOURCE} --timeout %{TEST_TIMEOUT} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get 
Docker Params ${output} ${true} diff --git a/tests/test-cases/Group6-VIC-Machine/6-05-Create-Validation.robot b/tests/test-cases/Group6-VIC-Machine/6-05-Create-Validation.robot index 0028da3019..080f3ad2f1 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-05-Create-Validation.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-05-Create-Validation.robot @@ -25,7 +25,6 @@ Suggest resources - Invalid datacenter # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - Set Test VCH Name Log To Console \nInstalling VCH to test server... ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/WOW --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} ${vicmachinetls} @@ -38,7 +37,6 @@ Suggest resources - Invalid target path # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - Set Test VCH Name Log To Console \nInstalling VCH to test server... 
${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/MUCH/DATACENTER --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} ${vicmachinetls} @@ -51,7 +49,6 @@ Create VCH - target thumbprint verification # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - Set Test VCH Name ${output}= Run bin/vic-machine-linux create --thumbprint=NOPE --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --image-store=ENOENT ${vicmachinetls} Should Contain ${output} thumbprint does not match @@ -63,10 +60,9 @@ Default image datastore # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - Set Test VCH Name Log To Console \nInstalling VCH to test server... - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Log ${output} # VCH creation should succeed on ESXi with one datastore @@ -88,11 +84,10 @@ Custom image datastore # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - Set Test VCH Name Log To Console \nInstalling VCH to test server... 
- ${output-esx}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --image-store=%{TEST_DATASTORE}/long/weird/path ${vicmachinetls} --insecure-registry harbor.ci.drone.local - ${output-vc}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --image-store=%{TEST_DATASTORE}/long/weird/path ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output-esx}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --image-store=%{TEST_DATASTORE}/long/weird/path ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com + ${output-vc}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --image-store=%{TEST_DATASTORE}/long/weird/path ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com ${output}= Set Variable If '%{HOST_TYPE}' == 'ESXi' ${output-esx} ${output-vc} Log ${output} Should Contain ${output} Installer completed successfully @@ -104,8 +99,8 @@ Custom image datastore Trailing slash works as expected Set Test Environment Variables Log To Console \nInstalling VCH to test server... 
- ${output-esx}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/ --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry harbor.ci.drone.local - ${output-vc}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/ --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output-esx}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/ --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com + ${output-vc}= Run Keyword If '%{HOST_TYPE}' == 'VC' Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL}/ --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --password=%{TEST_PASSWORD} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com ${output}= Set Variable If '%{HOST_TYPE}' == 'ESXi' ${output-esx} ${output-vc} Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} diff --git a/tests/test-cases/Group6-VIC-Machine/6-06-Create-Datastore.robot b/tests/test-cases/Group6-VIC-Machine/6-06-Create-Datastore.robot index ddb58e680e..6571d8341f 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-06-Create-Datastore.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-06-Create-Datastore.robot @@ -25,8 +25,6 @@ Image Store Delete - Image store not found # Attempt to cleanup old/canceled tests Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error 
Cleanup Datastore On Test Server - Set Test VCH Name - ${output}= Run rm -rf %{VCH-NAME} Log To Console \nInstalling VCH to test server... ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target=%{TEST_URL} --user=%{TEST_USERNAME} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --image-store=%{TEST_DATASTORE}/images --password=%{TEST_PASSWORD} --force --kv diff --git a/tests/test-cases/Group6-VIC-Machine/6-07-Create-Network.robot b/tests/test-cases/Group6-VIC-Machine/6-07-Create-Network.robot index 962a3b290d..992e204dbe 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-07-Create-Network.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-07-Create-Network.robot @@ -47,7 +47,7 @@ Public network - default Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -90,7 +90,7 @@ Management network - none Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} 
--public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully ${status}= Run Keyword And Return Status Should Contain ${output} Network role \\"management\\" is sharing NIC with \\"public\\" ${status2}= Run Keyword And Return Status Should Contain ${output} Network role \\"public\\" is sharing NIC with \\"management\\" @@ -131,7 +131,7 @@ Management network - valid Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --management-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --management-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -158,7 +158,7 @@ Connectivity Bridge to Public ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.portgroup.add -vswitch vSwitchLAN bridge ${out}= Run Keyword If '%{HOST_TYPE}' == 'VC' Add VC Distributed Portgroup test-ds bridge - ${output}= Run bin/vic-machine-linux create 
--debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --force=true --bridge-network=bridge --public-network=vm-network --compute-resource=%{TEST_RESOURCE} --container-network vm-network --container-network-firewall vm-network:published --no-tlsverify --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --force=true --bridge-network=bridge --public-network=vm-network --compute-resource=%{TEST_RESOURCE} --container-network vm-network --container-network-firewall vm-network:published --no-tlsverify --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} @@ -213,7 +213,7 @@ Connectivity Bridge to Management ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.portgroup.add -vswitch vSwitchLAN management ${out}= Run Keyword If '%{HOST_TYPE}' == 'VC' Add VC Distributed Portgroup test-ds management - ${output}= Run bin/vic-machine-linux create --debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} --force=true --bridge-network=bridge --compute-resource=%{TEST_RESOURCE} --container-network management --container-network vm-network --container-network-ip-range=management:10.10.10.0/24 --container-network-gateway=management:10.10.10.1/24 --no-tlsverify --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --debug 1 --name=%{VCH-NAME} --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} 
--password=%{TEST_PASSWORD} --force=true --bridge-network=bridge --compute-resource=%{TEST_RESOURCE} --container-network management --container-network vm-network --container-network-ip-range=management:10.10.10.0/24 --container-network-gateway=management:10.10.10.1/24 --no-tlsverify --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} @@ -260,7 +260,7 @@ Bridge network - vCenter none Run Keyword And Ignore Error Cleanup Datastore On Test Server ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} ${vicmachinetls} - Should Contain ${output} ERROR + Should Contain ${output} error Should Contain ${output} An existing distributed port group must be specified for bridge network on vCenter # Delete the portgroup added by env vars keyword @@ -274,7 +274,7 @@ Bridge network - ESX none Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -295,7 +295,7 @@ Bridge network - create bridge network if it doesn't exist Run govc host.portgroup.remove 'AAAAAAAAAA' Run govc host.vswitch.remove 'AAAAAAAAAA' - ${output}= Run bin/vic-machine-linux create 
--name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=AAAAAAAAAA ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=AAAAAAAAAA ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -322,7 +322,7 @@ Bridge network - valid Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -410,7 +410,7 @@ Container network - space in network name valid ${out}= Run Keyword If '%{HOST_TYPE}' == 'ESXi' Run govc host.portgroup.add -vswitch vSwitchLAN bridge ${out}= Run Keyword If '%{HOST_TYPE}' == 'VC' Add VC Distributed Portgroup test-ds bridge - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} 
--image-store=%{TEST_DATASTORE} --bridge-network=bridge --container-network 'VM Network With Spaces':vmnet --insecure-registry harbor.ci.drone.local ${vicmachinetls} + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=bridge --container-network 'VM Network With Spaces':vmnet --insecure-registry wdc-harbor-ci.eng.vmware.com ${vicmachinetls} Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -462,7 +462,7 @@ Container Firewalls ... --target=%{TEST_URL}%{TEST_DATACENTER} --thumbprint=%{TEST_THUMBPRINT} ... --user=%{TEST_USERNAME} --image-store=%{TEST_DATASTORE} --password=%{TEST_PASSWORD} ... --force=true --bridge-network=bridge --compute-resource=%{TEST_RESOURCE} --no-tlsverify - ... --insecure-registry harbor.ci.drone.local + ... --insecure-registry wdc-harbor-ci.eng.vmware.com ... --container-network open-net --container-network-firewall open-net:open ... --container-network closed-net --container-network-firewall closed-net:closed ... 
--container-network outbound-net --container-network-firewall outbound-net:outbound diff --git a/tests/test-cases/Group6-VIC-Machine/6-09-Inspect.robot b/tests/test-cases/Group6-VIC-Machine/6-09-Inspect.robot index a1d857447b..5fccbabde2 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-09-Inspect.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-09-Inspect.robot @@ -154,6 +154,9 @@ Inspect VCH Configuration with Container Networks Should Be Equal As Integers 0 ${rc} Verify inspect output for a full tls VCH + ${domain}= Get Environment Variable DOMAIN '' + Run Keyword If '${domain}' == '' Pass Execution Skipping test - domain not set, won't generate keys + Install VIC Appliance To Test Server ${output}= Run bin/vic-machine-linux inspect --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} diff --git a/tests/test-cases/Group6-VIC-Machine/6-13-TLS.robot b/tests/test-cases/Group6-VIC-Machine/6-13-TLS.robot index 2d654f7c73..f2645edfdc 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-13-TLS.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-13-TLS.robot @@ -24,7 +24,7 @@ Create VCH - defaults with --no-tls Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --no-tls --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --no-tls --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker 
Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} @@ -34,11 +34,14 @@ Create VCH - defaults with --no-tls Cleanup VIC Appliance On Test Server Create VCH - defaults custom cert path + ${domain}= Get Environment Variable DOMAIN '' + Run Keyword If '${domain}' == '' Pass Execution Skipping test - domain not set, won't generate keys + Set Test Environment Variables Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server - ${output}= Run bin/vic-machine-linux create ${vicmachinetls} --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --tls-cert-path=${EXECDIR}/foo-bar-certs/ --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create ${vicmachinetls} --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --tls-cert-path=${EXECDIR}/foo-bar-certs/ --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} --tlscacert=\\"${EXECDIR}/foo-bar-certs/ca.pem\\" --tlscert=\\"${EXECDIR}/foo-bar-certs/cert.pem\\" --tlskey=\\"${EXECDIR}/foo-bar-certs/key.pem\\" Should Contain ${output} Generating CA certificate/key pair - private key in ${EXECDIR}/foo-bar-certs/ca-key.pem Should Contain ${output} Generating server certificate/key pair - private key in ${EXECDIR}/foo-bar-certs/server-key.pem @@ -61,7 +64,7 @@ Create VCH - force accept target thumbprint Run Keyword And Ignore Error Cleanup Datastore On Test Server # Test that --force without --thumbprint accepts the --target thumbprint - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --force 
--image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --force --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} ${vicmachinetls} --insecure-registry wdc-harbor-ci.eng.vmware.com Log ${output} Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} diff --git a/tests/test-cases/Group6-VIC-Machine/6-14-Update-Firewall.robot b/tests/test-cases/Group6-VIC-Machine/6-14-Update-Firewall.robot index 415e8bcdd5..5ea682e400 100644 --- a/tests/test-cases/Group6-VIC-Machine/6-14-Update-Firewall.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-14-Update-Firewall.robot @@ -41,7 +41,7 @@ Enable and disable VIC firewall rule ${output}= Run govc host.esxcli network firewall ruleset list --ruleset-id=vSPC Should Contain ${output} true - ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --no-tls --insecure-registry harbor.ci.drone.local + ${output}= Run bin/vic-machine-linux create --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --no-tls --insecure-registry wdc-harbor-ci.eng.vmware.com Should Contain ${output} Installer completed successfully Get Docker Params ${output} ${true} Log To Console Installer completed successfully: %{VCH-NAME} diff --git a/tests/test-cases/Group6-VIC-Machine/6-17-Configure-TLS.robot b/tests/test-cases/Group6-VIC-Machine/6-17-Configure-TLS.robot index 8f19e7e363..2d4168a9e6 100644 --- 
a/tests/test-cases/Group6-VIC-Machine/6-17-Configure-TLS.robot +++ b/tests/test-cases/Group6-VIC-Machine/6-17-Configure-TLS.robot @@ -27,6 +27,9 @@ Setup Test Environment Run Keyword And Ignore Error Cleanup Dangling VMs On Test Server Run Keyword And Ignore Error Cleanup Datastore On Test Server + ${domain}= Get Environment Variable DOMAIN '' + Run Keyword If '${domain}' == '' Pass Execution Skipping test - domain not set, won't generate keys + ${output}= Run bin/vic-machine-linux create ${vicmachinetls} --name=%{VCH-NAME} --target="%{TEST_USERNAME}:%{TEST_PASSWORD}@%{TEST_URL}" --thumbprint=%{TEST_THUMBPRINT} --image-store=%{TEST_DATASTORE} --bridge-network=%{BRIDGE_NETWORK} --public-network=%{PUBLIC_NETWORK} --tls-cert-path=${EXECDIR}/foo-bar-certs/ Should Contain ${output} --tlscacert=\\"${EXECDIR}/foo-bar-certs/ca.pem\\" --tlscert=\\"${EXECDIR}/foo-bar-certs/cert.pem\\" --tlskey=\\"${EXECDIR}/foo-bar-certs/key.pem\\" Should Contain ${output} Generating CA certificate/key pair - private key in ${EXECDIR}/foo-bar-certs/ca-key.pem diff --git a/tests/test-cases/Group9-VIC-Admin/9-02-VICAdmin-CertAuth.robot b/tests/test-cases/Group9-VIC-Admin/9-02-VICAdmin-CertAuth.robot index bbdbf95683..cec65afc23 100644 --- a/tests/test-cases/Group9-VIC-Admin/9-02-VICAdmin-CertAuth.robot +++ b/tests/test-cases/Group9-VIC-Admin/9-02-VICAdmin-CertAuth.robot @@ -20,6 +20,10 @@ Suite Teardown Cleanup VIC Appliance On Test Server Default Tags *** Keywords *** +Skip Execution If Certs Not Available + ${status}= Run Keyword And Return Status Environment Variable Should Not Be Set DOCKER_CERT_PATH + Pass Execution If ${status} This test is only applicable if using TLS with certs + Curl [Arguments] ${path} ${output}= Run curl -sk --cert %{DOCKER_CERT_PATH}/cert.pem --key %{DOCKER_CERT_PATH}/key.pem %{VIC-ADMIN}${path} @@ -28,47 +32,57 @@ Curl *** Test Cases *** Display HTML - ${output}= Wait Until Keyword Succeeds 10x 10s Curl ${EMPTY} - Should contain ${output} VIC: %{VCH-NAME} + Skip 
Execution If Certs Not Available + ${output}= Wait Until Keyword Succeeds 10x 10s Curl ${EMPTY} + Should contain ${output} VIC: %{VCH-NAME} Get Portlayer Log + Skip Execution If Certs Not Available ${output}= Wait Until Keyword Succeeds 10x 10s Curl /logs/port-layer.log Should contain ${output} Launching portlayer server Get VCH-Init Log + Skip Execution If Certs Not Available ${output}= Wait Until Keyword Succeeds 10x 10s Curl /logs/init.log Should contain ${output} reaping child processes Get Docker Personality Log + Skip Execution If Certs Not Available ${output}= Wait Until Keyword Succeeds 10x 10s Curl /logs/docker-personality.log Should contain ${output} docker personality Get VICAdmin Log + Skip Execution If Certs Not Available ${output}= Wait Until Keyword Succeeds 10x 10s Curl /logs/vicadmin.log Log ${output} Should contain ${output} Launching vicadmin pprof server Fail to Get VICAdmin Log without cert + Skip Execution If Certs Not Available ${output}= Run curl -sk %{VIC-ADMIN}/logs/vicadmin.log Log ${output} Should Not contain ${output} Launching vicadmin pprof server Fail to Display HTML without cert + Skip Execution If Certs Not Available ${output}= Run curl -sk %{VIC-ADMIN} Log ${output} Should Not contain ${output} VCH %{VCH-NAME} Fail to get Portlayer Log without cert + Skip Execution If Certs Not Available ${output}= Run curl -sk %{VIC-ADMIN}/logs/port-layer.log Log ${output} Should Not contain ${output} Launching portlayer server Fail to get Docker Personality Log without cert + Skip Execution If Certs Not Available ${output}= Run curl -sk %{VIC-ADMIN}/logs/docker-personality.log Log ${output} Should Not contain ${output} docker personality Fail to get VCH init logs without cert + Skip Execution If Certs Not Available ${output}= Run curl -sk %{VIC-ADMIN}/logs/init.log Log ${output} Should Not contain ${output} reaping child processes diff --git a/tests/test-cases/Group9-VIC-Admin/9-03-VICAdmin-Log-Failed-Attempts.robot 
b/tests/test-cases/Group9-VIC-Admin/9-03-VICAdmin-Log-Failed-Attempts.robot index fe00420073..3bd4e79762 100644 --- a/tests/test-cases/Group9-VIC-Admin/9-03-VICAdmin-Log-Failed-Attempts.robot +++ b/tests/test-cases/Group9-VIC-Admin/9-03-VICAdmin-Log-Failed-Attempts.robot @@ -30,6 +30,9 @@ Verify Temporary Redirect Should Contain ${out} HTTP request sent, awaiting response... 303 See Other Verify Failed Log Attempts + ${status}= Run Keyword And Return Status Environment Variable Should Not Be Set DOCKER_CERT_PATH + Pass Execution If ${status} This test is only applicable if using TLS with certs + #Save the first appliance certs and cleanup the first appliance #${old-certs}= Set Variable %{DOCKER_CERT_PATH} Run cp -r %{DOCKER_CERT_PATH} old-certs