From 7a7d081cb10d042bd222572768953636595220bf Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Mon, 27 Aug 2018 18:00:55 -0700 Subject: [PATCH 01/27] hack/make.ps1: know where we failed In case of an exception, it makes great sense to print out some information telling where exactly it happened. _.InvocationInfo.PositionMessage gives script name, line number, character position and (depending on the PS version) highlights the part where error has happened. Signed-off-by: Kir Kolyshkin (cherry picked from commit d2788cb2f01f9f2baeef3d3ac705133282dcd27c) Signed-off-by: Sebastiaan van Stijn --- hack/make.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/make.ps1 b/hack/make.ps1 index 6ff7a7fbe67cf..03425dc185530 100644 --- a/hack/make.ps1 +++ b/hack/make.ps1 @@ -476,6 +476,7 @@ Try { } Catch [Exception] { Write-Host -ForegroundColor Red ("`nERROR: make.ps1 failed:`n$_") + Write-Host -ForegroundColor Red ($_.InvocationInfo.PositionMessage) # More gratuitous ASCII art. Write-Host From 89765908ba314c379895297ce77c31f0ba692088 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 24 Dec 2018 17:42:24 +0100 Subject: [PATCH 02/27] Fix double slash after $PREFIX I noticed this in the build output; ``` 16:05:07 [100%] Built target tini-static 16:05:07 + mkdir -p /build/ 16:05:07 + cp tini-static /build//docker-init ``` Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 5d5adcd898f0b29a3ffe2820001288dd3a324374) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4acc6495c2a20..3fe27761d16c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -105,32 +105,32 @@ FROM base AS tomlv ENV INSTALL_BINARY_NAME=tomlv COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS 
vndr ENV INSTALL_BINARY_NAME=vndr COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS containerd RUN apt-get update && apt-get install -y btrfs-tools ENV INSTALL_BINARY_NAME=containerd COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS proxy ENV INSTALL_BINARY_NAME=proxy COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS gometalinter ENV INSTALL_BINARY_NAME=gometalinter COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS gotestsum ENV INSTALL_BINARY_NAME=gotestsum @@ -142,20 +142,20 @@ FROM base AS dockercli ENV INSTALL_BINARY_NAME=dockercli COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM runtime-dev AS runc ENV INSTALL_BINARY_NAME=runc COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM base AS tini RUN apt-get update && apt-get install -y cmake vim-common COPY hack/dockerfile/install/install.sh ./install.sh ENV INSTALL_BINARY_NAME=tini COPY 
hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ -RUN PREFIX=/build/ ./install.sh $INSTALL_BINARY_NAME +RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME From b881d8f6a956c8f45e732abd5538017740899fa7 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 13 Jan 2019 21:38:00 +0100 Subject: [PATCH 03/27] Use 17.06 stable channel for CLI used in CI Update to the latest patch release of 17.06.2. This keeps the same API requirements. This also enables pre-built binaries for armhf instead of compiling from source. Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 468eb93e5acc809248405102db32460fe7efed08) Signed-off-by: Sebastiaan van Stijn --- hack/dockerfile/install/dockercli.installer | 10 +++++----- hack/make.ps1 | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hack/dockerfile/install/dockercli.installer b/hack/dockerfile/install/dockercli.installer index 03435fe54bfab..08c893eba6ed1 100755 --- a/hack/dockerfile/install/dockercli.installer +++ b/hack/dockerfile/install/dockercli.installer @@ -1,22 +1,22 @@ #!/bin/sh -DOCKERCLI_CHANNEL=${DOCKERCLI_CHANNEL:-edge} -DOCKERCLI_VERSION=${DOCKERCLI_VERSION:-17.06.0-ce} +DOCKERCLI_CHANNEL=${DOCKERCLI_CHANNEL:-stable} +DOCKERCLI_VERSION=${DOCKERCLI_VERSION:-17.06.2-ce} install_dockercli() { echo "Install docker/cli version $DOCKERCLI_VERSION from $DOCKERCLI_CHANNEL" arch=$(uname -m) # No official release of these platforms - if [ "$arch" != "x86_64" ] && [ "$arch" != "s390x" ]; then + if [ "$arch" != "x86_64" ] && [ "$arch" != "s390x" ] && [ "$arch" != "armhf" ]; then build_dockercli return fi url=https://download.docker.com/linux/static curl -Ls "${url}/${DOCKERCLI_CHANNEL}/${arch}/docker-${DOCKERCLI_VERSION}.tgz" | tar -xz docker/docker - mkdir -p ${PREFIX} - mv docker/docker ${PREFIX}/ + mkdir -p "${PREFIX}" + mv docker/docker "${PREFIX}/" rmdir docker } diff --git a/hack/make.ps1 b/hack/make.ps1 index 03425dc185530..0385d4c2a8c2d 100644 --- a/hack/make.ps1 +++ 
b/hack/make.ps1 @@ -430,8 +430,8 @@ Try { if ($Daemon) { Execute-Build "daemon" "daemon" "dockerd" } if ($Client) { # Get the Docker channel and version from the environment, or use the defaults. - if (-not ($channel = $env:DOCKERCLI_CHANNEL)) { $channel = "edge" } - if (-not ($version = $env:DOCKERCLI_VERSION)) { $version = "17.06.0-ce" } + if (-not ($channel = $env:DOCKERCLI_CHANNEL)) { $channel = "stable" } + if (-not ($version = $env:DOCKERCLI_VERSION)) { $version = "17.06.2-ce" } # Download the zip file and extract the client executable. Write-Host "INFO: Downloading docker/cli version $version from $channel..." From 2f1c29f608fe5253c1b799b1c0d0c4200fc406dc Mon Sep 17 00:00:00 2001 From: Rong Gao Date: Tue, 26 Feb 2019 16:00:54 +0800 Subject: [PATCH 04/27] fix hack/dockerfile/install/containerd.installer test statement Signed-off-by: Rong Gao (cherry picked from commit 5e77399b92a5f03ea45ec1326e5c5b8d771a3d49) Signed-off-by: Sebastiaan van Stijn --- hack/dockerfile/install/containerd.installer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/dockerfile/install/containerd.installer b/hack/dockerfile/install/containerd.installer index e9f9e4a494ac1..c989b7922928a 100755 --- a/hack/dockerfile/install/containerd.installer +++ b/hack/dockerfile/install/containerd.installer @@ -19,7 +19,7 @@ install_containerd() { export EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' # Reset build flags to nothing if we want a dynbinary - if [ "$1" == "dynamic" ]; then + if [ "$1" = "dynamic" ]; then export BUILDTAGS='' export EXTRA_FLAGS='' export EXTRA_LDFLAGS='' From 8bb6de641b99c28429b0c75b77087c37c7599d2e Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 5 Apr 2019 17:20:06 -0700 Subject: [PATCH 05/27] Support cross-compile for arm Pretty much cross-compile doesn't work because of this: > profiles/seccomp/seccomp.go:13:2: build constraints exclude all Go files in /go/src/github.com/docker/docker/vendor/github.com/seccomp/libseccomp-golang This changes 
adds a new Dockerfile target for cross compilation with the neccesary arch specific libseccomp packages and CC toolchains. Signed-off-by: Brian Goff Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 61a3285864d3f1b489f48f765b61b2c7bd300372) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 23 ++++++++++++++++++++++- Makefile | 21 +++++++++++++++------ hack/make/.binary | 20 ++++++++++++++++++++ 3 files changed, 57 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3fe27761d16c8..7139dd167b40f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,6 +24,7 @@ # the case. Therefore, you don't have to disable it anymore. # +ARG CROSS="false" ARG GO_VERSION=1.11.13 FROM golang:${GO_VERSION}-stretch AS base @@ -95,11 +96,31 @@ RUN /download-frozen-image-v2.sh /build \ # See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) # Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd -FROM base AS runtime-dev +FROM base AS runtime-dev-cross-false RUN apt-get update && apt-get install -y \ libapparmor-dev \ libseccomp-dev +FROM runtime-dev-cross-false AS runtime-dev-cross-true +RUN dpkg --add-architecture armhf +RUN dpkg --add-architecture arm64 +RUN dpkg --add-architecture armel +# These crossbuild packages rely on gcc-, but this doesn't want to install +# on non-amd64 systems. +# Additionally, the crossbuild-amd64 is currently only on debian:buster, so +# other architectures cannnot crossbuild amd64. 
+RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ + apt-get update \ + && apt-get install -y \ + crossbuild-essential-armhf \ + crossbuild-essential-arm64 \ + crossbuild-essential-armel \ + libseccomp-dev:armhf \ + libseccomp-dev:arm64 \ + libseccomp-dev:armel; \ + fi + +FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ENV INSTALL_BINARY_NAME=tomlv diff --git a/Makefile b/Makefile index a8f5ad9b0629f..b8859e216d8a5 100644 --- a/Makefile +++ b/Makefile @@ -121,9 +121,6 @@ INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) ifeq ($(INTERACTIVE), 1) DOCKER_FLAGS += -t endif -ifeq ($(BIND_DIR), .) - DOCKER_BUILD_OPTS += --target=dev -endif DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" @@ -138,6 +135,21 @@ binary: build ## build the linux binaries dynbinary: build ## build the linux dynbinaries $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary + + +cross: DOCKER_CROSS := true +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +ifdef DOCKER_CROSSPLATFORMS +build: DOCKER_CROSS := true +else +build: DOCKER_CROSS ?= false +endif +ifeq ($(BIND_DIR), .) +build: DOCKER_BUILD_OPTS += --target=dev +endif +build: DOCKER_BUILD_ARGS += --build-arg=CROSS=$(DOCKER_CROSS) build: DOCKER_BUILDKIT ?= 1 build: bundles $(warning The docker client CLI has moved to github.com/docker/cli. 
For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n}) @@ -153,9 +165,6 @@ clean: clean-cache clean-cache: docker volume rm -f docker-dev-cache -cross: build ## cross build the binaries for darwin, freebsd and\nwindows - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross - help: ## this help @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) diff --git a/hack/make/.binary b/hack/make/.binary index ff33e18cab8b8..357b456e09109 100644 --- a/hack/make/.binary +++ b/hack/make/.binary @@ -47,6 +47,26 @@ if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARC export CC=x86_64-w64-mingw32-gcc export CGO_ENABLED=1 ;; + linux/arm) + case "${GOARM}" in + 5|"") + export CC=arm-linux-gnueabi-gcc + export CGO_ENABLED=1 + ;; + 7) + export CC=arm-linux-gnueabihf-gcc + export CGO_ENABLED=1 + ;; + esac + ;; + linux/arm64) + export CC=aarch64-linux-gnu-gcc + export CGO_ENABLED=1 + ;; + linux/amd64) + export CC=x86_64-linux-gnu-gcc + export CGO_ENABLED=1 + ;; esac fi From 929337ff0ef409dd0fdbca9739daed0b26d56403 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 13 Apr 2019 20:51:34 +0200 Subject: [PATCH 06/27] Fix DOCKER_CROSS being overwritten Not exactly sure why, but this line; build: DOCKER_CROSS ?= false Always overwrote `DOCKER_CROSS` when running `make cross`. Perhaps because it is set in `cross: DOCKER_CROSS := true`, and in a different scope? May also be dependent on the version of `make` in use. 
Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 934b3a3841fb47649ccf93257ed909cbd0f1caed) Signed-off-by: Sebastiaan van Stijn --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index b8859e216d8a5..be6421210cedf 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,9 @@ DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) export DOCKER_GRAPHDRIVER +# enable/disable cross-compile +DOCKER_CROSS ?= false + # get OS/Arch of docker engine DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}') DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') @@ -143,8 +146,6 @@ cross: build ## cross build the binaries for darwin, freebsd and\nwindows ifdef DOCKER_CROSSPLATFORMS build: DOCKER_CROSS := true -else -build: DOCKER_CROSS ?= false endif ifeq ($(BIND_DIR), .) build: DOCKER_BUILD_OPTS += --target=dev From bd11bc441e9a36535a1abd40aaf8ea2433fb0fa0 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 16 Apr 2019 15:40:53 -0700 Subject: [PATCH 07/27] Add support for setting GOARM in cross target. 
This adds to the existing format of `/` to allow for `/arm/v` Signed-off-by: Brian Goff (cherry picked from commit fbb001d1f98fef6cc1b38b58350c7cd0c2828461) Signed-off-by: Sebastiaan van Stijn --- hack/make/.binary | 1 + hack/make/cross | 10 ++++++++-- project/PACKAGERS.md | 21 +++++++++++++++++++++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/hack/make/.binary b/hack/make/.binary index 357b456e09109..53de6749e577e 100644 --- a/hack/make/.binary +++ b/hack/make/.binary @@ -80,6 +80,7 @@ case "$(go env GOOS)/$(go env GOARCH)" in esac echo "Building: $DEST/$BINARY_FULLNAME" +echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\"" go build \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ diff --git a/hack/make/cross b/hack/make/cross index 47cb667af24a5..ab9f0b7445230 100644 --- a/hack/make/cross +++ b/hack/make/cross @@ -18,8 +18,14 @@ for platform in ${DOCKER_CROSSPLATFORMS}; do ( export KEEPDEST=1 export DEST="${DEST}/${platform}" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION - export GOOS=${platform%/*} - export GOARCH=${platform##*/} + export GOOS=${platform%%/*} + export GOARCH=${platform#*/} + + if [[ "${GOARCH}" = "arm/"* ]]; then + GOARM=${GOARCH##*/v} + GOARCH=${GOARCH%/v*} + export GOARM + fi echo "Cross building: ${DEST}" mkdir -p "${DEST}" diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md index a5b0018b5a0cf..0fb6c20866444 100644 --- a/project/PACKAGERS.md +++ b/project/PACKAGERS.md @@ -233,6 +233,27 @@ following: This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for client-only builds is the important file to grab and install as appropriate. +### Cross Compilation + +Limited cross compilation is supported due to requiring cgo for critical +functionality (such as seccomp support). + +To cross compile run `make cross`. You can specify the platforms to target by +setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms +in the format `/`. 
Specify multiple platforms by using a space +in between each desired platform. + +For setting arm variants, you can specify the `GOARM` value by append `/v` +to your `/arm`. Example: + +``` +make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross +``` + +This will create a linux binary targeting arm 7. + +See `hack/make/.binary` for supported cross compliation platforms. + ## System Dependencies ### Runtime Dependencies From 899d9e2d40e5342c5c415e1eb8a09ccfdad3cca4 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 16 Apr 2019 16:31:49 -0700 Subject: [PATCH 08/27] Make CC toolchains available for other targets This cross-compiling other binaries simpler. It would be nice if the cross Makefile target built all the required bins, but at least this is a first step. Signed-off-by: Brian Goff (cherry picked from commit f067a0acaa98824b5efa3b6e2c370f6e81013a35) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 44 ++++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7139dd167b40f..476b07ae583c9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -95,16 +95,28 @@ RUN /download-frozen-image-v2.sh /build \ hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c # See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list) -# Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd -FROM base AS runtime-dev-cross-false -RUN apt-get update && apt-get install -y \ - libapparmor-dev \ - libseccomp-dev +FROM base AS cross-false -FROM runtime-dev-cross-false AS runtime-dev-cross-true +FROM base AS cross-true RUN dpkg --add-architecture armhf RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel +RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ + apt-get update \ + && apt-get install -y --no-install-recommends \ + 
crossbuild-essential-armhf \ + crossbuild-essential-arm64 \ + crossbuild-essential-armel; \ + fi + +FROM cross-${CROSS} as dev-base + +FROM dev-base AS runtime-dev-cross-false +RUN apt-get update && apt-get install -y \ + libapparmor-dev \ + libseccomp-dev + +FROM cross-true AS runtime-dev-cross-true # These crossbuild packages rely on gcc-, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so @@ -112,12 +124,16 @@ RUN dpkg --add-architecture armel RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ apt-get update \ && apt-get install -y \ - crossbuild-essential-armhf \ - crossbuild-essential-arm64 \ - crossbuild-essential-armel \ libseccomp-dev:armhf \ libseccomp-dev:arm64 \ - libseccomp-dev:armel; \ + libseccomp-dev:armel \ + libapparmor-dev:armhf \ + libapparmor-dev:arm64 \ + libapparmor-dev:armel \ + # install this arches seccomp here due to compat issues with the v0 builder + # This is as opposed to inheriting from runtime-dev-cross-false + libapparmor-dev \ + libseccomp-dev; \ fi FROM runtime-dev-cross-${CROSS} AS runtime-dev @@ -134,14 +150,14 @@ COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME -FROM base AS containerd +FROM dev-base AS containerd RUN apt-get update && apt-get install -y btrfs-tools ENV INSTALL_BINARY_NAME=containerd COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME -FROM base AS proxy +FROM dev-base AS proxy ENV INSTALL_BINARY_NAME=proxy COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ @@ -159,7 +175,7 @@ COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh 
$INSTALL_BINARY_NAME -FROM base AS dockercli +FROM dev-base AS dockercli ENV INSTALL_BINARY_NAME=dockercli COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ @@ -171,7 +187,7 @@ COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME -FROM base AS tini +FROM dev-base AS tini RUN apt-get update && apt-get install -y cmake vim-common COPY hack/dockerfile/install/install.sh ./install.sh ENV INSTALL_BINARY_NAME=tini From 8f4ddc9b099dc85a72d8be0ec7d3653b54532a47 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Mon, 3 Jun 2019 16:05:23 -0700 Subject: [PATCH 09/27] Set DOCKER_BINDDIR mount options from env Adds `DOCKER_BINDDIR_MOUNT_OPTS` to easily tweak the BINDDIR mount options... primarily adding so I can control the caching mode for osxfs because compiling takes > 1min for me with the default and < 30s with both `cached` and `delegated`. Signed-off-by: Brian Goff (cherry picked from commit b1e6536ceb8a4eeaecee6d9bcf2ef37d3a25d127) Signed-off-by: Sebastiaan van Stijn --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index be6421210cedf..76ed039fbebce 100644 --- a/Makefile +++ b/Makefile @@ -86,6 +86,7 @@ BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) # DOCKER_MOUNT can be overriden, but use at your own risk! ifndef DOCKER_MOUNT DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") +DOCKER_MOUNT := $(if $(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT):$(DOCKER_BINDDIR_MOUNT_OPTS),$(DOCKER_MOUNT)) # This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. # The volume will be cleaned up when the container is removed due to `--rm`. 
From caa11a411d83fea1b7d7d86da1db324ce4b206ee Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 12 Jul 2019 14:41:08 +0200 Subject: [PATCH 10/27] Fix "Removing bundles/" not actually removing bundles Before: Running `ls -la bundles/` before, and after removing: ls -la bundles/ total 16 drwxr-xr-x 7 root root 224 Jul 12 12:25 . drwxr-xr-x 1 root root 4096 Jul 12 12:30 .. drwxr-xr-x 2 root root 64 Jul 12 10:00 dynbinary drwxr-xr-x 6 root root 192 Jul 12 12:25 dynbinary-daemon lrwxrwxrwx 1 root root 1 Jul 12 12:25 latest -> . drwxr-xr-x 92 root root 2944 Jul 12 12:29 test-integration Removing bundles/ ls -la bundles/ total 16 drwxr-xr-x 7 root root 224 Jul 12 12:25 . drwxr-xr-x 1 root root 4096 Jul 12 12:30 .. drwxr-xr-x 2 root root 64 Jul 12 10:00 dynbinary drwxr-xr-x 6 root root 192 Jul 12 12:25 dynbinary-daemon lrwxrwxrwx 1 root root 1 Jul 12 12:25 latest -> . drwxr-xr-x 92 root root 2944 Jul 12 12:29 test-integration After: Running `ls -la bundles/` before, and after removing: ls -la bundles/ total 16 drwxr-xr-x 7 root root 224 Jul 12 12:25 . drwxr-xr-x 1 root root 4096 Jul 12 12:30 .. drwxr-xr-x 2 root root 64 Jul 12 10:00 dynbinary drwxr-xr-x 6 root root 192 Jul 12 12:25 dynbinary-daemon lrwxrwxrwx 1 root root 1 Jul 12 12:25 latest -> . drwxr-xr-x 92 root root 2944 Jul 12 12:29 test-integration Removing bundles/ ls -la bundles/ total 4 drwxr-xr-x 2 root root 64 Jul 12 12:25 . drwxr-xr-x 1 root root 4096 Jul 12 12:30 .. 
Signed-off-by: Sebastiaan van Stijn (cherry picked from commit f75f34249bd55b449dd4e0be08624fe0dcf50b63) Signed-off-by: Sebastiaan van Stijn --- hack/make.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/make.sh b/hack/make.sh index 58efc74ee5989..2c7ca612f52f0 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -174,7 +174,7 @@ bundle() { main() { if [ -z "${KEEPBUNDLE-}" ]; then echo "Removing bundles/" - rm -rf "bundles/*" + rm -rf bundles/* echo fi mkdir -p bundles From c8d69fa59e0ff43c034cbb0159e4f88fe9744944 Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Thu, 18 Jul 2019 13:53:38 -0700 Subject: [PATCH 11/27] TESTING.md: document GO_VERSION Signed-off-by: Kir Kolyshkin (cherry picked from commit a55753877016eaa3ed577774c11c2428f95ac433) Signed-off-by: Sebastiaan van Stijn --- TESTING.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/TESTING.md b/TESTING.md index b2c53769ca052..15322f9eedf33 100644 --- a/TESTING.md +++ b/TESTING.md @@ -87,3 +87,10 @@ To run the integration test suite: ``` make test-integration ``` + +You can change a version of golang used for building stuff that is being tested +by setting `GO_VERSION` variable, for example: + +``` +make GO_VERSION=1.12.7 test +``` From c0fa7b664cf412fb943b9a2288c984dbd678bcf3 Mon Sep 17 00:00:00 2001 From: Andrew Hsu Date: Sat, 20 Jul 2019 00:30:16 +0000 Subject: [PATCH 12/27] added hack/ci/master as entry point for master codeline checks Signed-off-by: Andrew Hsu (cherry picked from commit aac6e62209b1b7fe22717ca9c4714c8fc74c29e8) Signed-off-by: Sebastiaan van Stijn --- hack/ci/master | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100755 hack/ci/master diff --git a/hack/ci/master b/hack/ci/master new file mode 100755 index 0000000000000..83182c69ec435 --- /dev/null +++ b/hack/ci/master @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# Entrypoint for jenkins master CI build +set -eu -o pipefail + +hack/validate/default +hack/test/unit + +hack/make.sh \ + 
binary-daemon \ + dynbinary \ + test-docker-py \ + test-integration \ + cross From 2fe5b013fa1c71c840c9d11ae98e364cb44efc18 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 2 Aug 2019 13:32:36 -0700 Subject: [PATCH 13/27] Add support for setting a test filter This is basically taking some stuff that make a custom shell function for. This takes a test filter, builds the appropriate TESTFLAGS, and sets the integration API test dirs that match the given filter to avoid building all test dirs. Signed-off-by: Brian Goff (cherry picked from commit 13064b155eb439a79adcf8f160ecf2b76f805bd4) Signed-off-by: Sebastiaan van Stijn --- Makefile | 1 + TESTING.md | 23 +++++++++++++++++++++++ hack/make/.integration-test-helpers | 29 ++++++++++++++++++++++++++++- 3 files changed, 52 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 76ed039fbebce..7beee44bdbba2 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,7 @@ DOCKER_ENVS := \ -e TESTFLAGS \ -e TESTFLAGS_INTEGRATION \ -e TESTFLAGS_INTEGRATION_CLI \ + -e TEST_FILTER \ -e TIMEOUT \ -e VALIDATE_REPO \ -e VALIDATE_BRANCH \ diff --git a/TESTING.md b/TESTING.md index 15322f9eedf33..ba5bcda289312 100644 --- a/TESTING.md +++ b/TESTING.md @@ -67,6 +67,8 @@ If a remote daemon is detected, the test will be skipped. ## Running tests +### Unit Tests + To run the unit test suite: ``` @@ -82,12 +84,33 @@ The following environment variables may be used to run a subset of tests: * `TESTFLAGS` - flags passed to `go test`, to run tests which match a pattern use `TESTFLAGS="-test.run TestNameOrPrefix"` +### Integration Tests + To run the integration test suite: ``` make test-integration ``` +This make target runs both the "integration" suite and the "integration-cli" +suite. + +You can specify which integration test dirs to build and run by specifying +the list of dirs in the TEST_INTEGRATION_DIR environment variable. 
+ +You can also explicitly skip either suite by setting (any value) in +TEST_SKIP_INTEGRATION and/or TEST_SKIP_INTEGRATION_CLI environment variables. + +Flags specific to each suite can be set in the TESTFLAGS_INTEGRATION and +TESTFLAGS_INTEGRATION_CLI environment variables. + +If all you want is to specity a test filter to run, you can set the +`TEST_FILTER` environment variable. This ends up getting passed directly to `go +test -run` (or `go test -check-f`, dpenending on the test suite). It will also +automatically set the other above mentioned environment variables accordingly. + +### Go Version + You can change a version of golang used for building stuff that is being tested by setting `GO_VERSION` variable, for example: diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers index abe69474ae673..484aa448c0525 100644 --- a/hack/make/.integration-test-helpers +++ b/hack/make/.integration-test-helpers @@ -27,7 +27,34 @@ source "$MAKEDIR/.go-autogen" : ${TESTFLAGS:=} : ${TESTDEBUG:=} -integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"} +setup_integration_test_filter() { + if [ -z "${TEST_FILTER}" ]; then + return + fi + + if [ -z "${TEST_SKIP_INTEGRATION}" ]; then + : ${TEST_INTEGRATION_DIR:=$(grep -rl "func\ .*${TEST_FILTER}.*\(t\ \*testing\.T\)" ./integration | grep '_test\.go' | xargs -I file dirname file | uniq)} + if [ -z "${TEST_INTEGRATION_DIR}" ]; then + echo Skipping integration tests since the supplied filter \"${TEST_FILTER}\" omits all integration tests + TEST_SKIP_INTEGRATION=1 + else + TESTFLAGS_INTEGRATION+="-test.run ${TEST_FILTER}" + fi + fi + + if [ -z "${TEST_SKIP_INTEGRATION_CLI}" ]; then + # ease up on the filtering here since CLI suites are namespaced by an object + if grep -r "${TEST_FILTER}.*\(c\ \*check\.C\)" ./integration-cli | grep -q '_test\.go$'; then + TEST_SKIP_INTEGRATION_CLI=1 + echo Skipping integration-cli tests 
since the supplied filter \"${TEST_FILTER}\" omits all integration-cli tests + else + TESTFLAGS_INTEGRATION_CLI+="-check.f ${TEST_FILTER}" + fi + fi +} + +setup_integration_test_filter +integration_api_dirs=${TEST_INTEGRATION_DIR:-$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)} run_test_integration() { set_platform_timeout From be340bc00bd6cc6c082ee5739bbb0e5b52d706a2 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 6 Aug 2019 19:28:07 -0700 Subject: [PATCH 14/27] Improve integration test detecetor The "new test" detector in test-integration-flaky was a bit flaky since it would detect function signatures that are not new tests. In addition, the test calls `return` outside of a function which is not allowed. Signed-off-by: Brian Goff (cherry picked from commit e2b24490e45fb1e024c0b1594bf978573b91271c) Signed-off-by: Sebastiaan van Stijn --- hack/make/test-integration-flaky | 50 ++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/hack/make/test-integration-flaky b/hack/make/test-integration-flaky index a613d6ccbc168..5c28b944231c0 100644 --- a/hack/make/test-integration-flaky +++ b/hack/make/test-integration-flaky @@ -2,28 +2,34 @@ set -e -o pipefail source hack/validate/.validate -new_tests=$( - validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' | - grep -E '^(\+func )(.*)(\*testing)' || true -) -if [ -z "$new_tests" ]; then - echo 'No new tests added to integration.' - return -fi -echo -echo "Found new integrations tests:" -echo "$new_tests" -echo "Running stress test for them." 
+run_integration_flaky() { + new_tests=$( + validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' | + grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true + ) -( - TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|') - # Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon - # and each test will run 5 times in a row under the same daemon. - # This will make a total of 25 runs for each test in TESTARRAY. - export TEST_REPEAT=5 - export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}" - echo "Using test flags: $TESTFLAGS" - source hack/make/test-integration -) + if [ -z "$new_tests" ]; then + echo 'No new tests added to integration.' + return + fi + + echo + echo "Found new integrations tests:" + echo "$new_tests" + echo "Running stress test for them." + + ( + TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|') + # Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon + # and each test will run 5 times in a row under the same daemon. + # This will make a total of 25 runs for each test in TESTARRAY. + export TEST_REPEAT=5 + export TESTFLAGS="-test.count ${TEST_REPEAT} -test.run ${TESTARRAY%?}" + echo "Using test flags: $TESTFLAGS" + source hack/make/test-integration + ) +} + +run_integration_flaky From b8ea2de52e04feecacb027b7146b97c6c2923fa5 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 8 Aug 2019 13:18:50 +0200 Subject: [PATCH 15/27] make.ps1: Run-IntegrationTests(): set working directory for test suite This function changed to the correct working directory before starting the tests (which is the same as on Linux), however the `ProcessStartInfo` process does not inherit this working directory, which caused Windows tests to be running with a different working directory as Linux (causing files used in tests to not be found). 
From the documentation; https://docs.microsoft.com/en-us/dotnet/api/system.diagnostics.processstartinfo.workingdirectory?view=netframework-4.8 > When `UseShellExecute` is `true`, the fully qualified name of the directory that contains > the process to be started. When the `UseShellExecute` property is `false`, the working > directory for the process to be started. The default is an empty string (`""`). This patch sets the `ProcessStartInfo.WorkingDirectory` to the correct working directory before starting the process. Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 6ae46aeabf056180067dd6af8d5d8588d6075c31) Signed-off-by: Sebastiaan van Stijn --- hack/make.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/make.ps1 b/hack/make.ps1 index 0385d4c2a8c2d..63fdbb32ad0b1 100644 --- a/hack/make.ps1 +++ b/hack/make.ps1 @@ -340,6 +340,7 @@ Function Run-IntegrationTests() { Write-Host "Running $($PWD.Path)" $pinfo = New-Object System.Diagnostics.ProcessStartInfo $pinfo.FileName = "$($PWD.Path)\test.exe" + $pinfo.WorkingDirectory = "$($PWD.Path)" $pinfo.RedirectStandardError = $true $pinfo.UseShellExecute = $false $pinfo.Arguments = $env:INTEGRATION_TESTFLAGS From 6f7072dda4aee7de50a08385583f3f8fd99a043f Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 30 Jul 2019 16:49:57 -0700 Subject: [PATCH 16/27] Dockerfile: use --no-install-recommends for all stages Signed-off-by: Sebastiaan van Stijn (cherry picked from commit b0835dd0889b00e231edf78b25fc929a6b148b05) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/Dockerfile b/Dockerfile index 476b07ae583c9..f707ecfe2e7c2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ FROM base AS criu # Install CRIU for checkpoint/restore support ENV CRIU_VERSION 3.6 # Install dependency packages specific to criu -RUN apt-get update && apt-get install -y \ +RUN apt-get update && apt-get 
install -y --no-install-recommends \ libnet-dev \ libprotobuf-c0-dev \ libprotobuf-dev \ @@ -45,7 +45,8 @@ RUN apt-get update && apt-get install -y \ protobuf-compiler \ protobuf-c-compiler \ python-protobuf \ - && mkdir -p /usr/src/criu \ + && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ @@ -84,7 +85,10 @@ RUN set -x \ && rm -rf "$GOPATH" FROM base AS frozen-images -RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + jq \ + && rm -rf /var/lib/apt/lists/* # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / RUN /download-frozen-image-v2.sh /build \ @@ -102,28 +106,27 @@ RUN dpkg --add-architecture armhf RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ - apt-get update \ - && apt-get install -y --no-install-recommends \ + apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-armhf \ crossbuild-essential-arm64 \ - crossbuild-essential-armel; \ + crossbuild-essential-armel \ + && rm -rf /var/lib/apt/lists/*; \ fi FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false -RUN apt-get update && apt-get install -y \ +RUN apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev \ - libseccomp-dev - + libseccomp-dev \ + && rm -rf /var/lib/apt/lists/* FROM cross-true AS runtime-dev-cross-true # These crossbuild packages rely on gcc-, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. 
RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ - apt-get update \ - && apt-get install -y \ + apt-get update && apt-get install -y --no-install-recommends \ libseccomp-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ @@ -133,7 +136,8 @@ RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ # install this arches seccomp here due to compat issues with the v0 builder # This is as opposed to inheriting from runtime-dev-cross-false libapparmor-dev \ - libseccomp-dev; \ + libseccomp-dev \ + && rm -rf /var/lib/apt/lists/*; \ fi FROM runtime-dev-cross-${CROSS} AS runtime-dev @@ -151,7 +155,9 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM dev-base AS containerd -RUN apt-get update && apt-get install -y btrfs-tools +RUN apt-get update && apt-get install -y --no-install-recommends \ + btrfs-tools \ + && rm -rf /var/lib/apt/lists/* ENV INSTALL_BINARY_NAME=containerd COPY hack/dockerfile/install/install.sh ./install.sh COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ @@ -188,7 +194,10 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM dev-base AS tini -RUN apt-get update && apt-get install -y cmake vim-common +RUN apt-get update && apt-get install -y --no-install-recommends \ + cmake \ + vim-common \ + && rm -rf /var/lib/apt/lists/* COPY hack/dockerfile/install/install.sh ./install.sh ENV INSTALL_BINARY_NAME=tini COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ @@ -208,7 +217,7 @@ RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
-RUN apt-get update && apt-get install -y \ +RUN apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ @@ -232,7 +241,7 @@ RUN apt-get update && apt-get install -y \ zip \ bzip2 \ xz-utils \ - --no-install-recommends + && rm -rf /var/lib/apt/lists/* RUN pip3 install yamllint==1.16.0 From fdac9f877fe54b8018c44051db2885d66f93e8d5 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 11 Aug 2019 17:08:33 +0200 Subject: [PATCH 17/27] Dockerfile: use DEBIAN_FRONTEND=noninteractive Using a build-arg so that we don't have to specify it for each `apt-get install`, and to preserve that the `DEBIAN_FRONTEND` is preserved in the image itself (which changes the default behavior, and can be surprising if the image is run interactively).` With this patch, some (harmless, but possibly confusing) errors are no longer printed during build, for example: ```patch Unpacking libgcc1:armhf (1:6.3.0-18+deb9u1) ... Selecting previously unselected package libc6:armhf. Preparing to unpack .../04-libc6_2.24-11+deb9u4_armhf.deb ... -debconf: unable to initialize frontend: Dialog -debconf: (TERM is not set, so the dialog frontend is not usable.) -debconf: falling back to frontend: Readline Unpacking libc6:armhf (2.24-11+deb9u4) ... Selecting previously unselected package libgcc1:arm64. Preparing to unpack .../05-libgcc1_1%3a6.3.0-18+deb9u1_arm64.deb ... Unpacking libgcc1:arm64 (1:6.3.0-18+deb9u1) ... Selecting previously unselected package libc6:arm64. Preparing to unpack .../06-libc6_2.24-11+deb9u4_arm64.deb ... -debconf: unable to initialize frontend: Dialog -debconf: (TERM is not set, so the dialog frontend is not usable.) 
-debconf: falling back to frontend: Readline ``` Looks like some output is now also printed on stdout instead of stderr Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 2ff9ac4de5fbd4c6afc215373362ea65f4a44fbc) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/Dockerfile b/Dockerfile index f707ecfe2e7c2..67277a6faff29 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,6 +26,7 @@ ARG CROSS="false" ARG GO_VERSION=1.11.13 +ARG DEBIAN_FRONTEND=noninteractive FROM golang:${GO_VERSION}-stretch AS base ARG APT_MIRROR @@ -33,6 +34,7 @@ RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list FROM base AS criu +ARG DEBIAN_FRONTEND # Install CRIU for checkpoint/restore support ENV CRIU_VERSION 3.6 # Install dependency packages specific to criu @@ -85,6 +87,7 @@ RUN set -x \ && rm -rf "$GOPATH" FROM base AS frozen-images +ARG DEBIAN_FRONTEND RUN apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ jq \ @@ -102,6 +105,7 @@ RUN /download-frozen-image-v2.sh /build \ FROM base AS cross-false FROM base AS cross-true +ARG DEBIAN_FRONTEND RUN dpkg --add-architecture armhf RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel @@ -116,11 +120,13 @@ RUN if [ "$(go env GOHOSTARCH)" = "amd64" ]; then \ FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false +ARG DEBIAN_FRONTEND RUN apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev \ libseccomp-dev \ && rm -rf /var/lib/apt/lists/* FROM cross-true AS runtime-dev-cross-true +ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-, but this doesn't want to install # on non-amd64 systems. 
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so @@ -155,6 +161,7 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM dev-base AS containerd +ARG DEBIAN_FRONTEND RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ && rm -rf /var/lib/apt/lists/* @@ -194,6 +201,7 @@ COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME FROM dev-base AS tini +ARG DEBIAN_FRONTEND RUN apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common \ @@ -207,6 +215,7 @@ RUN PREFIX=/build ./install.sh $INSTALL_BINARY_NAME # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev +ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser # Let us use a .bashrc file From 00793f785cf476185c4bb228dc39f96593970faf Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 11 Aug 2019 17:26:26 +0200 Subject: [PATCH 18/27] Jenkinsfile: run DCO check before everything else This will run the DCO check in a lightweight alpine container, before running other stages, and before building the development image/container (which can take a long time). 
A Jenkins parameter was added to optionally skip the DCO check (skip_dco) Signed-off-by: Sebastiaan van Stijn (cherry picked from commit d6f7909c7639105c93e0670999e6f0536e9f6fff) Signed-off-by: Sebastiaan van Stijn --- Jenkinsfile | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index eb52655a3c7d8..95b1a240ef280 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -14,6 +14,7 @@ pipeline { booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test') booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test') booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test') + booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check') } environment { DOCKER_BUILDKIT = '1' @@ -24,6 +25,20 @@ pipeline { TIMEOUT = '120m' } stages { + stage('DCO-check') { + when { + beforeAgent true + expression { !params.skip_dco } + } + agent { label 'linux' } + steps { + sh ''' + docker run --rm \ + -v "$WORKSPACE:/workspace" \ + alpine sh -c 'apk add --no-cache -q git bash && cd /workspace && hack/validate/dco' + ''' + } + } stage('Build') { parallel { stage('unit-validate') { From 45456b88c2650355f3fe8ddd142ee9817e7fc8b1 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 11 Aug 2019 00:52:01 +0200 Subject: [PATCH 19/27] Dockerfile: add python3-wheel back again (for yamllint) Although the Dockerfile builds without it, adding wheel back should save some time ``` 00:45:28 #14 10.70 Building wheels for collected packages: pathspec, pyyaml 00:45:28 #14 10.70 Running setup.py bdist_wheel for pathspec: started 00:45:28 #14 10.88 Running setup.py bdist_wheel for pathspec: finished with status 'error' 00:45:28 #14 10.88 Complete output from command /usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-mbotnxes/pathspec/setup.py';f=getattr(tokenize, 'open', 
open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" bdist_wheel -d /tmp/tmpg9pl4u6kpip-wheel- --python-tag cp35: 00:45:28 #14 10.88 usage: -c [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] 00:45:28 #14 10.88 or: -c --help [cmd1 cmd2 ...] 00:45:28 #14 10.88 or: -c --help-commands 00:45:28 #14 10.88 or: -c cmd --help 00:45:28 #14 10.88 00:45:28 #14 10.88 error: invalid command 'bdist_wheel' 00:45:28 #14 10.88 00:45:28 #14 10.88 ---------------------------------------- 00:45:28 #14 10.88 Failed building wheel for pathspec 00:45:28 #14 10.88 Running setup.py clean for pathspec 00:45:28 #14 11.05 Running setup.py bdist_wheel for pyyaml: started 00:45:28 #14 11.25 Running setup.py bdist_wheel for pyyaml: finished with status 'error' 00:45:28 #14 11.25 Complete output from command /usr/bin/python3 -u -c "import setuptools, tokenize;__file__='/tmp/pip-build-mbotnxes/pyyaml/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" bdist_wheel -d /tmp/tmpyci_xi0bpip-wheel- --python-tag cp35: 00:45:28 #14 11.25 usage: -c [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] 00:45:28 #14 11.25 or: -c --help [cmd1 cmd2 ...] 
00:45:28 #14 11.25 or: -c --help-commands 00:45:28 #14 11.25 or: -c cmd --help 00:45:28 #14 11.25 00:45:28 #14 11.25 error: invalid command 'bdist_wheel' 00:45:28 #14 11.25 00:45:28 #14 11.25 ---------------------------------------- 00:45:28 #14 11.25 Failed building wheel for pyyaml 00:45:28 #14 11.25 Running setup.py clean for pyyaml 00:45:28 #14 11.44 Failed to build pathspec pyyaml 00:45:28 #14 11.45 Installing collected packages: pathspec, pyyaml, yamllint 00:45:28 #14 11.45 Running setup.py install for pathspec: started 00:45:29 #14 11.73 Running setup.py install for pathspec: finished with status 'done' 00:45:29 #14 11.73 Running setup.py install for pyyaml: started 00:45:29 #14 12.05 Running setup.py install for pyyaml: finished with status 'done' 00:45:29 #14 12.12 Successfully installed pathspec-0.5.9 pyyaml-5.1.2 yamllint-1.16.0 ``` Signed-off-by: Sebastiaan van Stijn (cherry picked from commit ad70bf6866ac40753d0f629269169b4be74e2ad0) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 67277a6faff29..c30aa62e7af91 100644 --- a/Dockerfile +++ b/Dockerfile @@ -243,6 +243,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ pigz \ python3-pip \ python3-setuptools \ + python3-wheel \ thin-provisioning-tools \ vim \ vim-common \ From cae19999e4c9dab30c0b6b917bd866a6461a913c Mon Sep 17 00:00:00 2001 From: Stefan Scherer Date: Wed, 14 Aug 2019 14:26:34 -0700 Subject: [PATCH 20/27] Use new windows labels Signed-off-by: Stefan Scherer (cherry picked from commit ca3e230b7749d02dd6019392eeffcd0e0d5d2c16) Signed-off-by: Sebastiaan van Stijn --- Jenkinsfile | 39 ++++++++++++---- hack/ci/windows.ps1 | 61 ++++++++++++++++---------- integration-cli/docker_cli_run_test.go | 2 +- 3 files changed, 70 insertions(+), 32 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 95b1a240ef280..ff0960a0bc9f4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -12,8 +12,8 @@ pipeline { 
booleanParam(name: 'janky', defaultValue: true, description: 'x86 Build/Test') booleanParam(name: 'z', defaultValue: true, description: 'IBM Z (s390x) Build/Test') booleanParam(name: 'powerpc', defaultValue: true, description: 'PowerPC (ppc64le) Build/Test') - booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test') - booleanParam(name: 'windowsRS5', defaultValue: false, description: 'Windows 2019 (RS5) Build/Test') + booleanParam(name: 'windowsRS1', defaultValue: true, description: 'Windows 2016 (RS1) Build/Test') + booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test') booleanParam(name: 'skip_dco', defaultValue: false, description: 'Skip the DCO check') } environment { @@ -668,10 +668,20 @@ pipeline { beforeAgent true expression { params.windowsRS1 } } + environment { + DOCKER_BUILDKIT = '0' + SKIP_VALIDATION_TESTS = '1' + SOURCES_DRIVE = 'd' + SOURCES_SUBDIR = 'gopath' + TESTRUN_DRIVE = 'd' + TESTRUN_SUBDIR = "CI-$BUILD_NUMBER" + WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore' + WINDOWS_BASE_IMAGE_TAG = 'ltsc2016' + } agent { node { - label 'windows-rs1' - customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker' + customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker' + label 'windows-2016' } } stages { @@ -685,7 +695,9 @@ pipeline { steps { powershell ''' $ErrorActionPreference = 'Stop' - .\\hack\\ci\\windows.ps1 + [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + Invoke-WebRequest https://github.com/jhowardmsft/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe + ./hack/ci/windows.ps1 exit $LastExitCode ''' } @@ -697,10 +709,20 @@ pipeline { beforeAgent true expression { params.windowsRS5 } } + environment { + DOCKER_BUILDKIT = '0' + SKIP_VALIDATION_TESTS = '1' + SOURCES_DRIVE = 'd' + SOURCES_SUBDIR = 'gopath' + TESTRUN_DRIVE = 'd' + TESTRUN_SUBDIR = "CI-$BUILD_NUMBER" + 
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore' + WINDOWS_BASE_IMAGE_TAG = 'ltsc2019' + } agent { node { - label 'windows-rs5' - customWorkspace 'c:\\gopath\\src\\github.com\\docker\\docker' + customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker' + label 'windows-2019' } } stages { @@ -714,7 +736,8 @@ pipeline { steps { powershell ''' $ErrorActionPreference = 'Stop' - .\\hack\\ci\\windows.ps1 + Invoke-WebRequest https://github.com/jhowardmsft/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe + ./hack/ci/windows.ps1 exit $LastExitCode ''' } diff --git a/hack/ci/windows.ps1 b/hack/ci/windows.ps1 index 8828f73d01f63..dc2cd5246eed7 100644 --- a/hack/ci/windows.ps1 +++ b/hack/ci/windows.ps1 @@ -78,6 +78,9 @@ if ($env:BUILD_TAG -match "-WoW") { $env:LCOW_MODE="" } # docker integration tests are also coded to use the same # environment variable, and if no set, defaults to microsoft/windowsservercore # +# WINDOWS_BASE_IMAGE_TAG if defined, uses that as the tag name for the base image. +# if no set, defaults to latest +# # LCOW_BASIC_MODE if defined, does very basic LCOW verification. Ultimately we # want to run the entire CI suite from docker, but that's a way off. 
# @@ -139,7 +142,7 @@ Function Nuke-Everything { } $allImages = $(docker images --format "{{.Repository}}#{{.ID}}") - $toRemove = ($allImages | Select-String -NotMatch "windowsservercore","nanoserver","docker") + $toRemove = ($allImages | Select-String -NotMatch "servercore","nanoserver","docker") $imageCount = ($toRemove | Measure-Object -line).Lines if ($imageCount -gt 0) { @@ -261,6 +264,18 @@ Try { # Make sure docker-ci-zap is installed if ($null -eq (Get-Command "docker-ci-zap" -ErrorAction SilentlyContinue)) { Throw "ERROR: docker-ci-zap is not installed or not found on path" } + # Make sure Windows Defender is disabled + $defender = $false + Try { + $status = Get-MpComputerStatus + if ($status) { + if ($status.RealTimeProtectionEnabled) { + $defender = $true + } + } + } Catch {} + if ($defender) { Throw "ERROR: Windows Defender real time protection must be disabled for integration tests" } + # Make sure SOURCES_DRIVE is set if ($null -eq $env:SOURCES_DRIVE) { Throw "ERROR: Environment variable SOURCES_DRIVE is not set" } @@ -345,14 +360,16 @@ Try { Write-Host -ForegroundColor Green "INFO: docker load of"$ControlDaemonBaseImage" completed successfully" } else { # We need to docker pull it instead. It will come in directly as microsoft/imagename:latest - Write-Host -ForegroundColor Green $("INFO: Pulling microsoft/"+$ControlDaemonBaseImage+":latest from docker hub. This may take some time...") + Write-Host -ForegroundColor Green $("INFO: Pulling $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG from docker hub. 
This may take some time...") $ErrorActionPreference = "SilentlyContinue" - docker pull $("microsoft/"+$ControlDaemonBaseImage) + docker pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" $ErrorActionPreference = "Stop" if (-not $LastExitCode -eq 0) { - Throw $("ERROR: Failed to docker pull microsoft/"+$ControlDaemonBaseImage+":latest.") + Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG.") } - Write-Host -ForegroundColor Green $("INFO: docker pull of microsoft/"+$ControlDaemonBaseImage+":latest completed successfully") + Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG completed successfully") + Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage") + docker tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage } } else { Write-Host -ForegroundColor Green "INFO: Image"$("microsoft/"+$ControlDaemonBaseImage+":latest")"is already loaded in the control daemon" @@ -663,17 +680,20 @@ Try { if ($null -eq $env:WINDOWS_BASE_IMAGE) { $env:WINDOWS_BASE_IMAGE="microsoft/windowsservercore" } + if ($null -eq $env:WINDOWS_BASE_IMAGE_TAG) { + $env:WINDOWS_BASE_IMAGE_TAG="latest" + } # Lowercase and make sure it has a microsoft/ prefix $env:WINDOWS_BASE_IMAGE = $env:WINDOWS_BASE_IMAGE.ToLower() - if ($($env:WINDOWS_BASE_IMAGE -Split "/")[0] -ne "microsoft") { - Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/" + if (! 
$($env:WINDOWS_BASE_IMAGE -Split "/")[0] -match "microsoft") { + Throw "ERROR: WINDOWS_BASE_IMAGE should start microsoft/ or mcr.microsoft.com/" } Write-Host -ForegroundColor Green "INFO: Base image for tests is $env:WINDOWS_BASE_IMAGE" $ErrorActionPreference = "SilentlyContinue" - if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String $($env:WINDOWS_BASE_IMAGE+":latest") | Measure-Object -Line).Lines) -eq 0) { + if ($((& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" images --format "{{.Repository}}:{{.Tag}}" | Select-String "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" | Measure-Object -Line).Lines) -eq 0) { # Try the internal azure CI image version or Microsoft internal corpnet where the base image is already pre-prepared on the disk, # either through Invoke-DockerCI or, in the case of Azure CI servers, baked into the VHD at the same location. if (Test-Path $("c:\baseimages\"+$($env:WINDOWS_BASE_IMAGE -Split "/")[1]+".tar")) { @@ -686,18 +706,20 @@ Try { } Write-Host -ForegroundColor Green "INFO: docker load of"$($env:WINDOWS_BASE_IMAGE -Split "/")[1]" into daemon under test completed successfully" } else { - # We need to docker pull it instead. It will come in directly as microsoft/imagename:latest - Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":latest from docker hub into daemon under test. This may take some time...") + # We need to docker pull it instead. It will come in directly as microsoft/imagename:tagname + Write-Host -ForegroundColor Green $("INFO: Pulling "+$env:WINDOWS_BASE_IMAGE+":"+$env:WINDOWS_BASE_IMAGE_TAG+" from docker hub into daemon under test. 
This may take some time...") $ErrorActionPreference = "SilentlyContinue" - & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull $($env:WINDOWS_BASE_IMAGE) + & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" pull "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" $ErrorActionPreference = "Stop" if (-not $LastExitCode -eq 0) { - Throw $("ERROR: Failed to docker pull "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test.") + Throw $("ERROR: Failed to docker pull $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test.") } - Write-Host -ForegroundColor Green $("INFO: docker pull of "+$env:WINDOWS_BASE_IMAGE+":latest into daemon under test completed successfully") + Write-Host -ForegroundColor Green $("INFO: docker pull of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG into daemon under test completed successfully") + Write-Host -ForegroundColor Green $("INFO: Tagging $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG as microsoft/$ControlDaemonBaseImage in daemon under test") + & "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" tag "$($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG" microsoft/$ControlDaemonBaseImage } } else { - Write-Host -ForegroundColor Green "INFO: Image"$($env:WINDOWS_BASE_IMAGE+":latest")"is already loaded in the daemon under test" + Write-Host -ForegroundColor Green "INFO: Image $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is already loaded in the daemon under test" } @@ -705,7 +727,7 @@ Try { $ErrorActionPreference = "SilentlyContinue" $dutimgVersion = $(&"$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" inspect $($env:WINDOWS_BASE_IMAGE) --format "{{.OsVersion}}") $ErrorActionPreference = "Stop" - Write-Host -ForegroundColor Green $("INFO: Version of "+$env:WINDOWS_BASE_IMAGE+":latest is '"+$dutimgVersion+"'") + Write-Host -ForegroundColor Green $("INFO: Version of $($env:WINDOWS_BASE_IMAGE):$env:WINDOWS_BASE_IMAGE_TAG is '"+$dutimgVersion+"'") } # 
Run the validation tests unless SKIP_VALIDATION_TESTS is defined. @@ -752,14 +774,7 @@ Try { #if ($bbCount -eq 0) { Write-Host -ForegroundColor Green "INFO: Building busybox" $ErrorActionPreference = "SilentlyContinue" - - # This is a temporary hack for nanoserver - if ($env:WINDOWS_BASE_IMAGE -ne "microsoft/windowsservercore") { - Write-Host -ForegroundColor Red "HACK HACK HACK - Building 64-bit nanoserver busybox image" - $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox64/v1.1/Dockerfile | Out-Host) - } else { - $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.1/Dockerfile | Out-Host) - } + $(& "$env:TEMP\binary\docker-$COMMITHASH" "-H=$($DASHH_CUT)" build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/v1.1/Dockerfile | Out-Host) $ErrorActionPreference = "Stop" if (-not($LastExitCode -eq 0)) { Throw "ERROR: Failed to build busybox image" diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 02b4ff9ac0bf9..2ebe70a3275c0 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -1706,7 +1706,7 @@ func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { out = strings.TrimSpace(out) expected := "root" if testEnv.OSType == "windows" { - if strings.Contains(testEnv.PlatformDefaults.BaseImage, "windowsservercore") { + if strings.Contains(testEnv.PlatformDefaults.BaseImage, "servercore") { expected = `user manager\containeradministrator` } else { expected = `ContainerAdministrator` // nanoserver From 2bc1ed3a3a96ad7c66e4041980bca08ec8f56bd1 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 25 Aug 2019 15:57:22 +0200 Subject: [PATCH 21/27] hack/make.sh remove "latest" symlink This symlink was added in d42753485b71f5f26b682a187d1963ef138cd0ab, to allow finding the path to the latest 
built binary, because at the time, those paths were prefixed with the version or commit (e.g. `bundles/1.5.0-dev`). Commit bac2447964c8cdfcf35f928841d60310db997c76 removed the version-prefix in paths, but kept the old symlink for backward compatiblity. However, many things were moved since then (e.g. paths were renamed to `binary-daemon`, and various other changes). With the symlink pointing to the symlink's parent directory, following the symlink may result into an infinite recursion, which can happen if scripts using wildcards / globbing to find files. With this symlink no longer serving a real purpose, we can probably safely remove this symlink now. Signed-off-by: Sebastiaan van Stijn (cherry picked from commit dde1fd78c7c9a142a4099917198e69d88d34d088) Signed-off-by: Sebastiaan van Stijn --- hack/make.sh | 7 ------- 1 file changed, 7 deletions(-) diff --git a/hack/make.sh b/hack/make.sh index 2c7ca612f52f0..29e39122351b0 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -179,13 +179,6 @@ main() { fi mkdir -p bundles - # Windows and symlinks don't get along well - if [ "$(go env GOHOSTOS)" != 'windows' ]; then - rm -f bundles/latest - # preserve latest symlink for backward compatibility - ln -sf . bundles/latest - fi - if [ $# -lt 1 ]; then bundles=(${DEFAULT_BUNDLES[@]}) else From 8c28fd6b69b3326680f4b26113d49782abd41327 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 17 Jul 2019 14:35:51 +0200 Subject: [PATCH 22/27] Replace libprotobuf-c0-dev with libprotobuf-c-dev The `libprotobuf-c0-dev` virtual package is no longer available in Debian Buster, but is provided by `libprotobuf-c-dev`, which is available. https://packages.debian.org/stretch/libprotobuf-c0-dev > Virtual Package: libprotobuf-c0-dev > > This is a virtual package. See the Debian policy for a definition of virtual packages. 
> > Packages providing libprotobuf-c0-dev > libprotobuf-c-dev > Protocol Buffers C static library and headers (protobuf-c) Signed-off-by: Sebastiaan van Stijn (cherry picked from commit d185ca78ec53a448d8be0aaa4ade4824f9e4f928) Signed-off-by: Sebastiaan van Stijn --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c30aa62e7af91..0acfe2c59bdd2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -40,7 +40,7 @@ ENV CRIU_VERSION 3.6 # Install dependency packages specific to criu RUN apt-get update && apt-get install -y --no-install-recommends \ libnet-dev \ - libprotobuf-c0-dev \ + libprotobuf-c-dev \ libprotobuf-dev \ libnl-3-dev \ libcap-dev \ From 944eca3946ad97603daba9920d04c25a3ba0ca69 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 24 Aug 2019 12:27:50 +0200 Subject: [PATCH 23/27] hack/make: fix some linting issues reported by shellcheck Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 917b0dcd3df383e841b7e0e4a7f3c174959544b7) Signed-off-by: Sebastiaan van Stijn --- hack/make/.integration-test-helpers | 33 +++++++++++++++++------------ 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers index 484aa448c0525..f430df9aa1ef7 100644 --- a/hack/make/.integration-test-helpers +++ b/hack/make/.integration-test-helpers @@ -17,15 +17,16 @@ if [[ "${TESTFLAGS}" = *-test.run* ]]; then fi -if [ -z ${MAKEDIR} ]; then - export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +if [ -z "${MAKEDIR}" ]; then + MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + export MAKEDIR fi -source "$MAKEDIR/.go-autogen" +source "${MAKEDIR}/.go-autogen" # Set defaults -: ${TEST_REPEAT:=1} -: ${TESTFLAGS:=} -: ${TESTDEBUG:=} +: "${TEST_REPEAT:=1}" +: "${TESTFLAGS:=}" +: "${TESTDEBUG:=}" setup_integration_test_filter() { if [ -z "${TEST_FILTER}" ]; then @@ -33,9 +34,9 @@ setup_integration_test_filter() { fi if [ 
-z "${TEST_SKIP_INTEGRATION}" ]; then - : ${TEST_INTEGRATION_DIR:=$(grep -rl "func\ .*${TEST_FILTER}.*\(t\ \*testing\.T\)" ./integration | grep '_test\.go' | xargs -I file dirname file | uniq)} + : "${TEST_INTEGRATION_DIR:=$(grep -rl "func\ .*${TEST_FILTER}.*\(t\ \*testing\.T\)" ./integration | grep '_test\.go' | xargs -I file dirname file | uniq)}" if [ -z "${TEST_INTEGRATION_DIR}" ]; then - echo Skipping integration tests since the supplied filter \"${TEST_FILTER}\" omits all integration tests + echo "Skipping integration tests since the supplied filter \"${TEST_FILTER}\" omits all integration tests" TEST_SKIP_INTEGRATION=1 else TESTFLAGS_INTEGRATION+="-test.run ${TEST_FILTER}" @@ -46,7 +47,7 @@ setup_integration_test_filter() { # ease up on the filtering here since CLI suites are namespaced by an object if grep -r "${TEST_FILTER}.*\(c\ \*check\.C\)" ./integration-cli | grep -q '_test\.go$'; then TEST_SKIP_INTEGRATION_CLI=1 - echo Skipping integration-cli tests since the supplied filter \"${TEST_FILTER}\" omits all integration-cli tests + echo "Skipping integration-cli tests since the supplied filter \"${TEST_FILTER}\" omits all integration-cli tests" else TESTFLAGS_INTEGRATION_CLI+="-check.f ${TEST_FILTER}" fi @@ -54,7 +55,7 @@ setup_integration_test_filter() { } setup_integration_test_filter -integration_api_dirs=${TEST_INTEGRATION_DIR:-$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)} +integration_api_dirs="${TEST_INTEGRATION_DIR:-$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)}" run_test_integration() { set_platform_timeout @@ -72,6 +73,7 @@ run_test_integration_suites() { if ! 
( cd "$dir" echo "Running $PWD flags=${flags}" + # shellcheck disable=SC2086 test_env ./test.main ${flags} ); then exit 1; fi done @@ -82,12 +84,13 @@ run_test_integration_legacy_suites() { flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS ${TESTFLAGS_INTEGRATION_CLI}" cd integration-cli echo "Running $PWD flags=${flags}" + # shellcheck disable=SC2086 test_env ./test.main $flags ) } build_test_suite_binaries() { - if [ ${DOCKER_INTEGRATION_TESTS_VERIFIED-} ]; then + if [ -n "${DOCKER_INTEGRATION_TESTS_VERIFIED}" ]; then echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set" return fi @@ -112,6 +115,7 @@ build_test_suite_binary() { cleanup_test_suite_binaries() { [ -n "$TESTDEBUG" ] && return echo "Removing test suite binaries" + # shellcheck disable=SC2038 find integration* -name test.main | xargs -r rm } @@ -160,6 +164,7 @@ error_on_leaked_containerd_shims() { awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }') if [ -n "$leftovers" ]; then ps aux + # shellcheck disable=SC2086 kill -9 ${leftovers} 2> /dev/null echo "!!!! WARNING you have left over shim(s), Cleanup your test !!!!" exit 1 @@ -169,11 +174,11 @@ error_on_leaked_containerd_shims() { set_platform_timeout() { # Test timeout. 
if [ "${DOCKER_ENGINE_GOARCH}" = "arm64" ] || [ "${DOCKER_ENGINE_GOARCH}" = "arm" ]; then - : ${TIMEOUT:=10m} + : "${TIMEOUT:=10m}" elif [ "${DOCKER_ENGINE_GOARCH}" = "windows" ]; then - : ${TIMEOUT:=8m} + : "${TIMEOUT:=8m}" else - : ${TIMEOUT:=5m} + : "${TIMEOUT:=5m}" fi if [ "${TEST_REPEAT}" -gt 1 ]; then From 835e9266b81b010dd52a2ea9356f9290653d36a9 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sun, 11 Aug 2019 22:12:39 +0200 Subject: [PATCH 24/27] Jenkinsfile: use wildcards for artifacts, and don't fail on missing ones Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 8b65e058bedba80cd50eae0c211c71976b7be88f) Signed-off-by: Sebastiaan van Stijn --- Jenkinsfile | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index ff0960a0bc9f4..e9391330df9f0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -114,7 +114,7 @@ pipeline { tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log ''' - archiveArtifacts artifacts: 'docker-py-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } } } @@ -205,7 +205,7 @@ pipeline { tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out ''' - archiveArtifacts artifacts: 'unit-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' @@ -323,7 +323,7 @@ pipeline { find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz ''' - archiveArtifacts artifacts: 'janky-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' @@ -417,7 +417,7 @@ pipeline { find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz ''' - archiveArtifacts artifacts: 
's390x-integration-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' @@ -491,7 +491,7 @@ pipeline { find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz ''' - archiveArtifacts artifacts: 's390x-integration-cli-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' @@ -583,7 +583,7 @@ pipeline { find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz ''' - archiveArtifacts artifacts: 'powerpc-integration-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' @@ -655,7 +655,7 @@ pipeline { find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz ''' - archiveArtifacts artifacts: 'powerpc-integration-cli-bundles.tar.gz' + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true } cleanup { sh 'make clean' From 61ada8d4b41db9f617deef5432f567833fa3b78a Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 24 Aug 2019 14:14:56 +0200 Subject: [PATCH 25/27] Jenkinsfile: don't mark build failed when failing to create bundles Failing to archive the bundles should not mark the build as failed. 
This can happen if a build is terminated early, or if (to be implemented) an optional build-stage is skipped / failed; ``` 2019-08-24T10:53:09.354Z] + bundleName=janky [2019-08-24T10:53:09.354Z] + echo Creating janky-bundles.tar.gz [2019-08-24T10:53:09.354Z] Creating janky-bundles.tar.gz [2019-08-24T10:53:09.354Z] + xargs tar -czf janky-bundles.tar.gz [2019-08-24T10:53:09.354Z] + find bundles -path */root/*overlay2 -prune -o -type f ( -name *-report.json -o -name *.log -o -name *.prof -o -name *-report.xml ) -print [2019-08-24T10:53:09.354Z] find: bundles: No such file or directory [2019-08-24T10:53:09.354Z] tar: Cowardly refusing to create an empty archive [2019-08-24T10:53:09.354Z] Try 'tar --help' or 'tar --usage' for more information. Error when executing always post condition: hudson.AbortException: script returned exit code 123 at org.jenkinsci.plugins.workflow.steps.durable_task.DurableTaskStep$Execution.handleExit(DurableTaskStep.java:569) at org.jenkinsci.plugins.workflow.steps.durable_task.DurableTaskStep$Execution.check(DurableTaskStep.java:515) at org.jenkinsci.plugins.workflow.steps.durable_task.DurableTaskStep$Execution.run(DurableTaskStep.java:461) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:180) at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:293) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ``` Signed-off-by: Sebastiaan van Stijn (cherry picked from commit a76ff632a4a833851ef36726c9e9bfb5e7dbed39) Signed-off-by: Sebastiaan van Stijn --- Jenkinsfile | 101 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 62 
insertions(+), 39 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e9391330df9f0..423a4066bf1ab 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -109,12 +109,15 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo 'Creating docker-py-bundles.tar.gz' - tar -czf docker-py-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log - ''' - - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=docker-py + echo "Creating ${bundleName}-bundles.tar.gz" + tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log + ''' + + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } } } @@ -200,12 +203,15 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo 'Creating unit-bundles.tar.gz' - tar -czvf unit-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out - ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=unit + echo "Creating ${bundleName}-bundles.tar.gz" + tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' @@ -317,13 +323,16 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo "Creating janky-bundles.tar.gz" - # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf janky-bundles.tar.gz 
- ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=janky + echo "Creating ${bundleName}-bundles.tar.gz" + # exclude overlay2 directories + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' @@ -411,13 +420,16 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo "Creating s390x-integration-bundles.tar.gz" - # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-bundles.tar.gz - ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=s390x-integration + echo "Creating ${bundleName}-bundles.tar.gz" + # exclude overlay2 directories + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' @@ -486,12 +498,16 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo "Creating s390x-integration-cli-bundles.tar.gz" - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf s390x-integration-cli-bundles.tar.gz - ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + 
bundleName=s390x-integration-cli + echo "Creating ${bundleName}-bundles.tar.gz" + # exclude overlay2 directories + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' @@ -577,13 +593,16 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo "Creating powerpc-integration-bundles.tar.gz" - # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-bundles.tar.gz - ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=powerpc-integration + echo "Creating ${bundleName}-bundles.tar.gz" + # exclude overlay2 directories + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' @@ -650,12 +669,16 @@ pipeline { docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace ''' - sh ''' - echo "Creating powerpc-integration-cli-bundles.tar.gz" - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf powerpc-integration-cli-bundles.tar.gz - ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') { + sh ''' + bundleName=powerpc-integration-cli + echo "Creating ${bundleName}-bundles.tar.gz" + # exclude overlay2 
directories + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + ''' - archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true + } } cleanup { sh 'make clean' From 344fd1e6881759c3db9c5c1de4c98669ecb10229 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 28 Aug 2019 02:14:29 +0200 Subject: [PATCH 26/27] Jenkinsfile: fix invalid expression in bundles script This was introduced in a76ff632a4a833851ef36726c9e9bfb5e7dbed39: + find bundles -path */root/*overlay2 -prune -o -type f ( -o -name *.log -o -name *.prof ) -print find: invalid expression; you have used a binary operator '-o' with nothing before it. Signed-off-by: Sebastiaan van Stijn (cherry picked from commit ca1e7a3b4a112d496fed16c548af8d585f52be92) Signed-off-by: Sebastiaan van Stijn --- Jenkinsfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 423a4066bf1ab..4cd0521b58340 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -328,7 +328,7 @@ pipeline { bundleName=janky echo "Creating ${bundleName}-bundles.tar.gz" # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz ''' archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true @@ -425,7 +425,7 @@ pipeline { bundleName=s390x-integration echo "Creating ${bundleName}-bundles.tar.gz" # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + find bundles -path '*/root/*overlay2' -prune -o 
-type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz ''' archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true @@ -503,7 +503,7 @@ pipeline { bundleName=s390x-integration-cli echo "Creating ${bundleName}-bundles.tar.gz" # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz ''' archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true @@ -598,7 +598,7 @@ pipeline { bundleName=powerpc-integration echo "Creating ${bundleName}-bundles.tar.gz" # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz ''' archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true @@ -674,7 +674,7 @@ pipeline { bundleName=powerpc-integration-cli echo "Creating ${bundleName}-bundles.tar.gz" # exclude overlay2 directories - find bundles -path '*/root/*overlay2' -prune -o -type f \\( -o -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz + find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*.log' -o -name '*.prof' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz ''' archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true From 6a15a8114ed9b23a60427c0e4c35d42bb7eb9a7d Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 29 Aug 2019 23:41:45 +0200 Subject: [PATCH 27/27] windows.ps1: fix leaked NdisAdapters not being cleaned up on RS1 Windows RS1 has problems with 
leaking NdisAdapters during the integration tests; the windows.ps1 script has a cleanup step to remove those leaked adapters. For internal testing at Microsoft on internal builds, this cleanup step was skipped, and only ran on the CI machines in our Jenkins. Due to the move to our new Jenkins, the names of Windows machines changed, and because of that, the cleanup step was never executed, resulting in the leaked adapters not being cleaned up: ``` 20:32:23 WARNING: There are 608 NdisAdapters leaked under Psched\Parameters 20:32:23 WARNING: Not cleaning as not a production RS1 server 20:32:24 WARNING: There are 608 NdisAdapters leaked under WFPLWFS\Parameters 20:32:24 WARNING: Not cleaning as not a production RS1 server ``` ``` 22:01:31 WARNING: There are 1209 NdisAdapters leaked under Psched\Parameters 22:01:31 WARNING: Not cleaning as not a production RS1 server 22:01:31 WARNING: There are 1209 NdisAdapters leaked under WFPLWFS\Parameters 22:01:31 WARNING: Not cleaning as not a production RS1 server ``` This patch removes the check for non-production builds, and unconditionally cleans up leaked adapters if they are found. Signed-off-by: Sebastiaan van Stijn (cherry picked from commit 156ad54fb707ed8b03a1084a841ef5602198799a) Signed-off-by: Sebastiaan van Stijn --- hack/ci/windows.ps1 | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/hack/ci/windows.ps1 b/hack/ci/windows.ps1 index dc2cd5246eed7..705a23b1d3e6a 100644 --- a/hack/ci/windows.ps1 +++ b/hack/ci/windows.ps1 @@ -203,12 +203,8 @@ Function Nuke-Everything { $count=(Get-ChildItem $reg | Measure-Object).Count if ($count -gt 0) { Write-Warning "There are $count NdisAdapters leaked under Psched\Parameters" - if ($env:COMPUTERNAME -match "jenkins-rs1-") { - Write-Warning "Cleaning Psched..." 
- Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null - } else { - Write-Warning "Not cleaning as not a production RS1 server" - } + Write-Warning "Cleaning Psched..." + Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null } # TODO: This should be able to be removed in August 2017 update. Only needed for RS1 @@ -216,12 +212,8 @@ Function Nuke-Everything { $count=(Get-ChildItem $reg | Measure-Object).Count if ($count -gt 0) { Write-Warning "There are $count NdisAdapters leaked under WFPLWFS\Parameters" - if ($env:COMPUTERNAME -match "jenkins-rs1-") { - Write-Warning "Cleaning WFPLWFS..." - Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null - } else { - Write-Warning "Not cleaning as not a production RS1 server" - } + Write-Warning "Cleaning WFPLWFS..." + Get-ChildItem $reg | Remove-Item -Recurse -Force -ErrorAction SilentlyContinue | Out-Null } } catch { # Don't throw any errors onwards Throw $_