From 60b9d7ba98429736eebd5d3179ad5d330ed2061a Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Thu, 18 Sep 2014 14:57:12 -0400 Subject: [PATCH] Remove Docker-in-Docker from builds Remove all traces of Docker-in-Docker from builds. The rationale is explained generally in the `docs/builds.md` file. In short, Docker-in-Docker isn't secure or stable for production, and there's no indication it will be in the forseeable future. The codebase can be simplified by eliminating it, and our focus and efforts can shift to solving the remaining issues with host Docker access. --- docs/builds.md | 172 +++++------------- examples/builds/docker-build.json | 9 + .../builds/images/docker-build/Dockerfile | 2 + examples/builds/sti-build.json | 10 + hack/build-images.sh | 1 - .../builder/docker/docker-builder/Dockerfile | 4 +- images/builder/docker/docker-builder/build.sh | 33 +--- .../docker/kubernetes-fedora-dind/Dockerfile | 9 - .../docker/kubernetes-fedora-dind/dind | 109 ----------- images/builder/docker/sti-builder/Dockerfile | 4 +- images/builder/docker/sti-builder/build.sh | 35 +--- pkg/build/doc.go | 31 +++- pkg/build/strategy/docker.go | 10 +- pkg/build/strategy/docker_test.go | 5 +- pkg/build/strategy/sti.go | 57 +++--- pkg/build/strategy/sti_test.go | 5 +- pkg/build/strategy/util.go | 41 ++--- pkg/build/strategy/util_test.go | 26 +-- pkg/cmd/master/master.go | 8 +- 19 files changed, 164 insertions(+), 407 deletions(-) create mode 100644 examples/builds/docker-build.json create mode 100644 examples/builds/images/docker-build/Dockerfile create mode 100644 examples/builds/sti-build.json delete mode 100644 images/builder/docker/kubernetes-fedora-dind/Dockerfile delete mode 100755 images/builder/docker/kubernetes-fedora-dind/dind diff --git a/docs/builds.md b/docs/builds.md index 277ded282367..35d991486050 100644 --- a/docs/builds.md +++ b/docs/builds.md @@ -1,160 +1,80 @@ -Kubernetes Proposal - Build Plugin -================================== +# OpenShift Builds 
-Problem/Rationale ------------------ +## Problem/Rationale -Kubernetes creates Docker containers from images that were built elsewhere and pushed to a Docker -registry. Building Docker images is a foundational use-case in Docker-based workflows for -application development and deployment. Without support for builds in Kubernetes, if a system -administrator wanted a system that could build images, he or she would have to select a pre-existing -build system or write a new one, and then figure out how to deploy and maintain it on or off -Kubernetes. However, in most cases operators would wish to leverage the ability of Kubernetes to -schedule task execution into a pool of available resources, and most build systems would want to -take advantage of that mechanism. Offering an API for builds also makes Kubernetes a viable -backend for arbitrary third-party Docker image build systems which require resource constrainment -and scheduling capabilities, and allows organizations to orchestrate docker builds from their -existing continuous integration processes. This is not a core component of Kubernetes, but would -have significant value as a plugin to enable CI/CD flows around Docker images. +Kubernetes creates Docker containers from images that were built elsewhere and pushed to a Docker registry. Building Docker images is a foundational use-case in Docker-based workflows for application development and deployment. Without support for builds in Kubernetes, if a system administrator wanted a system that could build images, he or she would have to select a pre-existing build system or write a new one, and then figure out how to deploy and maintain it on or off Kubernetes. However, in most cases operators would wish to leverage the ability of Kubernetes to schedule task execution into a pool of available resources, and most build systems would want to take advantage of that mechanism. 
-Most build jobs share common characteristics - a set of build context parameters that define the job, -the need to run a certain process to completion, the capture of the logs from that build process, -publishing resources from successful builds, and the final “status” of the build. In addition, the -image-driven deployment flow that Kubernetes advocates depends on having images available. - -Builds should take advantage of resource restrictions – specifying limitations on things such as CPU -usage, memory usage, and build (pod) execution time – once support for this exists in Kubernetes. -Additionally, builds would become repeatable and consistent (same inputs = same output). +Offering an API for builds makes OpenShift a viable backend for arbitrary third-party Docker image build systems which require resource constrainment and scheduling capabilities, and allows organizations to orchestrate Docker builds from their existing continuous integration processes. OpenShift enables CI/CD flows around Docker images. -There are potentially several different types of builds that produce other types of output as well. -This proposal is for adding functionality to Kubernetes to build Docker images. +Most build jobs share common characteristics - a set of inputs to a build, the need to run a certain build process to completion, the capture of the logs from that build process, publishing resources from successful builds, and the final status of the build. In addition, the image-driven deployment flow that Kubernetes advocates depends on having images available. -Here are some possible user scenarios for builds in Kubernetes: +Builds should take advantage of resource restrictions – specifying limitations on things such as CPU usage, memory usage, and build (pod) execution time – once support for this exists in Kubernetes. Additionally, builds should be repeatable and consistent (same inputs = same output). -1. 
As a user of Kubernetes, I want to build an image from a source URL and push it to a registry - (for eventual deployment in Kubernetes). -2. As a user of Kubernetes, I want to build an image from a binary input (docker context, artifact) - and push it to a registry (for eventual deployment in Kubernetes). -3. As a provider of a service that involves building docker images, I want to offload the resource - allocation, scheduling, and garbage collection associated with that activity to Kubernetes - instead of solving those problems myself. -4. As a developer of a system which involves building docker images, I want to take advantage of - Kubernetes to perform the build, but orchestrate from an existing CI in order to integrate with - my organization’s devops SOPs. +Although there are potentially several different types of builds that produce other types of output, OpenShift by default provides the ability to build Docker images. -Example Use: Cloud IDE ----------------------- +Here are some possible user scenarios for builds in OpenShift: -Company X offers a docker-based cloud IDE service and needs to build docker images at scale for -their customers’ hosted projects. Company X wants a turn-key solution for this that handles -scheduling, resource allocation, and garbage collection. Using the build API, Company X can -leverage Kubernetes for the build work and concentrate on solving their core business problems. +1. As a user of OpenShift, I want to build an image from a source URL and push it to a registry (for eventual deployment in OpenShift). +2. As a user of OpenShift, I want to build an image from a binary input (Docker context, artifact) and push it to a registry (for eventual deployment in OpenShift). +3. As a provider of a service that involves building Docker images, I want to offload the resource allocation, scheduling, and garbage collection associated with that activity to OpenShift instead of solving those problems myself. +4. 
As a developer of a system which involves building Docker images, I want to take advantage of OpenShift to perform the build, but orchestrate from an existing CI in order to integrate with my organization’s devops SOPs. -Example Use: Enterprise Devops ------------------------------- +### Example Use: Cloud IDE -Company Y wants to leverage Kubernetes to build docker images, but their Devops SOPs mandate the -use of a third-party CI server in order to facilitate things like triggering builds when an -upstream project is built and promoting builds when the result is signed off on in the CI server. -Using the build API, company Y implements workflows in the CI server that orchestrate building in -Kubernetes which integrating with their organization’s SOPs. +Company X offers a Docker-based cloud IDE service and needs to build Docker images at scale for their customers’ hosted projects. Company X wants a turn-key solution for this that handles scheduling, resource allocation, and garbage collection. Using the build API, Company X can leverage OpenShift for the build work and concentrate on solving their core business problems. -Proposed Design ---------------- +### Example Use: Enterprise Devops -Note: The proposed solution requires that run-once containers be implemented in Kubernetes. +Company Y wants to leverage OpenShift to build Docker images, but their Devops SOPs mandate the use of a third-party CI server in order to facilitate actions such as triggering builds when an upstream project is built and promoting builds when the result is signed off on in the CI server. Using the build API, company Y implements workflows in the CI server that orchestrate building in OpenShift which integrates with their organization’s SOPs. -**BuildConfig** +## Build Strategies -Add a new BuildConfig type that will be used to record the inputs to a Build. 
Its fields could include: -1. Source URI -2. Source ref (e.g. git branch) -3. Image to use to perform the build -4. Desired image tag -5. Docker registry URL +### Docker Builds -Add appropriate registries and storage for BuildConfig and register /buildConfigs with the apiserver. +OpenShift supports Docker builds. Using this strategy, users may supply a URL to a Docker context which is used as the basis for a [Docker build](https://docs.docker.com/reference/commandline/cli/#build). -**Build** +#### How It Works -Add a new Build type that will be used to record a build for historical purposes. A Build includes: +To implement Docker builds, OpenShift provides build containers access to a node’s Docker daemon. -1. A copy of a BuildConfig (as the standalone BuildConfig could be updated over time and should not - affect a specific build) -2. A status field (new, pending, running, complete, failed) -3. The ID of the Pod associated with this Build +During a build, a pod containing a single container–a build container–is created. The node’s Docker socket is bind mounted into the build container. The build container executes `docker build` using the supplied Docker context, and all interaction with Docker occurs via the node's Docker daemon. -Add appropriate registries and storage for Build and register /builds with the apiserver. +**Advantages** -**BuildController** - -Add a new BuildController that runs a sync loop to execute builds. - -For newly created builds, the BuildController will assign a pod ID to the build and set the build’s -state to pending. 
This way, the assignment of the pod ID and pending status is idempotent and won’t -result in two BuildControllers potentially scheduling two different pods for the same build. - -For pending builds, the BuildController will attempt to create a pod to perform the build. If the -creation succeeds, it sets the build’s status to pending. If the pod already exists, that means -another BuildController already processed this build in a pending state, resulting in a no-op. Any -other pod creation error would result in the build’s status being set to failed. - -It may be desirable to support variations in the pod descriptor used to create the build pod. As -such, it could be possible for plugins/extensions to register additional build pod definitions. -Examples of variations include a builder that runs `docker build` as well as a builder that uses -the Source-To-Images (sti) tool (https://github.com/openshift/geard/tree/master/cmd/sti). - -For running builds, the BuildController will monitor the status of the pod. If the pod is still -running and the build has exceeded its allotted execution time, the BuildController will consider -it failed. If the pod is terminated, the BuildController will examine the exit codes for each of -the pod’s containers. If any exit code is non-zero, the build is marked as failed. Otherwise, it -is considered complete (successful). +1. Allows Docker builds in unprivileged containers +2. Minimizes image storage requirements +3. Reduces the number of Docker daemons required -Once the build has reached a terminal state (complete or failed), the BuildController will delete -the pod associated with the build. In the future, it will be desirable to keep a record of the -pod’s containers’ logs but that is out of scope of this proposal. +**Disadvantages** -Docker Daemon Location: Use the minion’s Docker socket ------------------------------------------------------- +1. Constraining resources per-user is made more difficult +2. 
Containers created during the build are created outside the scope of the kubelet +3. Container processes created during the build are children of a remote Docker process, making container cleanup more difficult -With this approach, a pod containing a single container–a build container–would be created. The -minion’s Docker socket would be bind mounted into the build container. The build container would -execute the build command (e.g. `docker build`) and all interaction with Docker would be using the -host’s (minion’s) Docker daemon. +There are viable paths to alleviate or resolve each of these disadvantages, and this mechanism is considered a work in progress. -**Pros** +##### Why not Docker-in-Docker? -1. Reduces number of Docker daemons required -2. Minimizes image storage requirements +It's theoretically possible to implement builds using a nested Docker daemon within a Docker container (Docker-in-Docker). On the surface, this approach offers some compelling advantages: -**Cons** +1. Build process resources can be naturally constrained to the user’s acceptable limits (cgroups) +2. Containers created during the build have the build container as their parent process, making container cleanup simple -1. Not possible to constrain resources per-user -2. Containers created during the build are created outside the scope of / not managed by Kubernetes -3. Containers created during the build don’t have the build container as their parent process, making - container cleanup more difficult +In practice, however, there are (at present) some serious problems with the approach which render it unusable: -Docker Daemon Location: Docker-in-Docker ----------------------------------------- +1. Requires a privileged container, which is a show-stopping security concern with no solution on the horizon + * In addition, this nullifies the theoretical benefit of cgroups isolation, as the process could break out the container +2. 
With devicemapper, it's very easy to leak both loopback devices and storage on the host +3. No easy way to share storage of images/layers among build containers, requiring each Docker-in-Docker instance to store its own unique, full copy of any image(s) downloaded during the build process. + * A caching proxy running on the node could at least minimize the number of times an image is pulled from a remote registry, but that doesn’t eliminate the need for each build container to have its own copy of the images. -With this approach, a pod containing a single container–a build container–would be created. The -build container would launch its own Docker daemon in the background, and then it would execute -the build command (e.g. `docker build`) and all interaction with Docker would be using the -container’s own (private) Docker daemon. +For these reasons, Docker-in-Docker is not considered a viable build strategy for a secure, multi-tenant production environment. -**Pros** +### STI (Source-to-Image) Builds -1. Build process resources can be constrained to the user’s acceptable limits (cgroups) -2. Containers created during the build have the build container as their parent process, making - container cleanup trivial +OpenShift also supports [Source-To-Images (sti)](https://github.com/openshift/geard/tree/master/cmd/sti) builds. -**Cons** +Source-to-images (sti) is a tool for building reproducible Docker images. It produces ready-to-run images by injecting a user source into a Docker image and assembling a new Docker image which incorporates the base image and built source, and is ready to use with `docker run`. STI supports incremental builds which re-use previously downloaded dependencies, previously built artifacts, etc. -1. Requires a privileged container as running the Docker daemon (even as Docker-in-Docker) requires - more privileges than a non-privileged container offers -2. 
No easy way to share storage of images/layers among build containers, requiring each - Docker-in-Docker instance to store its own unique, full copy of any image(s) downloaded during - the build process. A caching proxy running on the minion could at least minimize the number of - times an image is pulled from a remote registry, but that doesn’t eliminate the need for each - build container to have its own copy of the images. \ No newline at end of file diff --git a/examples/builds/docker-build.json b/examples/builds/docker-build.json new file mode 100644 index 000000000000..a3242d6764ad --- /dev/null +++ b/examples/builds/docker-build.json @@ -0,0 +1,9 @@ +{ + "kind": "Build", + "apiVersion": "v1beta1", + "input": { + "type": "docker", + "sourceURI": "https://raw.githubusercontent.com/openshift/origin/master/examples/builds/images/docker-build/Dockerfile", + "imageTag": "openshift/docker-build-example" + } +} diff --git a/examples/builds/images/docker-build/Dockerfile b/examples/builds/images/docker-build/Dockerfile new file mode 100644 index 000000000000..d1ceac6b7439 --- /dev/null +++ b/examples/builds/images/docker-build/Dockerfile @@ -0,0 +1,2 @@ +FROM busybox:latest +CMD echo "success" diff --git a/examples/builds/sti-build.json b/examples/builds/sti-build.json new file mode 100644 index 000000000000..ce85734821b3 --- /dev/null +++ b/examples/builds/sti-build.json @@ -0,0 +1,10 @@ +{ + "kind": "Build", + "apiVersion": "v1beta1", + "input": { + "type": "sti", + "sourceURI": "https://raw.githubusercontent.com/openshift/origin/master/examples/builds/images/docker-build/Dockerfile", + "imageTag": "openshift/sti-build-example", + "builderImage": "ncdc/sti-busybox" + } +} diff --git a/hack/build-images.sh b/hack/build-images.sh index fa7e116e8d65..f69a29aa169c 100755 --- a/hack/build-images.sh +++ b/hack/build-images.sh @@ -18,7 +18,6 @@ cd "${OS_REPO_ROOT}" version=$(gitcommit) kube_version=$(go run ${hackdir}/version.go ${hackdir}/../Godeps/Godeps.json 
github.com/GoogleCloudPlatform/kubernetes/pkg/api) -docker build -t openshift/kubernetes-fedora-dind images/builder/docker/kubernetes-fedora-dind docker build -t openshift/docker-builder images/builder/docker/docker-builder docker build -t openshift/sti-builder images/builder/docker/sti-builder docker build -t openshift/hello-openshift examples/hello-openshift diff --git a/images/builder/docker/docker-builder/Dockerfile b/images/builder/docker/docker-builder/Dockerfile index ed2da9c1b9d6..bf90ac93f461 100644 --- a/images/builder/docker/docker-builder/Dockerfile +++ b/images/builder/docker/docker-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM openshift/kubernetes-fedora-dind -RUN yum -y install git +FROM fedora:20 +RUN yum -y install docker-io git ADD ./build.sh /tmp/build.sh CMD ["/tmp/build.sh"] diff --git a/images/builder/docker/docker-builder/build.sh b/images/builder/docker/docker-builder/build.sh index b46aeecb930e..05e3e91cf1f2 100755 --- a/images/builder/docker/docker-builder/build.sh +++ b/images/builder/docker/docker-builder/build.sh @@ -2,32 +2,11 @@ set -uo pipefail IFS=$'\n\t' -NEED_DIND=false -if [ ! -e /var/run/docker.sock ]; then - NEED_DIND=true -fi - -if $NEED_DIND; then - DOCKER_READY=false - dind & - - # wait for docker to be available - ATTEMPTS=0 - while [ $ATTEMPTS -lt 10 ]; do - docker version &> /dev/null - if [ $? -eq 0 ]; then - DOCKER_READY=true - break - fi +DOCKER_SOCKET=/var/run/docker.sock - let ATTEMPTS=ATTEMPTS+1 - sleep 1 - done - - if ! $DOCKER_READY; then - echo 'Docker-in-Docker daemon not accessible' - exit 1 - fi +if [ ! 
-e $DOCKER_SOCKET ]; then + echo "Docker socket missing at $DOCKER_SOCKET" + exit 1 fi TAG=$BUILD_TAG @@ -40,7 +19,3 @@ docker build --rm -t $TAG $DOCKER_CONTEXT_URL if [ -n "$DOCKER_REGISTRY" ]; then docker push $TAG fi - -if $NEED_DIND; then - kill -15 $(cat /var/run/docker.pid) -fi diff --git a/images/builder/docker/kubernetes-fedora-dind/Dockerfile b/images/builder/docker/kubernetes-fedora-dind/Dockerfile deleted file mode 100644 index 5517d448674f..000000000000 --- a/images/builder/docker/kubernetes-fedora-dind/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM fedora:20 - -RUN yum update -y && \ - yum install -y docker-io e2fsprogs && \ - yum clean all - -ADD ./dind /usr/bin/dind - -VOLUME /var/lib/docker diff --git a/images/builder/docker/kubernetes-fedora-dind/dind b/images/builder/docker/kubernetes-fedora-dind/dind deleted file mode 100755 index 4ccc88fb29d5..000000000000 --- a/images/builder/docker/kubernetes-fedora-dind/dind +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/bash - -# First, make sure that cgroups are mounted correctly. -CGROUP=/sys/fs/cgroup -: {LOG:=stdio} - -[ -d $CGROUP ] || - mkdir $CGROUP - -mountpoint -q $CGROUP || - mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo "Could not make a tmpfs mount. Did you use -privileged?" - exit 1 - } - -if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security -then - mount -t securityfs none /sys/kernel/security || { - echo "Could not mount /sys/kernel/security." - echo "AppArmor detection and -privileged mode might break." - } -fi - -# Mount the cgroup hierarchies exactly as they are in the parent system. -for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) -do - [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS - mountpoint -q $CGROUP/$SUBSYS || - mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS - - # The two following sections address a bug which manifests itself - # by a cryptic "lxc-start: no ns_cgroup option specified" when - # trying to start containers withina container. 
- # The bug seems to appear when the cgroup hierarchies are not - # mounted on the exact same directories in the host, and in the - # container. - - # Named, control-less cgroups are mounted with "-o name=foo" - # (and appear as such under /proc//cgroup) but are usually - # mounted on a directory named "foo" (without the "name=" prefix). - # Systemd and OpenRC (and possibly others) both create such a - # cgroup. To avoid the aforementioned bug, we symlink "foo" to - # "name=foo". This shouldn't have any adverse effect. - echo $SUBSYS | grep -q ^name= && { - NAME=$(echo $SUBSYS | sed s/^name=//) - ln -s $SUBSYS $CGROUP/$NAME - } - - # Likewise, on at least one system, it has been reported that - # systemd would mount the CPU and CPU accounting controllers - # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" - # but on a directory called "cpu,cpuacct" (note the inversion - # in the order of the groups). This tries to work around it. - [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct -done - -# Note: as I write those lines, the LXC userland tools cannot setup -# a "sub-container" properly if the "devices" cgroup is not in its -# own hierarchy. Let's detect this and issue a warning. -grep -q :devices: /proc/1/cgroup || - echo "WARNING: the 'devices' cgroup should be in its own hierarchy." -grep -qw devices /proc/1/cgroup || - echo "WARNING: it looks like the 'devices' cgroup is not mounted." - -# Now, close extraneous file descriptors. -pushd /proc/self/fd >/dev/null -for FD in * -do - case "$FD" in - # Keep stdin/stdout/stderr - [012]) - ;; - # Nuke everything else - *) - eval exec "$FD>&-" - ;; - esac -done -popd >/dev/null - -ensure_loop(){ - num="$1" - dev="/dev/loop$num" - if test -b "$dev"; then - echo "$dev is a usable loop device." - return 0 - fi - - echo "Attempting to create $dev for docker ..." - if ! mknod -m660 $dev b 7 $num; then - echo "Failed to create $dev!" 
1>&2 - return 3 - fi - - return 0 -} - -LOOP_A=$(losetup -f) -LOOP_A=${LOOP_A#/dev/loop} -LOOP_B=$(expr $LOOP_A + 1) - -ensure_loop $LOOP_A -ensure_loop $LOOP_B - -# If a pidfile is still around (for example after a container restart), -# delete it so that docker can start. -rm -rf /var/run/docker.pid &>/dev/null - -docker -d -H unix:///var/run/docker.sock diff --git a/images/builder/docker/sti-builder/Dockerfile b/images/builder/docker/sti-builder/Dockerfile index 71e77f2b8dc0..f4a2d1ab42a6 100644 --- a/images/builder/docker/sti-builder/Dockerfile +++ b/images/builder/docker/sti-builder/Dockerfile @@ -1,5 +1,5 @@ -FROM openshift/kubernetes-fedora-dind -RUN yum -y install golang golang-src golang-pkg-bin-linux-amd64 golang-pkg-linux-amd64 git && \ +FROM fedora:20 +RUN yum -y install docker-io golang golang-src golang-pkg-bin-linux-amd64 golang-pkg-linux-amd64 git && \ yum clean all RUN mkdir -p /tmp/go/src/github.com/openshift diff --git a/images/builder/docker/sti-builder/build.sh b/images/builder/docker/sti-builder/build.sh index c78f22ca8129..95af9b36a8ec 100755 --- a/images/builder/docker/sti-builder/build.sh +++ b/images/builder/docker/sti-builder/build.sh @@ -1,33 +1,10 @@ #!/bin/bash -ex -NEED_DIND=false -if [ ! -e /var/run/docker.sock ]; then - NEED_DIND=true -fi - -if [ $NEED_DIND == "true" ]; then - DOCKER_READY=false - dind & - - # wait for docker to be available - ATTEMPTS=0 - while [ $ATTEMPTS -lt 10 ]; do - set +e - docker version &> /dev/null - if [ $? -eq 0 ]; then - DOCKER_READY=true - break - fi - set -e +DOCKER_SOCKET=/var/run/docker.sock - let ATTEMPTS=ATTEMPTS+1 - sleep 1 - done - - if [ $DOCKER_READY != "true" ]; then - echo 'Docker-in-Docker daemon not accessible' - exit 1 - fi +if [ ! 
-e $DOCKER_SOCKET ]; then + echo "Docker socket missing at $DOCKER_SOCKET" + exit 1 fi TAG=$BUILD_TAG @@ -46,7 +23,3 @@ TMPDIR=$BUILD_TEMP_DIR sti build $SOURCE_URI $BUILDER_IMAGE $TAG $REF_OPTION if [ -n "$DOCKER_REGISTRY" ]; then docker push $TAG fi - -if [ $NEED_DIND == "true" ]; then - kill -15 $(cat /var/run/docker.pid) -fi diff --git a/pkg/build/doc.go b/pkg/build/doc.go index 512c85e07013..b14a2352ebcf 100644 --- a/pkg/build/doc.go +++ b/pkg/build/doc.go @@ -1,4 +1,29 @@ -// Package build contains a build system defined on top of Kubernetes. -// It consists of two resource types: Build and BuildConfig, along with -// associated storage, as well as a controller that manages states of existing builds +/* +Package build contains the OpenShift build system. + +It defines a Build resource type, along with associated storage and a controller +that executes builds and manages states of existing builds. + +For newly created builds, the BuildController will assign a pod ID to the build +and set the build’s state to pending. This way, the assignment of the pod ID and +pending status is idempotent and won’t result in two BuildControllers +potentially scheduling two different pods for the same build. + +For pending builds, the BuildController will attempt to create a pod to perform +the build. If the creation succeeds, it sets the build’s status to pending. If +the pod already exists, that means another BuildController already processed +this build in a pending state, resulting in a no-op. Any other pod creation +error would result in the build’s status being set to failed. + +For running builds, the BuildController will monitor the status of the pod. If +the pod is still running and the build has exceeded its allotted execution time, +the BuildController will consider it failed. If the pod is terminated, the +BuildController will examine the exit codes for each of the pod’s containers. If +any exit code is non-zero, the build is marked as failed. 
Otherwise, it is +considered complete (successful). + +Once the build has reached a terminal state (complete or failed), the +BuildController will delete the pod associated with the build. In the future, it +will be desirable to keep a record of the pod’s containers’ logs. +*/ package build diff --git a/pkg/build/strategy/docker.go b/pkg/build/strategy/docker.go index fafa3e1bc6d2..a738997ffaf7 100644 --- a/pkg/build/strategy/docker.go +++ b/pkg/build/strategy/docker.go @@ -6,16 +6,13 @@ import ( ) // DockerBuildStrategy creates Docker build using a docker builder image -// useHostDocker determines whether the minion Docker daemon is used for the build -// or a separate Docker daemon is run inside the container type DockerBuildStrategy struct { dockerBuilderImage string - useHostDocker bool } // NewDockerBuildStrategy creates a new DockerBuildStrategy -func NewDockerBuildStrategy(dockerBuilderImage string, useHostDocker bool) *DockerBuildStrategy { - return &DockerBuildStrategy{dockerBuilderImage, useHostDocker} +func NewDockerBuildStrategy(dockerBuilderImage string) *DockerBuildStrategy { + return &DockerBuildStrategy{dockerBuilderImage} } // CreateBuildPod creates the pod to be used for the Docker build @@ -38,13 +35,12 @@ func (bs *DockerBuildStrategy) CreateBuildPod(build *buildapi.Build, dockerRegis {Name: "DOCKER_CONTEXT_URL", Value: build.Input.SourceURI}, {Name: "DOCKER_REGISTRY", Value: dockerRegistry}, }, - Privileged: true, }, }, }, }, } - setupDockerSocket(bs.useHostDocker, pod) + setupDockerSocket(pod) return pod, nil } diff --git a/pkg/build/strategy/docker_test.go b/pkg/build/strategy/docker_test.go index b5795babba48..f4bd47601e06 100644 --- a/pkg/build/strategy/docker_test.go +++ b/pkg/build/strategy/docker_test.go @@ -9,7 +9,7 @@ import ( func TestDockerCreateBuildPod(t *testing.T) { const dockerRegistry = "docker-test-registry" - strategy := NewDockerBuildStrategy("docker-test-image", false) + strategy := 
NewDockerBuildStrategy("docker-test-image") expected := mockDockerBuild() actual, _ := strategy.CreateBuildPod(expected, dockerRegistry) @@ -30,9 +30,6 @@ func TestDockerCreateBuildPod(t *testing.T) { if container.RestartPolicy != "runOnce" { t.Errorf("Expected runOnce, but got %s!", container.RestartPolicy) } - if !container.Privileged { - t.Errorf("Expected Privileged") - } if e := container.Env[0]; e.Name != "BUILD_TAG" && e.Value != expected.Input.ImageTag { t.Errorf("Expected %s, got %s:%s!", expected.Input.ImageTag, e.Name, e.Value) } diff --git a/pkg/build/strategy/sti.go b/pkg/build/strategy/sti.go index 3c964fdefdb4..1abd09f17206 100644 --- a/pkg/build/strategy/sti.go +++ b/pkg/build/strategy/sti.go @@ -8,11 +8,8 @@ import ( ) // STIBuildStrategy creates STI(source to image) builds -// useHostDocker determines whether the minion Docker daemon is used for the build -// or a separate Docker daemon is run inside the container type STIBuildStrategy struct { stiBuilderImage string - useHostDocker bool tempDirectoryCreator TempDirectoryCreator } @@ -30,8 +27,8 @@ var STITempDirectoryCreator = &tempDirectoryCreator{} // NewSTIBuildStrategy creates a new STIBuildStrategy with the given // builder image -func NewSTIBuildStrategy(stiBuilderImage string, useHostDocker bool, tc TempDirectoryCreator) *STIBuildStrategy { - return &STIBuildStrategy{stiBuilderImage, useHostDocker, tc} +func NewSTIBuildStrategy(stiBuilderImage string, tc TempDirectoryCreator) *STIBuildStrategy { + return &STIBuildStrategy{stiBuilderImage, tc} } // CreateBuildPod creates a pod that will execute the STI build @@ -56,34 +53,40 @@ func (bs *STIBuildStrategy) CreateBuildPod(build *buildapi.Build, dockerRegistry {Name: "SOURCE_REF", Value: build.Input.SourceRef}, {Name: "BUILDER_IMAGE", Value: build.Input.BuilderImage}, }, - Privileged: true, }, }, }, }, } - if bs.useHostDocker { - tempDir, err := bs.tempDirectoryCreator.CreateTempDirectory() - if err != nil { - return nil, err - } - tmpVolume 
:= api.Volume{ - Name: "tmp", - Source: &api.VolumeSource{ - HostDirectory: &api.HostDirectory{ - Path: tempDir, - }, - }, - } - tmpMount := api.VolumeMount{Name: "tmp", ReadOnly: false, MountPath: tempDir} - pod.DesiredState.Manifest.Volumes = append(pod.DesiredState.Manifest.Volumes, tmpVolume) - pod.DesiredState.Manifest.Containers[0].VolumeMounts = - append(pod.DesiredState.Manifest.Containers[0].VolumeMounts, tmpMount) - pod.DesiredState.Manifest.Containers[0].Env = - append(pod.DesiredState.Manifest.Containers[0].Env, api.EnvVar{ - Name: "TEMP_DIR", Value: tempDir}) + + if err := bs.setupTempVolume(pod); err != nil { + return nil, err } - setupDockerSocket(bs.useHostDocker, pod) + setupDockerSocket(pod) return pod, nil } + +func (bs *STIBuildStrategy) setupTempVolume(pod *api.Pod) error { + tempDir, err := bs.tempDirectoryCreator.CreateTempDirectory() + if err != nil { + return err + } + tmpVolume := api.Volume{ + Name: "tmp", + Source: &api.VolumeSource{ + HostDirectory: &api.HostDirectory{ + Path: tempDir, + }, + }, + } + tmpMount := api.VolumeMount{Name: "tmp", ReadOnly: false, MountPath: tempDir} + pod.DesiredState.Manifest.Volumes = append(pod.DesiredState.Manifest.Volumes, tmpVolume) + pod.DesiredState.Manifest.Containers[0].VolumeMounts = + append(pod.DesiredState.Manifest.Containers[0].VolumeMounts, tmpMount) + pod.DesiredState.Manifest.Containers[0].Env = + append(pod.DesiredState.Manifest.Containers[0].Env, api.EnvVar{ + Name: "TEMP_DIR", Value: tempDir}) + + return nil +} diff --git a/pkg/build/strategy/sti_test.go b/pkg/build/strategy/sti_test.go index 8d41bab86019..9a56b4c58f70 100644 --- a/pkg/build/strategy/sti_test.go +++ b/pkg/build/strategy/sti_test.go @@ -15,7 +15,7 @@ func (t *FakeTempDirCreator) CreateTempDirectory() (string, error) { func TestSTICreateBuildPod(t *testing.T) { const dockerRegistry = "sti-test-registry" - strategy := NewSTIBuildStrategy("sti-test-image", false, &FakeTempDirCreator{}) + strategy := 
NewSTIBuildStrategy("sti-test-image", &FakeTempDirCreator{}) expected := mockSTIBuild() actual, _ := strategy.CreateBuildPod(expected, dockerRegistry) @@ -36,9 +36,6 @@ func TestSTICreateBuildPod(t *testing.T) { if container.RestartPolicy != "runOnce" { t.Errorf("Expected runOnce, but got %s!", container.RestartPolicy) } - if !container.Privileged { - t.Errorf("Expected Privileged") - } if e := container.Env[0]; e.Name != "BUILD_TAG" || e.Value != expected.Input.ImageTag { t.Errorf("Expected %s, got %s:%s!", expected.Input.ImageTag, e.Name, e.Value) } diff --git a/pkg/build/strategy/util.go b/pkg/build/strategy/util.go index 51e457f33f15..97d5b3312d8e 100644 --- a/pkg/build/strategy/util.go +++ b/pkg/build/strategy/util.go @@ -4,30 +4,25 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" ) -// setupDockerSocket configures the pod to support either the host's Docker socket -// or a Docker-in-Docker socket where Docker runs in the container itself. -func setupDockerSocket(useHostDocker bool, podSpec *api.Pod) { - if useHostDocker { - dockerSocketVolume := api.Volume{ - Name: "docker-socket", - Source: &api.VolumeSource{ - HostDirectory: &api.HostDirectory{ - Path: "/var/run/docker.sock", - }, +// setupDockerSocket configures the pod to support the host's Docker socket +func setupDockerSocket(podSpec *api.Pod) { + dockerSocketVolume := api.Volume{ + Name: "docker-socket", + Source: &api.VolumeSource{ + HostDirectory: &api.HostDirectory{ + Path: "/var/run/docker.sock", }, - } - - dockerSocketVolumeMount := api.VolumeMount{ - Name: "docker-socket", - MountPath: "/var/run/docker.sock", - } + }, + } - podSpec.DesiredState.Manifest.Volumes = append(podSpec.DesiredState.Manifest.Volumes, - dockerSocketVolume) - podSpec.DesiredState.Manifest.Containers[0].VolumeMounts = - append(podSpec.DesiredState.Manifest.Containers[0].VolumeMounts, - dockerSocketVolumeMount) - } else { - podSpec.DesiredState.Manifest.Containers[0].Privileged = true + dockerSocketVolumeMount 
:= api.VolumeMount{ + Name: "docker-socket", + MountPath: "/var/run/docker.sock", } + + podSpec.DesiredState.Manifest.Volumes = append(podSpec.DesiredState.Manifest.Volumes, + dockerSocketVolume) + podSpec.DesiredState.Manifest.Containers[0].VolumeMounts = + append(podSpec.DesiredState.Manifest.Containers[0].VolumeMounts, + dockerSocketVolumeMount) } diff --git a/pkg/build/strategy/util_test.go b/pkg/build/strategy/util_test.go index 5dbf188ed4f9..f1733b917eb2 100644 --- a/pkg/build/strategy/util_test.go +++ b/pkg/build/strategy/util_test.go @@ -17,7 +17,7 @@ func TestSetupDockerSocketHostSocket(t *testing.T) { }, } - setupDockerSocket(true, &pod) + setupDockerSocket(&pod) if len(pod.DesiredState.Manifest.Volumes) != 1 { t.Fatalf("Expected 1 volume, got: %#v", pod.DesiredState.Manifest.Volumes) @@ -53,27 +53,3 @@ func TestSetupDockerSocketHostSocket(t *testing.T) { t.Error("Expected privileged to be false") } } - -func TestSetupDockerSocketDockerInDocker(t *testing.T) { - pod := api.Pod{ - DesiredState: api.PodState{ - Manifest: api.ContainerManifest{ - Containers: []api.Container{ - {}, - }, - }, - }, - } - - setupDockerSocket(false, &pod) - - if len(pod.DesiredState.Manifest.Volumes) != 0 { - t.Errorf("Expected 0 volumes, got: %#v", pod.DesiredState.Manifest.Volumes) - } - if len(pod.DesiredState.Manifest.Containers[0].VolumeMounts) != 0 { - t.Errorf("Expected 0 volume mounts, got: %#v", pod.DesiredState.Manifest.Containers[0].VolumeMounts) - } - if !pod.DesiredState.Manifest.Containers[0].Privileged { - t.Error("Expected privileged to be true") - } -} diff --git a/pkg/cmd/master/master.go b/pkg/cmd/master/master.go index 6d1d8ee7ae28..9b8a7dd093dc 100644 --- a/pkg/cmd/master/master.go +++ b/pkg/cmd/master/master.go @@ -24,7 +24,7 @@ import ( "github.com/coreos/etcd/etcd" etcdclient "github.com/coreos/go-etcd/etcd" "github.com/golang/glog" - "github.com/google/cadvisor/client" + cadvisor "github.com/google/cadvisor/client" "github.com/spf13/cobra" _ 
"github.com/openshift/origin/pkg/api" @@ -326,14 +326,12 @@ func (c *config) runBuildController() { // initialize build controller dockerBuilderImage := env("OPENSHIFT_DOCKER_BUILDER_IMAGE", "openshift/docker-builder") - useHostDockerSocket := len(env("USE_HOST_DOCKER_SOCKET", "")) > 0 stiBuilderImage := env("OPENSHIFT_STI_BUILDER_IMAGE", "openshift/sti-builder") dockerRegistry := env("DOCKER_REGISTRY", "") buildStrategies := map[buildapi.BuildType]build.BuildJobStrategy{ - buildapi.DockerBuildType: strategy.NewDockerBuildStrategy(dockerBuilderImage, useHostDockerSocket), - buildapi.STIBuildType: strategy.NewSTIBuildStrategy(stiBuilderImage, - useHostDockerSocket, strategy.STITempDirectoryCreator), + buildapi.DockerBuildType: strategy.NewDockerBuildStrategy(dockerBuilderImage), + buildapi.STIBuildType: strategy.NewSTIBuildStrategy(stiBuilderImage, strategy.STITempDirectoryCreator), } buildController := build.NewBuildController(kubeClient, osClient, buildStrategies, dockerRegistry, 1200)