From 154f363a3464fc658b0faef9f88ab3e5a4e9ea75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Iranzo=20G=C3=B3mez?= Date: Wed, 27 Oct 2021 12:53:03 +0200 Subject: [PATCH] Add precommit configuration and apply settings to files and fix typos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Pablo Iranzo Gómez --- .github/ISSUE_TEMPLATE/bug-report.md | 15 +- .../ISSUE_TEMPLATE/enhancement-tracking.md | 7 +- .github/PULL_REQUEST_TEMPLATE.md | 3 + .github/workflows/nightly.yaml | 4 +- .github/workflows/release.yaml | 8 +- .github/workflows/ubuntu.yaml | 10 +- .pre-commit-config.yaml | 93 +++++++ .yaspeller.json | 62 +++++ CONTRIBUTING.md | 61 +++-- README.md | 30 ++- assets/apps/0000_00_flannel-daemonset.yaml | 2 +- assets/apps/0000_70_dns_01-dns-daemonset.yaml | 2 +- assets/core/0000_00_flannel-configmap.yaml | 2 +- .../core/0000_00_flannel-service-account.yaml | 2 +- ...000_80_hostpath-provisioner-namespace.yaml | 2 +- ...0_hostpath-provisioner-serviceaccount.yaml | 2 +- assets/core/0000_80_openshift-router-cm.yaml | 2 +- ..._80_openshift-router-external-service.yaml | 2 +- assets/rbac/0000_00_flannel-clusterrole.yaml | 2 +- .../0000_00_flannel-clusterrolebinding.yaml | 2 +- .../0000_00_podsecuritypolicy-flannel.yaml | 2 +- assets/rbac/0000_60_service-ca_00_role.yaml | 2 +- ...stpath-provisioner-clusterrolebinding.yaml | 2 +- ..._80_hostpath-provisioner-storageclass.yaml | 2 +- docs/.gitignore | 1 - docs/Gemfile | 0 docs/_layouts/default.html | 2 +- docs/_layouts/page.html | 2 +- docs/_layouts/post.html | 4 +- docs/design/design.md | 10 +- docs/index.md | 25 +- docs/known-issues.md | 20 +- docs/microshift-aio/README.md | 16 +- docs/microshift-containerized/README.md | 130 ---------- hack/all-in-one/Dockerfile | 4 +- hack/all-in-one/README.md | 27 +- hack/all-in-one/build-aio-dev.sh | 15 +- hack/all-in-one/build-images.sh | 14 +- hack/verify.sh | 6 +- install.sh | 76 +++--- packaging/images/components/README.md | 
67 ++--- packaging/images/components/build.sh | 234 +++++++++--------- .../components/base-image/Dockerfile.riscv64 | 2 +- .../components/components/cli/Dockerfile | 2 +- .../components/coredns/Dockerfile.riscv64 | 1 - .../components/coredns/build_binaries | 7 +- .../components/flannel/build_binaries | 1 - .../haproxy-router/Dockerfile.riscv64 | 2 +- .../components/haproxy-router/build_binaries | 6 +- .../hostpath-provisioner/Dockerfile | 1 - .../hostpath-provisioner/arm32.patch | 1 - .../hostpath-provisioner/build_binaries | 11 +- .../0001-workaround-riscv64.patch | 7 +- .../components/kube-rbac-proxy/build_binaries | 8 +- .../service-ca-operator/build_binaries | 10 +- packaging/images/microshift-aio/Dockerfile | 2 +- .../images/microshift-aio/crio-bridge.conf | 2 +- packaging/rpm/make-rpm.sh | 78 +++--- packaging/systemd/microshift-aio | 8 +- packaging/systemd/microshift-containerized | 4 +- pkg/assets/applier.go | 0 pkg/assets/apps.go | 0 pkg/assets/apps/bindata.go | 2 +- pkg/assets/core.go | 0 pkg/assets/core/bindata.go | 2 +- pkg/assets/crd.go | 0 pkg/assets/rbac.go | 0 pkg/components/components.go | 0 pkg/components/render.go | 0 scripts/bindata.sh | 7 +- scripts/rebase.sh | 60 ++--- scripts/release.sh | 234 +++++++++--------- .../e2e/microshift/10-assert.yaml | 4 +- .../e2e/microshift/15-assert.yaml | 4 +- .../e2e/microshift/20-assert.yaml | 4 +- 75 files changed, 722 insertions(+), 722 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 .yaspeller.json mode change 100755 => 100644 docs/Gemfile delete mode 100644 docs/microshift-containerized/README.md mode change 100755 => 100644 pkg/assets/applier.go mode change 100755 => 100644 pkg/assets/apps.go mode change 100755 => 100644 pkg/assets/core.go mode change 100755 => 100644 pkg/assets/crd.go mode change 100755 => 100644 pkg/assets/rbac.go mode change 100755 => 100644 pkg/components/components.go mode change 100755 => 100644 pkg/components/render.go diff --git 
a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 51d0c6f93e..eefcfe3702 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -1,15 +1,14 @@ --- name: Bug Report -about: Report a bug encountered while operating Microshift +about: Report a bug encountered while operating MicroShift labels: bug -title: '[BUG] ' - +title: "[BUG] <title>" +modified: "2021-10-27T12:41:31.324+02:00" --- <!-- Please use this template while reporting a bug and provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. Thanks! --> - #### What happened: #### What you expected to happen: @@ -21,14 +20,12 @@ title: '[BUG] <title>' #### Anything else we need to know?: - #### Environment: -- Microshift version (use `microshift version`): + +- MicroShift version (use `microshift version`): - Hardware configuration: - OS (e.g: `cat /etc/os-release`): - Kernel (e.g. `uname -a`): - Others: -#### Relevant Logs - - +#### Relevant Logs diff --git a/.github/ISSUE_TEMPLATE/enhancement-tracking.md b/.github/ISSUE_TEMPLATE/enhancement-tracking.md index 8ba092cf32..468fcf191c 100644 --- a/.github/ISSUE_TEMPLATE/enhancement-tracking.md +++ b/.github/ISSUE_TEMPLATE/enhancement-tracking.md @@ -2,14 +2,15 @@ name: Enhancement Tracking Issue about: Provide supporting details for a feature in development labels: feature -title: '[Enhancement]: <title>' - +title: "[Enhancement]: <title>" --- + <!-- Feature requests are unlikely to make progress as an issue. 
A proposal that works through the design along with the implications of the change should be submitted as a design document in https://github.com/redhat-et/microshift/tree/main/docs/design/ --> + #### Design Document Link PR: #<PR Number> @@ -17,5 +18,3 @@ PR: #<PR Number> #### What would you like to be added: #### Why is this needed: - - diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e2462808c8..257414eb53 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,9 +2,12 @@ If this is your first contribution, read our Contributing guide https://github.com/redhat-et/microshift/CONTRIBUTING.md If the PR is not yet ready for review, prefix [WIP] in the title. Once prepared, remote the prefix. --> + **Which issue(s) this PR addresses**: + <!-- *Automatically closes linked issue when PR is merged. Usage: `Closes #<issue number>`, or `Closes (paste link of issue)`. --> + Closes #<Issue Number> diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml index 70f443ec67..ec8627bfb4 100644 --- a/.github/workflows/nightly.yaml +++ b/.github/workflows/nightly.yaml @@ -12,7 +12,7 @@ jobs: uses: actions/checkout@v2 - name: Install required packages - run: sudo apt install build-essential qemu-user qemu-user-static + run: sudo apt install build-essential qemu-user qemu-user-static - name: launch build run: make build-containerized-cross-build @@ -37,4 +37,4 @@ jobs: files: | microshift-linux-amd64 microshift-linux-arm64 - release.sha256 \ No newline at end of file + release.sha256 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0f3378ef46..e74eae326d 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -20,13 +20,13 @@ jobs: password: ${{ secrets.REGISTRY_PASS }} - name: Install required packages - run: sudo apt install build-essential qemu-user qemu-user-static - + run: sudo apt install build-essential qemu-user 
qemu-user-static + - name: Checkout source uses: actions/checkout@v2 - + - name: execute release.sh shell: bash run: make release TOKEN=${GITHUB_TOKEN} env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml index dcbd3dd7d7..56b590f86f 100644 --- a/.github/workflows/ubuntu.yaml +++ b/.github/workflows/ubuntu.yaml @@ -23,7 +23,7 @@ jobs: run: sudo apt install build-essential - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v2 - name: make run: make build @@ -33,7 +33,7 @@ jobs: with: name: microshift path: ./microshift - + deploy-microshift: name: Deploy-Microshift needs: Build-Microshift @@ -43,7 +43,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@v2 - + - name: download archive uses: actions/download-artifact@v2.0.10 with: @@ -51,7 +51,7 @@ jobs: path: /tmp/ - name: sleep then run kubectl - run: chmod +x /tmp/microshift + run: chmod +x /tmp/microshift - name: install script run: CONFIG_ENV_ONLY=true ./install.sh @@ -64,4 +64,4 @@ jobs: sudo -i sh -c 'until [ -f /var/lib/microshift/resources/kubeadmin/kubeconfig ]; do sudo sleep 5; done' sudo cp /var/lib/microshift/resources/kubeadmin/kubeconfig /tmp/kubeconfig sudo chown `whoami`: /tmp/kubeconfig - make test-e2e + make test-e2e diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..0f9df8c3b4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,93 @@ +fail_fast: true +exclude: ^vendor/|^features/|^scripts/rebase_patches/ +repos: + - repo: meta + hooks: + - id: check-useless-excludes + - repo: https://github.com/asottile/pyupgrade + rev: v2.29.0 + hooks: + - id: pyupgrade + args: [--py39-plus] + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.4.1 + hooks: + - id: prettier + files: \.(css|js|md|markdown|json) + - repo: https://github.com/asottile/seed-isort-config + rev: v2.2.0 + 
hooks: + - id: seed-isort-config + - repo: https://github.com/pre-commit/mirrors-isort + rev: v5.9.3 + hooks: + - id: isort + - repo: https://github.com/python/black + rev: 21.6b0 + hooks: + - id: black + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-added-large-files + args: ["--maxkb=10000"] + - id: check-ast + - id: check-case-conflict + - id: check-executables-have-shebangs + - id: check-json + - id: check-merge-conflict + - id: check-symlinks + # - id: check-vcs-permalinks + - id: debug-statements # Check for debugger imports and py37+ `breakpoint()` calls in python source. + - id: check-xml + - id: check-yaml + args: + - --unsafe + - id: end-of-file-fixer + - id: forbid-new-submodules + - id: no-commit-to-branch + args: + - --branch + - gh-pages + - id: requirements-txt-fixer + - id: sort-simple-yaml + - id: trailing-whitespace + - id: mixed-line-ending + - id: detect-private-key + - id: check-byte-order-marker + - id: check-docstring-first + # - id: detect-aws-credentials + # - id: fix-encoding-pragma + - repo: https://gitlab.com/pycqa/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + - repo: local + hooks: + - id: shfmt + name: shfmt + minimum_pre_commit_version: 2.4.0 + language: golang + additional_dependencies: + - mvdan.cc/sh/v3/cmd/shfmt@v3.1.1 + entry: shfmt + args: + - -w + - -i + - "4" + types: + - shell + - repo: https://github.com/asottile/blacken-docs + rev: v1.11.0 + hooks: + - id: blacken-docs + - repo: https://github.com/hcodes/yaspeller.git + rev: v7.2.0 + hooks: + - id: yaspeller + types: + - markdown + # - repo: https://github.com/pre-commit/mirrors-mypy + # rev: v0.910 + # hooks: + # - id: mypy diff --git a/.yaspeller.json b/.yaspeller.json new file mode 100644 index 0000000000..19b6320f50 --- /dev/null +++ b/.yaspeller.json @@ -0,0 +1,62 @@ +{ + "ignoreUrls": true, + "findRepeatWords": true, + "maxRequests": 5, + "ignoreDigits": true, + "lang": "en", + "dictionary": [ + "KUBECONFIG", + 
"MicroShift", + "NetworkManager", + "OpenShift", + "podman", + "RHEL", + "systemd", + "toc", + "workaround", + "Andrés", + "VM", + "Virtualization", + "hostname", + "VSCode", + "dropdown", + "OKD", + "IoT", + "LTE", + "Kubernetes", + "MicroShift's", + "lifecycle", + "SELinux", + "MacOS", + "GPU", + "CentOS", + "CRI-O", + "Non-OKD", + "non-OKD", + "endraw", + "endcomment", + "AIO", + "µShift", + "barebone", + "seamlessly", + "resp", + "firewalled", + "LBN", + "GW", + "OKD's", + "CVE", + "SemVer", + "STIG", + "CI", + "DISA", + "FedRAMP", + "versioning", + "CSI", + "NAT", + "rebasing", + "deployable", + "personas", + "dongle", + "NAT'ed" + ] +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 63d21c91b6..03d583ae82 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,7 @@ +--- +modified: "2021-10-27T12:18:27.143+02:00" +--- + # Contributing to MicroShift If you would like to develop MicroShift locally, you can follow this guide on getting @@ -5,23 +9,19 @@ it installed and running through the provided Makefile. This guide will primarily focus on running MicroShift within a VM using Vagrant. - ## (optional) Developing with Vagrant To get started with development, it is recommended to use Vagrant for VM provisioning, however it is not necessary. -You can find a guide on how to install it for your system [here](https://www.vagrantup.com/downloads). +You can find a guide on how to install it for your system [here](https://www.vagrantup.com/downloads). Once Vagrant is installed, you will need to create a Vagrant box for the operating system of your choice. For this example we will be looking at a [fedora 34 cloud image](https://app.vagrantup.com/fedora/boxes/34-cloud-base), however you can substitute any vagrant image of your choice. - First, navigate to the MicroShift directory on your host system, or another designated -directory where we will be storing the Vagrantfile. - - +directory where we will be storing the `Vagrantfile`. 
Next, download the vagrant image. For this example we will use a fedora 34 cloud image: @@ -43,8 +43,9 @@ Running this command will create a `Vagrantfile` in your working directory which is used to configure your vagrant box. Before we start our Vagrant box, we will need to increase the amount of RAM available -to the system. -To do this, edit the Vagrantfile and configure your provider settings to include +to the system. + +To do this, edit the `Vagrantfile` and configure your provider settings to include the following: ```rb @@ -57,10 +58,9 @@ the following: ``` The value of `config.vm.provider` depends on the provider you selected when you -ran `vagrant add` earlier. For example, if you selected virtualbox then the first +ran `vagrant add` earlier. For example, if you selected `virtualbox` then the first line should be: `config.vm.provider "virtualbox" do |v|` - Now we can start the VM: ``` @@ -73,15 +73,15 @@ Once the VM is up, connect to it: vagrant ssh ``` -### (Extra Optional) Connecting VSCode to Vagrant +### (Extra Optional) Connecting VSCode to Vagrant If you're using VSCode, you can connect to your vagrant box with a few extra steps. #### Increasing Memory Requirements -Since VS Code leans more on the heavy side of development, the RAM usage on your Vagrant environment +Since VSCode leans more on the heavy side of development, the RAM usage on your Vagrant environment can go up to 5GB, and therefore we will need to modify the `Vagrantfile` to -increase the amount of available RAM from 3GB to 5GB (or 6GB if you want to be safe). +increase the amount of available RAM from 3GB to 5GB (or 6GB if you want to be safe). To do this, set `v.memory` to the following in your `Vagrantfile`: ```rb @@ -99,10 +99,11 @@ First we need to ask Vagrant for an SSH config file. 
From your host machine, run vagrant ssh-config > ssh-config.conf ``` -*You can edit the `ssh-config.conf` file to change the hostname from `default` to -`vagrant` to be more easily identifiable, but that's up to you. :)* +_You can edit the `ssh-config.conf` file to change the hostname from `default` to +`vagrant` to be more easily identifiable, but that's up to you. :)_ Here's an example of my working SSH config file: + ``` Host default HostName 127.0.0.1 @@ -116,11 +117,8 @@ Host default LogLevel FATAL ``` - Next, you'll want to install the `Remote - SSH` extension from the [VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) - - With the extension installed, you'll click on the green bottom in the bottom-left corner of VSCode to open a dropdown menu for SSH options: @@ -133,28 +131,27 @@ Next you'll want to navigate to the "Remote Explorer" tab on the left-hand side of VSCode, then select on the vagrant target (default if you haven't renamed it) and click on the button to connect to it in a remote window. 
- -*(Credits to Andrés Lopez for this guide: [Connect Visual Studio Code with Vagrant in your local machine -](https://medium.com/@lopezgand/connect-visual-studio-code-with-vagrant-in-your-local-machine-24903fb4a9de))* - +_(Credits to Andrés Lopez for this guide: [Connect Visual Studio Code with Vagrant in your local machine +](https://medium.com/@lopezgand/connect-visual-studio-code-with-vagrant-in-your-local-machine-24903fb4a9de))_ ## Running MicroShift Locally ### Pre-Installation You will need to install the required binaries: -- git -- make -- golang -- glibc -- podman + +- `git` +- `make` +- `golang` +- `glibc` +- `podman` ```sh -# Fedora/CentOS +# Fedora/CentOS sudo dnf install \ git \ make \ - golang \ + golang \ glibc-static # Ubuntu @@ -168,8 +165,8 @@ sudo apt install \ To install podman, you can find the appropriate guide for your respective system: [Install Podman](https://podman.io/getting-started/installation) +Next you'll want to clone the repository and `cd` into it: -Next you'll want to clone the repository and cd into it: ```sh git clone https://github.com/redhat-et/microshift.git cd microshift @@ -182,7 +179,5 @@ Now we can build MicroShift: ```sh make build CONFIG_ENV_ONLY=true ./install.sh -sudo ./microshift run +sudo ./microshift run ``` - - diff --git a/README.md b/README.md index 9cf3d6b6df..e603420497 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,7 @@ +--- +modified: "2021-10-29T08:27:16.055+02:00" +--- + # MicroShift MicroShift is a research project that is exploring how OpenShift<sup>1</sup> Kubernetes can be optimized for small form factor and edge computing. @@ -27,8 +31,7 @@ In order to run MicroShift, you will need at least: - ~124MB of free storage space for the MicroShift binary - 64-bit CPU (although 32-bit is _technically_ possible, if you're up for the challenge) -For barebones development the minimum requirement is 3GB of RAM, though this can increase -if you are using resource-intensive devtools. 
+For barebone development the minimum requirement is 3GB of RAM, though this can increase if you are using resource-intensive development tools. ### OS Requirements @@ -46,7 +49,7 @@ It may be possible to run MicroShift on other systems, however they haven't been ## Using MicroShift -To give MicroShift a try, simply install a recent test version (we don't provide stable releases yet) on a Fedora-derived Linux distro (we've only tested Fedora, RHEL, and CentOS Stream so far) using: +To give MicroShift a try, simply install a recent test version (we don't provide stable releases yet) on a Fedora-derived Linux distribution (we've only tested Fedora, RHEL, and CentOS Stream so far) using: ```sh curl -sfL https://raw.githubusercontent.com/redhat-et/microshift/main/install.sh | bash @@ -54,7 +57,7 @@ curl -sfL https://raw.githubusercontent.com/redhat-et/microshift/main/install.sh This will install MicroShift's dependencies (CRI-O), install it as a systemd service and start it. -For convenience, the script will also add a new "microshift" context to your `$HOME/.kube/config`, so you'll be able to access your cluster using, e.g.: +For convenience, the script will also add a new `microshift` context to your `$HOME/.kube/config`, so you'll be able to access your cluster using, e.g.: ```sh kubectl get all -A --context microshift @@ -72,16 +75,17 @@ Notes: When installing MicroShift on a system with an older version already inst ```sh rm -rf /var/lib/microshift && rm -r $HOME/.microshift ``` + ## Deployment Strategies In production environment MicroShift can be deployed as: -1. Install via an RPM, utilizing a host-provided cri-o runtime and be lifecycle-managed by systemd -2. [Install as a container via Podman, utilizing cri-o runtime and be lifecycle-managed by systemd](./docs/microshift-containerized/README.md) +1. Install via an RPM, utilizing a host-provided `cri-o` runtime and be lifecycle-managed by systemd +2. 
[Install as a container via Podman, utilizing `cri-o` runtime and be lifecycle-managed by systemd](./docs/microshift-containerized/README.md) For app developer deployments: -1. [Run an all-in-one microshift deployment on which devs can test their applications locally](.docs/microshift-aio/README.md). `microshift-aio` packages cri-o runtime and can be run and managed via podman and systemd +1. [Run an all-in-one MicroShift deployment on which developers can test their applications locally](./docs/microshift-aio/README.md). `microshift-aio` packages `cri-o` runtime and can be run and managed via podman and systemd ## [Known Issues](./docs/known-issues.md) @@ -91,7 +95,7 @@ For app developer deployments: ### Building -You can locally build MicroShift using one of two methods, either using a container build (recommended) on Podman or Docker: +You can locally build MicroShift using one of two methods, either using a container build (recommended) on `podman` or Docker: ```sh sudo yum -y install make golang @@ -120,18 +124,18 @@ Before running MicroShift, the host must first be configured. This can be handle CONFIG_ENV_ONLY=true ./install.sh ``` -MicroShift keeps all its state in its data-dir, which defaults to `/var/lib/microshift` when running MicroShift as privileged user and `$HOME/.microshift` otherwise. Note that running MicroShift unprivileged only works without node role at the moment (i.e. using `--roles=controlplane` instead of the default of `--roles=controlplane,node`). +MicroShift keeps all its state in its data directory, which defaults to `/var/lib/microshift` when running MicroShift as privileged user and `$HOME/.microshift` otherwise. Note that running MicroShift unprivileged only works without node role at the moment (i.e. using `--roles=controlplane` instead of the default of `--roles=controlplane,node`). -### Kubeconfig +### `Kubeconfig` -When starting the MicroShift for the first time the Kubeconfig file is created. 
If you need it for another user or to use externally the kubeadmin's kubeconfig is placed at `/var/lib/microshift/resources/kubeadmin/kubeconfig`. +When starting the MicroShift for the first time the `kubeconfig` file is created. If you need it for another user or to use externally the `kubeadmin`'s `kubeconfig` is placed at `/var/lib/microshift/resources/kubeadmin/kubeconfig`. ### Contributing -For more information on working with MicroShift, you can find a contributor's guide in [CONTRIBUTING.md](./CONTRIBUTING.md) +For more information on working with MicroShift, you can find a contributor's guide in [`CONTRIBUTING.md`](./CONTRIBUTING.md) ### Community -Join us on [Slack](https://microshift.slack.com)! ([Invite to the Slack space](https://join.slack.com/t/microshift/shared_invite/zt-uxncbjbl-XOjueb1ShNP7xfByDxNaaA)) +Join us on [Slack](https://microshift.slack.com)! ([Invite to the Slack space](https://join.slack.com/t/microshift/shared_invite/zt-uxncbjbl-XOjueb1ShNP7xfByDxNaaA)) Community meetings are held weekly, Wednesdays at 10:30AM - 11:00AM EST. Be sure to join the community [calendar](https://calendar.google.com/calendar/embed?src=nj6l882mfe4d2g9nr1h7avgrcs%40group.calendar.google.com&ctz=America%2FChicago)! Click "Google Calendar" in the lower right-hand corner to subscribe. 
diff --git a/assets/apps/0000_00_flannel-daemonset.yaml b/assets/apps/0000_00_flannel-daemonset.yaml index 457205a1ba..e9017cdb4a 100644 --- a/assets/apps/0000_00_flannel-daemonset.yaml +++ b/assets/apps/0000_00_flannel-daemonset.yaml @@ -87,4 +87,4 @@ spec: path: /etc/cni/net.d - name: flannel-cfg configMap: - name: kube-flannel-cfg \ No newline at end of file + name: kube-flannel-cfg diff --git a/assets/apps/0000_70_dns_01-dns-daemonset.yaml b/assets/apps/0000_70_dns_01-dns-daemonset.yaml index 9c635d3df3..b6a47f48d0 100644 --- a/assets/apps/0000_70_dns_01-dns-daemonset.yaml +++ b/assets/apps/0000_70_dns_01-dns-daemonset.yaml @@ -81,7 +81,7 @@ spec: readOnly: true dnsPolicy: Default nodeSelector: - kubernetes.io/os: linux + kubernetes.io/os: linux volumes: - name: config-volume configMap: diff --git a/assets/core/0000_00_flannel-configmap.yaml b/assets/core/0000_00_flannel-configmap.yaml index 7a0750c6c2..9d72ff3e2d 100644 --- a/assets/core/0000_00_flannel-configmap.yaml +++ b/assets/core/0000_00_flannel-configmap.yaml @@ -33,4 +33,4 @@ data: "Backend": { "Type": "vxlan" } - } \ No newline at end of file + } diff --git a/assets/core/0000_00_flannel-service-account.yaml b/assets/core/0000_00_flannel-service-account.yaml index c731020846..7c0411b161 100644 --- a/assets/core/0000_00_flannel-service-account.yaml +++ b/assets/core/0000_00_flannel-service-account.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: flannel - namespace: kube-system \ No newline at end of file + namespace: kube-system diff --git a/assets/core/0000_80_hostpath-provisioner-namespace.yaml b/assets/core/0000_80_hostpath-provisioner-namespace.yaml index fe8e57079b..4048a355ae 100644 --- a/assets/core/0000_80_hostpath-provisioner-namespace.yaml +++ b/assets/core/0000_80_hostpath-provisioner-namespace.yaml @@ -1,4 +1,4 @@ apiVersion: v1 kind: Namespace metadata: - name: kubevirt-hostpath-provisioner \ No newline at end of file + name: kubevirt-hostpath-provisioner diff --git 
a/assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml b/assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml index eb1a9735dc..0e3fd608ef 100644 --- a/assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml +++ b/assets/core/0000_80_hostpath-provisioner-serviceaccount.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: kubevirt-hostpath-provisioner-admin - namespace: kubevirt-hostpath-provisioner \ No newline at end of file + namespace: kubevirt-hostpath-provisioner diff --git a/assets/core/0000_80_openshift-router-cm.yaml b/assets/core/0000_80_openshift-router-cm.yaml index 01ae4ed455..3afde8c89c 100644 --- a/assets/core/0000_80_openshift-router-cm.yaml +++ b/assets/core/0000_80_openshift-router-cm.yaml @@ -2,6 +2,6 @@ apiVersion: v1 kind: ConfigMap metadata: namespace: openshift-ingress - name: service-ca-bundle + name: service-ca-bundle annotations: service.beta.openshift.io/inject-cabundle: "true" diff --git a/assets/core/0000_80_openshift-router-external-service.yaml b/assets/core/0000_80_openshift-router-external-service.yaml index d95198fcfc..4ace806cae 100644 --- a/assets/core/0000_80_openshift-router-external-service.yaml +++ b/assets/core/0000_80_openshift-router-external-service.yaml @@ -10,7 +10,7 @@ metadata: spec: selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default - type: NodePort + type: NodePort ports: - name: http port: 80 diff --git a/assets/rbac/0000_00_flannel-clusterrole.yaml b/assets/rbac/0000_00_flannel-clusterrole.yaml index 8e30979418..dc07eb8095 100644 --- a/assets/rbac/0000_00_flannel-clusterrole.yaml +++ b/assets/rbac/0000_00_flannel-clusterrole.yaml @@ -25,4 +25,4 @@ rules: resources: - nodes/status verbs: - - patch \ No newline at end of file + - patch diff --git a/assets/rbac/0000_00_flannel-clusterrolebinding.yaml b/assets/rbac/0000_00_flannel-clusterrolebinding.yaml index fa4a4ec4d7..9021d11fa5 100644 --- 
a/assets/rbac/0000_00_flannel-clusterrolebinding.yaml +++ b/assets/rbac/0000_00_flannel-clusterrolebinding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: flannel - namespace: kube-system \ No newline at end of file + namespace: kube-system diff --git a/assets/rbac/0000_00_podsecuritypolicy-flannel.yaml b/assets/rbac/0000_00_podsecuritypolicy-flannel.yaml index 04e8be18d3..137641209a 100644 --- a/assets/rbac/0000_00_podsecuritypolicy-flannel.yaml +++ b/assets/rbac/0000_00_podsecuritypolicy-flannel.yaml @@ -43,4 +43,4 @@ spec: # SELinux seLinux: # SELinux is unused in CaaSP - rule: 'RunAsAny' \ No newline at end of file + rule: 'RunAsAny' diff --git a/assets/rbac/0000_60_service-ca_00_role.yaml b/assets/rbac/0000_60_service-ca_00_role.yaml index 049106e265..617eb18e44 100644 --- a/assets/rbac/0000_60_service-ca_00_role.yaml +++ b/assets/rbac/0000_60_service-ca_00_role.yaml @@ -44,4 +44,4 @@ rules: verbs: - get - list - - watch \ No newline at end of file + - watch diff --git a/assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml b/assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml index 7102060276..7c5e26f8ed 100644 --- a/assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml +++ b/assets/rbac/0000_80_hostpath-provisioner-clusterrolebinding.yaml @@ -9,4 +9,4 @@ subjects: roleRef: kind: ClusterRole name: kubevirt-hostpath-provisioner - apiGroup: rbac.authorization.k8s.io \ No newline at end of file + apiGroup: rbac.authorization.k8s.io diff --git a/assets/storage/0000_80_hostpath-provisioner-storageclass.yaml b/assets/storage/0000_80_hostpath-provisioner-storageclass.yaml index 732978ff9e..ed38597a4e 100644 --- a/assets/storage/0000_80_hostpath-provisioner-storageclass.yaml +++ b/assets/storage/0000_80_hostpath-provisioner-storageclass.yaml @@ -4,4 +4,4 @@ metadata: name: kubevirt-hostpath-provisioner provisioner: kubevirt.io/hostpath-provisioner reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer \ 
No newline at end of file +volumeBindingMode: WaitForFirstConsumer diff --git a/docs/.gitignore b/docs/.gitignore index dd6b60552f..291f0817f1 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,4 +1,3 @@ vendor/** _site/** Gemfile.lock - diff --git a/docs/Gemfile b/docs/Gemfile old mode 100755 new mode 100644 diff --git a/docs/_layouts/default.html b/docs/_layouts/default.html index efcda1b46c..3573433f41 100644 --- a/docs/_layouts/default.html +++ b/docs/_layouts/default.html @@ -27,4 +27,4 @@ {%- include footer.html -%} </body> -</html> \ No newline at end of file +</html> diff --git a/docs/_layouts/page.html b/docs/_layouts/page.html index 9d1ec7defc..5ecc9f16ed 100644 --- a/docs/_layouts/page.html +++ b/docs/_layouts/page.html @@ -11,4 +11,4 @@ <h1 class="post-title">{{ page.title | escape }}</h1> {{ content | toc }} </div> -</article> \ No newline at end of file +</article> diff --git a/docs/_layouts/post.html b/docs/_layouts/post.html index 9df2c2f33a..552ae7dae0 100644 --- a/docs/_layouts/post.html +++ b/docs/_layouts/post.html @@ -11,7 +11,7 @@ <h1 class="post-title p-name" itemprop="name headline">{{ page.title | escape }} {{ page.date | date: date_format }} </time> {%- if page.modified_date -%} - ~ + ~ {%- assign mdate = page.modified_date | date_to_xmlschema -%} <time class="dt-modified" datetime="{{ mdate }}" itemprop="dateModified"> {{ mdate | date: date_format }} @@ -35,4 +35,4 @@ <h1 class="post-title p-name" itemprop="name headline">{{ page.title | escape }} {%- endif -%} <a class="u-url" href="{{ page.url | relative_url }}" hidden></a> -</article> \ No newline at end of file +</article> diff --git a/docs/design/design.md b/docs/design/design.md index b3e84935b9..791d1e7974 100644 --- a/docs/design/design.md +++ b/docs/design/design.md @@ -1,5 +1,5 @@ --- -modified: "2021-10-25T10:52:50.869+02:00" +modified: "2021-10-27T12:24:27.200+02:00" title: Design doc. 
tags: Design documentation, goals layout: page @@ -20,15 +20,15 @@ MicroShift aims at meeting all of the following design goals: - e.g. disconnected or rarely connected, NAT'ed or firewalled, changing IP addresses, IPv4 or v6, high latency / low bandwidth, no control over local network (DNS, DHCP, LBN, GW), connectivity via LTE dongle (i.e. no LAN) - MicroShift operates autonomously; it does not require external orchestration. - MicroShift is safe to change<sup>1</sup>; it has means to automatically recover from faulty software or configuration updates that would render it unmanageable or non-operational. - - MicroShift is secure<sup>1</sup> even in environments without physical accesss security. + - MicroShift is secure<sup>1</sup> even in environments without physical access security. <sup>1) when used in combination with an edge-optimized OS like RHEL 4 Edge or Fedora IoT</sup> - **Production-grade:** - MicroShift supports deployments with 1 or 3 control plane and 0..N worker instances. - - MicroShift can be deployed containerized on Podman or Docker or non-containerized via RPM and managed via systemd; it is compatible with `rpm-ostree`-based systems. - - MicroShift's lifecyle is decoupled from the underlying OS's lifecycle. + - MicroShift can be deployed containerized on `podman` or Docker or non-containerized via RPM and managed via systemd; it is compatible with `rpm-ostree`-based systems. + - MicroShift's lifecycle is decoupled from the underlying OS's lifecycle. - MicroShift can be deployed such that updates or changes to it do not disrupt running workloads. - MicroShift meets DISA STIG and FedRAMP security requirements; it runs as non-privileged workload and supports common CVE and auditing workflows. - MicroShift allows segregation between the "edge device administrator" and the "edge service development and operations" personas. 
@@ -78,7 +78,7 @@ When deciding between different design options, we follow the following principl - Smaller resource footprint has _not_ been a motivation, it may be a welcome side-effect. - MicroShift provides a small, optional set of infrastructure services to support common use cases and reuses OpenShift's container images for these: - openshift-dns, openshift-router, service-ca, local storage provider -- MicroShift instances (processes) run directly on the host or containerized on Podman. They can take on the roles of Control Plane, Node, or both: +- MicroShift instances (processes) run directly on the host or containerized on `podman`. They can take on the roles of Control Plane, Node, or both: - Instances with Control Plane role run etcd and the Kubernetes and OpenShift control plane services. As these services don't require a kubelet, pure Control Plane instances are not nodes in the Kubernetes sense and require fewer system privileges. - Instances with Node role run a kubelet (and thus register as node) and kube-proxy and interface with CRI-O for running workloads. They may thus require higher system privileges. - While it's possible to run a single MicroShift instance with both Control Plane and Node roles, there may be reasons to run two instances - one Control Plane and one Node - on the same host, e.g. to run the Control Plane with fewer privileges for security reasons. Implementation decisions should consider this. 
diff --git a/docs/index.md b/docs/index.md index 7ae753e570..50bdafdc66 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,7 +1,7 @@ --- -modified: "2021-10-26T13:12:07.131+02:00" +modified: "2021-10-27T12:49:28.175+02:00" title: The Project -tags: microshift project, edge, µShift +tags: MicroShift project, edge, µShift layout: page toc: true --- @@ -21,8 +21,7 @@ We believe these properties should also make MicroShift a great tool for other u Watch this [end-to-end MicroShift provisioning demo video](https://youtu.be/QOiB8NExtA4) to get a first impression of MicroShift deployed onto a [RHEL for edge computing](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux/edge-computing) device and managed through [Open Cluster Management](https://github.com/open-cluster-management). > warning "" -> MicroShift is still early days and moving fast. Features are missing. Things break. But you can still help shape it, too.\*\* - +> MicroShift is still early days and moving fast. Features are missing. Things break. But you can still help shape it, too. <sup>1) more precisely [OKD](https://www.okd.io/), the Kubernetes distribution by the OpenShift community</sup> @@ -35,8 +34,8 @@ In order to run MicroShift, you will need at least: - ~124MB of free storage space for the MicroShift binary - 64-bit CPU (although 32-bit is _technically_ possible, if you're up for the challenge) -For barebones development the minimum requirement is 3GB of RAM, though this can increase -if you are using resource-intensive devtools. +For bare-bones development the minimum requirement is 3GB of RAM, though this can increase +if you are using resource-intensive development tools.
### OS Requirements @@ -54,7 +53,7 @@ It may be possible to run MicroShift on other systems, however they haven't been ## Using MicroShift -To give MicroShift a try, simply install a recent test version (we don't provide stable releases yet) on a Fedora-derived Linux distro (we've only tested Fedora, RHEL, and CentOS Stream so far) using: +To give MicroShift a try, simply install a recent test version (we don't provide stable releases yet) on a Fedora-derived Linux distribution (we've only tested Fedora, RHEL, and CentOS Stream so far) using: ```sh curl -sfL https://raw.githubusercontent.com/redhat-et/microshift/main/install.sh | bash @@ -62,7 +61,7 @@ curl -sfL https://raw.githubusercontent.com/redhat-et/microshift/main/install.sh This will install MicroShift's dependencies (CRI-O), install it as a systemd service and start it. -For convenience, the script will also add a new "microshift" context to your `$HOME/.kube/config`, so you'll be able to access your cluster using, e.g.: +For convenience, the script will also add a new `microshift` context to your `$HOME/.kube/config`, so you'll be able to access your cluster using, e.g.: ```sh kubectl get all -A --context microshift @@ -91,7 +90,7 @@ rm -rf /var/lib/microshift && rm -r $HOME/.microshift ### Building -You can locally build MicroShift using one of two methods, either using a container build (recommended) on Podman or Docker: +You can locally build MicroShift using one of two methods, either using a container build (recommended) on `podman` or Docker: ```sh sudo yum -y install make golang @@ -120,15 +119,15 @@ Before running MicroShift, the host must first be configured. This can be handle CONFIG_ENV_ONLY=true ./install.sh ``` -MicroShift keeps all its state in its data-dir, which defaults to `/var/lib/microshift` when running MicroShift as privileged user and `$HOME/.microshift` otherwise. Note that running MicroShift unprivileged only works without node role at the moment (i.e. 
using `--roles=controlplane` instead of the default of `--roles=controlplane,node`). +MicroShift keeps all its state in its data directory, which defaults to `/var/lib/microshift` when running MicroShift as privileged user and `$HOME/.microshift` otherwise. Note that running MicroShift unprivileged only works without node role at the moment (i.e. using `--roles=controlplane` instead of the default of `--roles=controlplane,node`). -### Kubeconfig +### `kubeconfig` -When starting the MicroShift for the first time the Kubeconfig file is created. If you need it for another user or to use externally the kubeadmin's kubeconfig is placed at `/var/lib/microshift/resources/kubeadmin/kubeconfig`. +When starting MicroShift for the first time, the `kubeconfig` file is created. If you need it for another user or to use it externally, the `kubeadmin`'s `kubeconfig` is placed at `/var/lib/microshift/resources/kubeadmin/kubeconfig`. ### Contributing -For more information on working with MicroShift, you can find a contributor's guide in [CONTRIBUTING.md](./CONTRIBUTING.md) +For more information on working with MicroShift, you can find a contributor's guide in [`CONTRIBUTING.md`](./CONTRIBUTING.md) ### Community diff --git a/docs/known-issues.md b/docs/known-issues.md index 78155937a2..dedaade109 100644 --- a/docs/known-issues.md +++ b/docs/known-issues.md @@ -1,5 +1,5 @@ --- -modified: "2021-10-25T11:10:27.304+02:00" +modified: "2021-10-27T12:15:46.667+02:00" title: Known Issues tags: known issues, troubleshooting layout: page @@ -16,20 +16,20 @@ Inside the failing pods, you might find errors as: `10.43.0.1:443: read: connect This a [known issue](https://bugzilla.redhat.com/show_bug.cgi?id=1912236#c30) on RHEL 8.4 and will be resolved in 8.5. -In order to work on RHEL 8.4, you may disable the networkManager and reboot to resolve this issue. +In order to work on RHEL 8.4, you may disable NetworkManager and reboot to resolve this issue.
-Eg: +Example: ```sh systemctl disable nm-cloud-setup.service nm-cloud-setup.timer reboot ``` -You can find the details of this EC2 networkManage issue tracked at [issue](https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/issues/740). +You can find the details of this EC2 NetworkManager issue tracked at [issue](https://gitlab.freedesktop.org/NetworkManager/NetworkManager/-/issues/740). -### Openshift pods restarts on `CrashLoopBackOff` +### OpenShift pods restarts on `CrashLoopBackOff` -A few minutes after `microshift` started, openshift pods fall into `CrashLoopBackOff`. +A few minutes after `microshift` started, OpenShift pods fall into `CrashLoopBackOff`. If you check up the `journalctl |grep iptables`, you may see the following: @@ -41,7 +41,7 @@ Sep 21 19:13:50 ip-172-31-85-30.ec2.internal systemd-coredump[2442]: Process 243 Sep 21 20:35:57 ip-172-31-85-30.ec2.internal microshift[1297]: E0921 20:35:57.914558 1297 remote_runtime.go:143] StopPodSandbox "1ae45abde0b46d8ea5176b6a00f0e5b4291e6bb496762ca25a4196a5f18d0475" from runtime service failed: rpc error: code = Unknown desc = failed to destroy network for pod sandbox k8s_service-ca-64547678c6-2nxnp_openshift-service-ca_6236deba-fc5f-4915-817d-f8699a4accfc_0(1ae45abde0b46d8ea5176b6a00f0e5b4291e6bb496762ca25a4196a5f18d0475): error removing pod openshift-service-ca_service-ca-64547678c6-2nxnp from CNI network "crio": running [/usr/sbin/iptables -t nat -D POSTROUTING -s 10.42.0.3 -j CNI-d5d0edec163ce01e4591c1c4 -m comment --comment name: "crio" id: "1ae45abde0b46d8ea5176b6a00f0e5b4291e6bb496762ca25a4196a5f18d0475" --wait]: exit status 2: iptables v1.8.4 (nf_tables): Chain 'CNI-d5d0edec163ce01e4591c1c4' does not exist ``` -Also, the `openshift-ingress` pod will faild on: +Also, the `openshift-ingress` pod will fail on: ```console I0921 17:36:17.811391 1 router.go:262] router "msg"="router is including routes in all namespaces" @@ -51,14 +51,14 @@ I0921 17:36:17.948417 1 router.go:579] template 
"msg"="router reloaded" " As a workaround, you can follow steps below: -- delete `flannel` daemonset +- delete `flannel` `daemonset` ```sh kubectl delete ds -n kube-system kube-flannel-ds ``` -- restart all the openshift pods. +- restart all the OpenShift pods. -This workaround won't affect the single node `microshift` functionality since the `flannel` daemonset is used for multi-node microshift. +This workaround won't affect the single node `microshift` functionality since the `flannel` `daemonset` is used for multi-node MicroShift. This issue is tracked at: [#296](https://github.com/redhat-et/microshift/issues/296) diff --git a/docs/microshift-aio/README.md b/docs/microshift-aio/README.md index 3fc7f959c8..d4e608d370 100644 --- a/docs/microshift-aio/README.md +++ b/docs/microshift-aio/README.md @@ -1,21 +1,21 @@ --- -modified: "2021-10-25T11:09:31.544+02:00" +modified: "2021-10-27T12:37:56.620+02:00" title: All-In-One layout: page -tags: all-in-one, aio +tags: all-in-one, AIO toc: true --- ## Run MicroShift All-In-One as a Systemd Service -Copy `microshift-aio` unit file to `/etc/systemd` and the aio run script to `/usr/bin` +Copy `microshift-aio` unit file to `/etc/systemd` and the AIO run script to `/usr/bin` ```bash cp packaging/systemd/microshift-aio.service /etc/systemd/system/microshift-aio.service cp packaging/systemd/microshift-aio /usr/bin/ ``` -Now enable and start the service. The `KUBECONFIG` location will be written to `/etc/microshift-aio/microshift-aio.conf`. +Now enable and start the service. The `KUBECONFIG` location will be written to `/etc/microshift-aio/microshift-aio.conf`. If the `microshift-data` podman volume does not exist, the systemd service will create one. ```bash @@ -23,7 +23,7 @@ systemctl enable microshift-aio --now source /etc/microshift-aio/microshift-aio.conf ``` -Verify that microshift is running. +Verify that `microshift` is running. 
```sh kubectl get pods -A @@ -36,11 +36,11 @@ systemctl stop microshift-aio ``` > note "" -> Stopping microshift-aio service _does not_ remove the podman volume `microshift-data`. A restart will use the same volume. +> Stopping `microshift-aio` service _does not_ remove the podman volume `microshift-data`. A restart will use the same volume. ## Run the Image Without Systemd -First, enable the following SElinux rule: +First, enable the following SELinux rule: ```bash setsebool -P container_manage_cgroup true @@ -68,7 +68,7 @@ Execute the following command to get into the container: sudo podman exec -ti microshift-aio bash ``` -Inside the container, install kubectl: +Inside the container, install `kubectl`: ```bash export ARCH=$(uname -m |sed -e "s/x86_64/amd64/" |sed -e "s/aarch64/arm64/") diff --git a/docs/microshift-containerized/README.md b/docs/microshift-containerized/README.md deleted file mode 100644 index 409349e1d1..0000000000 --- a/docs/microshift-containerized/README.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -modified: "2021-10-25T11:09:43.609+02:00" -title: Containerized -tags: container, docker, podman -layout: page -toc: true ---- - -## Pre-requisites - -Before runnng microshift as a systemd service, ensure to update the host `crio-bridge.conf` as - -```bash -{ - "cniVersion": "0.4.0", - "name": "crio", - "type": "bridge", - "bridge": "cni0", - "isGateway": true, - "ipMasq": true, - "hairpinMode": true, - "ipam": { - "type": "host-local", - "routes": [ - { "dst": "0.0.0.0/0" } - ], - "ranges": [ - [{ "subnet": "10.42.0.0/24" }] - ] - } -} -``` - -## Run microshift as a systemd service - -Copy microshift unit file to `/etc/systemd/system` and the microshift-containerized run script to `/usr/bin` - -```bash -sudo cp packaging/systemd/microshift-containerized.service /etc/systemd/system/microshift.service -sudo cp packaging/systemd/microshift-containerized /usr/bin/ -``` - -Now enable and start the service. 
The KUBECONFIG location will be written to `/etc/microshift-containerized/microshift-containerized.conf`. - -```bash -sudo systemctl enable microshift --now -source /etc/microshift-containerized/microshift-containerized.conf -``` - -Verify that microshift is running. - -```sh -kubectl get pods -A -``` - -Stop microshift service - -```bash -systemctl stop microshift -``` - -You can check microshift via - -```bash -sudo podman ps -sudo critcl ps -``` - -To access the cluster on the host or inside the container - -### Access the cluster inside the container - -Execute the following command to get into the container: - -```bash -sudo podman exec -ti microshift bash -``` - -Inside the container, run the following to see the pods: - -```bash -export KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig -kubectl get pods -A -``` - -### Access the cluster on the host - -#### Linux - -```bash -export KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig -kubectl get pods -A -w -``` - -## Auto-Update on demand via Podman -Since Podman 3.4, Podman enables users to automate container updates using what are called auto-updates. On a high level, you can configure Podman to check the availability of new images for auto-updates, pull down these new images if needed, and restart the containers. 
- -### Configuring Podman auto-updates - -To ensure Podman is checking the fully qualified image path for new images and download them, the systemd file adds a label `--label "io.containers.autoupdate=registry"` to the `microshift` container - -```bash -ExecStart=/usr/bin/podman run \ ---cidfile=%t/%n.ctr-id \ ---cgroups=no-conmon \ ---rm -d --replace \ ---sdnotify=container \ ---label io.containers.autoupdate=registry \ ---privileged --name microshift \ --v /var/run:/var/run -v /sys:/sys:ro -v /var/lib:/var/lib:rw,rshared -v /lib/modules:/lib/modules -v /etc:/etc\ --v /run/containers:/run/containers -v /var/log:/var/log \ --e KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig \ -quay.io/microshift/microshift:4.7.0-0.microshift-2021-08-31-224727-linux-amd64 -``` -### Testing auto-updates - -Podman `auto-update` command will look for containers that are having the label and a systemd service file as described above. If the command finds one container, it will check for a new image, download it, restart the container service. - -```bash -sudo podman auto-update --dry-run - -UNIT CONTAINER IMAGE POLICY UPDATED -microshift 2f7fa3962ee0 (microshift) quay.io/microshift/microshift:4.7.0-0.microshift-2021-08-31-224727-linux-amd64 registry false - -``` -The `--dry-run` feature allows you to assemble information about which services, containers, and images need updates before applying them. 
To apply them do - -```bash -sudo podman auto-update -``` diff --git a/hack/all-in-one/Dockerfile b/hack/all-in-one/Dockerfile index ad3186a281..cc6ee4a70f 100644 --- a/hack/all-in-one/Dockerfile +++ b/hack/all-in-one/Dockerfile @@ -38,7 +38,7 @@ COPY crio-bridge.conf /etc/cni/net.d/100-crio-bridge.conf RUN export VERSION=1.20 && \ export OS=CentOS_8_Stream && \ curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/devel:kubic:libcontainers:stable.repo && \ - curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo + curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo RUN dnf install -y cri-o \ cri-tools \ @@ -51,7 +51,7 @@ RUN curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl - mv ./kubectl /usr/local/bin/kubectl && \ sed -i 's|/usr/libexec/crio/conmon|/usr/bin/conmon|' /etc/crio/crio.conf && \ systemctl enable microshift.service && \ - systemctl enable crio + systemctl enable crio RUN curl -s -L https://nvidia.github.io/nvidia-docker/rhel8.3/nvidia-docker.repo | tee /etc/yum.repos.d/nvidia-docker.repo && \ dnf install nvidia-container-toolkit -y diff --git a/hack/all-in-one/README.md b/hack/all-in-one/README.md index 96118f3095..f21e70b46b 100644 --- a/hack/all-in-one/README.md +++ b/hack/all-in-one/README.md @@ -1,8 +1,12 @@ -# Containerized MicroShift With GPU Support and Kubectl +--- +modified: "2021-10-27T12:26:51.322+02:00" +--- + +# Containerized MicroShift With GPU Support and `kubectl` ## Run MicroShift All-In-One as a Systemd Service -Copy microshift-aio unit file to /etc/systemd and the aio 
run script to /usr/bin +Copy `microshift-aio.service` unit file to `/etc/systemd` and the AIO run script to `/usr/bin` ```bash # from microshift repository root directory @@ -10,32 +14,32 @@ cp packaging/systemd/microshift-aio.service /etc/systemd/system/microshift-aio.s cp packaging/systemd/microshift-aio /usr/bin/ ``` -Now enable and start the service. The KUBECONFIG location will be written to /etc/microshift-aio/microshift-aio.conf. -If the `microshift-data` podman volume does not exist, the systemd service will create one. +Now enable and start the service. The `KUBECONFIG` location will be written to `/etc/microshift-aio/microshift-aio.conf`. +If the `microshift-data` `podman` volume does not exist, the `systemd` service will create one. ```bash systemctl enable microshift-aio --now source /etc/microshift-aio/microshift-aio.conf ``` -Verify that microshift is running. +Verify that MicroShift is running: -``` +```sh kubectl get pods -A ``` -Stop microshift-aio service +Stop `microshift-aio` service ```bash systemctl stop microshift-aio ``` -**NOTE** Stopping microshift-aio service _does not_ remove the podman volume `microshift-data`. +**NOTE** Stopping `microshift-aio` service _does not_ remove the podman volume `microshift-data`. A restart will use the same volume. 
## Run the Image Without Systemd -First, enable the following selinux rule: +First, enable the following SELinux rule: ```bash setsebool -P container_manage_cgroup true @@ -50,7 +54,7 @@ sudo podman volume create microshift-data The following example binds localhost the container volume to `/var/lib` ```bash -sudo podman run -d --rm --name microshift-aio --privileged -v /lib/modules:/lib/modules -v microshift-data:/var/lib -p 6443:6443 microshift-aio +sudo podman run -d --rm --name microshift-aio --privileged -v /lib/modules:/lib/modules -v microshift-data:/var/lib -p 6443:6443 microshift-aio ``` You can access the cluster either on the host or inside the container @@ -71,6 +75,7 @@ kubectl get pods -A ``` ### Access the Cluster From the Host + #### Linux ```bash @@ -108,5 +113,5 @@ TAG="quay.io/myname/myrepo:dev" ./hack/build-aio-dev.sh ## Limitation -These instructions are tested on Linux, Mac, and Windows. +These instructions are tested on Linux, Mac, and Windows. On MacOS, running containerized MicroShift as non-root is not supported on MacOS. diff --git a/hack/all-in-one/build-aio-dev.sh b/hack/all-in-one/build-aio-dev.sh index a104bae20b..1b12203eba 100755 --- a/hack/all-in-one/build-aio-dev.sh +++ b/hack/all-in-one/build-aio-dev.sh @@ -1,10 +1,11 @@ +#!/usr/bin/env bash # This script runs hack/all-in-one/Dockerfile # to build a dev microshift-aio image with GPU support and kubectl installed # FROM_SOURCE="true" ./build-aio-dev.sh # to build image with locally built binary # ./build-aio-dev.sh # to build image with latest released binary #!/bin/bash -cleanup () { +cleanup() { rm -f unit crio-bridge.conf kubelet-cgroups.conf microshift } @@ -15,12 +16,12 @@ FROM_SOURCE="${FROM_SOURCE:-false}" IMAGE_NAME="${IMAGE_NAME:-registry.access.redhat.com/ubi8/ubi-init:8.4}" cp ../../packaging/images/microshift-aio/unit ../../packaging/images/microshift-aio/crio-bridge.conf ../../packaging/images/microshift-aio/kubelet-cgroups.conf . 
-ARCH=$(uname -m |sed -e "s/x86_64/amd64/" |sed -e "s/aarch64/arm64/") -if [ "$FROM_SOURCE" == "true" ]; then \ - pushd ../../ && \ - make && \ - mv microshift hack/all-in-one/. && \ - popd; \ +ARCH=$(uname -m | sed -e "s/x86_64/amd64/" | sed -e "s/aarch64/arm64/") +if [ "$FROM_SOURCE" == "true" ]; then + pushd ../../ && + make && + mv microshift hack/all-in-one/. && + popd fi podman build \ diff --git a/hack/all-in-one/build-images.sh b/hack/all-in-one/build-images.sh index d906fa2151..43044b46eb 100755 --- a/hack/all-in-one/build-images.sh +++ b/hack/all-in-one/build-images.sh @@ -1,21 +1,21 @@ #!/bin/bash -cleanup () { +cleanup() { rm -f unit crio-bridge.conf kubelet-cgroups.conf } cp ../../packaging/images/microshift-aio/unit ../../packaging/images/microshift-aio/crio-bridge.conf ../../packaging/images/microshift-aio/kubelet-cgroups.conf . -ARCH=$(uname -m |sed -e "s/x86_64/amd64/" |sed -e "s/aarch64/arm64/") +ARCH=$(uname -m | sed -e "s/x86_64/amd64/" | sed -e "s/aarch64/arm64/") TAG="${TAG:-quay.io/microshift/microshift-aio:$(date +%Y-%m-%d-%H-%M)}" for img in "registry.access.redhat.com/ubi8/ubi-init:8.4" "docker.io/nvidia/cuda:11.4.2-base-ubi8"; do - echo "build microshift aio image using base image "${i} - tag=$(echo ${img} |awk -F"/" '{print $NF}'| sed -e 's/:/-/g') - echo ${tag} + echo "build microshift aio image using base image "${img} + tag=$(echo ${img} | awk -F"/" '{print $NF}' | sed -e 's/:/-/g') + echo ${tag} - for host in "rhel7" "rhel8"; do + for host in "rhel7" "rhel8"; do host_tag="" [ "${host}" == "rhel7" ] && host_tag="-rhel7" podman build --build-arg ARCH=${ARCH} --build-arg IMAGE_NAME=${img} --build-arg HOST=${host} -t ${TAG}-${tag}${host_tag} . - done + done done diff --git a/hack/verify.sh b/hack/verify.sh index 56e04cf880..4607ae3fb0 100755 --- a/hack/verify.sh +++ b/hack/verify.sh @@ -1,6 +1,5 @@ #!/bin/bash - set -x set -o errexit set -o nounset @@ -14,12 +13,11 @@ REPO_ROOT=$(readlink -f $(dirname "${BASH_SOURCE[0]}")/..)
GOPKG=go/pkg GO_FILES=$(find . -iname '*.go' -type f | grep -v /vendor/) - go get -u golang.org/x/lint/golint test -z $(gofmt -s -l -e $GO_FILES) go vet -v $(go list ./... | grep -v /vendor/) - cd ${GOPKG}; go test + cd ${GOPKG} + go test ) - diff --git a/install.sh b/install.sh index 8b22928f1f..33e05f1037 100755 --- a/install.sh +++ b/install.sh @@ -11,24 +11,22 @@ set -e -o pipefail CONFIG_ENV_ONLY=${CONFIG_ENV_ONLY:=false} # Only get the version number if installing a release version -[ $CONFIG_ENV_ONLY = false ] && \ - VERSION=$(curl -s https://api.github.com/repos/redhat-et/microshift/releases | grep tag_name | head -n 1 | cut -d '"' -f 4) +[ $CONFIG_ENV_ONLY = false ] && + VERSION=$(curl -s https://api.github.com/repos/redhat-et/microshift/releases | grep tag_name | head -n 1 | cut -d '"' -f 4) # Function to get Linux distribution get_distro() { - DISTRO=$(egrep '^(ID)=' /etc/os-release| sed 's/"//g' | cut -f2 -d"=") - if [[ $DISTRO != @(rhel|fedora|centos|ubuntu) ]] - then - echo "This Linux distro is not supported by the install script" - exit 1 + DISTRO=$(egrep '^(ID)=' /etc/os-release | sed 's/"//g' | cut -f2 -d"=") + if [[ $DISTRO != @(rhel|fedora|centos|ubuntu) ]]; then + echo "This Linux distro is not supported by the install script" + exit 1 fi } # Function to get system architecture get_arch() { ARCH=$(uname -m | sed "s/x86_64/amd64/" | sed "s/aarch64/arm64/") - if [[ $ARCH != @(amd64|arm64) ]] - then + if [[ $ARCH != @(amd64|arm64) ]]; then printf "arch %s unsupported" "$ARCH" >&2 exit 1 fi @@ -45,17 +43,15 @@ register_subs() { REPO="rhocp-4.7-for-rhel-8-x86_64-rpms" # Check subscription status and register if not STATUS=$(sudo subscription-manager status | awk '/Overall Status/ { print $3 }') - if [[ $STATUS != "Current" ]] - then - sudo subscription-manager register --auto-attach < /dev/tty + if [[ $STATUS != "Current" ]]; then + sudo subscription-manager register --auto-attach </dev/tty POOL=$(sudo subscription-manager list --available --matches 
'*OpenShift' | grep Pool | head -n1 | awk -F: '{print $2}' | tr -d ' ') sudo subscription-manager attach --pool $POOL sudo subscription-manager config --rhsm.manage_repos=1 fi set -e -o pipefail # Check if already subscribed to the proper repository - if ! sudo subscription-manager repos --list-enabled | grep -q ${REPO} - then + if ! sudo subscription-manager repos --list-enabled | grep -q ${REPO}; then sudo subscription-manager repos --enable=${REPO} fi } @@ -86,7 +82,7 @@ install_dependencies() { } # Establish Iptables rules -establish_firewall () { +establish_firewall() { sudo systemctl enable firewalld --now sudo firewall-cmd --zone=public --permanent --add-port=6443/tcp sudo firewall-cmd --zone=public --permanent --add-port=30000-32767/tcp @@ -100,30 +96,29 @@ establish_firewall () { sudo firewall-cmd --reload } - # Install CRI-O depending on the distro install_crio() { case $DISTRO in - "fedora") + "fedora") sudo dnf module -y enable cri-o:1.20 sudo dnf install -y cri-o cri-tools - ;; - "rhel") + ;; + "rhel") sudo dnf install cri-o cri-tools -y - ;; - "centos") + ;; + "centos") CRIOVERSION=1.20 OS=CentOS_8_Stream sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/devel:kubic:libcontainers:stable.repo sudo curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$CRIOVERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$CRIOVERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$CRIOVERSION.repo sudo dnf install -y cri-o cri-tools - ;; - "ubuntu") + ;; + "ubuntu") CRIOVERSION=1.20 OS=xUbuntu_$OS_VERSION KEYRINGS_DIR=/usr/share/keyrings - echo "deb [signed-by=$KEYRINGS_DIR/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list > /dev/null - echo "deb 
[signed-by=$KEYRINGS_DIR/libcontainers-crio-archive-keyring.gpg] http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$CRIOVERSION/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$CRIOVERSION.list > /dev/null + echo "deb [signed-by=$KEYRINGS_DIR/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list >/dev/null + echo "deb [signed-by=$KEYRINGS_DIR/libcontainers-crio-archive-keyring.gpg] http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$CRIOVERSION/$OS/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$CRIOVERSION.list >/dev/null sudo mkdir -p $KEYRINGS_DIR curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo gpg --dearmor -o $KEYRINGS_DIR/libcontainers-archive-keyring.gpg @@ -132,11 +127,10 @@ install_crio() { sudo apt-get update -y # Vagrant Ubuntu VMs don't provide containernetworking-plugins by default sudo apt-get install -y cri-o cri-o-runc cri-tools containernetworking-plugins - ;; + ;; esac } - # CRI-O config to match MicroShift networking values crio_conf() { sudo sh -c 'cat << EOF > /etc/cni/net.d/100-crio-bridge.conf @@ -159,10 +153,10 @@ crio_conf() { } } EOF' - - if [ "$DISTRO" == "rhel" ]; then - sudo sed -i 's|/usr/libexec/crio/conmon|/usr/bin/conmon|' /etc/crio/crio.conf - fi + + if [ "$DISTRO" == "rhel" ]; then + sudo sed -i 's|/usr/libexec/crio/conmon|/usr/bin/conmon|' /etc/crio/crio.conf + fi } # Start CRI-O @@ -186,14 +180,14 @@ get_microshift() { BIN_SHA="$(sha256sum microshift-linux-$ARCH | awk '{print $1}')" KNOWN_SHA="$(grep "microshift-linux-$ARCH" release.sha256 | awk '{print $1}')" - if [[ "$BIN_SHA" != "$KNOWN_SHA" ]]; then + if [[ "$BIN_SHA" != "$KNOWN_SHA" ]]; then echo "SHA256 checksum failed" && exit 1 fi sudo 
chmod +x microshift-linux-$ARCH sudo mv microshift-linux-$ARCH /usr/local/bin/microshift - cat << EOF | sudo tee /usr/lib/systemd/system/microshift.service + cat <<EOF | sudo tee /usr/lib/systemd/system/microshift.service [Unit] Description=MicroShift After=crio.service @@ -231,14 +225,13 @@ prepare_kubeconfig() { if [ -f $HOME/.kube/config ]; then mv $HOME/.kube/config $HOME/.kube/config.orig fi - sudo KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig:$HOME/.kube/config.orig /usr/local/bin/kubectl config view --flatten > $HOME/.kube/config + sudo KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig:$HOME/.kube/config.orig /usr/local/bin/kubectl config view --flatten >$HOME/.kube/config } -# validation checks for deployment -validation_check(){ +# validation checks for deployment +validation_check() { echo $HOSTNAME | grep -P '(?=^.{1,254}$)(^(?>(?!\d+\.)[a-zA-Z0-9_\-]{1,63}\.?)+(?:[a-zA-Z]{2,})$)' && echo "Correct" - if [ $? != 0 ]; - then + if [ $? != 0 ]; then echo "======================================================================" echo "!!! WARNING !!!" echo "The hostname $HOSTNAME does not follow FQDN, which might cause problems while operating the cluster." 
@@ -270,11 +263,10 @@ crio_conf verify_crio get_kubectl -[ "$CONFIG_ENV_ONLY" = true ] && { echo "Env config complete" && exit 0 ; } +[ "$CONFIG_ENV_ONLY" = true ] && { echo "Env config complete" && exit 0; } get_microshift -until sudo test -f /var/lib/microshift/resources/kubeadmin/kubeconfig -do - sleep 2 +until sudo test -f /var/lib/microshift/resources/kubeadmin/kubeconfig; do + sleep 2 done prepare_kubeconfig diff --git a/packaging/images/components/README.md b/packaging/images/components/README.md index 69b565dcee..2560db0ac0 100644 --- a/packaging/images/components/README.md +++ b/packaging/images/components/README.md @@ -1,39 +1,44 @@ -# Multiarchitecture side-image building +--- +modified: "2021-10-27T12:30:56.061+02:00" +--- + +# Multi-architecture side-image building MicroShift optionally deploys multiple container images at runtime to provide infrastructure services like DNS and ingress. While most of those images are available from OKD and it does consume the original unmodified OKD image, this is only true for `amd64` because OKD still lacks support for other -architectures MicroShift supports like `arm`, `arm64`, `ppc64le`, `riscv64` +architectures MicroShift supports like `arm`, `arm64`, `ppc64le`, `riscv64` # Usage `./build.sh` will generate all the necessary images, and it accepts a few parameters as environment variables you can use to tweak and debug the build: -* `DEST_REGISTRY=quay.io/microshift` -* `COMPONENTS="base-image pause cli coredns flannel haproxy-router hostpath-provisioner kube-rbac-proxy service-ca-operator"` -* `ARCHITECTURES="amd64 arm64 arm ppc64le riscv64"` -* `PUSH=no` -* `PARALLEL=yes` +- `DEST_REGISTRY=quay.io/microshift` +- `COMPONENTS="base-image pause cli coredns flannel haproxy-router hostpath-provisioner kube-rbac-proxy service-ca-operator"` +- `ARCHITECTURES="amd64 arm64 arm ppc64le riscv64"` +- `PUSH=no` +- `PARALLEL=yes` -You can use an alternate DEST_REGISTRY for testing, pick individual components or architectures. 
+You can use an alternate `DEST_REGISTRY` for testing, pick individual components or architectures. For example: `COMPONENTS="base-image" ./build.sh` The base-image is used for composing some of the images on top, as OKD does. This serves two purposes: - * Faster multiarch building, since the initial layers are very expensive to construct (done via - qemu-static) - * Thinner storage, as the base-image layer is downloaded just once. + +- Faster multi-arch building, since the initial layers are very expensive to construct (done via + `qemu-static`) +- Thinner storage, as the base-image layer is downloaded just once. ## Existing issues -Currently buildah doesn't recognize and use the locally built base-image for some +Currently `buildah` doesn't recognize and use the locally built base-image for some reason to be identified. That means that local building doesn't properly work without build. As a workaround when locally testing for a new tag you can use a separate registry with -a "base-image" repo. +a "base-image" `repo`. For example: @@ -47,7 +52,7 @@ without pushing. ## Directory structure The `components` directory inside `side-images` contains all necessary information to -build each image. +build each image. Each component source code is extracted into the `src` directory, the references are extracted from the specific OKD release, and for components not @@ -55,16 +60,17 @@ being part of OKD the `repo` and `commit` files should exist (except for the bas (see flannel for an example). In addition, each component can have: -* `ImageSource.$ARCH` or `Dockerfile.$ARCH` specific for an architecture. -* `ImageSource` or `Dockerfile` general building strategy. + +- `ImageSource.$ARCH` or `Dockerfile.$ARCH` specific for an architecture. +- `ImageSource` or `Dockerfile` general building strategy. 
An `ImageSource` file means that if no other specific method exist for an architecture -the image should be retrieved from an specific ImageSource, for example in `flannel` we use -`quay.io/coreos/flannel:v0.14.0` as ImageSource for most architectures, since they publish +the image should be retrieved from an specific `ImageSource`, for example in `flannel` we use +`quay.io/coreos/flannel:v0.14.0` as `ImageSource` for most architectures, since they publish a multi-architecture manifest. A `Dockerfile` file means that if no other specific method exist for an architecture, -the image will be built according to the instructions of the Dockerfile. +the image will be built according to the instructions of the `Dockerfile`. An `ImageSource.$ARCH` or `Dockerfile.$ARCH` will source or build an image for an specific architecture. @@ -74,23 +80,28 @@ the different architecture binaries under `bin`, will be triggered only when nec # Consumed images -The reference to the consumed images can be found in [pkg/release](../pkg/release). +The reference to the consumed images can be found in [`pkg/release`](../pkg/release). # Image sources and source code -The available OKD images, and otherwise the reference to the sourcecode and git-tag -from which the OKD images are built is extracted from -`oc adm release extract "quay.io/openshift/okd:${OKD_BASE_TAG}" --file=image-references` +The available OKD images, and otherwise the reference to the source code and git-tag +from which the OKD images are built is extracted from: + +```sh +oc adm release extract "quay.io/openshift/okd:${OKD_BASE_TAG}" --file=image-references +``` If an OKD image exists for the specific architecture, such specific image will be -added into the multiarch manifest, otherwise we need to build the specific images. +added into the multi-arch manifest, otherwise we need to build the specific images. 
For architectures where `ubi8` or `ubi8-minimal` images exist such base will be used, in some cases we use `fedora-minimal` (when a newer version of packages is necessary) -# Non OKD images -We consume a few non-okd images, like `flannel`, `hostpath-provisioner`, `pause`, -we build those images from exiting image sources, or from source code. +# Non-OKD images + +We consume a few non-OKD images, like `flannel`, `hostpath-provisioner`, `pause`, +we build those images from existing image sources, or from source code. # generated images -We publish the multi-arch images under quay.io/microshift/$IMAGE:$OKD_BASE_TAG + +We publish the multi-arch images under `quay.io/microshift/$IMAGE:$OKD_BASE_TAG` diff --git a/packaging/images/components/build.sh b/packaging/images/components/build.sh index 8600694f05..7c857aab19 100755 --- a/packaging/images/components/build.sh +++ b/packaging/images/components/build.sh @@ -13,174 +13,169 @@ GRAY="\e[1;34m" GREEN="\e[32m" CLEAR="\e[0m" -function source_repo { - jq ' .spec.tags[] | select(.name == "'$1'") | .annotations."io.openshift.build.source-location"' < "${IMG_REFS}" | tr -d '" ' +function source_repo() { + jq ' .spec.tags[] | select(.name == "'$1'") | .annotations."io.openshift.build.source-location"' <"${IMG_REFS}" | tr -d '" ' } -function source_commit { - jq ' .spec.tags[] | select(.name == "'$1'") | .annotations."io.openshift.build.commit.id"' < "${IMG_REFS}" | tr -d '" ' +function source_commit() { + jq ' .spec.tags[] | select(.name == "'$1'") | .annotations."io.openshift.build.commit.id"' <"${IMG_REFS}" | tr -d '" ' } -function source_image { - jq ' .spec.tags[] | select(.name == "'$1'") | .from.name' < "${IMG_REFS}" | tr -d '" ' +function source_image() { + jq ' .spec.tags[] | select(.name == "'$1'") | .from.name' <"${IMG_REFS}" | tr -d '" ' } -function build_component { - local component=$1 - SRC_REPO=$(source_repo $component) - SRC_COMMIT=$(source_commit $component) - OKD_IMG=$(source_image $component) +function 
build_component() { + local component=$1 + SRC_REPO=$(source_repo $component) + SRC_COMMIT=$(source_commit $component) + OKD_IMG=$(source_image $component) - [ -z "$SRC_REPO" ] && SRC_REPO=$(cat components/$component/repo || :) - [ -z "$SRC_COMMIT" ] && SRC_COMMIT=$(cat components/$component/commit || :) + [ -z "$SRC_REPO" ] && SRC_REPO=$(cat components/$component/repo || :) + [ -z "$SRC_COMMIT" ] && SRC_COMMIT=$(cat components/$component/commit || :) - echo "" - echo -e "${GREEN}building component: $component${CLEAR}" - echo " Source Repo: $SRC_REPO" - echo " Source Commit: $SRC_COMMIT" - echo " Source Image: $OKD_IMG" + echo "" + echo -e "${GREEN}building component: $component${CLEAR}" + echo " Source Repo: $SRC_REPO" + echo " Source Commit: $SRC_COMMIT" + echo " Source Image: $OKD_IMG" - pushd components/$component >/dev/null + pushd components/$component >/dev/null if [ ! -z "${SRC_REPO}" ]; then - checkout_component $SRC_REPO $SRC_COMMIT - build_cross_binaries + checkout_component $SRC_REPO $SRC_COMMIT + build_cross_binaries fi build_multiarch_image $component $OKD_IMG - popd + popd - if [ "${PUSH}" == "yes" ]; then - echo -e "${GRAY}> pushing multiarch manifest ${MULTIARCH_MANIFEST}${CLEAR}" - buildah manifest push --all "${MULTIARCH_MANIFEST}" docker://"${MULTIARCH_MANIFEST}" - fi + if [ "${PUSH}" == "yes" ]; then + echo -e "${GRAY}> pushing multiarch manifest ${MULTIARCH_MANIFEST}${CLEAR}" + buildah manifest push --all "${MULTIARCH_MANIFEST}" docker://"${MULTIARCH_MANIFEST}" + fi } -function checkout_component { - echo "" - echo -e "${GRAY}> making sure we have the source code for $1, at commit $2${CLEAR}" - [ ! -d src ] && git clone $1 src - cd src - git fetch -a - git stash >/dev/null # just in case we had patches applied in last run - git clean -f # remove any out-of-tree files (from patches) - echo git checkout $2 -B building-side-images - git checkout $2 -B building-side-images - cd .. 
+function checkout_component() { + echo "" + echo -e "${GRAY}> making sure we have the source code for $1, at commit $2${CLEAR}" + [ ! -d src ] && git clone $1 src + cd src + git fetch -a + git stash >/dev/null # just in case we had patches applied in last run + git clean -f # remove any out-of-tree files (from patches) + echo git checkout $2 -B building-side-images + git checkout $2 -B building-side-images + cd .. } -function build_cross_binaries { - for ARCH in ${ARCHITECTURES} - do - if [ -f Dockerfile.$ARCH ] || [ -f Dockerfile ] && [ ! -f ImageSource.$ARCH ] && [ -x ./build_binaries ]; then - echo "" - echo -e "${GRAY}> building binaries for architecture ${ARCH} ${CLEAR}" - ./build_binaries $ARCH - fi - done +function build_cross_binaries() { + for ARCH in ${ARCHITECTURES}; do + if [ -f Dockerfile.$ARCH ] || [ -f Dockerfile ] && [ ! -f ImageSource.$ARCH ] && [ -x ./build_binaries ]; then + echo "" + echo -e "${GRAY}> building binaries for architecture ${ARCH} ${CLEAR}" + ./build_binaries $ARCH + fi + done } -function build_multiarch_image { - COMPONENT=$1 - OKD_IMG=$2 - MULTIARCH_MANIFEST="${DEST_REGISTRY}/${COMPONENT}:${OKD_BASE_TAG}" +function build_multiarch_image() { + COMPONENT=$1 + OKD_IMG=$2 + MULTIARCH_MANIFEST="${DEST_REGISTRY}/${COMPONENT}:${OKD_BASE_TAG}" - echo "" - echo -e "${GRAY}> preparing multiarch manifest ${MULTIARCH_MANIFEST} ${CLEAR}" + echo "" + echo -e "${GRAY}> preparing multiarch manifest ${MULTIARCH_MANIFEST} ${CLEAR}" + + buildah manifest rm "${MULTIARCH_MANIFEST}" 2>/dev/null >/dev/null || : + buildah manifest create "${MULTIARCH_MANIFEST}" + if [ -d src ]; then + cd src + VERSION=$(git describe --tags) + cd .. + fi - buildah manifest rm "${MULTIARCH_MANIFEST}" 2>/dev/null >/dev/null || : - buildah manifest create "${MULTIARCH_MANIFEST}" - if [ -d src ]; then - cd src - VERSION=$(git describe --tags) - cd .. 
- fi + # allow to disable parallelization, helpful for debugging + if [ "${PARALLEL}" == "yes" ]; then + echo "" + echo -e "${GRAY}> preparing ${COMPONENT} images in parallel for: ${ARCHITECTURES}${CLEAR}" + for ARCH in ${ARCHITECTURES}; do + ARCH_IMAGE="${MULTIARCH_MANIFEST}-${ARCH}" + ( + set -o pipefail + build_arch_image |& sed "s/^/[${COMPONENT}:${ARCH}] /" + ) & + done + wait + else + for ARCH in ${ARCHITECTURES}; do + echo "" + echo -e "${GRAY}> preparing arch image ${ARCH_IMAGE} ${CLEAR}" + build_arch_image |& sed "s/^/[${COMPONENT}:${ARCH}] /" + done + wait + fi - # allow to disable parallelization, helpful for debugging - if [ "${PARALLEL}" == "yes" ]; then - echo "" - echo -e "${GRAY}> preparing ${COMPONENT} images in parallel for: ${ARCHITECTURES}${CLEAR}" - for ARCH in ${ARCHITECTURES} - do - ARCH_IMAGE="${MULTIARCH_MANIFEST}-${ARCH}" - ( - set -o pipefail - build_arch_image |& sed "s/^/[${COMPONENT}:${ARCH}] /" - ) & + for ARCH in ${ARCHITECTURES}; do + ARCH_IMAGE="${MULTIARCH_MANIFEST}-${ARCH}" + echo -e "${GRAY}> adding ${ARCH} image to ${MULTIARCH_MANIFEST}${CLEAR}" + buildah manifest add "${MULTIARCH_MANIFEST}" "${ARCH_IMAGE}" done - wait - else - for ARCH in ${ARCHITECTURES} - do - echo "" - echo -e "${GRAY}> preparing arch image ${ARCH_IMAGE} ${CLEAR}" - build_arch_image |& sed "s/^/[${COMPONENT}:${ARCH}] /" - done - wait - fi - - for ARCH in ${ARCHITECTURES} - do - ARCH_IMAGE="${MULTIARCH_MANIFEST}-${ARCH}" - echo -e "${GRAY}> adding ${ARCH} image to ${MULTIARCH_MANIFEST}${CLEAR}" - buildah manifest add "${MULTIARCH_MANIFEST}" "${ARCH_IMAGE}" - done } -function build_arch_image { - # different methods to build a component for an arch, we can source a pre-exiting image, +function build_arch_image() { + # different methods to build a component for an arch, we can source a pre-exiting image, # have an arch specific Dockerfile, ... have a single Image for all, or a single Dockerfile # for all. 
if [ -f "ImageSource.${ARCH}" ]; then - build_using_image "ImageSource.${ARCH}" + build_using_image "ImageSource.${ARCH}" elif [ -f "Dockerfile.${ARCH}" ]; then - build_using_dockerfile "Dockerfile.${ARCH}" + build_using_dockerfile "Dockerfile.${ARCH}" elif [ -f "ImageSource" ]; then - build_using_image "ImageSource" + build_using_image "ImageSource" elif [ -f "Dockerfile" ]; then - build_using_dockerfile "Dockerfile" + build_using_dockerfile "Dockerfile" else - echo "I don't know how to build this image" - exit 1 + echo "I don't know how to build this image" + exit 1 fi } -function build_using_dockerfile { - - BUILD_ARGS="-f $1 -t ${ARCH_IMAGE}" - BUILD_ARGS="${BUILD_ARGS} --build-arg VERSION=${VERSION} --build-arg TARGETARCH=${ARCH}" - BUILD_ARGS="${BUILD_ARGS} --build-arg REGISTRY=${DEST_REGISTRY} --build-arg OKD_TAG=${OKD_BASE_TAG}" +function build_using_dockerfile() { + BUILD_ARGS="-f $1 -t ${ARCH_IMAGE}" + BUILD_ARGS="${BUILD_ARGS} --build-arg VERSION=${VERSION} --build-arg TARGETARCH=${ARCH}" + BUILD_ARGS="${BUILD_ARGS} --build-arg REGISTRY=${DEST_REGISTRY} --build-arg OKD_TAG=${OKD_BASE_TAG}" - buildah build-using-dockerfile --override-arch "${ARCH}" $BUILD_ARGS . || \ - if [ ${ARCH} == arm ]; then # fedora registry uses armhfp instead for arm (arm32 with floating point) - buildah build-using-dockerfile --override-arch "armhfp" $BUILD_ARGS . - fi + buildah build-using-dockerfile --override-arch "${ARCH}" $BUILD_ARGS . || + if [ ${ARCH} == arm ]; then # fedora registry uses armhfp instead for arm (arm32 with floating point) + buildah build-using-dockerfile --override-arch "armhfp" $BUILD_ARGS . 
+ fi } -function build_using_image { +function build_using_image() { - IMG_REF=$(get_image_ref $1 "${OKD_IMG}") - buildah pull --arch "${ARCH}" "${IMG_REF}" - buildah tag "${IMG_REF}" "${ARCH_IMAGE}" + IMG_REF=$(get_image_ref $1 "${OKD_IMG}") + buildah pull --arch "${ARCH}" "${IMG_REF}" + buildah tag "${IMG_REF}" "${ARCH_IMAGE}" } -function get_image_ref { +function get_image_ref() { - IMG=$(cat $1) - # check if we must use the one captured from oc adm image-releases - if [ "${IMG}" == "\$OKD_IMAGE_AMD64" ]; then - echo $2 - else - echo ${IMG} - fi + IMG=$(cat $1) + # check if we must use the one captured from oc adm image-releases + if [ "${IMG}" == "\$OKD_IMAGE_AMD64" ]; then + echo $2 + else + echo ${IMG} + fi } # we need qemu static configured on the system during build if not already installed @@ -194,10 +189,9 @@ echo OKD Base: "${OKD_BASE_TAG}" IMG_REFS=".image-references.${OKD_BASE_TAG}" if [ ! -f "${IMG_REFS}" ]; then - oc adm release extract "quay.io/openshift/okd:${OKD_BASE_TAG}" --file=image-references > ".image-references.${OKD_BASE_TAG}" + oc adm release extract "quay.io/openshift/okd:${OKD_BASE_TAG}" --file=image-references >".image-references.${OKD_BASE_TAG}" fi -for component in $COMPONENTS -do - build_component $component +for component in $COMPONENTS; do + build_component $component done diff --git a/packaging/images/components/components/base-image/Dockerfile.riscv64 b/packaging/images/components/components/base-image/Dockerfile.riscv64 index 0389981619..8973b9b264 100644 --- a/packaging/images/components/components/base-image/Dockerfile.riscv64 +++ b/packaging/images/components/components/base-image/Dockerfile.riscv64 @@ -6,4 +6,4 @@ ENV PATH=$PATH:/ RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates dnsutils debianutils \ tar wget hostname socat locate lsof gzip procps rsync python3 && \ rm -rf /var/lib/apt/lists/* && \ - update-ca-certificates \ No newline at end of file + update-ca-certificates diff --git 
a/packaging/images/components/components/cli/Dockerfile b/packaging/images/components/components/cli/Dockerfile index 40ff6a9b4b..4d8be3eda0 100644 --- a/packaging/images/components/components/cli/Dockerfile +++ b/packaging/images/components/components/cli/Dockerfile @@ -1,3 +1,3 @@ ARG OKD_TAG ARG REGISTRY -FROM $REGISTRY/base-image:$OKD_TAG \ No newline at end of file +FROM $REGISTRY/base-image:$OKD_TAG diff --git a/packaging/images/components/components/coredns/Dockerfile.riscv64 b/packaging/images/components/components/coredns/Dockerfile.riscv64 index 3f5f50cff9..65fc243acd 100644 --- a/packaging/images/components/components/coredns/Dockerfile.riscv64 +++ b/packaging/images/components/components/coredns/Dockerfile.riscv64 @@ -16,4 +16,3 @@ LABEL io.k8s.display-name="CoreDNS" \ maintainer="Carlos Eduardo <carlosedp@gmail.com>" ENTRYPOINT ["/usr/bin/coredns"] - diff --git a/packaging/images/components/components/coredns/build_binaries b/packaging/images/components/components/coredns/build_binaries index ab0a4440cb..a2db8941e8 100755 --- a/packaging/images/components/components/coredns/build_binaries +++ b/packaging/images/components/components/coredns/build_binaries @@ -6,8 +6,5 @@ cd src set -x GOOS=linux GOARCH=$1 CGO_ENABLED=0 \ -GO111MODULE=on GOFLAGS=-mod=vendor \ -go build -o ../bin/coredns-$1 . - - - + GO111MODULE=on GOFLAGS=-mod=vendor \ + go build -o ../bin/coredns-$1 . 
diff --git a/packaging/images/components/components/flannel/build_binaries b/packaging/images/components/components/flannel/build_binaries index 18c9e25189..8c8f79308d 100755 --- a/packaging/images/components/components/flannel/build_binaries +++ b/packaging/images/components/components/flannel/build_binaries @@ -4,4 +4,3 @@ mkdir -p bin cd src GOOS=linux GOARCH=$1 make dist/flanneld && mv dist/flanneld ../bin/flanneld-$1 - diff --git a/packaging/images/components/components/haproxy-router/Dockerfile.riscv64 b/packaging/images/components/components/haproxy-router/Dockerfile.riscv64 index 196f4c037e..014250fdd7 100644 --- a/packaging/images/components/components/haproxy-router/Dockerfile.riscv64 +++ b/packaging/images/components/components/haproxy-router/Dockerfile.riscv64 @@ -27,4 +27,4 @@ EXPOSE 80 443 WORKDIR /var/lib/haproxy/conf ENV TEMPLATE_FILE=/var/lib/haproxy/conf/haproxy-config.template \ RELOAD_SCRIPT=/var/lib/haproxy/reload-haproxy -ENTRYPOINT ["/usr/bin/openshift-router", "--v=2"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/openshift-router", "--v=2"] diff --git a/packaging/images/components/components/haproxy-router/build_binaries b/packaging/images/components/components/haproxy-router/build_binaries index 29502b5cf1..089ab3d6d3 100755 --- a/packaging/images/components/components/haproxy-router/build_binaries +++ b/packaging/images/components/components/haproxy-router/build_binaries @@ -7,16 +7,16 @@ cd src export SOURCE_GIT_TAG=$(git describe --tags) if [ "x$1" == "xriscv64" ]; then - git apply ../../kube-rbac-proxy/0001-workaround-riscv64.patch + git apply ../../kube-rbac-proxy/0001-workaround-riscv64.patch fi set -x GO111MODULE=on GOOS=linux GOARCH=$1 GOFLAGS="-mod=vendor" \ -make build + make build set +x cp openshift-router ../bin/openshift-router-$1 if [ "x$1" == "xriscv64" ]; then - git stash + git stash fi diff --git a/packaging/images/components/components/hostpath-provisioner/Dockerfile 
b/packaging/images/components/components/hostpath-provisioner/Dockerfile index dcb77695d7..3323bf7939 100644 --- a/packaging/images/components/components/hostpath-provisioner/Dockerfile +++ b/packaging/images/components/components/hostpath-provisioner/Dockerfile @@ -2,4 +2,3 @@ FROM scratch ARG TARGETARCH COPY bin/hostpath-provisioner-$TARGETARCH /hostpath-provisioner CMD ["/hostpath-provisioner"] - diff --git a/packaging/images/components/components/hostpath-provisioner/arm32.patch b/packaging/images/components/components/hostpath-provisioner/arm32.patch index d7284868b2..ff9c63a21d 100644 --- a/packaging/images/components/components/hostpath-provisioner/arm32.patch +++ b/packaging/images/components/components/hostpath-provisioner/arm32.patch @@ -10,4 +10,3 @@ index 049125c..f3d2a16 100644 + quantity := resource.NewQuantity(int64(roundDownCapacityPretty(int64(statfs.Blocks)*int64(statfs.Bsize))), resource.BinarySI) return quantity, nil } - diff --git a/packaging/images/components/components/hostpath-provisioner/build_binaries b/packaging/images/components/components/hostpath-provisioner/build_binaries index 5cb4a0e093..31b535aaed 100755 --- a/packaging/images/components/components/hostpath-provisioner/build_binaries +++ b/packaging/images/components/components/hostpath-provisioner/build_binaries @@ -5,18 +5,17 @@ mkdir -p bin cd src if [ "x$1" == "xarm" ]; then - git apply ../arm32.patch + git apply ../arm32.patch fi set -x -GOOS=linux GOARCH=$1 CGO_ENABLED=0 GOFLAGS="-mod=vendor" \ - go build -a -ldflags '-extldflags "-static"' -o ../bin/hostpath-provisioner-$1 cmd/provisioner/hostpath-provisioner.go +GOOS=linux GOARCH=$1 CGO_ENABLED=0 GOFLAGS="-mod=vendor" \ + go build -a -ldflags '-extldflags "-static"' -o ../bin/hostpath-provisioner-$1 cmd/provisioner/hostpath-provisioner.go set +x if [ "x$1" == "xarm" ]; then - git stash - git clean -f + git stash + git clean -f fi - diff --git 
a/packaging/images/components/components/kube-rbac-proxy/0001-workaround-riscv64.patch b/packaging/images/components/components/kube-rbac-proxy/0001-workaround-riscv64.patch index 706ec9fb17..c0f489ca8b 100644 --- a/packaging/images/components/components/kube-rbac-proxy/0001-workaround-riscv64.patch +++ b/packaging/images/components/components/kube-rbac-proxy/0001-workaround-riscv64.patch @@ -17,14 +17,14 @@ index 31d42f71..70651ed6 100644 @@ -71,6 +71,10 @@ func (fs FS) CPUInfo() ([]CPUInfo, error) { return parseCPUInfo(data) } - + +func parseCPUInfoRiscv64(info []byte) ([]CPUInfo, error) { + return nil, nil +} + func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) - + diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscv64.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscv64.go new file mode 100644 index 00000000..cb75cc7d @@ -49,6 +49,5 @@ index 00000000..cb75cc7d +package procfs + +var parseCPUInfo = parseCPUInfoRiscv64 --- +-- 2.31.1 - diff --git a/packaging/images/components/components/kube-rbac-proxy/build_binaries b/packaging/images/components/components/kube-rbac-proxy/build_binaries index 742254e14e..389c0e8a13 100755 --- a/packaging/images/components/components/kube-rbac-proxy/build_binaries +++ b/packaging/images/components/components/kube-rbac-proxy/build_binaries @@ -6,17 +6,17 @@ mkdir -p bin cd src if [ "x$1" == "xriscv64" ]; then - git apply ../0001-workaround-riscv64.patch + git apply ../0001-workaround-riscv64.patch fi set -x GO111MODULE=on GOOS=linux GOARCH=$1 GOFLAGS="-mod=vendor" \ -make build + make build set +x cp _output/kube-rbac-proxy-linux-$1 ../bin/kube-rbac-proxy-$1 if [ "x$1" == "xriscv64" ]; then - git stash - git clean -f + git stash + git clean -f fi diff --git a/packaging/images/components/components/service-ca-operator/build_binaries b/packaging/images/components/components/service-ca-operator/build_binaries index 27dad84a39..6b06c00e7a 100755 --- 
a/packaging/images/components/components/service-ca-operator/build_binaries +++ b/packaging/images/components/components/service-ca-operator/build_binaries @@ -6,19 +6,17 @@ mkdir -p bin cd src if [ "x$1" == "xriscv64" ]; then - git apply ../../kube-rbac-proxy/0001-workaround-riscv64.patch + git apply ../../kube-rbac-proxy/0001-workaround-riscv64.patch fi - set -x GO111MODULE=on GOOS=linux GOARCH=$1 GOFLAGS="-mod=vendor" \ -make build + make build set +x cp service-ca-operator ../bin/service-ca-operator-$1 if [ "x$1" == "xriscv64" ]; then - git stash - git clean -f + git stash + git clean -f fi - diff --git a/packaging/images/microshift-aio/Dockerfile b/packaging/images/microshift-aio/Dockerfile index 7b718a17a6..c970b31597 100644 --- a/packaging/images/microshift-aio/Dockerfile +++ b/packaging/images/microshift-aio/Dockerfile @@ -50,7 +50,7 @@ RUN chmod 755 /usr/local/bin/microshift RUN export VERSION=1.20 && \ export OS=CentOS_8_Stream && \ curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/devel:kubic:libcontainers:stable.repo && \ - curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo + curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/devel:kubic:libcontainers:stable:cri-o:$VERSION.repo RUN dnf install -y cri-o \ cri-tools \ diff --git a/packaging/images/microshift-aio/crio-bridge.conf b/packaging/images/microshift-aio/crio-bridge.conf index 540fc11aae..31bebb5a31 100644 --- a/packaging/images/microshift-aio/crio-bridge.conf +++ b/packaging/images/microshift-aio/crio-bridge.conf @@ -15,4 +15,4 @@ [{ "subnet": "10.42.0.0/24" }] ] } -} \ No newline at end of file +} 
diff --git a/packaging/rpm/make-rpm.sh b/packaging/rpm/make-rpm.sh index 8608a8fc67..1429da740f 100755 --- a/packaging/rpm/make-rpm.sh +++ b/packaging/rpm/make-rpm.sh @@ -7,12 +7,8 @@ RELEASE_PRE=${RELEASE_PRE:-${RELEASE_BASE}-0.microshift} BUILD=${BUILD:-all} # generated from other info -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -RPM_REL=$(git describe --tags | sed s/"${RELEASE_PRE}-"//g | sed s/-/_/g ) - -# add the git commit timestamp for nightlies, so updates will always work on devices old pkg < new pkg -RPM_REL=$(echo "${RPM_REL}" | sed s/nightly_/nightly_$(git show -s --format=%ct)_/g) - +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +RPM_REL=$(git describe --tags | sed s/"${RELEASE_PRE}-"//g | sed s/-/_/g) GIT_SHA=$(git rev-parse HEAD) # using this instead of rev-parse --short because github's is 1 char shorter than --short GIT_SHORTHASH="${GIT_SHA:0:7}" @@ -20,42 +16,42 @@ TARBALL_FILE="microshift-${GIT_SHORTHASH}.tar.gz" RPMBUILD_DIR="${SCRIPT_DIR}/_rpmbuild/" create_local_tarball() { - tar -czf "${RPMBUILD_DIR}/SOURCES/${TARBALL_FILE}" \ - --exclude='.git' --exclude='.idea' --exclude='.vagrant' \ - --exclude='_output' --exclude='rpm/_rpmbuild' \ - --transform="s|^|microshift-${GIT_SHA}/|" \ - --exclude="${TARBALL_FILE}" "${SCRIPT_DIR}/../../" + tar -czf "${RPMBUILD_DIR}/SOURCES/${TARBALL_FILE}" \ + --exclude='.git' --exclude='.idea' --exclude='.vagrant' \ + --exclude='_output' --exclude='rpm/_rpmbuild' \ + --transform="s|^|microshift-${GIT_SHA}/|" \ + --exclude="${TARBALL_FILE}" "${SCRIPT_DIR}/../../" } download_commit_tarball() { - GIT_SHA=${1:-$GIT_SHA} - spectool -g --define "_topdir ${RPMBUILD_DIR}" --define="release ${RPM_REL}" --define="version ${RELEASE_BASE}" \ - --define "git_commit ${GIT_SHA}" \ - -R "${SCRIPT_DIR}/microshift.spec" + GIT_SHA=${1:-$GIT_SHA} + spectool -g --define "_topdir ${RPMBUILD_DIR}" --define="release ${RPM_REL}" --define="version ${RELEASE_BASE}" \ + --define 
"git_commit ${GIT_SHA}" \ + -R "${SCRIPT_DIR}/microshift.spec" } download_tag_tarball() { - spectool -g --define "_topdir ${RPMBUILD_DIR}" --define="release ${RPM_REL}" --define="version ${RELEASE_BASE}" \ - --define "github_tag ${1}" \ - -R "${SCRIPT_DIR}/microshift.spec" + spectool -g --define "_topdir ${RPMBUILD_DIR}" --define="release ${RPM_REL}" --define="version ${RELEASE_BASE}" \ + --define "github_tag ${1}" \ + -R "${SCRIPT_DIR}/microshift.spec" } case $BUILD in - all) RPMBUILD_OPT=-ba ;; - rpm) RPMBUILD_OPT=-bb ;; - srpm) RPMBUILD_OPT=-bs ;; +all) RPMBUILD_OPT=-ba ;; +rpm) RPMBUILD_OPT=-bb ;; +srpm) RPMBUILD_OPT=-bs ;; esac build_commit() { - # using --defines worka for rpm building, but not for an srpm - cat >"${RPMBUILD_DIR}"SPECS/microshift.spec <<EOF + # using --defines worka for rpm building, but not for an srpm + cat >"${RPMBUILD_DIR}"SPECS/microshift.spec <<EOF %global release ${RPM_REL} %global version ${RELEASE_BASE} %global git_commit ${1} EOF - cat "${SCRIPT_DIR}/microshift.spec" >> "${RPMBUILD_DIR}SPECS/microshift.spec" + cat "${SCRIPT_DIR}/microshift.spec" >>"${RPMBUILD_DIR}SPECS/microshift.spec" - rpmbuild "${RPMBUILD_OPT}" --define "_topdir ${RPMBUILD_DIR}" "${RPMBUILD_DIR}"SPECS/microshift.spec + rpmbuild "${RPMBUILD_OPT}" --define "_topdir ${RPMBUILD_DIR}" "${RPMBUILD_DIR}"SPECS/microshift.spec } build_tag_commit() { @@ -64,26 +60,30 @@ build_tag_commit() { %global version ${RELEASE_BASE} %global github_tag ${1} EOF - cat "${SCRIPT_DIR}/microshift.spec" >> "${RPMBUILD_DIR}SPECS/microshift.spec" + cat "${SCRIPT_DIR}/microshift.spec" >>"${RPMBUILD_DIR}SPECS/microshift.spec" - rpmbuild "${RPMBUILD_OPT}" --define "_topdir ${RPMBUILD_DIR}" "${RPMBUILD_DIR}"SPECS/microshift.spec + rpmbuild "${RPMBUILD_OPT}" --define "_topdir ${RPMBUILD_DIR}" "${RPMBUILD_DIR}"SPECS/microshift.spec } # prepare the rpmbuild env mkdir -p "${RPMBUILD_DIR}"/{BUILD,RPMS,SOURCES,SPECS,SRPMS} case $1 in - local) create_local_tarball - build_commit "${GIT_SHA}" - ;; - 
commit) download_commit_tarball "$2" - build_commit "$2" - ;; - tag) download_tag_tarball "$2" - build_tag_commit "$2" - ;; +local) + create_local_tarball + build_commit "${GIT_SHA}" + ;; +commit) + download_commit_tarball "$2" + build_commit "$2" + ;; +tag) + download_tag_tarball "$2" + build_tag_commit "$2" + ;; - *) - echo "Usage: $0 local|commit <commit-id>|tag <tag-name>" - exit 1 +*) + echo "Usage: $0 local|commit <commit-id>|tag <tag-name>" + exit 1 + ;; esac diff --git a/packaging/systemd/microshift-aio b/packaging/systemd/microshift-aio index b9ef31061d..204c20b4c2 100755 --- a/packaging/systemd/microshift-aio +++ b/packaging/systemd/microshift-aio @@ -4,13 +4,11 @@ set -euxo pipefail setsebool -P container_manage_cgroup true -if ! /usr/bin/podman volume exists microshift-data -then - /usr/bin/podman volume create microshift-data +if ! /usr/bin/podman volume exists microshift-data; then + /usr/bin/podman volume create microshift-data fi [[ -d /etc/microshift-aio ]] || mkdir /etc/microshift-aio -cat <<EOF > /etc/microshift-aio/microshift-aio.conf +cat <<EOF >/etc/microshift-aio/microshift-aio.conf export KUBECONFIG=$(/usr/bin/podman volume inspect microshift-data --format "{{.Mountpoint}}")/microshift/resources/kubeadmin/kubeconfig EOF - diff --git a/packaging/systemd/microshift-containerized b/packaging/systemd/microshift-containerized index 3b4b1065b3..1b6b10eace 100755 --- a/packaging/systemd/microshift-containerized +++ b/packaging/systemd/microshift-containerized @@ -3,6 +3,6 @@ set -euxo pipefail [[ -d /etc/microshift-containerized ]] || mkdir -p /etc/microshift-containerized -cat <<EOF > /etc/microshift-containerized/microshift-containerized.conf +cat <<EOF >/etc/microshift-containerized/microshift-containerized.conf export KUBECONFIG=/var/lib/microshift/resources/kubeadmin/kubeconfig -EOF \ No newline at end of file +EOF diff --git a/pkg/assets/applier.go b/pkg/assets/applier.go old mode 100755 new mode 100644 diff --git a/pkg/assets/apps.go 
b/pkg/assets/apps.go old mode 100755 new mode 100644 diff --git a/pkg/assets/apps/bindata.go b/pkg/assets/apps/bindata.go index 2d83e07f68..41147abd2c 100644 --- a/pkg/assets/apps/bindata.go +++ b/pkg/assets/apps/bindata.go @@ -333,7 +333,7 @@ spec: readOnly: true dnsPolicy: Default nodeSelector: - kubernetes.io/os: linux + kubernetes.io/os: linux volumes: - name: config-volume configMap: diff --git a/pkg/assets/core.go b/pkg/assets/core.go old mode 100755 new mode 100644 diff --git a/pkg/assets/core/bindata.go b/pkg/assets/core/bindata.go index c5fbf26810..f0d3643aaa 100644 --- a/pkg/assets/core/bindata.go +++ b/pkg/assets/core/bindata.go @@ -426,7 +426,7 @@ var _assetsCore0000_80_openshiftRouterCmYaml = []byte(`apiVersion: v1 kind: ConfigMap metadata: namespace: openshift-ingress - name: service-ca-bundle + name: service-ca-bundle annotations: service.beta.openshift.io/inject-cabundle: "true" `) diff --git a/pkg/assets/crd.go b/pkg/assets/crd.go old mode 100755 new mode 100644 diff --git a/pkg/assets/rbac.go b/pkg/assets/rbac.go old mode 100755 new mode 100644 diff --git a/pkg/components/components.go b/pkg/components/components.go old mode 100755 new mode 100644 diff --git a/pkg/components/render.go b/pkg/components/render.go old mode 100755 new mode 100644 diff --git a/scripts/bindata.sh b/scripts/bindata.sh index 91110cf389..414d8efa29 100755 --- a/scripts/bindata.sh +++ b/scripts/bindata.sh @@ -1,6 +1,7 @@ +#!/usr/bin/env bash go install github.com/go-bindata/go-bindata/... for i in crd core rbac apps scc storage; do - OUTPUT="pkg/assets/${i}/bindata.go" - ${GOPATH}/bin/go-bindata -nocompress -nometadata -prefix "pkg/assets/${i}" -pkg assets -o ${OUTPUT} "./assets/${i}/..." - gofmt -s -w "${OUTPUT}" + OUTPUT="pkg/assets/${i}/bindata.go" + ${GOPATH}/bin/go-bindata -nocompress -nometadata -prefix "pkg/assets/${i}" -pkg assets -o ${OUTPUT} "./assets/${i}/..." 
+ gofmt -s -w "${OUTPUT}" done diff --git a/scripts/rebase.sh b/scripts/rebase.sh index 5c9b62b3be..fa781d8bb8 100755 --- a/scripts/rebase.sh +++ b/scripts/rebase.sh @@ -27,9 +27,8 @@ STAGING_DIR="$REPOROOT/_output/staging" EMBEDDED_COMPONENTS="etcd hyperkube openshift-apiserver openshift-controller-manager" LOADED_COMPONENTS="cluster-dns-operator cluster-ingress-operator service-ca-operator" - title() { - echo -e "\E[34m\n$1\E[00m"; + echo -e "\E[34m\n$1\E[00m" } # Reads go.mod file $1 and prints lines in its section $2 ("require" or "replace") @@ -72,8 +71,7 @@ update_versions() { update_file=$2 re="^(.+) ([a-z0-9.-]+)$" - while IFS="" read -r line || [ -n "$line" ] - do + while IFS="" read -r line || [ -n "$line" ]; do if [[ "${line}" =~ ^//.* ]]; then continue fi @@ -88,8 +86,8 @@ update_versions() { version=$(printf '%s\n%s\n' "${base_version}" "${update_version}" | sort --version-sort | tail -n 1) fi - echo "${mod} ${version}" - done < "${base_file}" + echo "${mod} ${version}" + done <"${base_file}" } # Returns the list of release image names from a release_${arch}.go file @@ -99,26 +97,23 @@ get_release_images() { awk "BEGIN {output=0} /^}/ {output=0} {if (output == 1) print substr(\$1, 2, length(\$1)-3)} /^var Image/ {output=1}" "${file}" } - # == MAIN == if [[ $EUID -ne 0 ]]; then - >&2 echo "You need to run this script as root or in a 'buildah unshare' environment:" - >&2 echo " buildah unshare $0" - exit 1 + echo >&2 "You need to run this script as root or in a 'buildah unshare' environment:" + echo >&2 " buildah unshare $0" + exit 1 fi if [[ -z ${1+x} ]]; then - >&2 echo "You need to provide an OKD release name, e.g.:" - >&2 echo " $0 4.7.0-0.okd-2021-08-22-163618" + echo >&2 "You need to provide an OKD release name, e.g.:" + echo >&2 " $0 4.7.0-0.okd-2021-08-22-163618" exit 1 fi OKD_RELEASE=$1 - rm -rf "${STAGING_DIR}" mkdir -p "${STAGING_DIR}" pushd "${STAGING_DIR}" >/dev/null - title "Downloading and extracting ${OKD_RELEASE} release image..." 
curl -LO "https://github.com/openshift/okd/releases/download/${OKD_RELEASE}/release.txt" @@ -127,15 +122,13 @@ podman pull "${OKD_RELEASE_IMAGE}" cnt=$(buildah from "${OKD_RELEASE_IMAGE}") mnt=$(buildah mount "${cnt}" | cut -d ' ' -f 2) jq -r '.spec.tags[] | "\(.name) \(.annotations."io.openshift.build.source-location") \(.annotations."io.openshift.build.commit.id")"' \ - "${mnt}/release-manifests/image-references" > source_commits.txt + "${mnt}/release-manifests/image-references" >source_commits.txt mkdir -p "${STAGING_DIR}/release-manifests" cp -- "${mnt}"/release-manifests/*.yaml "${STAGING_DIR}/release-manifests" - title "Cloning git repos..." git config --global advice.detachedHead false -while IFS="" read -r line || [ -n "$line" ] -do +while IFS="" read -r line || [ -n "$line" ]; do COMPONENT=$(echo "${line}" | cut -d ' ' -f 1) REPO=$(echo "${line}" | cut -d ' ' -f 2) COMMIT=$(echo "${line}" | cut -d ' ' -f 3) @@ -146,25 +139,25 @@ do echo popd >/dev/null fi -done < source_commits.txt - +done <source_commits.txt title "Rebasing go.mod..." 
-extract_section "${REPOROOT}/go.mod" require > latest_require -extract_section "${REPOROOT}/go.mod" replace > latest_replace -while IFS="" read -r line || [ -n "$line" ] -do +extract_section "${REPOROOT}/go.mod" require >latest_require +extract_section "${REPOROOT}/go.mod" replace >latest_replace +while IFS="" read -r line || [ -n "$line" ]; do COMPONENT=$(echo "${line}" | cut -d ' ' -f 1) REPO=$(echo "${line}" | cut -d ' ' -f 2) if [[ "${EMBEDDED_COMPONENTS}" == *"${COMPONENT}"* ]]; then - extract_section "${REPO##*/}/go.mod" require > require - extract_section "${REPO##*/}/go.mod" replace > replace - update_versions latest_require require > t; mv t latest_require - update_versions latest_replace replace > t; mv t latest_replace + extract_section "${REPO##*/}/go.mod" require >require + extract_section "${REPO##*/}/go.mod" replace >replace + update_versions latest_require require >t + mv t latest_require + update_versions latest_replace replace >t + mv t latest_replace fi -done < source_commits.txt +done <source_commits.txt -cat << EOF > "${REPOROOT}/go.mod" +cat <<EOF >"${REPOROOT}/go.mod" module github.com/openshift/microshift go 1.16 @@ -187,7 +180,6 @@ make gen_openapi cp ./pkg/generated/openapi/zz_generated.openapi.go "${REPOROOT}/vendor/k8s.io/kubernetes/pkg/generated/openapi" popd >/dev/null - title "Rebasing release_*.go" images="$(get_release_images "${REPOROOT}/pkg/release/release.go" | xargs)" @@ -197,7 +189,7 @@ for arch in amd64; do digest=$(awk "/ ${i//_/-} / {print \$2}" release.txt) if [[ ! 
-z "${digest}" ]]; then awk "!/\"${i}\"/ {print \$0} /\"${i}\"/ {printf(\"\\t\\t%-${w}s %s\n\", \"\\\"${i}\\\":\", \"\\\"${digest}\\\",\")}" \ - "${REPOROOT}/pkg/release/release_${arch}.go" > t + "${REPOROOT}/pkg/release/release_${arch}.go" >t mv t "${REPOROOT}/pkg/release/release_${arch}.go" fi done @@ -205,7 +197,6 @@ done sed -i "/^var Base/c\var Base = \"${OKD_RELEASE}\"" "${REPOROOT}/pkg/release/release.go" - title "Rebasing manifests" assets=$(find "${REPOROOT}/assets" -name \*.yaml) for asset in ${assets}; do @@ -262,10 +253,9 @@ for asset in ${assets}; do echo "Updating ${asset} from ${updated_asset}" cp "${updated_asset}" "${asset}" else - echo -e "\E[31mNo update source found for ${asset}\E[00m"; + echo -e "\E[31mNo update source found for ${asset}\E[00m" fi done - title "Done." popd >/dev/null diff --git a/scripts/release.sh b/scripts/release.sh index 7c227c8865..9e5d34b481 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -43,7 +43,7 @@ alias podman=${__ctr_mgr_alias:?"a container manager (podman || docker) is requi ######### help() { - printf 'Microshift: release.sh + printf 'Microshift: release.sh This script provides some simple automation for cutting new releases of Microshift. Use: @@ -69,161 +69,161 @@ quay.io owner or org. 
} generate_api_release_request() { - local is_prerelease="${1:=true}" # (copejon) assume for now that all releases are prerelease, unless otherwise specified - printf '{"tag_name": "%s","name": "%s","prerelease": %s}' "$VERSION" "$VERSION" "$is_prerelease" + local is_prerelease="${1:=true}" # (copejon) assume for now that all releases are prerelease, unless otherwise specified + printf '{"tag_name": "%s","name": "%s","prerelease": %s}' "$VERSION" "$VERSION" "$is_prerelease" } git_create_release() { - local data="$1" - local response - response="$( - curl -X POST \ - -H "Accept: application/vnd.github.v3+json" \ - -H "Authorization: token $TOKEN" \ - "https://api.github.com/repos/$GIT_OWNER/microshift/releases" \ - -d "${data[@]}" - )" - local raw_upload_url - raw_upload_url="$(echo "$response" | grep "upload_url")" - local upload_url - upload_url=$(echo "$raw_upload_url" | sed -n 's,.*\(https://uploads.github.com/repos/'$GIT_OWNER'/microshift/releases/[0-9a-zA-Z]*/assets\).*,\1,p') - # curl will return 0 even on 4xx http errors, so verify that the actually got an up_load url - [ -z "$upload_url" ] && return 1 - echo "$upload_url" + local data="$1" + local response + response="$( + curl -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token $TOKEN" \ + "https://api.github.com/repos/$GIT_OWNER/microshift/releases" \ + -d "${data[@]}" + )" + local raw_upload_url + raw_upload_url="$(echo "$response" | grep "upload_url")" + local upload_url + upload_url=$(echo "$raw_upload_url" | sed -n 's,.*\(https://uploads.github.com/repos/'$GIT_OWNER'/microshift/releases/[0-9a-zA-Z]*/assets\).*,\1,p') + # curl will return 0 even on 4xx http errors, so verify that the actually got an up_load url + [ -z "$upload_url" ] && return 1 + echo "$upload_url" } git_post() { - local bin_file="$1" - local upload_url="$2" - local mime_type - mime_type="$(file -b --mime-type "$bin_file")" - curl --fail-early \ - -X POST \ - -H "Accept: application/vnd.github.v3" \ - 
-H "Authorization: token $TOKEN" \ - -H "Content-Type: $mime_type" \ - --data-binary @"$bin_file" \ - "$upload_url"?name="$(basename $bin_file)" + local bin_file="$1" + local upload_url="$2" + local mime_type + mime_type="$(file -b --mime-type "$bin_file")" + curl --fail-early \ + -X POST \ + -H "Accept: application/vnd.github.v3" \ + -H "Authorization: token $TOKEN" \ + -H "Content-Type: $mime_type" \ + --data-binary @"$bin_file" \ + "$upload_url"?name="$(basename $bin_file)" } git_post_artifacts() { - local asset_dir="$1" - local upload_url="$2" - local files - files="$(ls "$asset_dir")" - for f in $files; do - git_post "$asset_dir/$f" "$upload_url" - done + local asset_dir="$1" + local upload_url="$2" + local files + files="$(ls "$asset_dir")" + for f in $files; do + git_post "$asset_dir/$f" "$upload_url" + done } prep_stage_area() { - local asset_dir - asset_dir=$(mktemp -d -p "$STAGING_DIR/") - echo "$asset_dir" + local asset_dir + asset_dir=$(mktemp -d -p "$STAGING_DIR/") + echo "$asset_dir" } extract_release_image_binary() { - local tag="$1" - local dest="$2" - local out_bin="$dest"/microshift-"${tag#*"$VERSION-"}" - podman cp "$(podman create "$tag")":/usr/bin/microshift "$out_bin" >&2 - echo "$out_bin" + local tag="$1" + local dest="$2" + local out_bin="$dest"/microshift-"${tag#*"$VERSION-"}" + podman cp "$(podman create "$tag")":/usr/bin/microshift "$out_bin" >&2 + echo "$out_bin" } stage_release_image_binaries() { - local dest - dest="$(prep_stage_area)" - for t in "${RELEASE_IMAGE_TAGS[@]}"; do - local out_bin - out_bin=$(extract_release_image_binary "$t" "$dest") || return 1 - ( - cd "$dest" - sha256sum "$(basename "$out_bin")" >>"$dest"/release.sha256 - ) || return 1 - done - echo "$dest" + local dest + dest="$(prep_stage_area)" + for t in "${RELEASE_IMAGE_TAGS[@]}"; do + local out_bin + out_bin=$(extract_release_image_binary "$t" "$dest") || return 1 + ( + cd "$dest" + sha256sum "$(basename "$out_bin")" >>"$dest"/release.sha256 + ) || return 1 + done 
+ echo "$dest" } build_container_images_artifacts() { - ( - cd "$ROOT" - make build-containerized-cross-build SOURCE_GIT_TAG="$VERSION" IMAGE_REPO="$IMAGE_REPO" - ) || return 1 + ( + cd "$ROOT" + make build-containerized-cross-build SOURCE_GIT_TAG="$VERSION" IMAGE_REPO="$IMAGE_REPO" + ) || return 1 } push_container_image_artifacts() { - for t in "${RELEASE_IMAGE_TAGS[@]}"; do - podman push "$t" - done + for t in "${RELEASE_IMAGE_TAGS[@]}"; do + podman push "$t" + done } -podman_create_manifest(){ - podman manifest create "$IMAGE_REPO:$VERSION" >&2 - for ref in "${RELEASE_IMAGE_TAGS[@]}"; do - podman manifest add "$IMAGE_REPO:$VERSION" "docker://$ref" - done +podman_create_manifest() { + podman manifest create "$IMAGE_REPO:$VERSION" >&2 + for ref in "${RELEASE_IMAGE_TAGS[@]}"; do + podman manifest add "$IMAGE_REPO:$VERSION" "docker://$ref" + done podman manifest push "$IMAGE_REPO:$VERSION" "$IMAGE_REPO:$VERSION" podman manifest push "$IMAGE_REPO:$VERSION" "$IMAGE_REPO:latest" } -docker_create_manifest(){ - local amend_images_options - for image in "${RELEASE_IMAGE_TAGS[@]}"; do - amend_images_options+="--amend $image" - done - # use docker cli directly for clarity, as this is a docker-only func - docker manifest create "$IMAGE_REPO:$VERSION" "${RELEASE_IMAGE_TAGS[@]}" >&2 - docker tag "$IMAGE_REPO:$VERSION" "$IMAGE_REPO:latest" - docker manifest push "$IMAGE_REPO:$VERSION" - docker manifest push "$IMAGE_REPO:latest" +docker_create_manifest() { + local amend_images_options + for image in "${RELEASE_IMAGE_TAGS[@]}"; do + amend_images_options+="--amend $image" + done + # use docker cli directly for clarity, as this is a docker-only func + docker manifest create "$IMAGE_REPO:$VERSION" "${RELEASE_IMAGE_TAGS[@]}" >&2 + docker tag "$IMAGE_REPO:$VERSION" "$IMAGE_REPO:latest" + docker manifest push "$IMAGE_REPO:$VERSION" + docker manifest push "$IMAGE_REPO:latest" } push_container_manifest() { - local cli="$(alias podman)" - if [[ "${cli#*=}" =~ docker ]]; then - 
docker_create_manifest - else - podman_create_manifest - fi + local cli="$(alias podman)" + if [[ "${cli#*=}" =~ docker ]]; then + docker_create_manifest + else + podman_create_manifest + fi } debug() { - local version="$1" - local api_request="$2" - printf "Git Target: %s\n" "$TARGET" - printf "Image Artifact: %s\n" "$IMAGE_REPO:$VERSION" - printf "generate_version: %s\n" "$version" - printf "compose_release_request: %s\n" "$api_request" + local version="$1" + local api_request="$2" + printf "Git Target: %s\n" "$TARGET" + printf "Image Artifact: %s\n" "$IMAGE_REPO:$VERSION" + printf "generate_version: %s\n" "$version" + printf "compose_release_request: %s\n" "$api_request" } ######## # MAIN # ######## while [ $# -gt 0 ]; do - case "$1" in + case "$1" in "--token") - TOKEN="${2:-}" - [[ "$TOKEN" =~ ^-.* ]] || [[ -z "$TOKEN" ]] && { - printf "flag $1 git release API calls require robot token" - exit 1 - } - shift 2 - ;; + TOKEN="${2:-}" + [[ "$TOKEN" =~ ^-.* ]] || [[ -z "$TOKEN" ]] && { + printf "flag $1 git release API calls require robot token" + exit 1 + } + shift 2 + ;; "--version") - VERSION="${2:-}" - [[ "$VERSION" =~ ^-.* ]] || [[ -z "$VERSION" ]] && { - printf "flag $1 expects a version input value" - exit 1 - } - shift 2 - ;; + VERSION="${2:-}" + [[ "$VERSION" =~ ^-.* ]] || [[ -z "$VERSION" ]] && { + printf "flag $1 expects a version input value" + exit 1 + } + shift 2 + ;; "-h" | "--help") - help && exit - ;; + help && exit + ;; *) - echo "unknown input: $1" && help && exit 1 - ;; - esac + echo "unknown input: $1" && help && exit 1 + ;; + esac done printf "Using container manager: %s\n" "$(podman --version)" @@ -237,14 +237,14 @@ QUAY_OWNER=${QUAY_OWNER:="microshift"} API_DATA="$(generate_api_release_request "true")" # leave body empty for now IMAGE_REPO="quay.io/$QUAY_OWNER/microshift" -RELEASE_IMAGE_TAGS=("$IMAGE_REPO:$VERSION-linux-amd64" "$IMAGE_REPO:$VERSION-linux-arm64" ) +RELEASE_IMAGE_TAGS=("$IMAGE_REPO:$VERSION-linux-amd64" 
"$IMAGE_REPO:$VERSION-linux-arm64") STAGING_DIR="$ROOT/_output/staging" mkdir -p "$STAGING_DIR" -build_container_images_artifacts || exit 1 -STAGE_DIR=$(stage_release_image_binaries) || exit 1 -push_container_image_artifacts || exit 1 -push_container_manifest || exit 1 -UPLOAD_URL="$(git_create_release "$API_DATA" "$TOKEN")" || exit 1 -git_post_artifacts "$STAGE_DIR" "$UPLOAD_URL" "$TOKEN" || exit 1 +build_container_images_artifacts || exit 1 +STAGE_DIR=$(stage_release_image_binaries) || exit 1 +push_container_image_artifacts || exit 1 +push_container_manifest || exit 1 +UPLOAD_URL="$(git_create_release "$API_DATA" "$TOKEN")" || exit 1 +git_post_artifacts "$STAGE_DIR" "$UPLOAD_URL" "$TOKEN" || exit 1 diff --git a/validate-microshift/e2e/microshift/10-assert.yaml b/validate-microshift/e2e/microshift/10-assert.yaml index af4ae772ce..8655a5829b 100644 --- a/validate-microshift/e2e/microshift/10-assert.yaml +++ b/validate-microshift/e2e/microshift/10-assert.yaml @@ -4,9 +4,9 @@ kind: TestAssert timeout: 300 --- apiVersion: apps/v1 -kind: DaemonSet +kind: DaemonSet metadata: name: kube-flannel-ds - namespace: kube-system + namespace: kube-system status: numberReady: 1 diff --git a/validate-microshift/e2e/microshift/15-assert.yaml b/validate-microshift/e2e/microshift/15-assert.yaml index fbf86e82c7..e9b4c4493e 100644 --- a/validate-microshift/e2e/microshift/15-assert.yaml +++ b/validate-microshift/e2e/microshift/15-assert.yaml @@ -4,9 +4,9 @@ kind: TestAssert timeout: 300 --- apiVersion: apps/v1 -kind: Deployment +kind: Deployment metadata: - name: service-ca + name: service-ca namespace: openshift-service-ca status: availableReplicas: 1 diff --git a/validate-microshift/e2e/microshift/20-assert.yaml b/validate-microshift/e2e/microshift/20-assert.yaml index 340d15d3a5..d20adb8f76 100644 --- a/validate-microshift/e2e/microshift/20-assert.yaml +++ b/validate-microshift/e2e/microshift/20-assert.yaml @@ -4,9 +4,9 @@ kind: TestAssert timeout: 300 --- apiVersion: apps/v1 -kind: 
Deployment +kind: Deployment metadata: - name: router-default + name: router-default namespace: openshift-ingress status: availableReplicas: 1