From ac114bc0c54b3ca3b72841edc5078ec63f60a88d Mon Sep 17 00:00:00 2001
From: Samaresh Kumar Singh
Date: Wed, 8 Apr 2026 21:21:22 -0500
Subject: [PATCH 1/2] docker: add OCI image labels to all published images

---
 .devops/cann.Dockerfile           | 14 ++++++++++++++
 .devops/cpu.Dockerfile            | 14 ++++++++++++++
 .devops/cuda.Dockerfile           | 15 +++++++++++++++
 .devops/intel.Dockerfile          | 14 ++++++++++++++
 .devops/llama-cli-cann.Dockerfile | 15 +++++++++++++++
 .devops/musa.Dockerfile           | 15 +++++++++++++++
 .devops/openvino.Dockerfile       | 14 ++++++++++++++
 .devops/rocm.Dockerfile           | 15 +++++++++++++++
 .devops/s390x.Dockerfile          | 14 ++++++++++++++
 .devops/vulkan.Dockerfile         | 14 ++++++++++++++
 .github/workflows/docker.yml      | 14 ++++++++++++++
 11 files changed, 158 insertions(+)

diff --git a/.devops/cann.Dockerfile b/.devops/cann.Dockerfile
index 843fe37d062..7858e55c803 100644
--- a/.devops/cann.Dockerfile
+++ b/.devops/cann.Dockerfile
@@ -5,6 +5,9 @@
 # Define the CANN base image for easier version updates later
 ARG CHIP_TYPE=910b
 ARG CANN_BASE_IMAGE=quay.io/ascend/cann:8.5.0-${CHIP_TYPE}-openeuler24.03-py3.11
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 # ==============================================================================
 # BUILD STAGE
 # ==============================================================================
@@ -67,6 +70,17 @@ RUN mkdir -p /app/full && \
 # ==============================================================================
 FROM ${CANN_BASE_IMAGE} AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 # -- Install runtime dependencies --
 RUN yum install -y libgomp curl && \
     yum clean all && \
diff --git a/.devops/cpu.Dockerfile b/.devops/cpu.Dockerfile
index d6579ecf1ad..0a304815ea4 100644
--- a/.devops/cpu.Dockerfile
+++ b/.devops/cpu.Dockerfile
@@ -1,4 +1,7 @@
 ARG UBUNTU_VERSION=24.04
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 
 FROM ubuntu:$UBUNTU_VERSION AS build
 
@@ -35,6 +38,17 @@ RUN mkdir -p /app/full \
 
 ## Base image
 FROM ubuntu:$UBUNTU_VERSION AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN apt-get update \
     && apt-get install -y libgomp1 curl \
     && apt autoremove -y \
diff --git a/.devops/cuda.Dockerfile b/.devops/cuda.Dockerfile
index b3f6ccfc984..0f467766daf 100644
--- a/.devops/cuda.Dockerfile
+++ b/.devops/cuda.Dockerfile
@@ -6,6 +6,10 @@ ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VER
 
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
 
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+
 FROM ${BASE_CUDA_DEV_CONTAINER} AS build
 
 # CUDA architecture to build for (defaults to all supported archs)
@@ -40,6 +44,17 @@ RUN mkdir -p /app/full \
 
 ## Base image
 FROM ${BASE_CUDA_RUN_CONTAINER} AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN apt-get update \
     && apt-get install -y libgomp1 curl \
     && apt autoremove -y \
diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile
index 8e830d46251..01ffc962095 100644
--- a/.devops/intel.Dockerfile
+++ b/.devops/intel.Dockerfile
@@ -1,4 +1,7 @@
 ARG ONEAPI_VERSION=2025.3.3-0-devel-ubuntu24.04
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 
 ## Build Image
 
@@ -33,6 +36,17 @@ RUN mkdir -p /app/full \
 
 FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS base
 
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 ARG IGC_VERSION=v2.30.1
 ARG IGC_VERSION_FULL=2_2.30.1+20950
 ARG COMPUTE_RUNTIME_VERSION=26.09.37435.1
diff --git a/.devops/llama-cli-cann.Dockerfile b/.devops/llama-cli-cann.Dockerfile
index d54e70838f2..201d16ab408 100644
--- a/.devops/llama-cli-cann.Dockerfile
+++ b/.devops/llama-cli-cann.Dockerfile
@@ -1,4 +1,7 @@
 ARG ASCEND_VERSION=8.5.0-910b-openeuler22.03-py3.10
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 
 FROM ascendai/cann:$ASCEND_VERSION AS build
 
@@ -28,6 +31,18 @@ RUN echo "Building with static libs" && \
 
 # TODO: use image with NNRT
 FROM ascendai/cann:$ASCEND_VERSION AS runtime
+
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 COPY --from=build /app/build/bin/llama-cli /app/build/bin/llama-completion /
 
 ENV LC_ALL=C.utf8
diff --git a/.devops/musa.Dockerfile b/.devops/musa.Dockerfile
index 665a76f58ce..24a970e8226 100644
--- a/.devops/musa.Dockerfile
+++ b/.devops/musa.Dockerfile
@@ -6,6 +6,10 @@ ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_V
 
 ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64
 
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+
 FROM ${BASE_MUSA_DEV_CONTAINER} AS build
 
 # MUSA architecture to build for (defaults to all supported archs)
@@ -45,6 +49,17 @@ RUN mkdir -p /app/full \
 
 ## Base image
 FROM ${BASE_MUSA_RUN_CONTAINER} AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN apt-get update \
     && apt-get install -y libgomp1 curl \
     && apt autoremove -y \
diff --git a/.devops/openvino.Dockerfile b/.devops/openvino.Dockerfile
index 31b58736d7e..c0c3f59a2a7 100644
--- a/.devops/openvino.Dockerfile
+++ b/.devops/openvino.Dockerfile
@@ -18,6 +18,10 @@ ARG LIBZE1_VERSION=1.27.0-1~24.04~ppa2
 ARG http_proxy=
 ARG https_proxy=
 
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+
 ## Build Image
 
 FROM ubuntu:${UBUNTU_VERSION} AS build
@@ -88,6 +92,16 @@ FROM ubuntu:${UBUNTU_VERSION} AS base
 # Pass proxy args to runtime stage
 ARG http_proxy
 ARG https_proxy
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
 
 RUN apt-get update \
     && apt-get install -y libgomp1 libtbb12 curl wget ocl-icd-libopencl1 \
diff --git a/.devops/rocm.Dockerfile b/.devops/rocm.Dockerfile
index 525ddc79051..c7f79a8be37 100644
--- a/.devops/rocm.Dockerfile
+++ b/.devops/rocm.Dockerfile
@@ -7,6 +7,10 @@ ARG AMDGPU_VERSION=7.2.1
 # Target the ROCm build image
 ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete
 
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+
 ### Build image
 FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 
@@ -57,6 +61,17 @@ RUN mkdir -p /app/full \
 
 ## Base image
 FROM ${BASE_ROCM_DEV_CONTAINER} AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN apt-get update \
     && apt-get install -y libgomp1 curl \
     && apt autoremove -y \
diff --git a/.devops/s390x.Dockerfile b/.devops/s390x.Dockerfile
index 757cd97cd4c..6de741bcbee 100644
--- a/.devops/s390x.Dockerfile
+++ b/.devops/s390x.Dockerfile
@@ -1,5 +1,8 @@
 ARG GCC_VERSION=15.2.0
 ARG UBUNTU_VERSION=24.04
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 
 ### Build Llama.cpp stage
 FROM gcc:${GCC_VERSION} AS build
@@ -52,6 +55,17 @@ COPY --from=build /opt/llama.cpp/gguf-py /llama.cpp/gguf-py
 
 ### Base image
 FROM ubuntu:${UBUNTU_VERSION} AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt/lists,sharing=locked \
     apt update -y && \
diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile
index f4d199ed426..1ab48a810f1 100644
--- a/.devops/vulkan.Dockerfile
+++ b/.devops/vulkan.Dockerfile
@@ -1,4 +1,7 @@
 ARG UBUNTU_VERSION=26.04
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
 
 FROM ubuntu:$UBUNTU_VERSION AS build
 
@@ -31,6 +34,17 @@ RUN mkdir -p /app/full \
 
 ## Base image
 FROM ubuntu:$UBUNTU_VERSION AS base
+ARG BUILD_DATE=N/A
+ARG APP_VERSION=N/A
+ARG APP_REVISION=N/A
+LABEL org.opencontainers.image.created=$BUILD_DATE \
+      org.opencontainers.image.version=$APP_VERSION \
+      org.opencontainers.image.revision=$APP_REVISION \
+      org.opencontainers.image.title="llama.cpp" \
+      org.opencontainers.image.description="LLM inference in C/C++" \
+      org.opencontainers.image.url="https://github.com/ggml-org/llama.cpp" \
+      org.opencontainers.image.source="https://github.com/ggml-org/llama.cpp"
+
 RUN apt-get update \
     && apt-get install -y libgomp1 curl libvulkan1 mesa-vulkan-drivers \
        libglvnd0 libgl1 libglx0 libegl1 libgles2 \
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index a5bae7141fe..68459c9f64b 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -132,6 +132,7 @@ jobs:
         config: ${{ fromJSON(needs.prepare_matrices.outputs.build_matrix) }}
     steps:
       - name: Check out the repo
+        id: checkout
        uses: actions/checkout@v6
         with:
           fetch-depth: 0
@@ -187,6 +188,10 @@ jobs:
         env:
           GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'
 
+      - name: Get build date
+        id: build_date
+        run: echo "date=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_OUTPUT
+
       - name: Free Disk Space (Ubuntu)
         if: ${{ matrix.config.free_disk_space == true }}
         uses: ggml-org/free-disk-space@v1.3.1
@@ -216,6 +221,9 @@ jobs:
           target: full
           provenance: false
           build-args: |
+            BUILD_DATE=${{ steps.build_date.outputs.date }}
+            APP_VERSION=${{ needs.create_tag.outputs.source_tag }}
+            APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
           # using github experimental cache
@@ -240,6 +248,9 @@ jobs:
           target: light
           provenance: false
           build-args: |
+            BUILD_DATE=${{ steps.build_date.outputs.date }}
+            APP_VERSION=${{ needs.create_tag.outputs.source_tag }}
+            APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
           # using github experimental cache
@@ -264,6 +275,9 @@ jobs:
           target: server
           provenance: false
           build-args: |
+            BUILD_DATE=${{ steps.build_date.outputs.date }}
+            APP_VERSION=${{ needs.create_tag.outputs.source_tag }}
+            APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
           # using github experimental cache

From 49c762fe5b933447a36a50e5608c7d0c2dfeb4c5 Mon Sep 17 00:00:00 2001
From: Samaresh Kumar Singh
Date: Wed, 22 Apr 2026 15:16:32 -0500
Subject: [PATCH 2/2] docker: propagate OCI labels as manifest and index annotations

---
 .github/workflows/docker.yml | 51 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 68459c9f64b..1e91662c0bc 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -216,7 +216,7 @@ jobs:
         with:
           context: .
           platforms: ${{ matrix.config.platforms }}
-          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true
+          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
           file: ${{ matrix.config.dockerfile }}
           target: full
           provenance: false
@@ -226,6 +226,14 @@ jobs:
             APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
+          annotations: |
+            manifest:org.opencontainers.image.created=${{ steps.build_date.outputs.date }}
+            manifest:org.opencontainers.image.version=${{ needs.create_tag.outputs.source_tag }}
+            manifest:org.opencontainers.image.revision=${{ steps.checkout.outputs.commit }}
+            manifest:org.opencontainers.image.title=llama.cpp
+            manifest:org.opencontainers.image.description=LLM inference in C/C++
+            manifest:org.opencontainers.image.url=https://github.com/ggml-org/llama.cpp
+            manifest:org.opencontainers.image.source=https://github.com/ggml-org/llama.cpp
           # using github experimental cache
           #cache-from: type=gha
           #cache-to: type=gha,mode=max
@@ -243,7 +251,7 @@ jobs:
         with:
           context: .
           platforms: ${{ matrix.config.platforms }}
-          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true
+          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
           file: ${{ matrix.config.dockerfile }}
           target: light
           provenance: false
@@ -253,6 +261,14 @@ jobs:
             APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
+          annotations: |
+            manifest:org.opencontainers.image.created=${{ steps.build_date.outputs.date }}
+            manifest:org.opencontainers.image.version=${{ needs.create_tag.outputs.source_tag }}
+            manifest:org.opencontainers.image.revision=${{ steps.checkout.outputs.commit }}
+            manifest:org.opencontainers.image.title=llama.cpp
+            manifest:org.opencontainers.image.description=LLM inference in C/C++
+            manifest:org.opencontainers.image.url=https://github.com/ggml-org/llama.cpp
+            manifest:org.opencontainers.image.source=https://github.com/ggml-org/llama.cpp
           # using github experimental cache
           #cache-from: type=gha
           #cache-to: type=gha,mode=max
@@ -270,7 +286,7 @@ jobs:
         with:
           context: .
           platforms: ${{ matrix.config.platforms }}
-          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true
+          outputs: type=image,name=${{ steps.meta.outputs.image_repo }},push-by-digest=true,name-canonical=true,push=true,oci-mediatypes=true
           file: ${{ matrix.config.dockerfile }}
           target: server
           provenance: false
@@ -280,6 +296,14 @@ jobs:
             APP_REVISION=${{ steps.checkout.outputs.commit }}
             ${{ matrix.config.ubuntu_version && format('UBUNTU_VERSION={0}', matrix.config.ubuntu_version) || '' }}
             ${{ matrix.config.cuda_version && format('CUDA_VERSION={0}', matrix.config.cuda_version) || '' }}
+          annotations: |
+            manifest:org.opencontainers.image.created=${{ steps.build_date.outputs.date }}
+            manifest:org.opencontainers.image.version=${{ needs.create_tag.outputs.source_tag }}
+            manifest:org.opencontainers.image.revision=${{ steps.checkout.outputs.commit }}
+            manifest:org.opencontainers.image.title=llama.cpp
+            manifest:org.opencontainers.image.description=LLM inference in C/C++
+            manifest:org.opencontainers.image.url=https://github.com/ggml-org/llama.cpp
+            manifest:org.opencontainers.image.source=https://github.com/ggml-org/llama.cpp
           # using github experimental cache
           #cache-from: type=gha
           #cache-to: type=gha,mode=max
@@ -344,10 +368,15 @@ jobs:
 
     steps:
       - name: Check out the repo
+        id: checkout
        uses: actions/checkout@v6
         with:
           fetch-depth: 0
 
+      - name: Get build date
+        id: build_date
+        run: echo "date=$(date -u +"%Y-%m-%dT%H:%M:%SZ")" >> $GITHUB_OUTPUT
+
       - name: Download digest metadata
         uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8
         with:
@@ -375,6 +404,8 @@ jobs:
           IMAGE_REPO="ghcr.io/${REPO_OWNER}/${REPO_NAME}"
           PREFIX="${IMAGE_REPO}:"
           SRC_TAG="${{ needs.create_tag.outputs.source_tag }}"
+          BUILD_DATE="${{ steps.build_date.outputs.date }}"
+          COMMIT_SHA="${{ steps.checkout.outputs.commit }}"
           TAGS="${{ matrix.config.tag }}"
           ARCHES="${{ matrix.config.arches }}"
           DIGEST_GLOB="/tmp/digests/*.tsv"
@@ -426,11 +457,21 @@ jobs:
             refs+=("${IMAGE_REPO}@${digest}")
           done
 
+          local annotations=(
+            --annotation "index:org.opencontainers.image.created=${BUILD_DATE}"
+            --annotation "index:org.opencontainers.image.version=${SRC_TAG}"
+            --annotation "index:org.opencontainers.image.revision=${COMMIT_SHA}"
+            --annotation "index:org.opencontainers.image.title=llama.cpp"
+            --annotation "index:org.opencontainers.image.description=LLM inference in C/C++"
+            --annotation "index:org.opencontainers.image.url=https://github.com/ggml-org/llama.cpp"
+            --annotation "index:org.opencontainers.image.source=https://github.com/ggml-org/llama.cpp"
+          )
+
           echo "Creating ${merged_tag} from ${refs[*]}"
-          docker buildx imagetools create --tag "${merged_tag}" "${refs[@]}"
+          docker buildx imagetools create "${annotations[@]}" --tag "${merged_tag}" "${refs[@]}"
 
           echo "Creating ${merged_versioned_tag} from ${refs[*]}"
-          docker buildx imagetools create --tag "${merged_versioned_tag}" "${refs[@]}"
+          docker buildx imagetools create "${annotations[@]}" --tag "${merged_versioned_tag}" "${refs[@]}"
         }
 
         for tag in $TAGS; do