Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
77 commits
Select commit Hold shift + click to select a range
3f8752b
docs : fix broken link to ggml-openvino in OPENVINO.md (#21709)
ibelem Apr 10, 2026
d7ff074
common : enable reasoning budget sampler for gemma4 (#21697)
berkidem Apr 10, 2026
f989a6e
webui: Static build output improvements (#21667)
allozaur Apr 10, 2026
0893f50
common: mark --split-mode tensor as experimental (#21684)
JohannesGaessler Apr 10, 2026
fb38d6f
common : fix when loading a cached HF model with unavailable API (#2…
angt Apr 10, 2026
5dd1025
server : ignore --alias when using --models-preset (#21380)
angt Apr 10, 2026
e4fed9d
ggml-webgpu: address quantization precision and backend lifecycle man…
Constannnnnt Apr 10, 2026
bfd1f45
ggml-webgpu: support non-square subgroup matrix configs for Intel GPU…
SharmaRithik Apr 10, 2026
e62fa13
model : make Gemma 4 shared-KV tail attn_k tensors optional on load (…
MoonRide303 Apr 10, 2026
05b3caa
common : add callback interface for download progress (#21735)
angt Apr 10, 2026
3fc6506
common : better align to the updated official gemma4 template (#21704)
aldehir Apr 10, 2026
9aa2807
hexagon: improved Op queuing, buffer and cache management (#21705)
max-krasnyansky Apr 10, 2026
81069a8
hexagon: add support for linux on snapdragon (#21707)
tboinovski1 Apr 10, 2026
b136b62
fix: Fix broken structured output when using $refs in json_schema (#2…
Galunid Apr 10, 2026
a29e4c0
CUDA: also store node->src ne/nb for graph equality (#21736)
am17an Apr 11, 2026
660386f
py : Bump typer to latest to fix huggingface_hub issue (#21701)
bartowski1182 Apr 11, 2026
2b2cd57
ggml : fix a few instances of missing GGML_TYPE_Q1_0 cases (#21716)
CISC Apr 11, 2026
865ff06
TP: fix Qwen 3 Next data split (#21732)
JohannesGaessler Apr 11, 2026
af1127d
opencl: add basic support for q5_k (#21593)
shaofeiqi Apr 11, 2026
073bb2c
mtmd : add MERaLiON-2 multimodal audio support (#21756)
SiruiHe Apr 11, 2026
ff5ef82
CUDA: skip compilation of superfluous FA kernels (#21768)
JohannesGaessler Apr 11, 2026
6313acb
docs: add guide on how to add multimodal support (#21778)
ngxson Apr 12, 2026
9e209c5
fix: Proper messages rendering for "Show raw output" (#21672)
allozaur Apr 12, 2026
547765a
mtmd: add Gemma 4 audio conformer encoder support (#21421)
stephencox-ict Apr 12, 2026
aa4695c
mtmd: add gemma 4 test (vision + audio) [no ci] (#21806)
ngxson Apr 12, 2026
1e9d771
convert : force f16 or f32 on step3-vl conv weights (#21646)
CISC Apr 12, 2026
21a4933
mtmd: qwen3 audio support (qwen3-omni and qwen3-asr) (#19441)
ngxson Apr 12, 2026
82764d8
mtmd: fix crash when sending image under 2x2 pixels (#21711)
mzsergiu Apr 12, 2026
873c825
sycl: disable Q1_0 in backend and cleanup unused variables (#21807)
qnixsynapse Apr 13, 2026
bafae27
Remove extra conditional check on debug mode. (#21798)
yomaytk Apr 13, 2026
227ed28
webui: MCP Diagnostics improvements (#21803)
allozaur Apr 13, 2026
974c8c9
webui: add setting for first-line chat titles (#21797)
crodjer Apr 13, 2026
920b3e7
mtmd: use causal attn for gemma 4 audio (#21824)
ngxson Apr 13, 2026
9f5e1ed
CUDA: Limit DeviceSegmentedSort to immediate mode (#21718)
ORippler Apr 13, 2026
ce8fd4b
server: Expose build_info in router mode (#21835)
gaspardpetit Apr 13, 2026
aa00911
common : add download cancellation and temp file cleanup (#21813)
angt Apr 13, 2026
75f3bc9
vulkan: Flash Attention DP4A shader for quantized KV cache (#20797)
0cc4m Apr 13, 2026
a8bad38
ci: Also exempt 'security' tag from auto-close (#21844)
ckastner Apr 13, 2026
1c0d908
chat: dedicated DeepSeek v3.2 parser + "official" template (#21785)
pwilkin Apr 13, 2026
e974923
docs: listing qwen3-asr and qwen3-omni as supported (#21857)
ngxson Apr 13, 2026
e21cdc1
common/gemma4 : handle parsing edge cases (#21760)
aldehir Apr 13, 2026
e489a5c
server: support OAI /v1/audio/transcriptions API (#21863)
ngxson Apr 14, 2026
6a6780a
vulkan: Support GGML_TYPE_NVFP4 (#21455)
jeffbolznv Apr 14, 2026
56666fa
common: skip reasoning budget sampler when no budget is requested (#2…
berkidem Apr 14, 2026
5a23695
ggml-webgpu: Update register tiling matmul to use f32 accumulation (#…
reeselevine Apr 14, 2026
acc37a4
cmake: fix CMP0194 warning on Windows with MSVC (#21630)
texasich Apr 14, 2026
2e05f06
ggml : fix ARM NEON nvfp4 dot product on non-dotprod targets (#21559)
richarddd Apr 14, 2026
be76dd0
vendor : update BoringSSL to 0.20260413.0 (#21881)
angt Apr 14, 2026
aa0f189
metal : add XIELU unary op (#20802)
seyoungjeong Apr 14, 2026
f4b5bf2
ci : re-enable mac workflows (#21894)
ggerganov Apr 14, 2026
1f30ac0
vulkan: Programmatically add RoundingModeRTE to all shaders when the …
jeffbolznv Apr 14, 2026
707c0b7
mtmd: add mtmd_image_tokens_get_decoder_pos() API (#21851)
ngxson Apr 14, 2026
c0de6ed
metal : fix FA support logic (#21898)
ggerganov Apr 14, 2026
fae3a28
ggml : remove ggml-ext.h (#21869)
ngxson Apr 14, 2026
5d14e5d
hexagon: optimization for HMX mat_mul (#21554)
njsyw1997 Apr 14, 2026
e39eba2
read n_ctx back after making llama_context (#21939)
smashedpumpkin Apr 15, 2026
e1a9a6d
autoparser: support case of JSON_NATIVE with per-call markers (test c…
pwilkin Apr 15, 2026
8dc530b
ci: disable test-backend-ops on Vulkan llvmpipe run and restore defau…
0cc4m Apr 15, 2026
80d8770
docs: more extensive RoPE documentation [no ci] (#21953)
ngxson Apr 15, 2026
adb541a
rpc : add native RDMA transport for RPC backend (RoCEv2) (#20590)
dvv101111 Apr 15, 2026
014dca4
CUDA: manage NCCL communicators in context (#21891)
JohannesGaessler Apr 15, 2026
a620695
CUDA: require explicit opt-in for P2P access (#21910)
JohannesGaessler Apr 15, 2026
20d3bc2
ggml-webgpu: Fix dequantization helpers to not pass in pointers (#21872)
reeselevine Apr 15, 2026
7e72b38
cuda: Q1_0 initial backend (#21629)
khosravipasha Apr 15, 2026
b3d7587
vulkan: optimize im2col (#21713)
0cc4m Apr 15, 2026
408225b
server: use random media marker (#21962)
ngxson Apr 15, 2026
b1be68e
[SYCL] Fix Q8_0 reorder: garbage on 2nd prompt + crash on full VRAM (…
PMZFX Apr 16, 2026
8612ed1
ci : Use ggml-org/ccache-action on RISC-V as well (#21632)
luhenry Apr 16, 2026
82677a6
ggml-webgpu: compute pass batching and removing profiling overhead (#…
reeselevine Apr 16, 2026
90fb96a
devops : added spirv-headers to nix (#21965)
yuannan Apr 16, 2026
5637536
ggml : implemented simd_gemm kernel for riscv vector extension (#20627)
rehan-10xengineer Apr 16, 2026
1e796eb
ggml-cpu: add 128-bit RVV implementation for Quantization Vector Dot …
rehan-10xengineer Apr 16, 2026
ae2d348
metal: Implement ROLL op (#21946)
kushagharahi Apr 16, 2026
3f7c29d
ggml: add graph_reused (#21764)
am17an Apr 16, 2026
03b3d07
Convert: Fix NemotronH Config Parsing (#21664)
anavp-nvidia Apr 16, 2026
b572d1e
codeowners: add team member comments (#21714)
0cc4m Apr 16, 2026
d27acb0
Merge branch 'layla-build' into merge
l3utterfly Apr 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .devops/nix/package.nix
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
vulkan-loader,
openssl,
shaderc,
spirv-headers,
useBlas ?
builtins.all (x: !x) [
useCuda
Expand Down Expand Up @@ -145,6 +146,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
ninja
pkg-config
git
spirv-headers
]
++ optionals useCuda [
cudaPackages.cuda_nvcc
Expand Down
2 changes: 1 addition & 1 deletion .devops/vulkan.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ RUN apt update && apt install -y git build-essential cmake wget xz-utils

# Install SSL and Vulkan SDK dependencies
RUN apt install -y libssl-dev curl \
libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev libvulkan-dev glslc
libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev libvulkan-dev glslc spirv-headers

# Build it
WORKDIR /app
Expand Down
24 changes: 6 additions & 18 deletions .github/workflows/build-riscv.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,22 +47,10 @@ jobs:
steps:
- name: Install dependencies
run: |
sudo apt-get update

# Install necessary packages
sudo apt-get install -y libatomic1 libtsan2 gcc-14 g++-14 cmake build-essential wget git-lfs

# Set gcc-14 and g++-14 as the default compilers
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 100
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-14 100

if ! which rustc; then
# Install Rust stable version
sudo apt-get install -y rustup
rustup install stable
rustup default stable
fi

git lfs install

- name: GCC version check
Expand All @@ -74,12 +62,12 @@ jobs:
id: checkout
uses: actions/checkout@v6

# FIXME: Enable when ggml-org/ccache-action works on riscv64
# - name: ccache
# uses: ggml-org/ccache-action@v1.2.21
# with:
# key: ubuntu-riscv64-native-sanitizer-${{ matrix.sanytizer }}-${{ matrix.build_type }}
# save: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
- name: ccache
uses: ggml-org/ccache-action@afde29e5b5422e5da23cb1f639e8baecadeadfc3 # https://github.com/ggml-org/ccache-action/pull/1
with:
key: ubuntu-riscv64-native-sanitizer-${{ matrix.sanitizer }}-${{ matrix.build_type }}
evict-old-files: 1d
save: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}

- name: Build
id: cmake_build
Expand Down
108 changes: 53 additions & 55 deletions .github/workflows/build-self-hosted.yml
Original file line number Diff line number Diff line change
Expand Up @@ -141,61 +141,59 @@ jobs:
# amd-smi static
# GG_BUILD_ROCM=1 GG_BUILD_AMDGPU_TARGETS="gfx1101" bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp

# TODO: sandbox Mac runners
# ggml-ci-mac-metal:
# runs-on: [self-hosted, macOS, ARM64]
#
# steps:
# - name: Clone
# id: checkout
# uses: actions/checkout@v6
#
# - name: Test
# id: ggml-ci
# run: |
# GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
#
# ggml-ci-mac-webgpu:
# runs-on: [self-hosted, macOS, ARM64]
#
# steps:
# - name: Clone
# id: checkout
# uses: actions/checkout@v6
#
# - name: Dawn Dependency
# id: dawn-depends
# run: |
# DAWN_VERSION="v2.0.0"
# DAWN_OWNER="reeselevine"
# DAWN_REPO="dawn"
# DAWN_ASSET_NAME="Dawn-5e9a4865b1635796ccc77dd30057f2b4002a1355-macos-latest-Release"
# echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}.zip"
# curl -L -o artifact.zip \
# "https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}.zip"
# mkdir dawn
# unzip artifact.zip
# tar -xvf ${DAWN_ASSET_NAME}.tar.gz -C dawn --strip-components=1
#
# - name: Test
# id: ggml-ci
# run: |
# GG_BUILD_WEBGPU=1 GG_BUILD_WEBGPU_DAWN_PREFIX="$GITHUB_WORKSPACE/dawn" \
# bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
#
# ggml-ci-mac-vulkan:
# runs-on: [self-hosted, macOS, ARM64]
#
# steps:
# - name: Clone
# id: checkout
# uses: actions/checkout@v6
#
# - name: Test
# id: ggml-ci
# run: |
# vulkaninfo --summary
# GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp
# Self-hosted macOS (Apple Silicon) runner: clone the repo and run the
# ggml CI script with the Metal backend enabled (GG_BUILD_METAL=1).
ggml-ci-mac-metal:
runs-on: [self-hosted, macOS, ARM64]

steps:
- name: Clone
id: checkout
uses: actions/checkout@v6

- name: Test
id: ggml-ci
run: |
GG_BUILD_METAL=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp

# Self-hosted macOS (Apple Silicon) runner: build + test with the WebGPU
# backend. A prebuilt Dawn release (the WebGPU implementation) is downloaded
# first and its prefix is passed to the CI script.
ggml-ci-mac-webgpu:
runs-on: [self-hosted, macOS, ARM64]

steps:
- name: Clone
id: checkout
uses: actions/checkout@v6

# Fetch and unpack the pinned Dawn release into ./dawn.
# Fix: the original hardcoded "google/dawn" in the echo and curl URLs while
# DAWN_OWNER/DAWN_REPO were defined but unused (dead assignments that could
# silently drift from the real URL). The URLs now use the variables; the
# resulting URL is byte-identical to before.
- name: Dawn Dependency
id: dawn-depends
run: |
DAWN_VERSION="v20260317.182325"
DAWN_OWNER="google"
DAWN_REPO="dawn"
DAWN_ASSET_NAME="Dawn-18eb229ef5f707c1464cc581252e7603c73a3ef0-macos-latest-Release"
echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}.tar.gz"
curl -L -o artifact.tar.gz \
"https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}.tar.gz"
mkdir dawn
tar -xvf artifact.tar.gz -C dawn --strip-components=1

- name: Test
id: ggml-ci
run: |
GG_BUILD_WEBGPU=1 GG_BUILD_WEBGPU_DAWN_PREFIX="$GITHUB_WORKSPACE/dawn" \
bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp

# Self-hosted macOS (Apple Silicon) runner: build + test with the Vulkan
# backend (GG_BUILD_VULKAN=1). `vulkaninfo --summary` is run first as a
# sanity check that a Vulkan runtime is available on the runner.
ggml-ci-mac-vulkan:
runs-on: [self-hosted, macOS, ARM64]

steps:
- name: Clone
id: checkout
uses: actions/checkout@v6

- name: Test
id: ggml-ci
run: |
vulkaninfo --summary
GG_BUILD_VULKAN=1 bash ./ci/run.sh ~/results/llama.cpp ~/mnt/llama.cpp

ggml-ci-linux-intel-vulkan:
runs-on: [self-hosted, Linux, Intel]
Expand Down
3 changes: 2 additions & 1 deletion .github/workflows/build-vulkan.yml
Original file line number Diff line number Diff line change
Expand Up @@ -93,4 +93,5 @@ jobs:
export GGML_VK_DISABLE_F16=1
export GGML_VK_DISABLE_COOPMAT=1
# This is using llvmpipe and runs slower than other backends
ctest -L main --verbose --timeout 4800
# test-backend-ops is too slow on llvmpipe, skip it
ctest -L main -E test-backend-ops --verbose --timeout 900
27 changes: 9 additions & 18 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ jobs:
id: depends
run: |
sudo apt-get update
sudo apt-get install -y gcc-14 g++-14 build-essential glslc libvulkan-dev libssl-dev ninja-build
sudo apt-get install -y gcc-14 g++-14 build-essential glslc libvulkan-dev spirv-headers libssl-dev ninja-build
echo "CC=gcc-14" >> "$GITHUB_ENV"
echo "CXX=g++-14" >> "$GITHUB_ENV"

Expand Down Expand Up @@ -1001,22 +1001,14 @@ jobs:
steps:
- name: Install dependencies
run: |
sudo apt-get update

# Install necessary packages
sudo apt-get install -y libatomic1 libtsan2 gcc-14 g++-14 cmake build-essential libssl-dev wget git-lfs
sudo apt-get update
sudo apt-get install -y libssl-dev

# Set gcc-14 and g++-14 as the default compilers
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 100
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-14 100

if ! which rustc; then
# Install Rust stable version
sudo apt-get install -y rustup
rustup install stable
rustup default stable
fi

git lfs install

- name: Check environment
Expand All @@ -1032,13 +1024,12 @@ jobs:
id: checkout
uses: actions/checkout@v6

# FIXME: Enable when ggml-org/ccache-action works on riscv64
# - name: ccache
# uses: ggml-org/ccache-action@v1.2.21
# with:
# key: ubuntu-cpu-riscv64-native
# evict-old-files: 1d
# save: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
- name: ccache
uses: ggml-org/ccache-action@afde29e5b5422e5da23cb1f639e8baecadeadfc3 # https://github.com/ggml-org/ccache-action/pull/1
with:
key: ubuntu-cpu-riscv64-native
evict-old-files: 1d
save: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}

- name: Build
id: cmake_build
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/close-issue.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ jobs:
steps:
- uses: actions/stale@v10
with:
exempt-issue-labels: "refactoring,help wanted,good first issue,research 🔬,bug,roadmap"
exempt-issue-labels: "refactoring,help wanted,good first issue,research 🔬,bug,roadmap,security"
days-before-issue-stale: 30
days-before-issue-close: 14
stale-issue-label: "stale"
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ jobs:
sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libssl-dev
else
sudo apt-get update -y
sudo apt-get install -y gcc-14 g++-14 build-essential glslc libvulkan-dev libssl-dev ninja-build
sudo apt-get install -y gcc-14 g++-14 build-essential glslc libvulkan-dev spirv-headers libssl-dev ninja-build
echo "CC=gcc-14" >> "$GITHUB_ENV"
echo "CXX=g++-14" >> "$GITHUB_ENV"
fi
Expand Down
77 changes: 39 additions & 38 deletions .github/workflows/server-self-hosted.yml
Original file line number Diff line number Diff line change
Expand Up @@ -84,41 +84,42 @@ jobs:
export ${{ matrix.extra_args }}
pytest -v -x -m "not slow"

server-cuda:
runs-on: [self-hosted, llama-server, Linux, NVIDIA]

name: server-cuda (${{ matrix.wf_name }})
strategy:
matrix:
build_type: [Release]
wf_name: ["GPUx1"]
include:
- build_type: Release
extra_args: "LLAMA_ARG_BACKEND_SAMPLING=1"
wf_name: "GPUx1, backend-sampling"
fail-fast: false

steps:
- name: Clone
id: checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}

- name: Build
id: cmake_build
run: |
cmake -B build -DGGML_SCHED_NO_REALLOC=ON
cmake --build build --config ${{ matrix.build_type }} -j $(sysctl -n hw.logicalcpu) --target llama-server

- name: Tests
id: server_integration_tests
if: ${{ (!matrix.disabled_on_pr || !github.event.pull_request) }}
run: |
cd tools/server/tests
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
export ${{ matrix.extra_args }}
pytest -v -x -m "not slow"
# TODO: provision CUDA runner
# server-cuda:
# runs-on: [self-hosted, llama-server, Linux, NVIDIA]
#
# name: server-cuda (${{ matrix.wf_name }})
# strategy:
# matrix:
# build_type: [Release]
# wf_name: ["GPUx1"]
# include:
# - build_type: Release
# extra_args: "LLAMA_ARG_BACKEND_SAMPLING=1"
# wf_name: "GPUx1, backend-sampling"
# fail-fast: false
#
# steps:
# - name: Clone
# id: checkout
# uses: actions/checkout@v6
# with:
# fetch-depth: 0
# ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}
#
# - name: Build
# id: cmake_build
# run: |
# cmake -B build -DGGML_SCHED_NO_REALLOC=ON
# cmake --build build --config ${{ matrix.build_type }} -j $(sysctl -n hw.logicalcpu) --target llama-server
#
# - name: Tests
# id: server_integration_tests
# if: ${{ (!matrix.disabled_on_pr || !github.event.pull_request) }}
# run: |
# cd tools/server/tests
# python3 -m venv venv
# source venv/bin/activate
# pip install -r requirements.txt
# export ${{ matrix.extra_args }}
# pytest -v -x -m "not slow"
18 changes: 17 additions & 1 deletion CODEOWNERS
Original file line number Diff line number Diff line change
@@ -1,5 +1,21 @@
# collaborators can optionally add themselves here to indicate their availability for reviewing related PRs
# multiplie collaborators per item can be specified
# multiple collaborators per item can be specified
#
# ggml-org/ci : CISC, danbev, ggerganov, netrunnereve, ngxson, taronaeo
# ggml-org/ggml-cann : hipudding
# ggml-org/ggml-cuda : JohannesGaessler, am17an, IMbackK, ORippler
# ggml-org/ggml-hexagon : lhez, max-krasnyansky
# ggml-org/ggml-metal : ggerganov
# ggml-org/ggml-opencl : lhez, max-krasnyansky
# ggml-org/ggml-rpc : rgerganov
# ggml-org/ggml-sycl : arthw
# ggml-org/ggml-vulkan : 0cc4m, jeffbolznv
# ggml-org/ggml-webgpu : reeselevine
# ggml-org/ggml-zdnn : taronaeo
# ggml-org/llama-common : ggerganov, aldehir, angt, danbev, ngxson, pwilkin
# ggml-org/llama-mtmd : ngxson
# ggml-org/llama-server : ggerganov, ngxson, allozaur, angt, ServeurpersoCom
# ggml-org/llama-webui : allozaur

/.devops/*.Dockerfile @ngxson
/.github/actions/ @ggml-org/ci
Expand Down
17 changes: 17 additions & 0 deletions cmake/arm64-linux-clang.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Toolchain file: cross-compile for 64-bit ARM Linux using clang/clang++.
# Usage: cmake -DCMAKE_TOOLCHAIN_FILE=cmake/arm64-linux-clang.cmake ...
set(CMAKE_SYSTEM_NAME      Linux)
set(CMAKE_SYSTEM_PROCESSOR arm64)

# Target triple handed to clang via --target (CMAKE_*_COMPILER_TARGET).
set(target aarch64-linux-gnu)

set(CMAKE_C_COMPILER   clang)
set(CMAKE_CXX_COMPILER clang++)

set(CMAKE_C_COMPILER_TARGET   ${target})
set(CMAKE_CXX_COMPILER_TARGET ${target})

# Architecture / floating-point model flags; -fno-finite-math-only restores
# inf/nan handling that -ffp-model=fast would otherwise drop.
set(arch_c_flags "-march=armv8.7-a -fvectorize -ffp-model=fast -fno-finite-math-only")
# Warnings silenced for this target's build.
set(warn_c_flags "-Wno-format -Wno-unused-variable -Wno-unused-function -Wno-gnu-zero-variadic-macro-arguments")

# *_FLAGS_INIT seeds the default compiler flags without clobbering user-supplied CMAKE_<LANG>_FLAGS.
set(CMAKE_C_FLAGS_INIT   "${arch_c_flags} ${warn_c_flags}")
set(CMAKE_CXX_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}")

Loading
Loading