diff --git a/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.installer.yaml b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.installer.yaml
new file mode 100644
index 0000000000000..bf7eff5f1643b
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.installer.yaml
@@ -0,0 +1,30 @@
+# Created with komac v2.15.0
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.installer.1.12.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b8934
+InstallerType: zip
+NestedInstallerType: portable
+NestedInstallerFiles:
+- RelativeFilePath: llama-batched-bench.exe
+- RelativeFilePath: llama-bench.exe
+- RelativeFilePath: llama-cli.exe
+- RelativeFilePath: llama-gguf-split.exe
+- RelativeFilePath: llama-imatrix.exe
+- RelativeFilePath: llama-mtmd-cli.exe
+- RelativeFilePath: llama-perplexity.exe
+- RelativeFilePath: llama-quantize.exe
+- RelativeFilePath: llama-server.exe
+- RelativeFilePath: llama-tokenize.exe
+- RelativeFilePath: llama-tts.exe
+Dependencies:
+  PackageDependencies:
+  - PackageIdentifier: Microsoft.VCRedist.2015+.x64
+ReleaseDate: 2026-04-26
+ArchiveBinariesDependOnPath: true
+Installers:
+- Architecture: x64
+  InstallerUrl: https://github.com/ggml-org/llama.cpp/releases/download/b8934/llama-b8934-bin-win-vulkan-x64.zip
+  InstallerSha256: 8C73A3096F0461666BB9DA09EB0642C21B43EFEFC39236D88A42D4B5D013F730
+ManifestType: installer
+ManifestVersion: 1.12.0
diff --git a/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.locale.en-US.yaml b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.locale.en-US.yaml
new file mode 100644
index 0000000000000..c1c1d357f04ee
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.locale.en-US.yaml
@@ -0,0 +1,55 @@
+# Created with komac v2.15.0
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.defaultLocale.1.12.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b8934
+PackageLocale: en-US
+Publisher: ggml
+PublisherUrl: https://github.com/ggml-org
+PublisherSupportUrl: https://github.com/ggml-org/llama.cpp/issues
+PackageName: llama.cpp
+PackageUrl: https://github.com/ggml-org/llama.cpp
+License: MIT
+LicenseUrl: https://github.com/ggml-org/llama.cpp/blob/HEAD/LICENSE
+ShortDescription: LLM inference in C/C++
+Tags:
+- ggml
+- llama
+ReleaseNotes: |-
+  hexagon: guard HMX clock request for v75+ platforms (#22377)
+  macOS/iOS:
+  - macOS Apple Silicon (arm64)
+  - macOS Apple Silicon (arm64, KleidiAI enabled)
+  - macOS Intel (x64)
+  - iOS XCFramework
+  Linux:
+  - Ubuntu x64 (CPU)
+  - Ubuntu arm64 (CPU)
+  - Ubuntu s390x (CPU)
+  - Ubuntu x64 (Vulkan)
+  - Ubuntu arm64 (Vulkan)
+  - Ubuntu x64 (ROCm 7.2)
+  - Ubuntu x64 (OpenVINO)
+  - Ubuntu x64 (SYCL FP32)
+  - Ubuntu x64 (SYCL FP16)
+  Android:
+  - Android arm64 (CPU)
+  Windows:
+  - Windows x64 (CPU)
+  - Windows arm64 (CPU)
+  - Windows x64 (CUDA 12) - CUDA 12.4 DLLs
+  - Windows x64 (CUDA 13) - CUDA 13.1 DLLs
+  - Windows x64 (Vulkan)
+  - Windows x64 (SYCL)
+  - Windows x64 (HIP)
+  openEuler:
+  - openEuler x86 (310p)
+  - openEuler x86 (910b, ACL Graph)
+  - openEuler aarch64 (310p)
+  - openEuler aarch64 (910b, ACL Graph)
+ReleaseNotesUrl: https://github.com/ggml-org/llama.cpp/releases/tag/b8934
+Documentations:
+- DocumentLabel: Wiki
+  DocumentUrl: https://github.com/ggml-org/llama.cpp/wiki
+ManifestType: defaultLocale
+ManifestVersion: 1.12.0
diff --git a/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.yaml b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.yaml
new file mode 100644
index 0000000000000..786c47e5b172d
--- /dev/null
+++ b/manifests/g/ggml/llamacpp/b8934/ggml.llamacpp.yaml
@@ -0,0 +1,8 @@
+# Created with komac v2.15.0
+# yaml-language-server: $schema=https://aka.ms/winget-manifest.version.1.12.0.schema.json
+
+PackageIdentifier: ggml.llamacpp
+PackageVersion: b8934
+DefaultLocale: en-US
+ManifestType: version
+ManifestVersion: 1.12.0