Skip to content

build : link against build info instead of compiling against it#3879

Merged
ggerganov merged 6 commits into ggml-org:master from
cebtenzzre:build-info-obj
Nov 2, 2023
Merged

build : link against build info instead of compiling against it#3879
ggerganov merged 6 commits into ggml-org:master from
cebtenzzre:build-info-obj

Conversation

@cebtenzzre
Copy link
Copy Markdown
Collaborator

ref: #3842 (comment)

This makes incremental builds even faster by avoiding the need to recompile many objects just because the build info has changed (e.g., because the user ran git commit or git pull).

Instead, only the build info itself is recompiled, and executables that depend on it are re-linked.

I chose to export it as variables LLAMA_BUILD_NUMBER, LLAMA_COMMIT, LLAMA_COMPILER, and LLAMA_BUILD_TARGET, but these could have different names, or be functions as originally mentioned.

Other changes:

  • Make the cmake build actually succeed when run without a .git folder
  • cmake: Always generate build-info via the custom target instead of generating it early, simplifying the code and making the build faster as more things can run in parallel
  • Add a build info dependency to a few cmake targets that didn't have it

@cebtenzzre
Copy link
Copy Markdown
Collaborator Author

I can't figure out how to get zig to have an object file's build step depend on a cmake-style configuration "header". @kassane You recently touched build.zig, do you know what to do here?

Comment thread Makefile
@kassane
Copy link
Copy Markdown
Contributor

kassane commented Nov 1, 2023

Hi @cebtenzzre,

My patch

diff --git a/build.zig b/build.zig
index 9b58b74..699738f 100644
--- a/build.zig
+++ b/build.zig
@@ -10,7 +10,6 @@ const Maker = struct {
     builder: *std.build.Builder,
     target: CrossTarget,
     optimize: Mode,
-    config_header: *ConfigHeader,
     enable_lto: bool,
 
     include_dirs: ArrayList([]const u8),
@@ -41,26 +40,24 @@ const Maker = struct {
         const commit_hash = try std.ChildProcess.exec(
             .{ .allocator = builder.allocator, .argv = &.{ "git", "rev-parse", "HEAD" } },
         );
-        const config_header = builder.addConfigHeader(
-            .{ .style = .blank, .include_path = "build-info.h" },
-            .{
-                .BUILD_NUMBER = 0,
-                .BUILD_COMMIT = commit_hash.stdout[0 .. commit_hash.stdout.len - 1], // omit newline
-                .BUILD_COMPILER = builder.fmt("Zig {s}", .{zig_version}),
-                .BUILD_TARGET = try target.allocDescription(builder.allocator),
-            },
-        );
+        try std.fs.cwd().writeFile("common/build-info.cpp", builder.fmt(
+            \\int LLAMA_BUILD_NUMBER = {};
+            \\char const *LLAMA_COMMIT = "{s}";
+            \\char const *LLAMA_COMPILER = "Zig {s}";
+            \\char const *LLAMA_BUILD_TARGET = "{s}";
+            \\
+        , .{ 0, commit_hash.stdout[0 .. commit_hash.stdout.len - 1], zig_version, try target.allocDescription(builder.allocator) }));
         var m = Maker{
             .builder = builder,
             .target = target,
             .optimize = builder.standardOptimizeOption(.{}),
-            .config_header = config_header,
             .enable_lto = false,
             .include_dirs = ArrayList([]const u8).init(builder.allocator),
             .cflags = ArrayList([]const u8).init(builder.allocator),
             .cxxflags = ArrayList([]const u8).init(builder.allocator),
             .objs = ArrayList(*Compile).init(builder.allocator),
         };
+
         try m.addCFlag("-std=c11");
         try m.addCxxFlag("-std=c++11");
         try m.addProjectInclude(&.{});
@@ -72,7 +69,7 @@ const Maker = struct {
         const o = m.builder.addObject(.{ .name = name, .target = m.target, .optimize = m.optimize });
         if (o.target.getAbi() != .msvc)
             o.defineCMacro("_GNU_SOURCE", null);
-        o.addConfigHeader(m.config_header);
+
         if (std.mem.endsWith(u8, src, ".c")) {
             o.addCSourceFiles(&.{src}, m.cflags.items);
             o.linkLibC();
@@ -85,7 +82,6 @@ const Maker = struct {
                 o.linkLibCpp();
             }
         }
-        o.addConfigHeader(m.config_header);
         for (m.include_dirs.items) |i| o.addIncludePath(.{ .path = i });
         o.want_lto = m.enable_lto;
         return o;
@@ -105,7 +101,6 @@ const Maker = struct {
             // linkLibCpp already add (libc++ + libunwind + libc)
             e.linkLibCpp();
         }
-        e.addConfigHeader(m.config_header);
         m.builder.installArtifact(e);
         e.want_lto = m.enable_lto;
         return e;
@@ -121,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void {
     const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
     const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
     const llama = make.obj("llama", "llama.cpp");
+    const buildinfo = make.obj("common", "common/build-info.cpp");
     const common = make.obj("common", "common/common.cpp");
     const console = make.obj("console", "common/console.cpp");
     const sampling = make.obj("sampling", "common/sampling.cpp");
@@ -128,14 +124,14 @@ pub fn build(b: *std.build.Builder) !void {
     const train = make.obj("train", "common/train.cpp");
     const clip = make.obj("clip", "examples/llava/clip.cpp");
 
-    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, console, grammar_parser });
-    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
-    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
-    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common });
-    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train });
-    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, train });
+    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, console, grammar_parser });
+    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo });
+    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
+    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, train });
 
-    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, sampling, grammar_parser, clip });
+    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, common, buildinfo, sampling, grammar_parser, clip });
     if (server.target.isWindows()) {
         server.linkSystemLibrary("ws2_32");
     }

Output

Log start
main: build = 0 (56c1fad0b4a4ebce8cbb300ba4e4f4b2f5213b19)
main: built with Zig 0.11.0 for native
main: seed  = 1698843849
error loading model: failed to open models/7B/ggml-model-f16.gguf: Arquivo ou diretório inexistente
llama_load_model_from_file: failed to load model
llama_init_from_gpt_params: error: failed to load model 'models/7B/ggml-model-f16.gguf'
main: error: unable to load model

@ggerganov ggerganov added the "build (Compilation issues)" label on Nov 1, 2023
Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
Comment thread CMakeLists.txt Outdated
@ggerganov ggerganov merged commit b12fa0d into ggml-org:master Nov 2, 2023
olexiyb pushed a commit to Sanctum-AI/llama.cpp that referenced this pull request Nov 23, 2023
build : link against build info instead of compiling against it (ggml-org#3879)

* cmake : fix build when .git does not exist

* cmake : simplify BUILD_INFO target

* cmake : add missing dependencies on BUILD_INFO

* build : link against build info instead of compiling against it

* zig : make build info a .cpp source instead of a header

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>

* cmake : revert change to CMP0115

---------

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
Seunghhon pushed a commit to Seunghhon/llama.cpp that referenced this pull request Apr 26, 2026
build : link against build info instead of compiling against it (ggml-org#3879)

* cmake : fix build when .git does not exist

* cmake : simplify BUILD_INFO target

* cmake : add missing dependencies on BUILD_INFO

* build : link against build info instead of compiling against it

* zig : make build info a .cpp source instead of a header

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>

* cmake : revert change to CMP0115

---------

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
phuongncn pushed a commit to phuongncn/llama.cpp-gx10-dgx-sparks-deepseekv4 that referenced this pull request Apr 28, 2026
build : link against build info instead of compiling against it (ggml-org#3879)

* cmake : fix build when .git does not exist

* cmake : simplify BUILD_INFO target

* cmake : add missing dependencies on BUILD_INFO

* build : link against build info instead of compiling against it

* zig : make build info a .cpp source instead of a header

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>

* cmake : revert change to CMP0115

---------

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

build Compilation issues

Projects

None yet

Development

Successfully merging this pull request may close these issues.

3 participants