25 changes: 7 additions & 18 deletions .github/workflows/cmake-multi-platform.yml
@@ -16,32 +16,26 @@ jobs:
# Set fail-fast to false to ensure that feedback is delivered for all matrix combinations. Consider changing this to true when your workflow is stable.
fail-fast: false

# Set up a matrix to run the following 3 configurations:
# Set up a matrix to run the following 2 configurations:
# 1. <Windows, Release, latest MSVC compiler toolchain on the default runner image, default generator>
# 2. <Linux, Release, latest GCC compiler toolchain on the default runner image, default generator>
# 3. <Linux, Release, latest Clang compiler toolchain on the default runner image, default generator>
# 2. <macOS, Release, latest Clang compiler toolchain on the default runner image, default generator>
#
# To add more build types (Release, Debug, RelWithDebInfo, etc.) customize the build_type list.
matrix:
os: [ubuntu-latest, windows-latest]
os: [macos-latest, windows-latest]
build_type: [Release]
c_compiler: [gcc, clang, cl]
c_compiler: [clang, cl]
include:
- os: windows-latest
c_compiler: cl
cpp_compiler: cl
- os: ubuntu-latest
c_compiler: gcc
cpp_compiler: g++
- os: ubuntu-latest
- os: macos-latest
c_compiler: clang
cpp_compiler: clang++
exclude:
- os: windows-latest
c_compiler: gcc
- os: windows-latest
c_compiler: clang
- os: ubuntu-latest
- os: macos-latest
c_compiler: cl

steps:
@@ -62,14 +56,9 @@ jobs:
-DCMAKE_CXX_COMPILER=${{ matrix.cpp_compiler }}
-DCMAKE_C_COMPILER=${{ matrix.c_compiler }}
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
-DLLAMA_CURL=OFF
-S ${{ github.workspace }}

- name: Build
# Build your program with the given configuration. Note that --config is needed because the default Windows generator is a multi-config generator (Visual Studio generator).
run: cmake --build ${{ steps.strings.outputs.build-output-dir }} --config ${{ matrix.build_type }}

- name: Test
working-directory: ${{ steps.strings.outputs.build-output-dir }}
# Execute tests defined by the CMake configuration. Note that --build-config is needed because the default Windows generator is a multi-config generator (Visual Studio generator).
# See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail
run: ctest --build-config ${{ matrix.build_type }}
16 changes: 10 additions & 6 deletions common/CMakeLists.txt
@@ -99,13 +99,17 @@ if (LLAMA_CURL)
# Use curl to download model url
find_package(CURL)
if (NOT CURL_FOUND)
message(FATAL_ERROR "Could NOT find CURL. Hint: to disable this feature, set -DLLAMA_CURL=OFF")
message(WARNING "Could NOT find CURL. Falling back to cpp-httplib. Hint: to explicitly disable CURL, set -DLLAMA_CURL=OFF")
set(LLAMA_CURL OFF)
else()
target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL)
include_directories(${CURL_INCLUDE_DIRS})
set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARIES})
endif()
target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL)
include_directories(${CURL_INCLUDE_DIRS})
set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARIES})
elseif (LLAMA_HTTPLIB)
# otherwise, use cpp-httplib
endif()

if (NOT LLAMA_CURL AND LLAMA_HTTPLIB)
# Use cpp-httplib if CURL is disabled or not found
target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_HTTPLIB)
set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} cpp-httplib)
endif()
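
The compile definitions this block sets (LLAMA_USE_CURL when libcurl is found, LLAMA_USE_HTTPLIB when the cpp-httplib fallback is used) are what downstream C++ code can branch on at build time. The sketch below only illustrates that dispatch under assumed names; http_backend_available and the exact include choices are not part of this PR.

```cpp
// Hedged sketch: select an HTTP backend based on the defines set in common/CMakeLists.txt.
// LLAMA_USE_CURL / LLAMA_USE_HTTPLIB come from the build system; the helper name is illustrative.
#include <cstdio>

#if defined(LLAMA_USE_CURL)
    #include <curl/curl.h>
#elif defined(LLAMA_USE_HTTPLIB)
    #include <httplib.h>
#endif

static bool http_backend_available() {
#if defined(LLAMA_USE_CURL)
    // libcurl backend: global init must succeed before any transfer is attempted
    return curl_global_init(CURL_GLOBAL_DEFAULT) == CURLE_OK;
#elif defined(LLAMA_USE_HTTPLIB)
    // cpp-httplib backend: header-only, no global initialization required
    return true;
#else
    // neither backend was compiled in, so model URLs cannot be fetched
    std::fprintf(stderr, "no HTTP backend available\n");
    return false;
#endif
}
```

Keeping the check in one place means callers do not have to repeat the preprocessor logic.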
2 changes: 1 addition & 1 deletion docs/build.md
@@ -65,7 +65,7 @@ cmake --build build --config Release
cmake --preset x64-windows-llvm-release
cmake --build build-x64-windows-llvm-release
```
- Curl usage is enabled by default and can be turned off with `-DLLAMA_CURL=OFF`. Otherwise you need to install development libraries for libcurl.
- Curl usage is enabled by default and can be turned off with `-DLLAMA_CURL=OFF`. If libcurl is not found, the build will automatically fall back to using cpp-httplib. To install libcurl for better performance:
- **Debian / Ubuntu:** `sudo apt-get install libcurl4-openssl-dev` # (or `libcurl4-gnutls-dev` if you prefer GnuTLS)
- **Fedora / RHEL / Rocky / Alma:** `sudo dnf install libcurl-devel`
- **Arch / Manjaro:** `sudo pacman -S curl` # includes libcurl headers
1 change: 1 addition & 0 deletions tools/frameforge/CMakeLists.txt
@@ -27,6 +27,7 @@ target_link_libraries(${TARGET} PRIVATE
if(EXISTS ${CMAKE_SOURCE_DIR}/external/whisper/CMakeLists.txt)
add_subdirectory(${CMAKE_SOURCE_DIR}/external/whisper ${CMAKE_BINARY_DIR}/whisper EXCLUDE_FROM_ALL)
target_link_libraries(${TARGET} PRIVATE whisper)
target_compile_definitions(${TARGET} PRIVATE FRAMEFORGE_WHISPER_SUPPORT)
else()
message(WARNING "Whisper not found at ${CMAKE_SOURCE_DIR}/external/whisper, frameforge-sidecar will build without Whisper support")
endif()
41 changes: 40 additions & 1 deletion tools/frameforge/frameforge-sidecar.cpp
@@ -1,5 +1,7 @@
#include "../../common/common.h"
#ifdef FRAMEFORGE_WHISPER_SUPPORT
#include "../../external/whisper/include/whisper.h"
#endif
#include "frameforge-ipc.h"
#include "frameforge-json.h"
#include "frameforge-schema.h"
@@ -67,7 +69,9 @@ Important rules:
Do not include explanations, only the JSON object.)";

struct frameforge_params {
#ifdef FRAMEFORGE_WHISPER_SUPPORT
std::string whisper_model;
#endif
std::string llama_model;
std::string audio_file;
std::string pipe_name = "frameforge_pipe";
@@ -79,7 +83,9 @@ struct frameforge_params {
static void print_usage(const char * argv0) {
fprintf(stderr, "Usage: %s [options]\n", argv0);
fprintf(stderr, "Options:\n");
#ifdef FRAMEFORGE_WHISPER_SUPPORT
fprintf(stderr, " -wm, --whisper-model FNAME Path to Whisper model file\n");
#endif
fprintf(stderr, " -lm, --llama-model FNAME Path to Llama model file\n");
fprintf(stderr, " -a, --audio FILE Audio file to transcribe (for testing)\n");
fprintf(stderr, " -p, --pipe NAME Named pipe name (default: frameforge_pipe)\n");
@@ -93,14 +99,17 @@ static bool parse_params(int argc, char ** argv, frameforge_params & params) {
for (int i = 1; i < argc; i++) {
std::string arg = argv[i];

#ifdef FRAMEFORGE_WHISPER_SUPPORT
if (arg == "-wm" || arg == "--whisper-model") {
if (i + 1 < argc) {
params.whisper_model = argv[++i];
} else {
fprintf(stderr, "Error: Missing value for %s\n", arg.c_str());
return false;
}
} else if (arg == "-lm" || arg == "--llama-model") {
} else
#endif
if (arg == "-lm" || arg == "--llama-model") {
if (i + 1 < argc) {
params.llama_model = argv[++i];
} else {
@@ -147,10 +156,12 @@ static bool parse_params(int argc, char ** argv, frameforge_params & params) {
}
}

#ifdef FRAMEFORGE_WHISPER_SUPPORT
if (params.whisper_model.empty()) {
fprintf(stderr, "Error: Whisper model path is required\n");
return false;
}
#endif

if (params.llama_model.empty()) {
fprintf(stderr, "Error: Llama model path is required\n");
@@ -201,6 +212,7 @@ static bool read_wav(const std::string & fname, std::vector<float> & pcmf32, int
return true;
}

#ifdef FRAMEFORGE_WHISPER_SUPPORT
// Transcribe audio using Whisper
static std::string transcribe_audio(whisper_context * wctx, const std::vector<float> & pcmf32, bool verbose) {
if (!wctx) {
@@ -229,6 +241,7 @@ static std::string transcribe_audio(whisper_context * wctx, const std::vector<fl

return text;
}
#endif

// Classify intent using Llama
static std::string classify_intent(llama_context * lctx, llama_model * model, const std::string & user_input, bool verbose) {
@@ -329,6 +342,7 @@ int main(int argc, char ** argv) {
fprintf(stderr, "No verb definitions file specified, using hard-coded defaults\n");
}

#ifdef FRAMEFORGE_WHISPER_SUPPORT
// Initialize Whisper
fprintf(stderr, "Loading Whisper model: %s\n", params.whisper_model.c_str());
whisper_context_params cparams = whisper_context_default_params();
@@ -337,14 +351,20 @@
fprintf(stderr, "Error: Failed to load Whisper model\n");
return 1;
}
#else
fprintf(stderr, "Note: Whisper support not available (not compiled with FRAMEFORGE_WHISPER_SUPPORT)\n");
void * wctx = nullptr; // Placeholder for conditional code paths
Review comment (Copilot AI, Jan 1, 2026):

Creating a placeholder void pointer for wctx when Whisper support is disabled could lead to maintenance issues. If code accidentally tries to use wctx in non-guarded sections, it could cause undefined behavior. Consider using a more type-safe approach, such as removing the placeholder entirely and ensuring all wctx usage is properly guarded with ifdef blocks.

Suggested change (remove this line):
void * wctx = nullptr; // Placeholder for conditional code paths
#endif
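
A minimal sketch of the guarded approach the review comment describes: declare wctx only when FRAMEFORGE_WHISPER_SUPPORT is defined and mirror the guard at every use, so no untyped placeholder exists in the non-Whisper build. This is a fragment of main(), not code from this PR, and the loader call shown is an assumption since the actual initialization line is collapsed in this diff.

```cpp
// Sketch only: wctx exists solely in Whisper-enabled builds.
#ifdef FRAMEFORGE_WHISPER_SUPPORT
    whisper_context_params cparams = whisper_context_default_params();
    // assumed loader call; the real one is hidden in the collapsed hunk above
    whisper_context * wctx = whisper_init_from_file_with_params(params.whisper_model.c_str(), cparams);
    if (!wctx) {
        fprintf(stderr, "Error: Failed to load Whisper model\n");
        return 1;
    }
#endif

    // ... every later use mirrors the same guard, e.g. on the cleanup paths:
#ifdef FRAMEFORGE_WHISPER_SUPPORT
    whisper_free(wctx);
#endif
```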

// Initialize Llama
fprintf(stderr, "Loading Llama model: %s\n", params.llama_model.c_str());
llama_model_params model_params = llama_model_default_params();
llama_model * model = llama_model_load_from_file(params.llama_model.c_str(), model_params);
if (!model) {
fprintf(stderr, "Error: Failed to load Llama model\n");
#ifdef FRAMEFORGE_WHISPER_SUPPORT
whisper_free(wctx);
#endif
return 1;
}

@@ -355,7 +375,9 @@
if (!lctx) {
fprintf(stderr, "Error: Failed to create Llama context\n");
llama_model_free(model);
#ifdef FRAMEFORGE_WHISPER_SUPPORT
whisper_free(wctx);
#endif
return 1;
}

@@ -366,6 +388,7 @@
if (!params.audio_file.empty()) {
fprintf(stderr, "Processing audio file: %s\n", params.audio_file.c_str());

#ifdef FRAMEFORGE_WHISPER_SUPPORT
std::vector<float> pcmf32;
int sample_rate = 0;
if (!read_wav(params.audio_file, pcmf32, sample_rate)) {
@@ -378,9 +401,19 @@
fprintf(stderr, "Transcribing audio...\n");
std::string transcription = transcribe_audio(wctx, pcmf32, params.verbose);
fprintf(stderr, "Transcription: %s\n", transcription.c_str());
#else
fprintf(stderr, "Error: Audio transcription requires Whisper support (not compiled)\n");
llama_free(lctx);
llama_model_free(model);
return 1;
#endif

fprintf(stderr, "Classifying intent...\n");
#ifdef FRAMEFORGE_WHISPER_SUPPORT
std::string llm_response = classify_intent(lctx, model, transcription, params.verbose);
#else
std::string llm_response = ""; // Unreachable due to error above
#endif
Comment on lines +414 to +416 (Copilot AI, Jan 1, 2026):

This else block contains unreachable dead code. Since the code at lines 405-408 returns early when FRAMEFORGE_WHISPER_SUPPORT is not defined, the subsequent classification code will never execute. Consider removing this else block entirely, or restructure the code to avoid the dead code path.
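
A sketch of the restructuring the comment asks for, assuming the surrounding main() stays as in this diff: keep transcription and classification inside one guarded block so the non-Whisper build never reaches the classify call. This is a fragment for illustration, not the PR's code.

```cpp
// Sketch: single guarded block, no dead else branch for the classification step.
#ifdef FRAMEFORGE_WHISPER_SUPPORT
    fprintf(stderr, "Transcribing audio...\n");
    std::string transcription = transcribe_audio(wctx, pcmf32, params.verbose);
    fprintf(stderr, "Transcription: %s\n", transcription.c_str());

    fprintf(stderr, "Classifying intent...\n");
    std::string llm_response = classify_intent(lctx, model, transcription, params.verbose);
    fprintf(stderr, "LLM Response: %s\n", llm_response.c_str());
#else
    // without Whisper there is nothing to transcribe, so fail before touching the LLM
    fprintf(stderr, "Error: Audio transcription requires Whisper support (not compiled)\n");
    llama_free(lctx);
    llama_model_free(model);
    return 1;
#endif
```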
fprintf(stderr, "LLM Response: %s\n", llm_response.c_str());

// Validate the command
@@ -398,7 +431,9 @@

llama_free(lctx);
llama_model_free(model);
#ifdef FRAMEFORGE_WHISPER_SUPPORT
whisper_free(wctx);
#endif
return 0;
}

@@ -410,7 +445,9 @@
fprintf(stderr, "Error: Failed to start IPC server\n");
llama_free(lctx);
llama_model_free(model);
#ifdef FRAMEFORGE_WHISPER_SUPPORT
whisper_free(wctx);
#endif
return 1;
}

@@ -433,7 +470,9 @@
ipc_server.stop();
llama_free(lctx);
llama_model_free(model);
#ifdef FRAMEFORGE_WHISPER_SUPPORT
whisper_free(wctx);
#endif

return 0;
}