diff --git a/Makefile b/Makefile
index 297938ae4c46..f7b4abb561d1 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=081fe431aa8fb6307145c4feb3eed4f48cab19f8
+CPPLLAMA_VERSION?=b841d0740855c5af1344a81f261139a45a2b39ee
 
 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
diff --git a/backend/cpp/llama/CMakeLists.txt b/backend/cpp/llama/CMakeLists.txt
index 031e49643fbb..1c68e60b42e3 100644
--- a/backend/cpp/llama/CMakeLists.txt
+++ b/backend/cpp/llama/CMakeLists.txt
@@ -75,11 +75,20 @@ add_library(hw_grpc_proto
   ${hw_proto_hdrs} )
 
 add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp)
-target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
-  absl::flags_parse
-  gRPC::${_REFLECTION}
-  gRPC::${_GRPC_GRPCPP}
-  protobuf::${_PROTOBUF_LIBPROTOBUF})
+
+# Conditionally link SYCL to grpc-server
+# https://github.com/ggerganov/llama.cpp/issues/8665
+set(GRPC_SERVER_EXTRA_LIBS "")
+if(DEFINED ENV{ONEAPI_ROOT})
+  list(APPEND GRPC_SERVER_EXTRA_LIBS sycl)
+endif()
+target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
+  absl::flags_parse
+  gRPC::${_REFLECTION}
+  gRPC::${_GRPC_GRPCPP}
+  protobuf::${_PROTOBUF_LIBPROTOBUF}
+  ${GRPC_SERVER_EXTRA_LIBS})
+
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
   add_dependencies(${TARGET} BUILD_INFO)
diff --git a/backend/cpp/llama/CMakeLists.txt.rpc-8662 b/backend/cpp/llama/CMakeLists.txt.rpc-8662
new file mode 100644
index 000000000000..82bbedcaa838
--- /dev/null
+++ b/backend/cpp/llama/CMakeLists.txt.rpc-8662
@@ -0,0 +1,7 @@
+# https://github.com/ggerganov/llama.cpp/issues/8665
+add_executable(rpc-server rpc-server.cpp)
+if(DEFINED ENV{ONEAPI_ROOT})
+  target_link_libraries(rpc-server PRIVATE ggml llama sycl)
+else()
+  target_link_libraries(rpc-server PRIVATE ggml llama)
+endif()
diff --git a/backend/cpp/llama/prepare.sh b/backend/cpp/llama/prepare.sh
index 6c00f27caa38..c1c94ce6c9d6 100644
--- a/backend/cpp/llama/prepare.sh
+++ b/backend/cpp/llama/prepare.sh
@@ -17,4 +17,7 @@ cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
 cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
 cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
-cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
\ No newline at end of file
+cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
+
+# https://github.com/ggerganov/llama.cpp/issues/8665
+cp -rfv CMakeLists.txt.rpc-8662 llama.cpp/examples/rpc/CMakeLists.txt