Thanks to visit codestin.com
Credit goes to github.com

Skip to content

Commit 1a55417

Browse files
committed
fix: Update LLAMA_ flags to GGML_ flags
1 parent 218d361 commit 1a55417

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

CMakeLists.txt

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -59,14 +59,14 @@ if (LLAMA_BUILD)
5959
if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
6060
# Need to disable these llama.cpp flags on Apple x86_64,
6161
# otherwise users may encounter invalid instruction errors
62-
set(LLAMA_AVX "Off" CACHE BOOL "llama: enable AVX" FORCE)
63-
set(LLAMA_AVX2 "Off" CACHE BOOL "llama: enable AVX2" FORCE)
64-
set(LLAMA_FMA "Off" CACHE BOOL "llama: enable FMA" FORCE)
65-
set(LLAMA_F16C "Off" CACHE BOOL "llama: enable F16C" FORCE)
62+
set(GGML_AVX "Off" CACHE BOOL "ggml: enable AVX" FORCE)
63+
set(GGML_AVX2 "Off" CACHE BOOL "ggml: enable AVX2" FORCE)
64+
set(GGML_FMA "Off" CACHE BOOL "ggml: enable FMA" FORCE)
65+
set(GGML_F16C "Off" CACHE BOOL "ggml: enable F16C" FORCE)
6666
endif()
6767

6868
if (APPLE)
69-
set(LLAMA_METAL_EMBED_LIBRARY "On" CACHE BOOL "llama: embed metal library" FORCE)
69+
set(GGML_METAL_EMBED_LIBRARY "On" CACHE BOOL "ggml: embed metal library" FORCE)
7070
endif()
7171

7272
add_subdirectory(vendor/llama.cpp)
@@ -122,4 +122,4 @@ if (LLAMA_BUILD)
122122
)
123123
endif()
124124
endif()
125-
endif()
125+
endif()

0 commit comments

Comments
 (0)