Commit cd548bd

feat: Add support for new mtmd api, add Qwen2.5-VL chat handler
1 parent 0dec788 commit cd548bd
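The build changes below wire llama.cpp's mtmd (multimodal) library into the package so that a Qwen2.5-VL chat handler can drive vision inputs. As a rough usage sketch of how such a handler would plug into llama-cpp-python's existing multimodal chat API, assuming a handler class named Qwen25VLChatHandler and illustrative file paths (only the Llama / chat_handler / create_chat_completion pattern is the library's established API; everything else here is an assumption, not confirmed by the diff):

# Hypothetical usage sketch; the handler class name and file paths are assumptions.
from llama_cpp import Llama
from llama_cpp.llama_chat_format import Qwen25VLChatHandler  # name assumed from the commit title

# Multimodal handlers in llama-cpp-python take the projector (mmproj) weights,
# which the mtmd library built below is responsible for loading.
chat_handler = Qwen25VLChatHandler(clip_model_path="qwen2.5-vl-mmproj.gguf")

llm = Llama(
    model_path="qwen2.5-vl-7b-instruct.gguf",
    chat_handler=chat_handler,
    n_ctx=4096,  # larger context to leave room for image tokens
)

response = llm.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
                {"type": "text", "text": "Describe this image."},
            ],
        }
    ]
)
print(response["choices"][0]["message"]["content"])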

File tree

4 files changed: +554 / -124 lines


CMakeLists.txt

Lines changed: 41 additions & 42 deletions
@@ -143,46 +143,45 @@ if (LLAMA_BUILD)
         )
     endif()
 
-    # if (LLAVA_BUILD)
-    #     if (LLAMA_CUBLAS OR LLAMA_CUDA)
-    #         add_compile_definitions(GGML_USE_CUBLAS)
-    #         add_compile_definitions(GGML_USE_CUDA)
-    #     endif()
-    #
-    #     if (LLAMA_METAL)
-    #         add_compile_definitions(GGML_USE_METAL)
-    #     endif()
-    #
-    #     # Building llava
-    #     add_subdirectory(vendor/llama.cpp/tools/mtmd)
-    #     set_target_properties(llava_shared PROPERTIES OUTPUT_NAME "llava")
-    #
-    #     if (WIN32)
-    #         set_target_properties(llava_shared PROPERTIES CUDA_ARCHITECTURES OFF)
-    #     endif()
-    #     llama_cpp_python_install_target(llava_shared)
-    #     if (WIN32)
-    #         install(
-    #             FILES $<TARGET_RUNTIME_DLLS:llava_shared>
-    #             DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib
-    #         )
-    #         install(
-    #             FILES $<TARGET_RUNTIME_DLLS:llava_shared>
-    #             DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib
-    #         )
-    #     endif()
-    #
-    #     # Fix for llava build: Add include directory for llama.h
-    #     # Move these commands after the add_subdirectory call
-    #     target_include_directories(llava PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
-    #     target_include_directories(llava PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include)
-    #
-    #     if (BUILD_SHARED_LIBS)
-    #         target_include_directories(llava_shared PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
-    #         target_include_directories(llava_shared PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include)
-    #     endif()
-    #
-    #     target_include_directories(llama-llava-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
-    #     target_include_directories(llama-minicpmv-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
-    # endif()
+    if (LLAVA_BUILD)
+        if (LLAMA_CUBLAS OR LLAMA_CUDA)
+            add_compile_definitions(GGML_USE_CUBLAS)
+            add_compile_definitions(GGML_USE_CUDA)
+        endif()
+
+        if (LLAMA_METAL)
+            add_compile_definitions(GGML_USE_METAL)
+        endif()
+
+        # Building llava
+        add_subdirectory(vendor/llama.cpp/tools/mtmd)
+
+        if (WIN32)
+            set_target_properties(mtmd PROPERTIES CUDA_ARCHITECTURES OFF)
+        endif()
+        llama_cpp_python_install_target(mtmd)
+        if (WIN32)
+            install(
+                FILES $<TARGET_RUNTIME_DLLS:mtmd>
+                DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib
+            )
+            install(
+                FILES $<TARGET_RUNTIME_DLLS:mtmd>
+                DESTINATION ${SKBUILD_PLATLIB_DIR}/llama_cpp/lib
+            )
+        endif()
+
+        # Fix for mtmd build: Add include directory for llama.h
+        # Move these commands after the add_subdirectory call
+        target_include_directories(mtmd PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
+        target_include_directories(mtmd PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include)
+
+        if (BUILD_SHARED_LIBS)
+            target_include_directories(mtmd PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
+            target_include_directories(mtmd PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/ggml/include)
+        endif()
+
+        # target_include_directories(llama-llava-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
+        # target_include_directories(llama-minicpmv-cli PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/vendor/llama.cpp/include)
+    endif()
 endif()
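The key change above: the old, commented-out block built and installed a llava_shared target, while the new block builds llama.cpp's tools/mtmd subdirectory and installs the mtmd target, copying its runtime DLLs into llama_cpp/lib both in the source tree and in the scikit-build platlib output. Below is a minimal, illustrative sketch of why those install destinations matter at runtime, assuming a ctypes-based loading pattern like the one the package uses for its other shared libraries (the helper name and library filenames are assumptions, not the library's actual loader):

# Illustrative sketch only; helper and filenames are assumptions for this example.
import ctypes
import pathlib
import sys

def find_mtmd_library(package_dir: pathlib.Path) -> pathlib.Path:
    # The CMake install() calls above copy the built shared library into
    # <package>/llama_cpp/lib, so that is the directory searched here.
    lib_dir = package_dir / "lib"
    if sys.platform == "win32":
        candidates = ["mtmd.dll"]
    elif sys.platform == "darwin":
        candidates = ["libmtmd.dylib"]
    else:
        candidates = ["libmtmd.so"]
    for name in candidates:
        path = lib_dir / name
        if path.exists():
            return path
    raise FileNotFoundError(f"no mtmd shared library found in {lib_dir}")

if __name__ == "__main__":
    import llama_cpp
    lib_path = find_mtmd_library(pathlib.Path(llama_cpp.__file__).parent)
    mtmd = ctypes.CDLL(str(lib_path))  # load the mtmd C API symbols
    print("loaded", lib_path)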
