@@ -21,6 +21,15 @@ build.debug:
21
21
--config-settings=cmake.args=" -DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-ggdb -O0';-DCMAKE_CXX_FLAGS='-ggdb -O0'" \
22
22
--editable .
23
23
24
+ build.debug.extra :
25
+ python3 -m pip install \
26
+ --verbose \
27
+ --config-settings=cmake.verbose=true \
28
+ --config-settings=logging.level=INFO \
29
+ --config-settings=install.strip=false \
30
+ --config-settings=cmake.args=" -DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-fsanitize=address -ggdb -O0';-DCMAKE_CXX_FLAGS='-fsanitize=address -ggdb -O0'" \
31
+ --editable .
32
+
24
33
build.cuda :
25
34
CMAKE_ARGS=" -DGGML_CUDA=on" python3 -m pip install --verbose -e .
26
35
@@ -46,7 +55,7 @@ build.rpc:
46
55
CMAKE_ARGS=" -DGGML_RPC=on" python3 -m pip install --verbose -e .
47
56
48
57
build.sdist :
49
- python3 -m build --sdist
58
+ python3 -m build --sdist --verbose
50
59
51
60
deploy.pypi :
52
61
python3 -m twine upload dist/*
@@ -56,7 +65,7 @@ deploy.gh-docs:
56
65
mkdocs gh-deploy
57
66
58
67
test :
59
- python3 -m pytest
68
+ python3 -m pytest --full-trace -v
60
69
61
70
docker :
62
71
docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile .
@@ -68,11 +77,11 @@ clean:
68
77
- cd vendor/llama.cpp && make clean
69
78
- cd vendor/llama.cpp && rm libllama.so
70
79
- rm -rf _skbuild
71
- - rm llama_cpp/*.so
72
- - rm llama_cpp/*.dylib
73
- - rm llama_cpp/*.metal
74
- - rm llama_cpp/*.dll
75
- - rm llama_cpp/*.lib
80
+ - rm llama_cpp/lib/*.so
81
+ - rm llama_cpp/lib/*.dylib
82
+ - rm llama_cpp/lib/*.metal
83
+ - rm llama_cpp/lib/*.dll
84
+ - rm llama_cpp/lib/*.lib
76
85
77
86
.PHONY : \
78
87
update \
0 commit comments