From 854d27f4d8fa59a45fd61fa0fccdbe418d768411 Mon Sep 17 00:00:00 2001
From: Chris Ismael
Date: Mon, 18 Sep 2023 20:15:02 -0700
Subject: [PATCH 1/4] Update dockerfile

---
 docker/cuda_simple/Dockerfile | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile
index a9e51cdc1..409a7436b 100644
--- a/docker/cuda_simple/Dockerfile
+++ b/docker/cuda_simple/Dockerfile
@@ -11,7 +11,7 @@ RUN apt-get update && apt-get upgrade -y \
     libclblast-dev libopenblas-dev \
     && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
 
-COPY . .
+# COPY . .
 
 # setting build related env vars
 ENV CUDA_DOCKER_ARCH=all
@@ -20,6 +20,14 @@ ENV LLAMA_CUBLAS=1
 # Install depencencies
 RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context
 
+RUN apt install -y git && git clone https://github.com/ismaelc/llama-cpp-python
+RUN cd llama-cpp-python && \
+    git submodule update --init --recursive && \
+    cd vendor/llama.cpp/ && \
+    make LLAMA_CUBLAS=1 -j8 && \
+    cd ../../ && \
+    CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install . &#13;
--no-cache-dir
+
 # Install llama-cpp-python (build with cuda)
 RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 

From a7b3c065a05f2b70b324d4141b042b852367cf22 Mon Sep 17 00:00:00 2001
From: Chris Ismael
Date: Mon, 18 Sep 2023 21:13:05 -0700
Subject: [PATCH 2/4] Fix entrypoint

---
 docker/cuda_simple/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile
index 409a7436b..b13c1ad61 100644
--- a/docker/cuda_simple/Dockerfile
+++ b/docker/cuda_simple/Dockerfile
@@ -32,4 +32,4 @@ RUN cd llama-cpp-python && \
 RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 
 # Run the server
-CMD python3 -m llama_cpp.server
+ENTRYPOINT python3 -m llama_cpp.server

From 5bb7d54f2a4c17ff3f41689f219314abbf82193d Mon Sep 17 00:00:00 2001
From: Chris Ismael
Date: Mon, 18 Sep 2023 22:26:33 -0700
Subject: [PATCH 3/4] Another fix

---
 docker/cuda_simple/Dockerfile | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile
index b13c1ad61..cca2c26d3 100644
--- a/docker/cuda_simple/Dockerfile
+++ b/docker/cuda_simple/Dockerfile
@@ -28,8 +28,5 @@ RUN cd llama-cpp-python && \
     cd ../../ && \
     CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install . &#13;
--no-cache-dir
 
-# Install llama-cpp-python (build with cuda)
-RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
-
 # Run the server
 ENTRYPOINT python3 -m llama_cpp.server

From 076490abb19e475144f405c4f37156c0bddd132c Mon Sep 17 00:00:00 2001
From: Chris Ismael
Date: Mon, 18 Sep 2023 23:48:52 -0700
Subject: [PATCH 4/4] Update

---
 docker/cuda_simple/Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile
index cca2c26d3..75f47fea0 100644
--- a/docker/cuda_simple/Dockerfile
+++ b/docker/cuda_simple/Dockerfile
@@ -20,7 +20,7 @@ ENV LLAMA_CUBLAS=1
 # Install depencencies
 RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings starlette-context
 
-RUN apt install -y git && git clone https://github.com/ismaelc/llama-cpp-python
+RUN apt install -y git && git clone https://github.com/billcai/llama-cpp-python
 RUN cd llama-cpp-python && \
     git submodule update --init --recursive && \
     cd vendor/llama.cpp/ && \