diff --git a/.github/workflows/e2e-aws-custom.yml b/.github/workflows/e2e-aws-custom.yml
index 970ffb78f2..7960732902 100644
--- a/.github/workflows/e2e-aws-custom.yml
+++ b/.github/workflows/e2e-aws-custom.yml
@@ -148,7 +148,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-l4-x1.yml b/.github/workflows/e2e-nvidia-l4-x1.yml
index a4f59abeab..9b2063b3d3 100644
--- a/.github/workflows/e2e-nvidia-l4-x1.yml
+++ b/.github/workflows/e2e-nvidia-l4-x1.yml
@@ -129,7 +129,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-l40s-x4-llama.yml b/.github/workflows/e2e-nvidia-l40s-x4-llama.yml
index a72ab8049a..cbb075e202 100644
--- a/.github/workflows/e2e-nvidia-l40s-x4-llama.yml
+++ b/.github/workflows/e2e-nvidia-l40s-x4-llama.yml
@@ -171,7 +171,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-l40s-x4-py312.yml b/.github/workflows/e2e-nvidia-l40s-x4-py312.yml
index d0fce0c663..705a802e52 100644
--- a/.github/workflows/e2e-nvidia-l40s-x4-py312.yml
+++ b/.github/workflows/e2e-nvidia-l40s-x4-py312.yml
@@ -141,7 +141,7 @@ jobs:
 nvidia-smi

 python3.12 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.12 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.12 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-l40s-x4.yml b/.github/workflows/e2e-nvidia-l40s-x4.yml
index d481820e9d..6c7f3c3dd7 100644
--- a/.github/workflows/e2e-nvidia-l40s-x4.yml
+++ b/.github/workflows/e2e-nvidia-l40s-x4.yml
@@ -141,7 +141,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-l40s-x8.yml b/.github/workflows/e2e-nvidia-l40s-x8.yml
index 5b69c1282e..a4308f3f3d 100644
--- a/.github/workflows/e2e-nvidia-l40s-x8.yml
+++ b/.github/workflows/e2e-nvidia-l40s-x8.yml
@@ -221,7 +221,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed
diff --git a/.github/workflows/e2e-nvidia-t4-x1.yml b/.github/workflows/e2e-nvidia-t4-x1.yml
index c3325f70c0..07b4a9a59f 100644
--- a/.github/workflows/e2e-nvidia-t4-x1.yml
+++ b/.github/workflows/e2e-nvidia-t4-x1.yml
@@ -136,7 +136,7 @@ jobs:
 nvidia-smi

 python3.11 -m pip cache remove llama_cpp_python
-CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v .
+CMAKE_ARGS="-DGGML_CUDA=on" python3.11 -m pip install -v . -c constraints-dev.txt

 # https://github.com/instructlab/instructlab/issues/1821
 # install with Torch and build dependencies installed