Commit c68b570

piDack authored and arthw committed
llama : use F32 precision in GLM4 attention and no FA (ggml-org#9130)
1 parent bc65fcf commit c68b570

File tree

1 file changed: +1 −1 lines changed


src/llama.cpp

Lines changed: 1 addition & 1 deletion
@@ -8889,7 +8889,7 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
         cb(kq, "kq", il);
 
-        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON) {
+        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2 || model.arch == LLM_ARCH_NEMOTRON || model.arch == LLM_ARCH_CHATGLM) {
             // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
             // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
             ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
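
For readers unfamiliar with the mechanism: ggml_mul_mat_set_prec() tags a specific matmul node so that backends compute it with F32 accumulation instead of F16, and the changed line simply adds LLM_ARCH_CHATGLM (GLM4) to the architectures that request this for the KQ product. Below is a minimal standalone sketch of the same call pattern; it is not part of the commit, and the tensor shapes and fill values are invented for illustration.

// sketch.c: illustrative only; shapes and values are hypothetical
#include "ggml.h"
#include <stdio.h>

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // k: [head_dim, n_kv], q: [head_dim, n_tokens] -> kq: [n_kv, n_tokens]
    struct ggml_tensor * k = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 8);
    struct ggml_tensor * q = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 4);
    ggml_set_f32(k, 0.5f);
    ggml_set_f32(q, 0.5f);

    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

    // request F32 accumulation for this node; on backends that would otherwise
    // use F16 intermediates, this avoids the NaN issue the commit targets
    ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, kq);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    printf("kq[0] = %f\n", ggml_get_f32_1d(kq, 0));

    ggml_free(ctx);
    return 0;
}

In llm_build_kqv this call is applied per layer to the KQ logits in the non-flash-attention branch, which is consistent with the "no FA" wording in the commit title.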

0 commit comments
