1 parent d318cc8 commit 4084aab
llama_cpp/llama.py
```diff
@@ -79,7 +79,7 @@ def __init__(
     n_threads: Optional[int] = None,
     n_threads_batch: Optional[int] = None,
     rope_scaling_type: Optional[int] = llama_cpp.LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED,
-    pooling_type: int = llama_cpp.LLAMA_POOLING_TYPE_MEAN,
+    pooling_type: int = llama_cpp.LLAMA_POOLING_TYPE_UNSPECIFIED,
     rope_freq_base: float = 0.0,
     rope_freq_scale: float = 0.0,
     yarn_ext_factor: float = -1.0,
```
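The change above switches the default `pooling_type` from `LLAMA_POOLING_TYPE_MEAN` to `LLAMA_POOLING_TYPE_UNSPECIFIED`, so the pooling strategy is left to llama.cpp and the model's own metadata instead of always forcing mean pooling. A minimal usage sketch, assuming llama-cpp-python with this commit applied and a local GGUF embedding model (the model path below is hypothetical):

```python
# Minimal sketch, assuming llama-cpp-python with this commit applied.
# The model path is hypothetical; substitute a local GGUF embedding model.
import llama_cpp
from llama_cpp import Llama

# With the new default (LLAMA_POOLING_TYPE_UNSPECIFIED), the pooling
# strategy is taken from llama.cpp / the model's metadata rather than
# being forced to mean pooling.
llm = Llama(model_path="models/example-embedding.gguf", embedding=True)

# Callers that depended on the old default can still request mean pooling
# explicitly:
llm_mean = Llama(
    model_path="models/example-embedding.gguf",
    embedding=True,
    pooling_type=llama_cpp.LLAMA_POOLING_TYPE_MEAN,
)

embeddings = llm.embed("hello world")  # list of floats
```

Defaulting to `UNSPECIFIED` has the advantage of not silently overriding a pooling type that the model's GGUF metadata already declares.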