Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit 3641913

Browse files
committed
Merge branch 'main' into configurable-chat-templates
2 parents d545955 + a945404 commit 3641913

File tree

1 file changed

+8
-7
lines changed

1 file changed

+8
-7
lines changed

llama_cpp/llama.py

+8-7
Original file line number | Diff line number | Diff line change
@@ -230,8 +230,8 @@ def __init__(
230230
n_batch: int = 512,
231231
n_threads: Optional[int] = None,
232232
n_threads_batch: Optional[int] = None,
233-
rope_freq_base: float = 10000.0,
234-
rope_freq_scale: float = 1.0,
233+
rope_freq_base: float = 0.0,
234+
rope_freq_scale: float = 0.0,
235235
mul_mat_q: bool = True,
236236
f16_kv: bool = True,
237237
logits_all: bool = False,
@@ -286,7 +286,6 @@ def __init__(
286286
Returns:
287287
A Llama instance.
288288
"""
289-
290289
self.verbose = verbose
291290

292291
self.numa = numa
@@ -324,16 +323,19 @@ def __init__(
324323
self.n_threads_batch = n_threads_batch or max(
325324
multiprocessing.cpu_count() // 2, 1
326325
)
327-
328326
# Context Params
329327
self.context_params = llama_cpp.llama_context_default_params()
330328
self.context_params.seed = seed
331329
self.context_params.n_ctx = n_ctx
332330
self.context_params.n_batch = self.n_batch
333331
self.context_params.n_threads = self.n_threads
334332
self.context_params.n_threads_batch = self.n_threads_batch
335-
self.context_params.rope_freq_base = rope_freq_base
336-
self.context_params.rope_freq_scale = rope_freq_scale
333+
self.context_params.rope_freq_base = (
334+
rope_freq_base if rope_freq_base != 0.0 else 0
335+
)
336+
self.context_params.rope_freq_scale = (
337+
rope_freq_scale if rope_freq_scale != 0.0 else 0
338+
)
337339
self.context_params.mul_mat_q = mul_mat_q
338340
self.context_params.f16_kv = f16_kv
339341
self.context_params.logits_all = logits_all
@@ -342,7 +344,6 @@ def __init__(
342344
# Sampling Params
343345
self.last_n_tokens_size = last_n_tokens_size
344346

345-
346347
self.cache: Optional[BaseLlamaCache] = None
347348

348349
self.lora_base = lora_base

0 commit comments

Comments (0)