Commit c854c25

Don't serialize stateful parameters
1 parent 2f9b649 commit c854c25

1 file changed (+0, -5 lines)

llama_cpp/llama.py

@@ -763,8 +763,6 @@ def __getstate__(self):
             use_mlock=self.params.use_mlock,
             embedding=self.params.embedding,
             last_n_tokens_size=self.last_n_tokens_size,
-            last_n_tokens_data=self.last_n_tokens_data,
-            tokens_consumed=self.tokens_consumed,
             n_batch=self.n_batch,
             n_threads=self.n_threads,
         )
@@ -786,9 +784,6 @@ def __setstate__(self, state):
             last_n_tokens_size=state["last_n_tokens_size"],
             verbose=state["verbose"],
         )
-        self.last_n_tokens_data = state["last_n_tokens_data"]
-        self.tokens_consumed = state["tokens_consumed"]
-
 
     @staticmethod
     def token_eos() -> llama_cpp.llama_token:
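
In effect, pickling a Llama instance now round-trips only its constructor parameters; transient generation state such as last_n_tokens_data and tokens_consumed is rebuilt fresh when the object is restored. A minimal sketch of the resulting behaviour, assuming the llama_cpp.Llama API at this commit and a placeholder model path:

import pickle

from llama_cpp import Llama

# Hypothetical model path, for illustration only.
llm = Llama(model_path="./models/ggml-model.bin", n_ctx=512)

# __getstate__ now serializes only constructor parameters
# (model_path, n_ctx, last_n_tokens_size, n_batch, n_threads, ...),
# not transient state like last_n_tokens_data or tokens_consumed.
blob = pickle.dumps(llm)

# __setstate__ re-runs __init__ with those parameters, so the restored
# instance starts with fresh generation state.
restored = pickle.loads(blob)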
