1 parent 2f9b649 commit c854c25
llama_cpp/llama.py
@@ -763,8 +763,6 @@ def __getstate__(self):
             use_mlock=self.params.use_mlock,
             embedding=self.params.embedding,
             last_n_tokens_size=self.last_n_tokens_size,
-            last_n_tokens_data=self.last_n_tokens_data,
-            tokens_consumed=self.tokens_consumed,
             n_batch=self.n_batch,
             n_threads=self.n_threads,
         )
@@ -786,9 +784,6 @@ def __setstate__(self, state):
             last_n_tokens_size=state["last_n_tokens_size"],
             verbose=state["verbose"],
         )
-        self.last_n_tokens_data = state["last_n_tokens_data"]
-        self.tokens_consumed = state["tokens_consumed"]
-

    @staticmethod
    def token_eos() -> llama_cpp.llama_token:
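For context, this change means __getstate__ now serializes only the constructor arguments, and __setstate__ rebuilds the model from them rather than also restoring the last_n_tokens_data and tokens_consumed buffers. Below is a minimal sketch of how a pickle round trip would exercise these hooks; the model path and parameter values are hypothetical, and it assumes the llama-cpp-python Llama API as of this commit.

import pickle

from llama_cpp import Llama

# Hypothetical model path and parameters, for illustration only.
llm = Llama(model_path="./models/7B/ggml-model.bin", n_batch=8, n_threads=4)

# __getstate__ collects the constructor keyword arguments (use_mlock, embedding,
# last_n_tokens_size, n_batch, n_threads, ...) into the pickled state.
blob = pickle.dumps(llm)

# __setstate__ passes that state back to __init__; after this commit,
# last_n_tokens_data and tokens_consumed are no longer read from the state.
restored = pickle.loads(blob)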