@@ -112,32 +112,32 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
112
112
chat_handler = llama_cpp .llama_chat_format .Llava16ChatHandler (
113
113
clip_model_path = settings .clip_model_path , verbose = settings .verbose
114
114
)
115
- elif settings .chat_format == "nanollava " :
115
+ elif settings .chat_format == "moondream " :
116
116
assert settings .clip_model_path is not None , "clip model not found"
117
117
if settings .hf_model_repo_id is not None :
118
118
chat_handler = (
119
- llama_cpp .llama_chat_format .NanoLlavaChatHandler .from_pretrained (
119
+ llama_cpp .llama_chat_format .MoondreamChatHandler .from_pretrained (
120
120
repo_id = settings .hf_model_repo_id ,
121
121
filename = settings .clip_model_path ,
122
122
verbose = settings .verbose ,
123
123
)
124
124
)
125
125
else :
126
- chat_handler = llama_cpp .llama_chat_format .NanoLlavaChatHandler (
126
+ chat_handler = llama_cpp .llama_chat_format .MoondreamChatHandler (
127
127
clip_model_path = settings .clip_model_path , verbose = settings .verbose
128
128
)
129
- elif settings .chat_format == "moondream " :
129
+ elif settings .chat_format == "nanollava " :
130
130
assert settings .clip_model_path is not None , "clip model not found"
131
131
if settings .hf_model_repo_id is not None :
132
132
chat_handler = (
133
- llama_cpp .llama_chat_format .MoondreamChatHanlder .from_pretrained (
133
+ llama_cpp .llama_chat_format .NanoLlavaChatHandler .from_pretrained (
134
134
repo_id = settings .hf_model_repo_id ,
135
135
filename = settings .clip_model_path ,
136
136
verbose = settings .verbose ,
137
137
)
138
138
)
139
139
else :
140
- chat_handler = llama_cpp .llama_chat_format .MoondreamChatHanlder (
140
+ chat_handler = llama_cpp .llama_chat_format .NanoLlavaChatHandler (
141
141
clip_model_path = settings .clip_model_path , verbose = settings .verbose
142
142
)
143
143
elif settings .chat_format == "hf-autotokenizer" :
0 commit comments