From a82e3a4d9206ee1ff3ea672abd04a52d9a21b7d0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 08:51:07 +0300 Subject: [PATCH 01/29] llama : style formatting + remove helper methods --- ggml.h | 10 +-- gguf-llama.cpp | 172 +++++++++++++++++++------------------------------ gguf-util.h | 12 ++-- 3 files changed, 79 insertions(+), 115 deletions(-) diff --git a/ggml.h b/ggml.h index fb3db10e2cedb..9a9c7ab391f02 100644 --- a/ggml.h +++ b/ggml.h @@ -1744,12 +1744,12 @@ extern "C" { GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx); GGML_API void * gguf_get_data (struct gguf_context * ctx); - GGML_API int gguf_get_n_kv(struct gguf_context * ctx); - GGML_API int gguf_find_key(struct gguf_context * ctx, const char * key); - GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i); + GGML_API int gguf_get_n_kv(struct gguf_context * ctx); + GGML_API int gguf_find_key(struct gguf_context * ctx, const char * key); + GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i); + GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); - GGML_API enum gguf_type gguf_get_arr_type (struct gguf_context * ctx, int i); - GGML_API void gguf_get_val (struct gguf_context * ctx, int i, void * val); + GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); GGML_API const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i); GGML_API float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index e36d8e77cfc4d..de937958b34dd 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -510,22 +510,9 @@ struct llama_state { // global state static llama_state g_state; -template -static T checked_mul(T a, T b) { - T ret = a * b; - if (a != 0 && ret / a != b) { - throw std::runtime_error(format("overflow multiplying %llu * %llu", - (unsigned long long) a, (unsigned long long) b)); - } - return ret; -} - -static size_t checked_div(size_t a, size_t b) { - if (b == 0 || a % b != 0) { - throw std::runtime_error(format("error dividing %zu / %zu", a, b)); - } - return a / b; -} +// +// model loading and saving +// static std::string llama_format_tensor_shape(const std::vector & ne) { char buf[256]; @@ -536,14 +523,6 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { return buf; } -static size_t llama_calc_tensor_size(const std::vector & ne, enum ggml_type type) { - size_t size = ggml_type_size(type); - for (uint32_t dim : ne) { - size = checked_mul(size, dim); - } - return size / ggml_blck_size(type); -} - struct gguf_load_tensor { std::string name; enum ggml_type type = GGML_TYPE_F32; @@ -573,20 +552,19 @@ struct gguf_file_loader { struct ggml_context * ctx_data = NULL; - gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map) - : file(fname, "rb") { + gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map) : file(fname, "rb") { fprintf(stderr, "llama.cpp: loading model from %s\n", fname); - struct gguf_init_params params = { - /*.no_alloc = */ true, - /*.ctx = */ &ctx_data, - }; + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx_data, + }; - gguf_ctx = gguf_init_from_file(fname, params); - file_version = (enum gguf_file_version) gguf_get_version(gguf_ctx); + gguf_ctx = gguf_init_from_file(fname, params); + file_version = (enum gguf_file_version) gguf_get_version(gguf_ctx); - read_hparams(); - read_vocab(); + read_hparams(); + read_vocab(); read_tensor_metadata(tensors_map); 
} @@ -637,18 +615,18 @@ struct gguf_file_loader { void read_vocab() { vocab.id_to_token.resize(hparams.n_vocab); - int token_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); + + const int token_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); if (token_idx == -1) { throw std::runtime_error("cannot find token list in GGUF file\n"); } - int score_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.scores"); + const int score_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.scores"); if (score_idx == -1) { throw std::runtime_error("cannot find token scores list in GGUF file\n"); } for (uint32_t i = 0; i < hparams.n_vocab; i++) { - std::string word = gguf_get_arr_str(gguf_ctx, token_idx, i); vocab.token_to_id[word] = i; @@ -702,7 +680,7 @@ struct gguf_file_loader { tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i); tensor.name = name; - tensor.size = llama_calc_tensor_size(tensor.ne, tensor.type); + tensor.size = ggml_nbytes(cur); tensors_map.tensors.push_back(tensor); tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1; @@ -787,7 +765,7 @@ struct gguf_file_saver { gguf_type arr_type; int n_arr; - switch(vtype) { + switch (vtype) { case GGUF_TYPE_BOOL: bool_val = gguf_get_val_bool(fl->gguf_ctx, i); file.write_val(key, GGUF_TYPE_BOOL, bool_val); @@ -810,7 +788,7 @@ struct gguf_file_saver { break; case GGUF_TYPE_STRING: str_val = gguf_get_val_str(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_STRING, str_val); + file.write_str(key, GGUF_TYPE_STRING, str_val); break; case GGUF_TYPE_UINT16: u16_val = gguf_get_val_u16(fl->gguf_ctx, i); @@ -826,7 +804,7 @@ struct gguf_file_saver { break; case GGUF_TYPE_ARRAY: arr_type = gguf_get_arr_type(fl->gguf_ctx, i); - n_arr = gguf_get_arr_n(fl->gguf_ctx, i); + n_arr = gguf_get_arr_n (fl->gguf_ctx, i); if (arr_type == GGUF_TYPE_FLOAT32) { write_hparam_arr_f32(key, arr_type, i, n_arr); } else if (arr_type == GGUF_TYPE_STRING) { @@ -923,20 +901,6 @@ struct llama_model_loader { } } - struct ggml_tensor * get_tensor(const std::string & name, const std::vector & ne, ggml_backend backend) { - auto it = tensors_map.name_to_idx.find(name); - if (it == tensors_map.name_to_idx.end()) { - throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); - } - gguf_load_tensor & lt = tensors_map.tensors.at(it->second); - if (lt.ne != ne) { - throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", - name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); - } - - return get_tensor_for(lt, backend); - } - struct ggml_tensor * get_tensor_for(gguf_load_tensor & lt, ggml_backend backend) { struct ggml_tensor * tensor; if (backend != GGML_BACKEND_CPU) { @@ -960,16 +924,41 @@ struct llama_model_loader { return tensor; } + struct ggml_tensor * get_tensor(const std::string & name, const std::vector & ne, ggml_backend backend) { + auto it = tensors_map.name_to_idx.find(name); + if (it == tensors_map.name_to_idx.end()) { + throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); + } + gguf_load_tensor & lt = tensors_map.tensors.at(it->second); + if (lt.ne != ne) { + throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", + name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); + } + + return get_tensor_for(lt, backend); + } + void done_getting_tensors() const { if 
(num_ggml_tensors_created != tensors_map.tensors.size()) { throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected")); } } - void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, gguf_mlock * lmlock) { - size_t data_size = 0; + void load_data_for(gguf_load_tensor & lt) const { + if (use_mmap) { + lt.data = (uint8_t *) mapping->addr + lt.file_off; + } else { + gguf_file & file = file_loader->file; + file.seek(lt.file_off, SEEK_SET); + file.read_raw(lt.data, lt.size); + } + } + + void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, gguf_mlock * lmlock) { + size_t data_size = 0; size_t prefetch_size = 0; - size_t lock_size = 0; + size_t lock_size = 0; + for (const gguf_load_tensor & lt : tensors_map.tensors) { data_size += lt.size; if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { @@ -1031,31 +1020,6 @@ struct llama_model_loader { done_size += lt.size; } } - - void load_data_for(gguf_load_tensor & lt) { - if (use_mmap) { - lt.data = (uint8_t *) mapping->addr + lt.file_off; - } else { - gguf_file & file = file_loader->file; - file.seek(lt.file_off, SEEK_SET); - file.read_raw(lt.data, lt.size); - } - - if (0) { - print_checksum(lt); - } - } - - static void print_checksum(gguf_load_tensor & lt) { - uint32_t sum = 0; - for (size_t i = 0; i < lt.size; i++) { - uint8_t byte = lt.data[i]; - sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash - } - fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum, - llama_format_tensor_shape(lt.ne).c_str(), lt.size); - } - }; // @@ -1185,18 +1149,18 @@ int64_t llama_time_us() { } // -// model loading +// load LLaMA models // -static const char *gguf_file_version_name(gguf_file_version version) { +static const char * gguf_file_version_name(gguf_file_version version) { switch (version) { case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)"; - } + } return "unknown"; } -static const char *llama_ftype_name(enum llama_ftype ftype) { +static const char * llama_ftype_name(enum llama_ftype ftype) { switch (ftype) { case LLAMA_FTYPE_ALL_F32: return "all F32"; case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; @@ -1207,8 +1171,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; + // K-quants - case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small"; case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium"; case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large"; @@ -1216,15 +1181,16 @@ static const char *llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium"; case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small"; case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; - default: return "unknown, may not work"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; + + default: return "unknown, may not work"; } } -static const char *llama_model_type_name(e_model type) { +static const char * llama_model_type_name(e_model type) { switch (type) { - case MODEL_3B: return "3B"; - case MODEL_7B: return "7B"; + case MODEL_3B: return "3B"; + case MODEL_7B: return "7B"; case MODEL_13B: return "13B"; case MODEL_30B: return "30B"; 
case MODEL_65B: return "65B"; @@ -1605,7 +1571,6 @@ static struct ggml_cgraph * llama_build_graph( const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); - GGML_ASSERT(n_embd_head == hparams.n_rot); const float freq_base = hparams.rope_freq_base; @@ -1714,7 +1679,7 @@ static struct ggml_cgraph * llama_build_graph( struct ggml_tensor * inpSA = inpL; - lctx.use_buf(ctx0, 0); + llama_context::use_buf(ctx0, 0); // norm { @@ -1853,7 +1818,7 @@ static struct ggml_cgraph * llama_build_graph( ggml_set_name(cur, "result_wo"); } - lctx.use_buf(ctx0, 1); + llama_context::use_buf(ctx0, 1); struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA); offload_func(inpFF); @@ -1909,7 +1874,7 @@ static struct ggml_cgraph * llama_build_graph( inpL = cur; } - lctx.use_buf(ctx0, 0); + llama_context::use_buf(ctx0, 0); // norm { @@ -1927,7 +1892,7 @@ static struct ggml_cgraph * llama_build_graph( cur = ggml_mul_mat(ctx0, model.output, cur); ggml_set_name(cur, "result_output"); - lctx.use_buf(ctx0, -1); + llama_context::use_buf(ctx0, -1); // logits -> probs //cur = ggml_soft_max_inplace(ctx0, cur); @@ -2997,9 +2962,8 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c } } - const auto rejects = - llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar); - for (auto & reject : rejects) { + const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar); + for (const auto & reject : rejects) { candidates->data[reject.index].logit = -INFINITY; } @@ -3726,7 +3690,7 @@ void llama_free(struct llama_context * ctx) { int llama_model_quantize( const char * fname_inp, const char * fname_out, - const llama_model_quantize_params *params) { + const llama_model_quantize_params * params) { try { llama_model_quantize_internal(fname_inp, fname_out, params); return 0; @@ -4344,8 +4308,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c GGML_UNUSED(n_token_capacity); GGML_UNUSED(n_token_count_out); - -// TODO: implement with GGUF format + // TODO: implement with GGUF format return true; } @@ -4390,7 +4353,6 @@ int llama_eval( return 0; } - int llama_eval_embd( struct llama_context * ctx, const float * embd, diff --git a/gguf-util.h b/gguf-util.h index d8557d94f114d..b6a20cf5be4e5 100644 --- a/gguf-util.h +++ b/gguf-util.h @@ -122,9 +122,10 @@ struct gguf_file { template void write_val(const std::string & key, enum gguf_type type, const T & val) { + static_assert(std::is_fundamental::value, "T must be a primitive type"); write_str(key); fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &val, sizeof(val), 1, fp); + fwrite((const char *) &val, sizeof(val), 1, fp); } template @@ -137,7 +138,7 @@ struct gguf_file { const int32_t n = val.size(); fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &n, sizeof(n), 1, fp); + fwrite((const char *) &n, sizeof(n), 1, fp); fwrite(val.data(), sizeof(T), n, fp); } @@ -159,7 +160,7 @@ struct gguf_file { const int32_t n = val.size(); fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &n, sizeof(n), 1, fp); + fwrite((const char *) &n, sizeof(n), 1, fp); for (int i = 0; i < n; ++i) { const int32_t nstr = val[i].size(); fwrite((const char *) &nstr, sizeof(nstr), 1, fp); @@ -265,7 +266,7 @@ struct gguf_mmap { #elif defined(_WIN32) static constexpr bool SUPPORTED = true; - gguf_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { + 
gguf_mmap(struct gguf_file * file, bool prefetch = true, bool numa = false) { (void) numa; size = file->size; @@ -312,7 +313,8 @@ struct gguf_mmap { #else static constexpr bool SUPPORTED = false; - gguf_mmap(struct llama_file *, bool prefetch = true, bool numa = false) { + gguf_mmap(struct gguf_file * file, bool prefetch = true, bool numa = false) { + (void) file; (void) prefetch; (void) numa; From 66ce19aecb711ad7fc994e083a02a6f773d095ee Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 10:55:42 +0300 Subject: [PATCH 02/29] llama : fix quantization using gguf tool --- gguf-llama.cpp | 2 +- gguf-util.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index de937958b34dd..5df684c19ad72 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -730,7 +730,7 @@ struct gguf_file_saver { data[j] = val; } - file.write_arr(key, type, data); + file.write_arr(key, type, data); } void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { diff --git a/gguf-util.h b/gguf-util.h index b6a20cf5be4e5..c22a14e9c3fc8 100644 --- a/gguf-util.h +++ b/gguf-util.h @@ -130,6 +130,7 @@ struct gguf_file { template void write_arr(const std::string & key, enum gguf_type type, const std::vector & val) { + static_assert(std::is_fundamental::value, "T must be a primitive type"); write_str(key); { const enum gguf_type tarr = GGUF_TYPE_ARRAY; @@ -151,7 +152,7 @@ struct gguf_file { fwrite(val.c_str(), n, 1, fp); } - void write_str(const std::string & key, enum gguf_type type, const std::vector & val) { + void write_arr(const std::string & key, enum gguf_type type, const std::vector & val) { write_str(key); { const enum gguf_type tarr = GGUF_TYPE_ARRAY; From c9c0b758d4e3c5a03e96c9094c766c8b863ab177 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 11:09:26 +0300 Subject: [PATCH 03/29] llama : simplify gguf_file_saver --- examples/gguf/gguf-llama-simple.cpp | 7 ++- gguf-llama.cpp | 89 +++++++++-------------------- 2 files changed, 31 insertions(+), 65 deletions(-) diff --git a/examples/gguf/gguf-llama-simple.cpp b/examples/gguf/gguf-llama-simple.cpp index 0679240d31317..e59d1cfc19d74 100644 --- a/examples/gguf/gguf-llama-simple.cpp +++ b/examples/gguf/gguf-llama-simple.cpp @@ -74,7 +74,9 @@ int main(int argc, char ** argv) { // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist // example, we will just stop the loop once this cache is full or once an end of stream is detected. 
- while (llama_get_kv_cache_token_count(ctx) < max_context_size) { + const int n_gen = std::min(32, max_context_size); + + while (llama_get_kv_cache_token_count(ctx) < n_gen) { // evaluate the transformer if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), params.n_threads)) { @@ -114,7 +116,6 @@ int main(int argc, char ** argv) { // push this new token for next evaluation tokens_list.push_back(new_token_id); - } llama_free(ctx); @@ -122,5 +123,7 @@ int main(int argc, char ** argv) { llama_backend_free(); + fprintf(stderr, "\n\n"); + return 0; } diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 5df684c19ad72..19367385119a2 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -701,11 +701,11 @@ struct gguf_file_saver { size_t info_offset; size_t tensor_offset = 0; - gguf_file_saver(const char * fname, gguf_file_loader * fl, enum llama_ftype new_ftype) + gguf_file_saver(const char * fname, gguf_file_loader * fl) : file(fname, "wb"), fl(fl) { fprintf(stderr, "llama.cpp: saving model to %s\n", fname); write_header(); - write_hparams(new_ftype); + write_kv(); } void write_header() { @@ -744,75 +744,38 @@ struct gguf_file_saver { file.write_arr(key, type, data); } - void write_hparams(enum llama_ftype new_ftype) { + // re-write the key-value section from the loaded file + void write_kv() { const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); for (int i = 0; i < n_kv; ++i) { const char * key = gguf_get_key(fl->gguf_ctx, i); if (strcmp(key, "general.quantization_version") == 0) { - file.write_val("general.quantization_version", GGUF_TYPE_UINT32, new_ftype); + file.write_val("general.quantization_version", GGUF_TYPE_UINT32, GGML_QNT_VERSION); } else { const gguf_type vtype = gguf_get_kv_type(fl->gguf_ctx, i); - bool bool_val; - float f32_val; - int16_t i16_val; - int32_t i32_val; - int8_t i8_val; - std::string str_val; - uint16_t u16_val; - uint32_t u32_val; - uint8_t u8_val; - gguf_type arr_type; - int n_arr; - switch (vtype) { - case GGUF_TYPE_BOOL: - bool_val = gguf_get_val_bool(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_BOOL, bool_val); - break; - case GGUF_TYPE_FLOAT32: - f32_val = gguf_get_val_f32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_FLOAT32, f32_val); - break; - case GGUF_TYPE_INT16: - i16_val = gguf_get_val_i16(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT16, i16_val); - break; - case GGUF_TYPE_INT32: - i32_val = gguf_get_val_i32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT32, i32_val); - break; - case GGUF_TYPE_INT8: - i8_val = gguf_get_val_i8(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT8, i8_val); - break; - case GGUF_TYPE_STRING: - str_val = gguf_get_val_str(fl->gguf_ctx, i); - file.write_str(key, GGUF_TYPE_STRING, str_val); - break; - case GGUF_TYPE_UINT16: - u16_val = gguf_get_val_u16(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT16, u16_val); - break; - case GGUF_TYPE_UINT32: - u32_val = gguf_get_val_u32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT32, u32_val); - break; - case GGUF_TYPE_UINT8: - u8_val = gguf_get_val_u8(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT8, u8_val); - break; + case GGUF_TYPE_BOOL: file.write_val (key, GGUF_TYPE_BOOL, gguf_get_val_bool(fl->gguf_ctx, i)); break; + case GGUF_TYPE_FLOAT32: file.write_val (key, GGUF_TYPE_FLOAT32, gguf_get_val_f32 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_INT16: file.write_val (key, GGUF_TYPE_INT16, gguf_get_val_i16 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_INT32: file.write_val (key, GGUF_TYPE_INT32, 
gguf_get_val_i32 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_INT8: file.write_val (key, GGUF_TYPE_INT8, gguf_get_val_i8 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_STRING: file.write_str (key, GGUF_TYPE_STRING, gguf_get_val_str (fl->gguf_ctx, i)); break; + case GGUF_TYPE_UINT16: file.write_val(key, GGUF_TYPE_UINT16, gguf_get_val_u16 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_UINT32: file.write_val(key, GGUF_TYPE_UINT32, gguf_get_val_u32 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_UINT8: file.write_val (key, GGUF_TYPE_UINT8, gguf_get_val_u8 (fl->gguf_ctx, i)); break; case GGUF_TYPE_ARRAY: - arr_type = gguf_get_arr_type(fl->gguf_ctx, i); - n_arr = gguf_get_arr_n (fl->gguf_ctx, i); - if (arr_type == GGUF_TYPE_FLOAT32) { - write_hparam_arr_f32(key, arr_type, i, n_arr); - } else if (arr_type == GGUF_TYPE_STRING) { - write_hparam_arr_str(key, GGUF_TYPE_STRING, i, n_arr); - } else { - throw std::runtime_error("not implemented"); - } - break; + { + const gguf_type arr_type = gguf_get_arr_type(fl->gguf_ctx, i); + const int n_arr = gguf_get_arr_n (fl->gguf_ctx, i); + if (arr_type == GGUF_TYPE_FLOAT32) { + write_hparam_arr_f32(key, arr_type, i, n_arr); + } else if (arr_type == GGUF_TYPE_STRING) { + write_hparam_arr_str(key, arr_type, i, n_arr); + } else { + throw std::runtime_error("not implemented"); + } + } break; default: throw std::runtime_error(format("cannot recognize value type for key %s\n", key)); } @@ -3264,7 +3227,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } std::unique_ptr model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); - gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype); + gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get()); #ifdef GGML_USE_K_QUANTS int n_attention_wv = 0; From 6e29ed52fbab48f53655a5e9ea0b5dc92aab4965 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 11:10:26 +0300 Subject: [PATCH 04/29] llama : fix method names --- gguf-llama.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 19367385119a2..dc97e3bcef315 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -722,7 +722,7 @@ struct gguf_file_saver { file.write_i32(n_kv); } - void write_hparam_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { + void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { @@ -733,7 +733,7 @@ struct gguf_file_saver { file.write_arr(key, type, data); } - void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { + void write_kv_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { @@ -769,9 +769,9 @@ struct gguf_file_saver { const gguf_type arr_type = gguf_get_arr_type(fl->gguf_ctx, i); const int n_arr = gguf_get_arr_n (fl->gguf_ctx, i); if (arr_type == GGUF_TYPE_FLOAT32) { - write_hparam_arr_f32(key, arr_type, i, n_arr); + write_kv_arr_f32(key, arr_type, i, n_arr); } else if (arr_type == GGUF_TYPE_STRING) { - write_hparam_arr_str(key, arr_type, i, n_arr); + write_kv_arr_str(key, arr_type, i, n_arr); } else { throw std::runtime_error("not implemented"); } From 5c85332e99602251909950fdab5e0003441b22dd Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 11:11:22 +0300 Subject: [PATCH 05/29] llama : simplify write_header() --- gguf-llama.cpp | 
15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index dc97e3bcef315..4006d33470f00 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -709,17 +709,10 @@ struct gguf_file_saver { } void write_header() { - const int32_t magic = GGUF_MAGIC; - file.write_i32(magic); - - const int32_t version = GGUF_VERSION; - file.write_i32(version); - - const int32_t n_tensors = gguf_get_n_tensors(fl->gguf_ctx); - file.write_i32(n_tensors); - - const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); - file.write_i32(n_kv); + file.write_i32(GGUF_MAGIC); + file.write_i32(GGUF_VERSION); + file.write_i32(gguf_get_n_tensors(fl->gguf_ctx)); + file.write_i32(gguf_get_n_kv (fl->gguf_ctx)); } void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { From 9574f41818a917d10dfdce38184944d78a846de1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 11:22:37 +0300 Subject: [PATCH 06/29] llama : no need to pass full file loader to the file saver just gguf_ctx --- gguf-llama.cpp | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 4006d33470f00..bff6213cac28e 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -697,12 +697,12 @@ struct gguf_file_saver { // we need to calculate the delta in number of bytes written with a counter as a struct member. gguf_file file; - gguf_file_loader * fl; + gguf_context * ctx; // loaded gguf context (used to re-write the KV section (good enough for now)) size_t info_offset; size_t tensor_offset = 0; - gguf_file_saver(const char * fname, gguf_file_loader * fl) - : file(fname, "wb"), fl(fl) { + gguf_file_saver(const char * fname, gguf_context * ctx) + : file(fname, "wb"), ctx(ctx) { fprintf(stderr, "llama.cpp: saving model to %s\n", fname); write_header(); write_kv(); @@ -711,15 +711,15 @@ struct gguf_file_saver { void write_header() { file.write_i32(GGUF_MAGIC); file.write_i32(GGUF_VERSION); - file.write_i32(gguf_get_n_tensors(fl->gguf_ctx)); - file.write_i32(gguf_get_n_kv (fl->gguf_ctx)); + file.write_i32(gguf_get_n_tensors(ctx)); + file.write_i32(gguf_get_n_kv (ctx)); } void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { - std::string val = gguf_get_arr_str(fl->gguf_ctx, i, j); + std::string val = gguf_get_arr_str(ctx, i, j); data[j] = val; } @@ -730,7 +730,7 @@ struct gguf_file_saver { std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { - float val = gguf_get_arr_f32(fl->gguf_ctx, i, j); + float val = gguf_get_arr_f32(ctx, i, j); data[j] = val; } @@ -739,28 +739,28 @@ struct gguf_file_saver { // re-write the key-value section from the loaded file void write_kv() { - const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); + const int32_t n_kv = gguf_get_n_kv(ctx); for (int i = 0; i < n_kv; ++i) { - const char * key = gguf_get_key(fl->gguf_ctx, i); + const char * key = gguf_get_key(ctx, i); if (strcmp(key, "general.quantization_version") == 0) { file.write_val("general.quantization_version", GGUF_TYPE_UINT32, GGML_QNT_VERSION); } else { - const gguf_type vtype = gguf_get_kv_type(fl->gguf_ctx, i); + const gguf_type vtype = gguf_get_kv_type(ctx, i); switch (vtype) { - case GGUF_TYPE_BOOL: file.write_val (key, GGUF_TYPE_BOOL, gguf_get_val_bool(fl->gguf_ctx, i)); break; - case GGUF_TYPE_FLOAT32: file.write_val (key, GGUF_TYPE_FLOAT32, gguf_get_val_f32 (fl->gguf_ctx, i)); break; - case 
GGUF_TYPE_INT16: file.write_val (key, GGUF_TYPE_INT16, gguf_get_val_i16 (fl->gguf_ctx, i)); break; - case GGUF_TYPE_INT32: file.write_val (key, GGUF_TYPE_INT32, gguf_get_val_i32 (fl->gguf_ctx, i)); break; - case GGUF_TYPE_INT8: file.write_val (key, GGUF_TYPE_INT8, gguf_get_val_i8 (fl->gguf_ctx, i)); break; - case GGUF_TYPE_STRING: file.write_str (key, GGUF_TYPE_STRING, gguf_get_val_str (fl->gguf_ctx, i)); break; - case GGUF_TYPE_UINT16: file.write_val(key, GGUF_TYPE_UINT16, gguf_get_val_u16 (fl->gguf_ctx, i)); break; - case GGUF_TYPE_UINT32: file.write_val(key, GGUF_TYPE_UINT32, gguf_get_val_u32 (fl->gguf_ctx, i)); break; - case GGUF_TYPE_UINT8: file.write_val (key, GGUF_TYPE_UINT8, gguf_get_val_u8 (fl->gguf_ctx, i)); break; + case GGUF_TYPE_BOOL: file.write_val (key, GGUF_TYPE_BOOL, gguf_get_val_bool(ctx, i)); break; + case GGUF_TYPE_FLOAT32: file.write_val (key, GGUF_TYPE_FLOAT32, gguf_get_val_f32 (ctx, i)); break; + case GGUF_TYPE_INT16: file.write_val (key, GGUF_TYPE_INT16, gguf_get_val_i16 (ctx, i)); break; + case GGUF_TYPE_INT32: file.write_val (key, GGUF_TYPE_INT32, gguf_get_val_i32 (ctx, i)); break; + case GGUF_TYPE_INT8: file.write_val (key, GGUF_TYPE_INT8, gguf_get_val_i8 (ctx, i)); break; + case GGUF_TYPE_STRING: file.write_str (key, GGUF_TYPE_STRING, gguf_get_val_str (ctx, i)); break; + case GGUF_TYPE_UINT16: file.write_val(key, GGUF_TYPE_UINT16, gguf_get_val_u16 (ctx, i)); break; + case GGUF_TYPE_UINT32: file.write_val(key, GGUF_TYPE_UINT32, gguf_get_val_u32 (ctx, i)); break; + case GGUF_TYPE_UINT8: file.write_val (key, GGUF_TYPE_UINT8, gguf_get_val_u8 (ctx, i)); break; case GGUF_TYPE_ARRAY: { - const gguf_type arr_type = gguf_get_arr_type(fl->gguf_ctx, i); - const int n_arr = gguf_get_arr_n (fl->gguf_ctx, i); + const gguf_type arr_type = gguf_get_arr_type(ctx, i); + const int n_arr = gguf_get_arr_n (ctx, i); if (arr_type == GGUF_TYPE_FLOAT32) { write_kv_arr_f32(key, arr_type, i, n_arr); } else if (arr_type == GGUF_TYPE_STRING) { @@ -777,9 +777,9 @@ struct gguf_file_saver { info_offset = file.tell(); - GGML_ASSERT(gguf_get_data_offset(fl->gguf_ctx) >= info_offset); + GGML_ASSERT(gguf_get_data_offset(ctx) >= info_offset); - size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset; + size_t count = gguf_get_data_offset(ctx) - info_offset; file.write_zeros(count); file.seek(info_offset, SEEK_SET); GGML_ASSERT(info_offset == file.tell()); @@ -3220,7 +3220,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } std::unique_ptr model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); - gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get()); + gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader->gguf_ctx); #ifdef GGML_USE_K_QUANTS int n_attention_wv = 0; From da424b66992e9d9ac57553fe089215f5dd9e0dbe Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 11:31:42 +0300 Subject: [PATCH 07/29] llama : gguf_file_saver write I32 --- ggml.c | 12 ++++++++---- ggml.h | 3 ++- gguf-llama.cpp | 26 ++++++++++++++++++++------ 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/ggml.c b/ggml.c index c8fa60328e2d7..cdba137da0a68 100644 --- a/ggml.c +++ b/ggml.c @@ -19039,16 +19039,20 @@ enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i) { return ctx->header.kv[i].value.arr.type; } -const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) { - struct gguf_kv * kv = &ctx->header.kv[key_id]; - struct gguf_str * str = &((struct gguf_str *) 
kv->value.arr.data)[i]; - return str->data; +int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i) { + return ((int32_t *) ctx->header.kv[key_id].value.arr.data)[i]; } float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i) { return ((float *) ctx->header.kv[key_id].value.arr.data)[i]; } +const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) { + struct gguf_kv * kv = &ctx->header.kv[key_id]; + struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i]; + return str->data; +} + int gguf_get_arr_n(struct gguf_context * ctx, int i) { return ctx->header.kv[i].value.arr.n; } diff --git a/ggml.h b/ggml.h index 9a9c7ab391f02..79bda4538c3b4 100644 --- a/ggml.h +++ b/ggml.h @@ -1751,8 +1751,9 @@ extern "C" { GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); - GGML_API const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i); GGML_API float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i); + GGML_API int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i); + GGML_API const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i); GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index bff6213cac28e..76f65b71f3d00 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -737,11 +737,24 @@ struct gguf_file_saver { file.write_arr(key, type, data); } + void write_kv_arr_i32(const std::string & key, enum gguf_type type, int i, int n_arr) { + std::vector data(n_arr); + + for (int j = 0; j < n_arr; ++j) { + int32_t val = gguf_get_arr_i32(ctx, i, j); + data[j] = val; + } + + file.write_arr(key, type, data); + } + // re-write the key-value section from the loaded file void write_kv() { const int32_t n_kv = gguf_get_n_kv(ctx); for (int i = 0; i < n_kv; ++i) { const char * key = gguf_get_key(ctx, i); + LLAMA_LOG_INFO("%s: writing key '%s'\n", __func__, key); + if (strcmp(key, "general.quantization_version") == 0) { file.write_val("general.quantization_version", GGUF_TYPE_UINT32, GGML_QNT_VERSION); } else { @@ -761,12 +774,13 @@ struct gguf_file_saver { { const gguf_type arr_type = gguf_get_arr_type(ctx, i); const int n_arr = gguf_get_arr_n (ctx, i); - if (arr_type == GGUF_TYPE_FLOAT32) { - write_kv_arr_f32(key, arr_type, i, n_arr); - } else if (arr_type == GGUF_TYPE_STRING) { - write_kv_arr_str(key, arr_type, i, n_arr); - } else { - throw std::runtime_error("not implemented"); + + switch (arr_type) { + case GGUF_TYPE_FLOAT32: write_kv_arr_f32(key, arr_type, i, n_arr); break; + case GGUF_TYPE_INT32: write_kv_arr_i32(key, arr_type, i, n_arr); break; + case GGUF_TYPE_STRING: write_kv_arr_str(key, arr_type, i, n_arr); break; + default: + throw std::runtime_error(format("cannot recognize array type for key %s\n", key)); } } break; default: From 2d87c9c7963e239e29471a5394a3d57a81d51948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?= Date: Tue, 15 Aug 2023 13:29:30 +0300 Subject: [PATCH 08/29] llama : refactor tensor names (#2622) * gguf: update tensor names searched in quantization * gguf : define tensor names as constants --- gguf-llama.cpp | 55 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 22 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 76f65b71f3d00..b4cb864788c80 100644 --- 
a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -56,6 +56,20 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif +// tensor names +#define TN_TOKEN_EMBD "token_embd.weight" +#define TN_OUTPUT_NORM "output_norm.weight" +#define TN_OUTPUT "output.weight" +#define TN_ATTN_NORM "blk.%d.attn_norm.weight" +#define TN_ATTN_Q "blk.%d.attn_q.weight" +#define TN_ATTN_K "blk.%d.attn_k.weight" +#define TN_ATTN_V "blk.%d.attn_v.weight" +#define TN_ATTN_OUTPUT "blk.%d.attn_output.weight" +#define TN_FFN_NORM "blk.%d.ffn_norm.weight" +#define TN_FFN_GATE "blk.%d.ffn_gate.weight" +#define TN_FFN_DOWN "blk.%d.ffn_down.weight" +#define TN_FFN_UP "blk.%d.ffn_up.weight" + static void llama_log_internal(llama_log_level level, const char* format, ...); static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); #define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) @@ -1310,7 +1324,7 @@ static void llama_model_load_internal( ml->ggml_ctx = ctx; - model.tok_embeddings = ml->get_tensor("token_embd.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embeddings = ml->get_tensor(TN_TOKEN_EMBD, {n_embd, n_vocab}, GGML_BACKEND_CPU); // "output" tensor { @@ -1331,8 +1345,8 @@ static void llama_model_load_internal( backend_output = GGML_BACKEND_CPU; } - model.norm = ml->get_tensor("output_norm.weight", {n_embd}, backend_norm); - model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output); + model.norm = ml->get_tensor(TN_OUTPUT_NORM, {n_embd}, backend_norm); + model.output = ml->get_tensor(TN_OUTPUT, {n_embd, n_vocab}, backend_output); if (backend_norm == GGML_BACKEND_GPU) { vram_weights += ggml_nbytes(model.norm); } @@ -1349,21 +1363,18 @@ static void llama_model_load_internal( const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT auto & layer = model.layers[i]; + layer.attention_norm = ml->get_tensor(format(TN_ATTN_NORM, i), {n_embd}, backend); - std::string layers_i = "blk." 
+ std::to_string(i); - - layer.attention_norm = ml->get_tensor(layers_i + ".attn_norm.weight", {n_embd}, backend); - - layer.wq = ml->get_tensor(layers_i + ".attn_q.weight", {n_embd, n_embd}, backend_split); - layer.wk = ml->get_tensor(layers_i + ".attn_k.weight", {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml->get_tensor(layers_i + ".attn_v.weight", {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml->get_tensor(layers_i + ".attn_output.weight", {n_embd, n_embd}, backend_split); + layer.wq = ml->get_tensor(format(TN_ATTN_Q, i), {n_embd, n_embd}, backend_split); + layer.wk = ml->get_tensor(format(TN_ATTN_K, i), {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml->get_tensor(format(TN_ATTN_V, i), {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml->get_tensor(format(TN_ATTN_OUTPUT, i), {n_embd, n_embd}, backend_split); - layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend); + layer.ffn_norm = ml->get_tensor(format(TN_FFN_NORM, i), {n_embd}, backend); - layer.w1 = ml->get_tensor(layers_i + ".ffn_gate.weight", {n_embd, n_ff}, backend_split); - layer.w2 = ml->get_tensor(layers_i + ".ffn_down.weight", { n_ff, n_embd}, backend_split); - layer.w3 = ml->get_tensor(layers_i + ".ffn_up.weight", {n_embd, n_ff}, backend_split); + layer.w1 = ml->get_tensor(format(TN_FFN_GATE, i), {n_embd, n_ff}, backend_split); + layer.w2 = ml->get_tensor(format(TN_FFN_DOWN, i), { n_ff, n_embd}, backend_split); + layer.w3 = ml->get_tensor(format(TN_FFN_UP, i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -3240,10 +3251,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s int n_attention_wv = 0; int n_feed_forward_w2 = 0; for (auto& tensor : model_loader->tensors_map.tensors) { - if (tensor.name.find("attention.wv.weight") != std::string::npos) { + if (tensor.name.find("attn_v.weight") != std::string::npos) { ++n_attention_wv; } - else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + else if (tensor.name.find("ffn_down.weight") != std::string::npos) { ++n_feed_forward_w2; } } @@ -3298,13 +3309,13 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } else { new_type = quantized_type; #ifdef GGML_USE_K_QUANTS - if (tensor.name == "output.weight") { + if (tensor.name == TN_OUTPUT) { int nx = tensor.ne.at(0); int ny = tensor.ne.at(1); if (nx % QK_K == 0 && ny % QK_K == 0) { new_type = GGML_TYPE_Q6_K; } - } else if (tensor.name.find("attention.wv.weight") != std::string::npos) { + } else if (tensor.name.find("attn_v.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && @@ -3319,7 +3330,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K; ++i_feed_forward_w2; - } else if (tensor.name.find("attention.wo.weight") != std::string::npos) { + } else if (tensor.name.find("attn_output.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; } @@ -3334,10 
+3345,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } if (convert_incompatible_tensor) { - if (tensor.name == "output.weight") { + if (tensor.name == TN_OUTPUT) { new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n"); - } else if (tensor.name == "tok_embeddings.weight") { + } else if (tensor.name == TN_TOKEN_EMBD) { new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n"); } else { From 5cb9d9a87f6004dfb7a7a112a02f655034c6272b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 13:39:10 +0300 Subject: [PATCH 09/29] gguf : initial write API (not tested yet) --- ggml.c | 234 +++++++++++++++++++++++++++++++++++++++++++------ ggml.h | 25 +++++- gguf-llama.cpp | 38 ++++---- 3 files changed, 247 insertions(+), 50 deletions(-) diff --git a/ggml.c b/ggml.c index cdba137da0a68..8e9cd42d72e0c 100644 --- a/ggml.c +++ b/ggml.c @@ -18613,8 +18613,6 @@ struct gguf_header { uint32_t version; uint32_t n_tensors; uint32_t n_kv; - - struct gguf_kv * kv; }; struct gguf_tensor_info { @@ -18630,7 +18628,9 @@ struct gguf_tensor_info { }; struct gguf_context { - struct gguf_header header; + struct gguf_header header; + + struct gguf_kv * kv; struct gguf_tensor_info * infos; size_t alignment; @@ -18660,6 +18660,26 @@ static bool gguf_fread_str(struct gguf_str * p, FILE * file, size_t * offset) { return ok; } +struct gguf_context * gguf_init_empty(void) { + struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + + ctx->header.magic = GGUF_MAGIC; + ctx->header.version = GGUF_VERSION; + ctx->header.n_tensors = 0; + ctx->header.n_kv = 0; + + ctx->kv = NULL; + ctx->infos = NULL; + + ctx->alignment = GGUF_DEFAULT_ALIGNMENT; + ctx->offset = 0; + ctx->size_data = 0; + + ctx->data = NULL; + + return ctx; +} + struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { FILE * file = fopen(fname, "rb"); if (!file) { @@ -18689,8 +18709,8 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // read the header { ctx->header.magic = magic; - ctx->header.kv = NULL; + ctx->kv = NULL; ctx->infos = NULL; ctx->data = NULL; @@ -18708,10 +18728,10 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // read the kv pairs { - ctx->header.kv = GGML_ALIGNED_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv)); + ctx->kv = GGML_ALIGNED_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv)); for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { - struct gguf_kv * kv = &ctx->header.kv[i]; + struct gguf_kv * kv = &ctx->kv[i]; //fprintf(stderr, "%s: reading kv %d\n", __func__, i); @@ -18757,7 +18777,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p } } break; case GGUF_TYPE_ARRAY: - case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; }; } break; case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); @@ -18827,7 +18847,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // compute the total size of the data section, taking into account the alignment { - ctx->size_data = 0; for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { struct gguf_tensor_info * info = &ctx->infos[i]; @@ -18944,10 +18963,10 @@ void gguf_free(struct gguf_context * ctx) { return; } - if (ctx->header.kv) 
{ + if (ctx->kv) { // free string memory - not great.. for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { - struct gguf_kv * kv = &ctx->header.kv[i]; + struct gguf_kv * kv = &ctx->kv[i]; if (kv->key.data) { free(kv->key.data); @@ -18974,7 +18993,7 @@ void gguf_free(struct gguf_context * ctx) { } } - GGML_ALIGNED_FREE(ctx->header.kv); + GGML_ALIGNED_FREE(ctx->kv); } if (ctx->infos) { @@ -19014,8 +19033,9 @@ int gguf_get_n_kv(struct gguf_context * ctx) { int gguf_find_key(struct gguf_context * ctx, const char * key) { // return -1 if key not found + int keyfound = -1; + const int n_kv = gguf_get_n_kv(ctx); - int keyfound = -1; for (int i = 0; i < n_kv; ++i) { if (strcmp(key, gguf_get_key(ctx, i)) == 0) { @@ -19028,69 +19048,69 @@ int gguf_find_key(struct gguf_context * ctx, const char * key) { } const char * gguf_get_key(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].key.data; + return ctx->kv[i].key.data; } enum gguf_type gguf_get_kv_type(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].type; + return ctx->kv[i].type; } enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.arr.type; + return ctx->kv[i].value.arr.type; } int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i) { - return ((int32_t *) ctx->header.kv[key_id].value.arr.data)[i]; + return ((int32_t *) ctx->kv[key_id].value.arr.data)[i]; } float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i) { - return ((float *) ctx->header.kv[key_id].value.arr.data)[i]; + return ((float *) ctx->kv[key_id].value.arr.data)[i]; } const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) { - struct gguf_kv * kv = &ctx->header.kv[key_id]; + struct gguf_kv * kv = &ctx->kv[key_id]; struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i]; return str->data; } int gguf_get_arr_n(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.arr.n; + return ctx->kv[i].value.arr.n; } uint8_t gguf_get_val_u8(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.uint8; + return ctx->kv[i].value.uint8; } int8_t gguf_get_val_i8(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.int8; + return ctx->kv[i].value.int8; } uint16_t gguf_get_val_u16(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.uint16; + return ctx->kv[i].value.uint16; } int16_t gguf_get_val_i16(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.int16; + return ctx->kv[i].value.int16; } uint32_t gguf_get_val_u32(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.uint32; + return ctx->kv[i].value.uint32; } int32_t gguf_get_val_i32(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.int32; + return ctx->kv[i].value.int32; } float gguf_get_val_f32(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.float32; + return ctx->kv[i].value.float32; } bool gguf_get_val_bool(struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.bool_; + return ctx->kv[i].value.bool_; } const char * gguf_get_val_str (struct gguf_context * ctx, int i) { - return ctx->header.kv[i].value.str.data; + return ctx->kv[i].value.str.data; } int gguf_get_n_tensors(struct gguf_context * ctx) { @@ -19105,6 +19125,164 @@ char * gguf_get_tensor_name(struct gguf_context * ctx, int i) { return ctx->infos[i].name.data; } +// returns the index +static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) { + const int idx = gguf_find_key(ctx, key); + if 
(idx >= 0) { + return idx; + } + + const int n_kv = gguf_get_n_kv(ctx); + + ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv)); + ctx->kv[n_kv].key.n = strlen(key) + 1; + ctx->kv[n_kv].key.data = strdup(key); + ctx->header.n_kv++; + + return n_kv; +} + +void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT8; + ctx->kv[idx].value.uint8 = val; +} + +void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT8; + ctx->kv[idx].value.int8 = val; +} + +void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT16; + ctx->kv[idx].value.uint16 = val; +} + +void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT16; + ctx->kv[idx].value.int16 = val; +} + +void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT32; + ctx->kv[idx].value.uint32 = val; +} + +void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT32; + ctx->kv[idx].value.int32 = val; +} + +void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_FLOAT32; + ctx->kv[idx].value.float32 = val; +} + +void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_BOOL; + ctx->kv[idx].value.bool_ = val; +} + +void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_STRING; + ctx->kv[idx].value.str.n = strlen(val) + 1; + ctx->kv[idx].value.str.data = strdup(val); +} + +void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_ARRAY; + ctx->kv[idx].value.arr.type = type; + ctx->kv[idx].value.arr.n = n; + ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]); + memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]); +} + +void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_ARRAY; + ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING; + ctx->kv[idx].value.arr.n = n; + ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str)); + for (int i = 0; i < n; i++) { + struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i]; + str->n = strlen(data[i]) + 1; + str->data = strdup(data[i]); + } +} + +// set or add KV pairs from another context +void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { + for (uint32_t i = 0; i < src->header.n_kv; i++) { + switch (src->kv[i].type) { + case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break; + case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, 
src->kv[i].value.int8); break; + case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break; + case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break; + case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break; + case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break; + case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break; + case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break; + case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break; + case GGUF_TYPE_ARRAY: + { + if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) { + const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *)); + for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) { + data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; + } + gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); + free(data); + } if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) { + GGML_ASSERT(false && "nested arrays not supported"); + } else { + gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n); + } + } break; + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + } + } +} + +void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor) { + const int idx = ctx->header.n_tensors; + ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info)); + + ctx->infos[idx].name.n = strlen(tensor->name) + 1; + ctx->infos[idx].name.data = strdup(tensor->name); + + ctx->infos[idx].n_dims = tensor->n_dims; + for (int i = 0; i < tensor->n_dims; i++) { + ctx->infos[idx].ne[i] = tensor->ne[i]; + } + //ctx->infos[idx].n_elms = tensor->n_elms; + + ctx->infos[idx].type = tensor->type; + + ctx->infos[idx].offset = -1; // set later; + + ctx->header.n_tensors++; +} + //////////////////////////////////////////////////////////////////////////////// int ggml_cpu_has_avx(void) { diff --git a/ggml.h b/ggml.h index 79bda4538c3b4..96c3c707ea9e0 100644 --- a/ggml.h +++ b/ggml.h @@ -1735,6 +1735,7 @@ extern "C" { struct ggml_context ** ctx; }; + GGML_API struct gguf_context * gguf_init_empty(void); GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); //GGML_API struct gguf_context * gguf_init_from_buffer(..); GGML_API void gguf_free(struct gguf_context * ctx); @@ -1751,9 +1752,8 @@ extern "C" { GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); - GGML_API float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i); - GGML_API int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i); - GGML_API const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i); + GGML_API int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i); // TODO: remove + GGML_API float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i); // TODO: remove GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i); @@ -1766,11 +1766,30 @@ extern "C" { GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i); GGML_API int gguf_get_arr_n (struct gguf_context 
* ctx, int i); GGML_API void gguf_get_arr_data(struct gguf_context * ctx, int i, void * data); + GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i); GGML_API int gguf_get_n_tensors (struct gguf_context * ctx); GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i); GGML_API char * gguf_get_tensor_name (struct gguf_context * ctx, int i); + // overrides existing values or adds a new one + GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); + GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val); + GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val); + GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val); + GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val); + GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val); + GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val); + GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val); + GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val); + GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n); + GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n); + + // set or add KV pairs from another context + GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src); + + GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); + // // system info // diff --git a/gguf-llama.cpp b/gguf-llama.cpp index b4cb864788c80..684b30936c1cc 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -710,17 +710,18 @@ struct gguf_file_saver { // but better to have it as uint32). // we need to calculate the delta in number of bytes written with a counter as a struct member. 
- gguf_file file; gguf_context * ctx; // loaded gguf context (used to re-write the KV section (good enough for now)) + + gguf_file file; size_t info_offset; - size_t tensor_offset = 0; + size_t tensor_offset; - gguf_file_saver(const char * fname, gguf_context * ctx) - : file(fname, "wb"), ctx(ctx) { - fprintf(stderr, "llama.cpp: saving model to %s\n", fname); - write_header(); - write_kv(); - } + gguf_file_saver(const char * fname, gguf_context * ctx) : ctx(ctx), file(fname, "wb") { + LLAMA_LOG_INFO("%s: saving model to %s\n", __func__, fname); + + write_header(); + write_kv(); + } void write_header() { file.write_i32(GGUF_MAGIC); @@ -729,15 +730,15 @@ struct gguf_file_saver { file.write_i32(gguf_get_n_kv (ctx)); } - void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); + void write_kv_arr_i32(const std::string & key, enum gguf_type type, int i, int n_arr) { + std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { - std::string val = gguf_get_arr_str(ctx, i, j); + int32_t val = gguf_get_arr_i32(ctx, i, j); data[j] = val; } - file.write_arr(key, type, data); + file.write_arr(key, type, data); } void write_kv_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { @@ -751,15 +752,15 @@ struct gguf_file_saver { file.write_arr(key, type, data); } - void write_kv_arr_i32(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); + void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { + std::vector data(n_arr); for (int j = 0; j < n_arr; ++j) { - int32_t val = gguf_get_arr_i32(ctx, i, j); + std::string val = gguf_get_arr_str(ctx, i, j); data[j] = val; } - file.write_arr(key, type, data); + file.write_arr(key, type, data); } // re-write the key-value section from the loaded file @@ -807,16 +808,15 @@ struct gguf_file_saver { GGML_ASSERT(gguf_get_data_offset(ctx) >= info_offset); - size_t count = gguf_get_data_offset(ctx) - info_offset; + const size_t count = gguf_get_data_offset(ctx) - info_offset; + file.write_zeros(count); file.seek(info_offset, SEEK_SET); - GGML_ASSERT(info_offset == file.tell()); } size_t write_tensor_info(gguf_load_tensor & tensor, enum ggml_type type) { size_t total_written = 0; file.seek(info_offset, SEEK_SET); - GGML_ASSERT(info_offset == file.tell()); total_written += file.write_str(tensor.name); int32_t n_dims = tensor.ne.size(); From 85ebfb8e5d131a3a99017f431d6ededf2867c044 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 14:26:28 +0300 Subject: [PATCH 10/29] gguf : write to file API (not tested) --- convert-llama-h5-to-gguf.py | 2 +- ggml.c | 206 ++++++++++++++++++++++++++++-------- ggml.h | 2 + 3 files changed, 167 insertions(+), 43 deletions(-) diff --git a/convert-llama-h5-to-gguf.py b/convert-llama-h5-to-gguf.py index 22405673f8a8d..c052d1460b685 100644 --- a/convert-llama-h5-to-gguf.py +++ b/convert-llama-h5-to-gguf.py @@ -132,7 +132,7 @@ def count_model_parts(dir_model: str) -> int: toktype = 1 # defualt to normal token type if tokenizer.is_unknown(i): toktype = 2 if tokenizer.is_control(i): toktype = 3 - + # TODO: How to determinate if a token is user defined? 
# ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto # if tokenizer.is_user_defined(i): toktype = 4 diff --git a/ggml.c b/ggml.c index 8e9cd42d72e0c..4faf784ca2d70 100644 --- a/ggml.c +++ b/ggml.c @@ -18620,11 +18620,13 @@ struct gguf_tensor_info { uint32_t n_dims; uint32_t ne[GGML_MAX_DIMS]; - uint32_t n_elms; // TODO: is this needed? enum ggml_type type; uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT` + + // for writing + const struct ggml_tensor * tensor; }; struct gguf_context { @@ -18635,27 +18637,27 @@ struct gguf_context { size_t alignment; size_t offset; // offset of `data` from beginning of file - size_t size_data; // size of `data` in bytes + size_t size; // size of `data` in bytes //uint8_t * padding; - uint8_t * data; + void * data; }; -static bool gguf_fread_el(void * dst, size_t size, FILE * file, size_t * offset) { +static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) { const size_t n = fread(dst, 1, size, file); *offset += n; return n == size; } -static bool gguf_fread_str(struct gguf_str * p, FILE * file, size_t * offset) { +static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) { p->n = 0; p->data = NULL; bool ok = true; // TODO: how to avoid mallocs for strings? - ok = ok && gguf_fread_el(&p->n, sizeof(p->n), file, offset); p->data = calloc(p->n + 1, 1); - ok = ok && gguf_fread_el( p->data, p->n, file, offset); + ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1); + ok = ok && gguf_fread_el(file, p->data, p->n, offset); return ok; } @@ -18673,7 +18675,7 @@ struct gguf_context * gguf_init_empty(void) { ctx->alignment = GGUF_DEFAULT_ALIGNMENT; ctx->offset = 0; - ctx->size_data = 0; + ctx->size = 0; ctx->data = NULL; @@ -18693,7 +18695,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // check the magic before making allocations { - gguf_fread_el(&magic, sizeof(magic), file, &offset); + gguf_fread_el(file, &magic, sizeof(magic), &offset); if (magic != GGUF_MAGIC) { fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic); @@ -18714,9 +18716,9 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ctx->infos = NULL; ctx->data = NULL; - ok = ok && gguf_fread_el(&ctx->header.version, sizeof(ctx->header.version), file, &offset); - ok = ok && gguf_fread_el(&ctx->header.n_tensors, sizeof(ctx->header.n_tensors), file, &offset); - ok = ok && gguf_fread_el(&ctx->header.n_kv, sizeof(ctx->header.n_kv), file, &offset); + ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset); + ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset); + ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset); if (!ok) { fprintf(stderr, "%s: failed to read header\n", __func__); @@ -18735,26 +18737,26 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p //fprintf(stderr, "%s: reading kv %d\n", __func__, i); - ok = ok && gguf_fread_str(&kv->key, file, &offset); - //ok = ok && gguf_fread_el (&kv->n_bytes, sizeof(kv->n_bytes), file, &offset); - ok = ok && gguf_fread_el (&kv->type, sizeof(kv->type), file, &offset); + ok = ok && gguf_fread_str(file, &kv->key, &offset); + //ok = ok && gguf_fread_el (file, &kv->n_bytes, sizeof(kv->n_bytes), &offset); + ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset); //fprintf(stderr, "%s: 
reading kv with key %s\n", __func__, kv->key.data); switch (kv->type) { - case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (&kv->value.uint8, sizeof(kv->value.uint8), file, &offset); break; - case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (&kv->value.int8, sizeof(kv->value.int8), file, &offset); break; - case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (&kv->value.uint16, sizeof(kv->value.uint16), file, &offset); break; - case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (&kv->value.int16, sizeof(kv->value.int16), file, &offset); break; - case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (&kv->value.uint32, sizeof(kv->value.uint32), file, &offset); break; - case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (&kv->value.int32, sizeof(kv->value.int32), file, &offset); break; - case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (&kv->value.float32, sizeof(kv->value.float32), file, &offset); break; - case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (&kv->value.bool_, sizeof(kv->value.bool_), file, &offset); break; - case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(&kv->value.str, file, &offset); break; + case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break; + case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break; + case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break; + case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break; + case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break; + case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break; + case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break; + case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break; + case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break; case GGUF_TYPE_ARRAY: { - ok = ok && gguf_fread_el(&kv->value.arr.type, sizeof(kv->value.arr.type), file, &offset); - ok = ok && gguf_fread_el(&kv->value.arr.n, sizeof(kv->value.arr.n), file, &offset); + ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset); + ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset); switch (kv->value.arr.type) { case GGUF_TYPE_UINT8: @@ -18767,13 +18769,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p case GGUF_TYPE_BOOL: { kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); - ok = ok && gguf_fread_el(kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], file, &offset); + ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset); } break; case GGUF_TYPE_STRING: { kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str)); for (uint32_t j = 0; j < kv->value.arr.n; ++j) { - ok = ok && gguf_fread_str(&((struct gguf_str *) kv->value.arr.data)[j], file, &offset); + ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset); } } break; case GGUF_TYPE_ARRAY: @@ -18807,14 +18809,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p info->ne[j] = 1; } - ok = ok && gguf_fread_str(&info->name, file, &offset); 
- ok = ok && gguf_fread_el (&info->n_dims, sizeof(info->n_dims), file, &offset); + ok = ok && gguf_fread_str(file, &info->name, &offset); + ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset); for (uint32_t j = 0; j < info->n_dims; ++j) { - ok = ok && gguf_fread_el(&info->ne[j], sizeof(info->ne[j]), file, &offset); + ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset); } - //ok = ok && gguf_fread_el (&info->n_elms, sizeof(info->n_elms), file, &offset); - ok = ok && gguf_fread_el (&info->type, sizeof(info->type), file, &offset); - ok = ok && gguf_fread_el (&info->offset, sizeof(info->offset), file, &offset); + ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset); + ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset); if (!ok) { fprintf(stderr, "%s: failed to read tensor info\n", __func__); @@ -18847,7 +18848,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // compute the total size of the data section, taking into account the alignment { - ctx->size_data = 0; + ctx->size = 0; for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { struct gguf_tensor_info * info = &ctx->infos[i]; @@ -18867,7 +18868,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type); - ctx->size_data += GGML_PAD(size_cur, ctx->alignment); + ctx->size += GGML_PAD(size_cur, ctx->alignment); } } @@ -18881,7 +18882,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p const size_t mem_size = params.no_alloc ? (ctx->header.n_tensors )*ggml_tensor_overhead() : - (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size_data; + (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size; struct ggml_init_params pdata = { .mem_size = mem_size, @@ -18896,12 +18897,12 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p struct ggml_tensor * data = NULL; if (params.no_alloc == false) { - data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size_data); + data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size); ok = ok && data != NULL; // read the binary blob with the tensor data - ok = ok && gguf_fread_el(data->data, ctx->size_data, file, &offset); + ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset); if (!ok) { fprintf(stderr, "%s: failed to read tensor data\n", __func__); @@ -19274,15 +19275,136 @@ void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tenso for (int i = 0; i < tensor->n_dims; i++) { ctx->infos[idx].ne[i] = tensor->ne[i]; } - //ctx->infos[idx].n_elms = tensor->n_elms; - ctx->infos[idx].type = tensor->type; + ctx->infos[idx].type = tensor->type; + ctx->infos[idx].offset = 0; + ctx->infos[idx].tensor = tensor; - ctx->infos[idx].offset = -1; // set later; + if (ctx->header.n_tensors > 0) { + ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ggml_nbytes(tensor), ctx->alignment); + } ctx->header.n_tensors++; } +static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { + fwrite(&val->n, sizeof(val->n), 1, file); + fwrite(val->data, sizeof(char), val->n, file); +} + +static void gguf_fwrite_el(FILE * file, const void * val, size_t size) { + fwrite(val, sizeof(char), size, file); +} + +void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { + FILE * file = fopen(fname, "wb"); + if (!file) { + GGML_ASSERT(false && "failed to open 
file for writing"); + } + + // write header + fwrite(&ctx->header, sizeof(struct gguf_header), 1, file); + + // write key-value pairs + for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { + struct gguf_kv * kv = &ctx->kv[i]; + + gguf_fwrite_str(file, &kv->key); + gguf_fwrite_el (file, &kv->type, sizeof(kv->type)); + + switch (kv->type) { + case GGUF_TYPE_UINT8: gguf_fwrite_el (file, &kv->value.uint8, sizeof(kv->value.uint8) ); break; + case GGUF_TYPE_INT8: gguf_fwrite_el (file, &kv->value.int8, sizeof(kv->value.int8) ); break; + case GGUF_TYPE_UINT16: gguf_fwrite_el (file, &kv->value.uint16, sizeof(kv->value.uint16) ); break; + case GGUF_TYPE_INT16: gguf_fwrite_el (file, &kv->value.int16, sizeof(kv->value.int16) ); break; + case GGUF_TYPE_UINT32: gguf_fwrite_el (file, &kv->value.uint32, sizeof(kv->value.uint32) ); break; + case GGUF_TYPE_INT32: gguf_fwrite_el (file, &kv->value.int32, sizeof(kv->value.int32) ); break; + case GGUF_TYPE_FLOAT32: gguf_fwrite_el (file, &kv->value.float32, sizeof(kv->value.float32)); break; + case GGUF_TYPE_BOOL: gguf_fwrite_el (file, &kv->value.bool_, sizeof(kv->value.bool_) ); break; + case GGUF_TYPE_STRING: gguf_fwrite_str(file, &kv->value.str ); break; + case GGUF_TYPE_ARRAY: + { + gguf_fwrite_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type)); + gguf_fwrite_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n) ); + + switch (kv->value.arr.type) { + case GGUF_TYPE_UINT8: + case GGUF_TYPE_INT8: + case GGUF_TYPE_UINT16: + case GGUF_TYPE_INT16: + case GGUF_TYPE_UINT32: + case GGUF_TYPE_INT32: + case GGUF_TYPE_FLOAT32: + case GGUF_TYPE_BOOL: + { + gguf_fwrite_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); + } break; + case GGUF_TYPE_STRING: + { + for (uint32_t j = 0; j < kv->value.arr.n; ++j) { + gguf_fwrite_str(file, &((struct gguf_str *) kv->value.arr.data)[j]); + } + } break; + case GGUF_TYPE_ARRAY: + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + }; + } break; + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + }; + } + + // write tensor infos + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + gguf_fwrite_str(file, &info->name); + gguf_fwrite_el (file, &info->n_dims, sizeof(info->n_dims)); + for (uint32_t j = 0; j < info->n_dims; ++j) { + gguf_fwrite_el(file, &info->ne[j], sizeof(info->ne[j])); + } + gguf_fwrite_el (file, &info->type, sizeof(info->type)); + gguf_fwrite_el (file, &info->offset, sizeof(info->offset)); + } + + // we require the data section to be aligned, so take into account any padding + { + const size_t offset = ftell(file); + const size_t offset_pad = GGML_PAD(offset, ctx->alignment); + + if (offset_pad != offset) { + uint8_t pad = 0; + for (size_t i = 0; i < offset_pad - offset; ++i) { + gguf_fwrite_el(file, &pad, sizeof(pad)); + } + } + } + + size_t offset = 0; + + // write tensor data + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + const size_t size = ggml_nbytes(info->tensor); + const size_t size_pad = GGML_PAD(size, ctx->alignment); + + gguf_fwrite_el(file, info->tensor->data, size); + + if (size_pad != size) { + uint8_t pad = 0; + for (size_t j = 0; j < size_pad - size; ++j) { + gguf_fwrite_el(file, &pad, sizeof(pad)); + } + } + + GGML_ASSERT(offset == info->offset); + + offset += size_pad; + } + + fclose(file); +} + //////////////////////////////////////////////////////////////////////////////// int ggml_cpu_has_avx(void) { diff --git 
a/ggml.h b/ggml.h index 96c3c707ea9e0..d19574ee94726 100644 --- a/ggml.h +++ b/ggml.h @@ -1790,6 +1790,8 @@ extern "C" { GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); + GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname); + // // system info // From f6ecd15f8329747487e77caaa9c845e86f64234a Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 14:35:00 +0300 Subject: [PATCH 11/29] gguf : initial write API ready + example --- examples/gguf/gguf.cpp | 103 ++++++++--------------------------------- ggml.c | 2 +- 2 files changed, 20 insertions(+), 85 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index b32367f301ee9..fe5fe1b4d4d3d 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -107,48 +107,24 @@ void gguf_ex_write_arr(std::ofstream & fout, const std::string & ke } bool gguf_ex_write(const std::string & fname) { - std::ofstream fout(fname.c_str(), std::ios::binary); + struct gguf_context * ctx = gguf_init_empty(); { - const int32_t magic = GGUF_MAGIC; - fout.write((const char *) &magic, sizeof(magic)); + gguf_set_val_u8 (ctx, "some.parameter.uint8", 0x12); + gguf_set_val_i8 (ctx, "some.parameter.int8", -0x13); + gguf_set_val_u16 (ctx, "some.parameter.uint16", 0x1234); + gguf_set_val_i16 (ctx, "some.parameter.int16", -0x1235); + gguf_set_val_u32 (ctx, "some.parameter.uint32", 0x12345678); + gguf_set_val_i32 (ctx, "some.parameter.int32", -0x12345679); + gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f); + gguf_set_val_bool(ctx, "some.parameter.bool", true); + gguf_set_val_str (ctx, "some.parameter.string", "hello world"); + + //gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector{ 1, 2, 3, 4, }.data(), 4); + //gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector{ 3.145f, 2.718f, 1.414f, }.data(), 3); + //gguf_ex_write_arr(fout, "some.parameter.arr.str", GGUF_TYPE_STRING, { "hello", "world", "!" }); } - { - const int32_t version = GGUF_VERSION; - fout.write((const char *) &version, sizeof(version)); - } - - // NOTE: these have to match the output below! - const int n_tensors = 10; - const int n_kv = 12; - - fout.write((const char*) &n_tensors, sizeof(n_tensors)); - fout.write((const char*) &n_kv, sizeof(n_kv)); - - fprintf(stdout, "%s: write header\n", __func__); - - // kv data - { - gguf_ex_write_val< uint8_t>(fout, "some.parameter.uint8", GGUF_TYPE_UINT8, 0x12); - gguf_ex_write_val< int8_t>(fout, "some.parameter.int8", GGUF_TYPE_INT8, -0x13); - gguf_ex_write_val(fout, "some.parameter.uint16", GGUF_TYPE_UINT16, 0x1234); - gguf_ex_write_val< int16_t>(fout, "some.parameter.int16", GGUF_TYPE_INT16, -0x1235); - gguf_ex_write_val(fout, "some.parameter.uint32", GGUF_TYPE_UINT32, 0x12345678); - gguf_ex_write_val< int32_t>(fout, "some.parameter.int32", GGUF_TYPE_INT32, -0x12345679); - - gguf_ex_write_val (fout, "some.parameter.float32", GGUF_TYPE_FLOAT32, 0.123456789f); - gguf_ex_write_val (fout, "some.parameter.bool", GGUF_TYPE_BOOL, true); - - gguf_ex_write_val(fout, "some.parameter.string", GGUF_TYPE_STRING, "hello world"); - - gguf_ex_write_arr (fout, "some.parameter.arr.i16", GGUF_TYPE_INT16, { 1, 2, 3, 4, }); - gguf_ex_write_arr (fout, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, { 3.145f, 2.718f, 1.414f, }); - gguf_ex_write_arr(fout, "some.parameter.arr.str", GGUF_TYPE_STRING, { "hello", "world", "!" 
}); - } - - uint64_t offset_tensor = 0; - struct ggml_init_params params = { /*.mem_size =*/ 128ull*1024ull*1024ull, /*.mem_buffer =*/ NULL, @@ -157,6 +133,8 @@ bool gguf_ex_write(const std::string & fname) { struct ggml_context * ctx_data = ggml_init(params); + const int n_tensors = 10; + // tensor infos for (int i = 0; i < n_tensors; ++i) { const std::string name = "tensor_" + to_string(i); @@ -178,58 +156,15 @@ bool gguf_ex_write(const std::string & fname) { } } - fprintf(stdout, "%s: tensor: %s, %d dims, ne = [", __func__, name.c_str(), n_dims); - for (int j = 0; j < 4; ++j) { - fprintf(stdout, "%s%3d", j == 0 ? "" : ", ", (int) cur->ne[j]); - } - fprintf(stdout, "], offset_tensor = %6" PRIu64 "\n", offset_tensor); - - gguf_ex_write_str(fout, name); - gguf_ex_write_i32(fout, n_dims); - for (int j = 0; j < n_dims; ++j) { - gguf_ex_write_i32(fout, cur->ne[j]); - } - gguf_ex_write_i32(fout, cur->type); - gguf_ex_write_u64(fout, offset_tensor); - - offset_tensor += GGML_PAD(ggml_nbytes(cur), GGUF_DEFAULT_ALIGNMENT); + gguf_add_tensor(ctx, cur); } - const uint64_t offset_data = GGML_PAD((uint64_t) fout.tellp(), GGUF_DEFAULT_ALIGNMENT); - - fprintf(stdout, "%s: data offset = %" PRIu64 "\n", __func__, offset_data); - - { - const size_t pad = offset_data - fout.tellp(); - - for (size_t j = 0; j < pad; ++j) { - fout.put(0); - } - } - - for (int i = 0; i < n_tensors; ++i) { - fprintf(stdout, "%s: writing tensor %d data\n", __func__, i); - - const std::string name = "tensor_" + to_string(i); - - struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str()); - - fout.write((const char *) cur->data, ggml_nbytes(cur)); - - { - const size_t pad = GGML_PAD(ggml_nbytes(cur), GGUF_DEFAULT_ALIGNMENT) - ggml_nbytes(cur); - - for (size_t j = 0; j < pad; ++j) { - fout.put(0); - } - } - } - - fout.close(); + gguf_write_to_file(ctx, fname.c_str()); fprintf(stdout, "%s: wrote file '%s;\n", __func__, fname.c_str()); ggml_free(ctx_data); + gguf_free(ctx); return true; } diff --git a/ggml.c b/ggml.c index 4faf784ca2d70..19bdc2c0d792c 100644 --- a/ggml.c +++ b/ggml.c @@ -19281,7 +19281,7 @@ void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tenso ctx->infos[idx].tensor = tensor; if (ctx->header.n_tensors > 0) { - ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ggml_nbytes(tensor), ctx->alignment); + ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ggml_nbytes(ctx->infos[idx - 1].tensor), ctx->alignment); } ctx->header.n_tensors++; From 4463965401193793fdeef1cdf8b58c75ba246c0f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 14:39:27 +0300 Subject: [PATCH 12/29] gguf : fix header write --- ggml.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ggml.c b/ggml.c index 19bdc2c0d792c..754ceda25273b 100644 --- a/ggml.c +++ b/ggml.c @@ -19303,7 +19303,10 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { } // write header - fwrite(&ctx->header, sizeof(struct gguf_header), 1, file); + gguf_fwrite_el(file, &ctx->header.magic, sizeof(ctx->header.magic)); + gguf_fwrite_el(file, &ctx->header.version, sizeof(ctx->header.version)); + gguf_fwrite_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); + gguf_fwrite_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); // write key-value pairs for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { From c9b2f7f1bf15c52174eea98dd7a72e7ed706fda6 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 16:01:38 +0300 Subject: [PATCH 13/29] 
gguf : fixes + simplify example + add ggml_nbytes_pad() --- examples/gguf/gguf.cpp | 93 ++---------------------------------------- ggml.c | 16 ++++++-- ggml.h | 1 + 3 files changed, 17 insertions(+), 93 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index fe5fe1b4d4d3d..fe22ab729dfa5 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -21,91 +21,6 @@ static std::string to_string(const T & val) { return ss.str(); } -void gguf_ex_write_str(std::ofstream & fout, const std::string & val) { - const int32_t n = val.size(); - fout.write((const char *) &n, sizeof(n)); - fout.write(val.c_str(), n); -} - -void gguf_ex_write_i32(std::ofstream & fout, int32_t val) { - fout.write((const char *) &val, sizeof(val)); -} - -void gguf_ex_write_u64(std::ofstream & fout, size_t val) { - fout.write((const char *) &val, sizeof(val)); -} - -template -void gguf_ex_write_val(std::ofstream & fout, const std::string & key, enum gguf_type type, const T & val) { - gguf_ex_write_str(fout, key); - fout.write((const char *) &type, sizeof(type)); - fout.write((const char *) &val, sizeof(val)); - - fprintf(stdout, "%s: write param: %s = %s\n", __func__, key.c_str(), to_string(val).c_str()); -} - -template<> -void gguf_ex_write_val(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::string & val) { - gguf_ex_write_str(fout, key); - fout.write((const char *) &type, sizeof(type)); - - const int32_t n = val.size(); - fout.write((const char *) &n, sizeof(n)); - fout.write(val.c_str(), n); - - fprintf(stdout, "%s: write param: %s = %s\n", __func__, key.c_str(), val.c_str()); -} - -template -void gguf_ex_write_arr(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::vector & val) { - gguf_ex_write_str(fout, key); - { - const enum gguf_type tarr = GGUF_TYPE_ARRAY; - fout.write((const char *) &tarr, sizeof(tarr)); - } - - const int32_t n = val.size(); - fout.write((const char *) &type, sizeof(type)); - fout.write((const char *) &n, sizeof(n)); - fout.write((const char *) val.data(), n * sizeof(T)); - - fprintf(stdout, "%s: write param: %s = [", __func__, key.c_str()); - for (int i = 0; i < n; ++i) { - fprintf(stdout, "%s", to_string(val[i]).c_str()); - if (i < n - 1) { - fprintf(stdout, ", "); - } - } - fprintf(stdout, "]\n"); -} - -template<> -void gguf_ex_write_arr(std::ofstream & fout, const std::string & key, enum gguf_type type, const std::vector & val) { - gguf_ex_write_str(fout, key); - { - const enum gguf_type tarr = GGUF_TYPE_ARRAY; - fout.write((const char *) &tarr, sizeof(tarr)); - } - - const int32_t n = val.size(); - fout.write((const char *) &type, sizeof(type)); - fout.write((const char *) &n, sizeof(n)); - for (int i = 0; i < n; ++i) { - const int32_t nstr = val[i].size(); - fout.write((const char *) &nstr, sizeof(nstr)); - fout.write(val[i].c_str(), nstr); - } - - fprintf(stdout, "%s: write param: %s = [", __func__, key.c_str()); - for (int i = 0; i < n; ++i) { - fprintf(stdout, "%s", val[i].c_str()); - if (i < n - 1) { - fprintf(stdout, ", "); - } - } - fprintf(stdout, "]\n"); -} - bool gguf_ex_write(const std::string & fname) { struct gguf_context * ctx = gguf_init_empty(); @@ -118,11 +33,11 @@ bool gguf_ex_write(const std::string & fname) { gguf_set_val_i32 (ctx, "some.parameter.int32", -0x12345679); gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f); gguf_set_val_bool(ctx, "some.parameter.bool", true); - gguf_set_val_str (ctx, "some.parameter.string", "hello world"); + gguf_set_val_str (ctx, 
"some.parameter.string", "hello world"); - //gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector{ 1, 2, 3, 4, }.data(), 4); - //gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector{ 3.145f, 2.718f, 1.414f, }.data(), 3); - //gguf_ex_write_arr(fout, "some.parameter.arr.str", GGUF_TYPE_STRING, { "hello", "world", "!" }); + gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector{ 1, 2, 3, 4, }.data(), 4); + gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector{ 3.145f, 2.718f, 1.414f, }.data(), 3); + gguf_set_arr_str (ctx, "some.parameter.arr.str", std::vector{ "hello", "world", "!" }.data(), 3); } struct ggml_init_params params = { diff --git a/ggml.c b/ggml.c index 754ceda25273b..c69a183e85a17 100644 --- a/ggml.c +++ b/ggml.c @@ -213,10 +213,10 @@ inline static void * ggml_aligned_malloc(size_t size) { error_desc = "insufficient memory"; break; } - GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", - __func__, error_desc, size/(1024.0*1024.0)); + GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0)); return NULL; } + return aligned_memory; } #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size) @@ -4109,7 +4109,11 @@ size_t ggml_nbytes(const struct ggml_tensor * tensor) { // // is enough, but just in case, adding the second part - return GGML_PAD(MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]), GGML_MEM_ALIGN); + return MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]); +} + +size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) { + return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN); } size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { @@ -19271,6 +19275,10 @@ void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tenso ctx->infos[idx].name.n = strlen(tensor->name) + 1; ctx->infos[idx].name.data = strdup(tensor->name); + for (int i = 0; i < GGML_MAX_DIMS; ++i) { + ctx->infos[idx].ne[i] = 1; + } + ctx->infos[idx].n_dims = tensor->n_dims; for (int i = 0; i < tensor->n_dims; i++) { ctx->infos[idx].ne[i] = tensor->ne[i]; @@ -19305,8 +19313,8 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { // write header gguf_fwrite_el(file, &ctx->header.magic, sizeof(ctx->header.magic)); gguf_fwrite_el(file, &ctx->header.version, sizeof(ctx->header.version)); - gguf_fwrite_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); gguf_fwrite_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); + gguf_fwrite_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); // write key-value pairs for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { diff --git a/ggml.h b/ggml.h index d19574ee94726..5984d433da3bd 100644 --- a/ggml.h +++ b/ggml.h @@ -566,6 +566,7 @@ extern "C" { GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor); GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor); GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor); + GGML_API size_t ggml_nbytes_pad (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN GGML_API size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split); GGML_API int ggml_blck_size (enum ggml_type type); From 35177d735d0d244dd9fa139991dd1b4d568a26ab Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 
16:05:23 +0300 Subject: [PATCH 14/29] gguf : minor --- examples/gguf/gguf.cpp | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index fe22ab729dfa5..934e774056463 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -24,21 +24,19 @@ static std::string to_string(const T & val) { bool gguf_ex_write(const std::string & fname) { struct gguf_context * ctx = gguf_init_empty(); - { - gguf_set_val_u8 (ctx, "some.parameter.uint8", 0x12); - gguf_set_val_i8 (ctx, "some.parameter.int8", -0x13); - gguf_set_val_u16 (ctx, "some.parameter.uint16", 0x1234); - gguf_set_val_i16 (ctx, "some.parameter.int16", -0x1235); - gguf_set_val_u32 (ctx, "some.parameter.uint32", 0x12345678); - gguf_set_val_i32 (ctx, "some.parameter.int32", -0x12345679); - gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f); - gguf_set_val_bool(ctx, "some.parameter.bool", true); - gguf_set_val_str (ctx, "some.parameter.string", "hello world"); - - gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector{ 1, 2, 3, 4, }.data(), 4); - gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector{ 3.145f, 2.718f, 1.414f, }.data(), 3); - gguf_set_arr_str (ctx, "some.parameter.arr.str", std::vector{ "hello", "world", "!" }.data(), 3); - } + gguf_set_val_u8 (ctx, "some.parameter.uint8", 0x12); + gguf_set_val_i8 (ctx, "some.parameter.int8", -0x13); + gguf_set_val_u16 (ctx, "some.parameter.uint16", 0x1234); + gguf_set_val_i16 (ctx, "some.parameter.int16", -0x1235); + gguf_set_val_u32 (ctx, "some.parameter.uint32", 0x12345678); + gguf_set_val_i32 (ctx, "some.parameter.int32", -0x12345679); + gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f); + gguf_set_val_bool(ctx, "some.parameter.bool", true); + gguf_set_val_str (ctx, "some.parameter.string", "hello world"); + + gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector{ 1, 2, 3, 4, }.data(), 4); + gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector{ 3.145f, 2.718f, 1.414f, }.data(), 3); + gguf_set_arr_str (ctx, "some.parameter.arr.str", std::vector{ "hello", "world", "!" }.data(), 3); struct ggml_init_params params = { /*.mem_size =*/ 128ull*1024ull*1024ull, From 4ef5e792e378a2a0ed42969de89d5cb161fb9530 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 16:30:07 +0300 Subject: [PATCH 15/29] llama : replace gguf_file_saver with new gguf write API --- examples/gguf/gguf.cpp | 3 +- ggml.c | 29 ++++--- ggml.h | 8 ++ gguf-llama.cpp | 180 +++++------------------------------------ 4 files changed, 47 insertions(+), 173 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index 934e774056463..f67af1416a7c8 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -193,8 +193,7 @@ bool gguf_ex_read_1(const std::string & fname) { struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); - fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", - __func__, i, cur->n_dims, cur->name, cur->data); + fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, cur->n_dims, cur->name, cur->data); // check data { diff --git a/ggml.c b/ggml.c index c69a183e85a17..ead9ab526328e 100644 --- a/ggml.c +++ b/ggml.c @@ -16903,7 +16903,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { // compute size of intermediate results // TODO: does not take into account scratch buffers !!!! 
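
Patches 15 and 16 in this series replace gguf_file_saver with a two-pass write: space for the metadata is reserved up front, the tensor data is streamed, and the finalized metadata is written over the placeholder at the end. A rough sketch of that pattern, assuming the gguf_get_alignment()/gguf_get_meta_size()/gguf_get_meta_data() calls introduced later in the series and a caller that has already registered every tensor via gguf_add_tensor()/gguf_set_tensor_data() in the same order as the data/size arrays passed in (error handling omitted):

#include <stdio.h>
#include <stdlib.h>

#include "ggml.h"

static void write_zeros(FILE * f, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        fputc(0, f);
    }
}

static void gguf_two_pass_write(struct gguf_context * ctx, const char * fname,
                                const void ** data, const size_t * size, int n_tensors) {
    FILE * f = fopen(fname, "wb");

    const size_t align = gguf_get_alignment(ctx);

    // pass 1: leave a zero-filled placeholder for the metadata
    // (header, KV pairs, tensor infos, alignment padding)
    write_zeros(f, gguf_get_meta_size(ctx));

    // pass 1 (cont.): stream the tensor data, padding each tensor to the alignment
    for (int i = 0; i < n_tensors; ++i) {
        fwrite(data[i], 1, size[i], f);
        write_zeros(f, (align - size[i] % align) % align);
    }

    // pass 2: overwrite the placeholder with the finalized metadata
    void * meta = malloc(gguf_get_meta_size(ctx));
    gguf_get_meta_data(ctx, meta);

    fseek(f, 0, SEEK_SET);
    fwrite(meta, 1, gguf_get_meta_size(ctx), f);

    free(meta);
    fclose(f);
}
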
for (int i = 0; i < cgraph->n_nodes; ++i) { - size_eval += ggml_nbytes(cgraph->nodes[i]); + size_eval += ggml_nbytes_pad(cgraph->nodes[i]); } // print @@ -18629,8 +18629,9 @@ struct gguf_tensor_info { uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT` - // for writing - const struct ggml_tensor * tensor; + // for writing API + const void * data; + size_t size; }; struct gguf_context { @@ -19268,7 +19269,12 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { } } -void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor) { +void gguf_add_tensor_ex( + struct gguf_context * ctx, + const struct ggml_tensor * tensor, + enum ggml_type type, + const void * data, + size_t size) { const int idx = ctx->header.n_tensors; ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info)); @@ -19284,17 +19290,22 @@ void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tenso ctx->infos[idx].ne[i] = tensor->ne[i]; } - ctx->infos[idx].type = tensor->type; + ctx->infos[idx].type = type; ctx->infos[idx].offset = 0; - ctx->infos[idx].tensor = tensor; + ctx->infos[idx].data = data; + ctx->infos[idx].size = size; if (ctx->header.n_tensors > 0) { - ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ggml_nbytes(ctx->infos[idx - 1].tensor), ctx->alignment); + ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment); } ctx->header.n_tensors++; } +void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor) { + gguf_add_tensor_ex(ctx, tensor, tensor->type, tensor->data, ggml_nbytes(tensor)); +} + static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { fwrite(&val->n, sizeof(val->n), 1, file); fwrite(val->data, sizeof(char), val->n, file); @@ -19396,10 +19407,10 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { struct gguf_tensor_info * info = &ctx->infos[i]; - const size_t size = ggml_nbytes(info->tensor); + const size_t size = info->size; const size_t size_pad = GGML_PAD(size, ctx->alignment); - gguf_fwrite_el(file, info->tensor->data, size); + gguf_fwrite_el(file, info->data, size); if (size_pad != size) { uint8_t pad = 0; diff --git a/ggml.h b/ggml.h index 5984d433da3bd..368cb00cb00c3 100644 --- a/ggml.h +++ b/ggml.h @@ -1791,6 +1791,14 @@ extern "C" { GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); + // same as gguf_add_tensor, but allows to override tensor data + GGML_API void gguf_add_tensor_ex( + struct gguf_context * ctx, + const struct ggml_tensor * tensor, + enum ggml_type type, + const void * data, + size_t size); + GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname); // diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 684b30936c1cc..e738060448e27 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -695,6 +695,7 @@ struct gguf_file_loader { tensor.name = name; tensor.size = ggml_nbytes(cur); + tensor.ggml_tensor = cur; tensors_map.tensors.push_back(tensor); tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1; @@ -702,165 +703,6 @@ struct gguf_file_loader { } }; -struct gguf_file_saver { - // TODO - // this implementation now assumes that the data section is of the same length as the unquantized model. - // this is needed to write tensor metadata and weights in a single pass by seeking to appropriate positions in the file. 
- // this may not be true when we add quantization version and change ftype description (currently it's string according to the specs, - // but better to have it as uint32). - // we need to calculate the delta in number of bytes written with a counter as a struct member. - - gguf_context * ctx; // loaded gguf context (used to re-write the KV section (good enough for now)) - - gguf_file file; - size_t info_offset; - size_t tensor_offset; - - gguf_file_saver(const char * fname, gguf_context * ctx) : ctx(ctx), file(fname, "wb") { - LLAMA_LOG_INFO("%s: saving model to %s\n", __func__, fname); - - write_header(); - write_kv(); - } - - void write_header() { - file.write_i32(GGUF_MAGIC); - file.write_i32(GGUF_VERSION); - file.write_i32(gguf_get_n_tensors(ctx)); - file.write_i32(gguf_get_n_kv (ctx)); - } - - void write_kv_arr_i32(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); - - for (int j = 0; j < n_arr; ++j) { - int32_t val = gguf_get_arr_i32(ctx, i, j); - data[j] = val; - } - - file.write_arr(key, type, data); - } - - void write_kv_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); - - for (int j = 0; j < n_arr; ++j) { - float val = gguf_get_arr_f32(ctx, i, j); - data[j] = val; - } - - file.write_arr(key, type, data); - } - - void write_kv_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); - - for (int j = 0; j < n_arr; ++j) { - std::string val = gguf_get_arr_str(ctx, i, j); - data[j] = val; - } - - file.write_arr(key, type, data); - } - - // re-write the key-value section from the loaded file - void write_kv() { - const int32_t n_kv = gguf_get_n_kv(ctx); - for (int i = 0; i < n_kv; ++i) { - const char * key = gguf_get_key(ctx, i); - LLAMA_LOG_INFO("%s: writing key '%s'\n", __func__, key); - - if (strcmp(key, "general.quantization_version") == 0) { - file.write_val("general.quantization_version", GGUF_TYPE_UINT32, GGML_QNT_VERSION); - } else { - const gguf_type vtype = gguf_get_kv_type(ctx, i); - - switch (vtype) { - case GGUF_TYPE_BOOL: file.write_val (key, GGUF_TYPE_BOOL, gguf_get_val_bool(ctx, i)); break; - case GGUF_TYPE_FLOAT32: file.write_val (key, GGUF_TYPE_FLOAT32, gguf_get_val_f32 (ctx, i)); break; - case GGUF_TYPE_INT16: file.write_val (key, GGUF_TYPE_INT16, gguf_get_val_i16 (ctx, i)); break; - case GGUF_TYPE_INT32: file.write_val (key, GGUF_TYPE_INT32, gguf_get_val_i32 (ctx, i)); break; - case GGUF_TYPE_INT8: file.write_val (key, GGUF_TYPE_INT8, gguf_get_val_i8 (ctx, i)); break; - case GGUF_TYPE_STRING: file.write_str (key, GGUF_TYPE_STRING, gguf_get_val_str (ctx, i)); break; - case GGUF_TYPE_UINT16: file.write_val(key, GGUF_TYPE_UINT16, gguf_get_val_u16 (ctx, i)); break; - case GGUF_TYPE_UINT32: file.write_val(key, GGUF_TYPE_UINT32, gguf_get_val_u32 (ctx, i)); break; - case GGUF_TYPE_UINT8: file.write_val (key, GGUF_TYPE_UINT8, gguf_get_val_u8 (ctx, i)); break; - case GGUF_TYPE_ARRAY: - { - const gguf_type arr_type = gguf_get_arr_type(ctx, i); - const int n_arr = gguf_get_arr_n (ctx, i); - - switch (arr_type) { - case GGUF_TYPE_FLOAT32: write_kv_arr_f32(key, arr_type, i, n_arr); break; - case GGUF_TYPE_INT32: write_kv_arr_i32(key, arr_type, i, n_arr); break; - case GGUF_TYPE_STRING: write_kv_arr_str(key, arr_type, i, n_arr); break; - default: - throw std::runtime_error(format("cannot recognize array type for key %s\n", key)); - } - } break; - default: - throw std::runtime_error(format("cannot recognize value type for key %s\n", key)); - } 
- } - } - - info_offset = file.tell(); - - GGML_ASSERT(gguf_get_data_offset(ctx) >= info_offset); - - const size_t count = gguf_get_data_offset(ctx) - info_offset; - - file.write_zeros(count); - file.seek(info_offset, SEEK_SET); - } - - size_t write_tensor_info(gguf_load_tensor & tensor, enum ggml_type type) { - size_t total_written = 0; - file.seek(info_offset, SEEK_SET); - total_written += file.write_str(tensor.name); - - int32_t n_dims = tensor.ne.size(); - total_written += file.write_i32(n_dims); - for (int32_t i = 0; i < n_dims; ++i) { - total_written += file.write_i32(tensor.ne[i]); - } - - total_written += file.write_i32(type); - total_written += file.write_u64(tensor_offset); - info_offset += total_written; // position to write info of the next tensor - - file.seek(0, SEEK_END); - - return total_written; - } - - void write_tensor(gguf_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { - switch (new_type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - break; - default: GGML_ASSERT(false); - } - - write_tensor_info(tensor, new_type); - file.write_raw(new_data, new_size); - size_t padded_size = GGML_PAD(new_size, GGUF_DEFAULT_ALIGNMENT); // TODO: handle custom alignment - size_t pad = padded_size - new_size; - file.write_zeros(pad); - tensor_offset += padded_size; // offset of the next tensor - } -}; - struct llama_model_loader { std::unique_ptr file_loader; gguf_load_tensors_map tensors_map; @@ -897,7 +739,6 @@ struct llama_model_loader { tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); } ggml_set_name(tensor, lt.name.c_str()); - GGML_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor if (backend != GGML_BACKEND_CPU) { ggml_set_no_alloc(ggml_ctx, use_mmap); @@ -3245,7 +3086,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } std::unique_ptr model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); - gguf_file_saver file_saver(fname_out.c_str(), model_loader->file_loader->gguf_ctx); + + struct gguf_context * ctx_out = gguf_init_empty(); + + // copy the KV pairs from the input file + gguf_set_kv(ctx_out, model_loader->file_loader->gguf_ctx); + gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); #ifdef GGML_USE_K_QUANTS int n_attention_wv = 0; @@ -3279,6 +3125,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::vector read_data; std::vector work; + std::vector> work_map(model_loader->tensors_map.tensors.size()); + for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { read_data.resize(tensor.size); tensor.data = read_data.data(); @@ -3437,12 +3285,20 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } total_size_org += tensor.size; total_size_new += new_size; - file_saver.write_tensor(tensor, new_type, new_data, new_size); + + // TODO: temp fix until we have stream support in gguf + work_map[idx - 1] = std::vector((char *) new_data, (char *) new_data + new_size); + + gguf_add_tensor_ex(ctx_out, tensor.ggml_tensor, new_type, work_map[idx - 1].data(), new_size); } + gguf_write_to_file(ctx_out, fname_out.c_str()); + gguf_free(ctx_out); + LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, 
total_size_org/1024.0/1024.0); LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + // print histogram for all tensors { int64_t sum_all = 0; for (size_t i = 0; i < hist_all.size(); i++) { From f7a6aa99112c843b5d17f0a29d79215aa61155be Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 19:57:37 +0300 Subject: [PATCH 16/29] gguf : streaming support when writing files --- examples/gguf/gguf.cpp | 2 +- ggml.c | 208 +++++++++++++++++++++++++++++++---------- ggml.h | 39 ++++++-- gguf-llama.cpp | 42 +++++++-- 4 files changed, 227 insertions(+), 64 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index f67af1416a7c8..74a447c07d900 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -72,7 +72,7 @@ bool gguf_ex_write(const std::string & fname) { gguf_add_tensor(ctx, cur); } - gguf_write_to_file(ctx, fname.c_str()); + gguf_write_to_file(ctx, fname.c_str(), false); fprintf(stdout, "%s: wrote file '%s;\n", __func__, fname.c_str()); diff --git a/ggml.c b/ggml.c index ead9ab526328e..7549566aa5c45 100644 --- a/ggml.c +++ b/ggml.c @@ -19123,6 +19123,22 @@ int gguf_get_n_tensors(struct gguf_context * ctx) { return ctx->header.n_tensors; } +int gguf_find_tensor(struct gguf_context * ctx, const char * name) { + // return -1 if tensor not found + int tensorfound = -1; + + const int n_tensors = gguf_get_n_tensors(ctx); + + for (int i = 0; i < n_tensors; ++i) { + if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) { + tensorfound = i; + break; + } + } + + return tensorfound; +} + size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i) { return ctx->infos[i].offset; } @@ -19269,12 +19285,9 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { } } -void gguf_add_tensor_ex( +void gguf_add_tensor( struct gguf_context * ctx, - const struct ggml_tensor * tensor, - enum ggml_type type, - const void * data, - size_t size) { + const struct ggml_tensor * tensor) { const int idx = ctx->header.n_tensors; ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info)); @@ -19290,10 +19303,10 @@ void gguf_add_tensor_ex( ctx->infos[idx].ne[i] = tensor->ne[i]; } - ctx->infos[idx].type = type; + ctx->infos[idx].type = tensor->type; ctx->infos[idx].offset = 0; - ctx->infos[idx].data = data; - ctx->infos[idx].size = size; + ctx->infos[idx].data = tensor->data; + ctx->infos[idx].size = ggml_nbytes(tensor); if (ctx->header.n_tensors > 0) { ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment); @@ -19302,52 +19315,115 @@ void gguf_add_tensor_ex( ctx->header.n_tensors++; } -void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor) { - gguf_add_tensor_ex(ctx, tensor, tensor->type, tensor->data, ggml_nbytes(tensor)); +void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) { + const int idx = gguf_find_tensor(ctx, name); + if (idx < 0) { + GGML_ASSERT(false && "tensor not found"); + } + + ctx->infos[idx].type = type; } -static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { - fwrite(&val->n, sizeof(val->n), 1, file); - fwrite(val->data, sizeof(char), val->n, file); +void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) { + const int idx = gguf_find_tensor(ctx, name); + if (idx < 0) { + GGML_ASSERT(false && "tensor not found"); + } + + ctx->infos[idx].data = data; + ctx->infos[idx].size = size; + + // update offsets + for 
(uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) { + ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment); + } } -static void gguf_fwrite_el(FILE * file, const void * val, size_t size) { - fwrite(val, sizeof(char), size, file); +//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { +// fwrite(&val->n, sizeof(val->n), 1, file); +// fwrite(val->data, sizeof(char), val->n, file); +//} +// +//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) { +// fwrite(val, sizeof(char), size, file); +//} + +struct gguf_buf { + void * data; + size_t size; + size_t offset; +}; + +static struct gguf_buf gguf_buf_init(size_t size) { + struct gguf_buf buf = { + /*buf.data =*/ size == 0 ? NULL : malloc(size), + /*buf.size =*/ size, + /*buf.offset =*/ 0, + }; + + return buf; } -void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { - FILE * file = fopen(fname, "wb"); - if (!file) { - GGML_ASSERT(false && "failed to open file for writing"); +static void gguf_buf_free(struct gguf_buf buf) { + if (buf.data) { + free(buf.data); } +} +static void gguf_buf_grow(struct gguf_buf * buf, size_t size) { + if (buf->offset + size > buf->size) { + buf->size = 1.5*(buf->offset + size); + if (buf->data) { + buf->data = realloc(buf->data, buf->size); + } + } +} + +static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) { + gguf_buf_grow(buf, sizeof(val->n) + val->n); + + buf->data && memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n)); + buf->offset += sizeof(val->n); + + buf->data && memcpy((char *) buf->data + buf->offset, val->data, val->n); + buf->offset += val->n; +} + +static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) { + gguf_buf_grow(buf, el_size); + + buf->data && memcpy((char *) buf->data + buf->offset, val, el_size); + buf->offset += el_size; +} + +static void gguf_write_to_buf(struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { // write header - gguf_fwrite_el(file, &ctx->header.magic, sizeof(ctx->header.magic)); - gguf_fwrite_el(file, &ctx->header.version, sizeof(ctx->header.version)); - gguf_fwrite_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); - gguf_fwrite_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); + gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); + gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); + gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); + gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); // write key-value pairs for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { struct gguf_kv * kv = &ctx->kv[i]; - gguf_fwrite_str(file, &kv->key); - gguf_fwrite_el (file, &kv->type, sizeof(kv->type)); + gguf_bwrite_str(buf, &kv->key); + gguf_bwrite_el (buf, &kv->type, sizeof(kv->type)); switch (kv->type) { - case GGUF_TYPE_UINT8: gguf_fwrite_el (file, &kv->value.uint8, sizeof(kv->value.uint8) ); break; - case GGUF_TYPE_INT8: gguf_fwrite_el (file, &kv->value.int8, sizeof(kv->value.int8) ); break; - case GGUF_TYPE_UINT16: gguf_fwrite_el (file, &kv->value.uint16, sizeof(kv->value.uint16) ); break; - case GGUF_TYPE_INT16: gguf_fwrite_el (file, &kv->value.int16, sizeof(kv->value.int16) ); break; - case GGUF_TYPE_UINT32: gguf_fwrite_el (file, &kv->value.uint32, sizeof(kv->value.uint32) ); break; - case GGUF_TYPE_INT32: gguf_fwrite_el (file, &kv->value.int32, sizeof(kv->value.int32) ); break; - case GGUF_TYPE_FLOAT32: 
gguf_fwrite_el (file, &kv->value.float32, sizeof(kv->value.float32)); break; - case GGUF_TYPE_BOOL: gguf_fwrite_el (file, &kv->value.bool_, sizeof(kv->value.bool_) ); break; - case GGUF_TYPE_STRING: gguf_fwrite_str(file, &kv->value.str ); break; + case GGUF_TYPE_UINT8: gguf_bwrite_el( buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break; + case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break; + case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break; + case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break; + case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break; + case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break; + case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break; + case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break; + case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break; case GGUF_TYPE_ARRAY: { - gguf_fwrite_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type)); - gguf_fwrite_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n) ); + gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type)); + gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) ); switch (kv->value.arr.type) { case GGUF_TYPE_UINT8: @@ -19359,12 +19435,12 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { case GGUF_TYPE_FLOAT32: case GGUF_TYPE_BOOL: { - gguf_fwrite_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); + gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); } break; case GGUF_TYPE_STRING: { for (uint32_t j = 0; j < kv->value.arr.n; ++j) { - gguf_fwrite_str(file, &((struct gguf_str *) kv->value.arr.data)[j]); + gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]); } } break; case GGUF_TYPE_ARRAY: @@ -19379,28 +19455,32 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { struct gguf_tensor_info * info = &ctx->infos[i]; - gguf_fwrite_str(file, &info->name); - gguf_fwrite_el (file, &info->n_dims, sizeof(info->n_dims)); + gguf_bwrite_str(buf, &info->name); + gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims)); for (uint32_t j = 0; j < info->n_dims; ++j) { - gguf_fwrite_el(file, &info->ne[j], sizeof(info->ne[j])); + gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j])); } - gguf_fwrite_el (file, &info->type, sizeof(info->type)); - gguf_fwrite_el (file, &info->offset, sizeof(info->offset)); + gguf_bwrite_el(buf, &info->type, sizeof(info->type)); + gguf_bwrite_el(buf, &info->offset, sizeof(info->offset)); } // we require the data section to be aligned, so take into account any padding { - const size_t offset = ftell(file); + const size_t offset = buf->offset; const size_t offset_pad = GGML_PAD(offset, ctx->alignment); if (offset_pad != offset) { uint8_t pad = 0; for (size_t i = 0; i < offset_pad - offset; ++i) { - gguf_fwrite_el(file, &pad, sizeof(pad)); + gguf_bwrite_el(buf, &pad, sizeof(pad)); } } } + if (only_meta) { + return; + } + size_t offset = 0; // write tensor data @@ -19410,12 +19490,12 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { const size_t size = info->size; const size_t size_pad = GGML_PAD(size, ctx->alignment); - gguf_fwrite_el(file, 
info->data, size); + gguf_bwrite_el(buf, info->data, size); if (size_pad != size) { uint8_t pad = 0; for (size_t j = 0; j < size_pad - size; ++j) { - gguf_fwrite_el(file, &pad, sizeof(pad)); + gguf_bwrite_el(buf, &pad, sizeof(pad)); } } @@ -19423,10 +19503,44 @@ void gguf_write_to_file(struct gguf_context * ctx, const char * fname) { offset += size_pad; } +} + +void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta) { + FILE * file = fopen(fname, "wb"); + if (!file) { + GGML_ASSERT(false && "failed to open file for writing"); + } + + struct gguf_buf buf = gguf_buf_init(16*1024); + + gguf_write_to_buf(ctx, &buf, only_meta); + + fwrite(buf.data, 1, buf.offset, file); + + gguf_buf_free(buf); fclose(file); } +size_t gguf_get_meta_size(struct gguf_context * ctx) { + // no allocs - only compute size + struct gguf_buf buf = gguf_buf_init(0); + + gguf_write_to_buf(ctx, &buf, true); + + return buf.offset; +} + +void gguf_get_meta_data(struct gguf_context * ctx, void * data) { + struct gguf_buf buf = gguf_buf_init(16*1024); + + gguf_write_to_buf(ctx, &buf, true); + + memcpy(data, buf.data, buf.offset); + + gguf_buf_free(buf); +} + //////////////////////////////////////////////////////////////////////////////// int ggml_cpu_has_avx(void) { diff --git a/ggml.h b/ggml.h index 368cb00cb00c3..3eb6acb1016dd 100644 --- a/ggml.h +++ b/ggml.h @@ -1712,7 +1712,6 @@ extern "C" { // gguf // - // TODO: can be removed if the API is extended for writing enum gguf_type { GGUF_TYPE_UINT8 = 0, GGUF_TYPE_INT8 = 1, @@ -1739,7 +1738,8 @@ extern "C" { GGML_API struct gguf_context * gguf_init_empty(void); GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); //GGML_API struct gguf_context * gguf_init_from_buffer(..); - GGML_API void gguf_free(struct gguf_context * ctx); + + GGML_API void gguf_free(struct gguf_context * ctx); GGML_API int gguf_get_version (struct gguf_context * ctx); GGML_API size_t gguf_get_alignment (struct gguf_context * ctx); @@ -1770,6 +1770,7 @@ extern "C" { GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i); GGML_API int gguf_get_n_tensors (struct gguf_context * ctx); + GGML_API int gguf_find_tensor (struct gguf_context * ctx, const char * name); GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i); GGML_API char * gguf_get_tensor_name (struct gguf_context * ctx, int i); @@ -1789,17 +1790,35 @@ extern "C" { // set or add KV pairs from another context GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src); + // manage tensor info GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor); + GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type); + GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size); + + // writing gguf files can be done in 2 ways: + // + // - write the entire gguf_context to a binary file in a single pass: + // + // gguf_write_to_file(ctx, fname); + // + // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data: + // + // FILE * f = fopen(fname, "wb"); + // fseek(f, gguf_get_meta_size(ctx), SEEK_SET); + // fwrite(f, ...); + // void * data = gguf_meta_get_meta_data(ctx); + // fseek(f, 0, SEEK_SET); + // fwrite(f, data, gguf_get_meta_size(ctx)); + // free(data); + // fclose(f); + // - // same as gguf_add_tensor, but allows to override 
tensor data - GGML_API void gguf_add_tensor_ex( - struct gguf_context * ctx, - const struct ggml_tensor * tensor, - enum ggml_type type, - const void * data, - size_t size); + // write the entire context to a binary file + GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta); - GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname); + // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding + GGML_API size_t gguf_get_meta_size(struct gguf_context * ctx); + GGML_API void gguf_get_meta_data(struct gguf_context * ctx, void * data); // // system info diff --git a/gguf-llama.cpp b/gguf-llama.cpp index e738060448e27..bc95591307eb5 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -83,6 +83,13 @@ static std::string to_string(const T & val) { return ss.str(); } +static void zeros(std::ofstream & file, size_t n) { + char zero = 0; + for (size_t i = 0; i < n; ++i) { + file.write(&zero, 1); + } +} + #if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) #include "ggml-alloc.h" #define LLAMA_USE_ALLOCATOR @@ -3049,7 +3056,6 @@ static void llama_convert_tensor_internal(const gguf_load_tensor & tensor, std:: for (auto & worker : workers) { worker.join(); } - } static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { @@ -3087,6 +3093,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::unique_ptr model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); + const size_t align = GGUF_DEFAULT_ALIGNMENT; struct gguf_context * ctx_out = gguf_init_empty(); // copy the KV pairs from the input file @@ -3125,7 +3132,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::vector read_data; std::vector work; - std::vector> work_map(model_loader->tensors_map.tensors.size()); + for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { + gguf_add_tensor(ctx_out, tensor.ggml_tensor); + } + + std::ofstream fout(fname_out, std::ios::binary); + + const size_t meta_size = gguf_get_meta_size(ctx_out); + + LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size); + + // placeholder for the meta data + ::zeros(fout, meta_size); for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { read_data.resize(tensor.size); @@ -3286,13 +3304,25 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s total_size_org += tensor.size; total_size_new += new_size; - // TODO: temp fix until we have stream support in gguf - work_map[idx - 1] = std::vector((char *) new_data, (char *) new_data + new_size); + // update the gguf meta data as we go + gguf_set_tensor_type(ctx_out, tensor.name.c_str(), new_type); + gguf_set_tensor_data(ctx_out, tensor.name.c_str(), new_data, new_size); - gguf_add_tensor_ex(ctx_out, tensor.ggml_tensor, new_type, work_map[idx - 1].data(), new_size); + // write tensor data + padding + fout.write((const char *) new_data, new_size); + zeros(fout, GGML_PAD(new_size, align) - new_size); } - gguf_write_to_file(ctx_out, fname_out.c_str()); + // go back to beginning of file and write the updated meta data + { + fout.seekp(0); + std::vector data(gguf_get_meta_size(ctx_out)); + gguf_get_meta_data(ctx_out, data.data()); + fout.write((const char *) data.data(), data.size()); + } + + fout.close(); + gguf_free(ctx_out); LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, 
total_size_org/1024.0/1024.0); From 1751bd46936f395a75236cceb78639ec32606dcb Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 20:41:53 +0300 Subject: [PATCH 17/29] gguf : remove oboslete write methods --- gguf-util.h | 76 ----------------------------------------------------- 1 file changed, 76 deletions(-) diff --git a/gguf-util.h b/gguf-util.h index c22a14e9c3fc8..15fbc603696d8 100644 --- a/gguf-util.h +++ b/gguf-util.h @@ -99,82 +99,6 @@ struct gguf_file { GGML_ASSERT(ret == 0); // same } - size_t write_str(const std::string & val) { - size_t total_written = 0; - const int32_t n = val.size(); - fwrite((const char *) &n, sizeof(n), 1, fp); - total_written += sizeof(n); - fwrite(val.c_str(), n, 1, fp); - total_written += n; - - return total_written; - } - - size_t write_i32(int32_t val) { - fwrite((const char *) &val, sizeof(val), 1, fp); - return sizeof(val); - } - - size_t write_u64(size_t val) { - fwrite((const char *) &val, sizeof(val), 1, fp); - return sizeof(val); - } - - template - void write_val(const std::string & key, enum gguf_type type, const T & val) { - static_assert(std::is_fundamental::value, "T must be a primitive type"); - write_str(key); - fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &val, sizeof(val), 1, fp); - } - - template - void write_arr(const std::string & key, enum gguf_type type, const std::vector & val) { - static_assert(std::is_fundamental::value, "T must be a primitive type"); - write_str(key); - { - const enum gguf_type tarr = GGUF_TYPE_ARRAY; - fwrite((const char *) &tarr, sizeof(tarr), 1, fp); - } - - const int32_t n = val.size(); - fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &n, sizeof(n), 1, fp); - fwrite(val.data(), sizeof(T), n, fp); - } - - void write_str(const std::string & key, enum gguf_type type, const std::string & val) { - write_str(key); - fwrite((const char *) &type, sizeof(type), 1, fp); - - const int32_t n = val.size(); - fwrite((const char *) &n, sizeof(n), 1, fp); - fwrite(val.c_str(), n, 1, fp); - } - - void write_arr(const std::string & key, enum gguf_type type, const std::vector & val) { - write_str(key); - { - const enum gguf_type tarr = GGUF_TYPE_ARRAY; - fwrite((const char *) &tarr, sizeof(tarr), 1, fp); - } - - const int32_t n = val.size(); - fwrite((const char *) &type, sizeof(type), 1, fp); - fwrite((const char *) &n, sizeof(n), 1, fp); - for (int i = 0; i < n; ++i) { - const int32_t nstr = val[i].size(); - fwrite((const char *) &nstr, sizeof(nstr), 1, fp); - fwrite(val[i].c_str(), nstr, 1, fp); - } - } - - void write_zeros(size_t count) { - for (size_t i = 0; i < count; ++i) { - fputc(0, fp); - } - } - void read_raw(void * ptr, size_t len) const { if (len == 0) { return; From 2906d5492d1ee01187de770797a019a5d3e22a0b Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 20:46:18 +0300 Subject: [PATCH 18/29] gguf : remove obosolete gguf_get_arr_xxx API --- ggml.c | 4 ++++ ggml.h | 5 +---- gguf-llama.cpp | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/ggml.c b/ggml.c index 7549566aa5c45..7c90f44ecf5e8 100644 --- a/ggml.c +++ b/ggml.c @@ -19073,6 +19073,10 @@ float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i) { return ((float *) ctx->kv[key_id].value.arr.data)[i]; } +const void * gguf_get_arr_data(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.arr.data; +} + const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) { struct gguf_kv * kv = &ctx->kv[key_id]; struct 
gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i]; diff --git a/ggml.h b/ggml.h index 3eb6acb1016dd..4dc3ff977cfa0 100644 --- a/ggml.h +++ b/ggml.h @@ -1753,9 +1753,6 @@ extern "C" { GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); - GGML_API int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i); // TODO: remove - GGML_API float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i); // TODO: remove - GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i); GGML_API uint16_t gguf_get_val_u16 (struct gguf_context * ctx, int i); @@ -1766,7 +1763,7 @@ extern "C" { GGML_API bool gguf_get_val_bool(struct gguf_context * ctx, int i); GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i); GGML_API int gguf_get_arr_n (struct gguf_context * ctx, int i); - GGML_API void gguf_get_arr_data(struct gguf_context * ctx, int i, void * data); + GGML_API const void * gguf_get_arr_data(struct gguf_context * ctx, int i); GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i); GGML_API int gguf_get_n_tensors (struct gguf_context * ctx); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index bc95591307eb5..9bf6cc9a95fa5 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -528,6 +528,7 @@ struct llama_state { llama_log_callback log_callback = llama_log_callback_default; void * log_callback_user_data = nullptr; }; + // global state static llama_state g_state; @@ -647,6 +648,8 @@ struct gguf_file_loader { throw std::runtime_error("cannot find token scores list in GGUF file\n"); } + const float * scores = (const float * ) gguf_get_arr_data(gguf_ctx, score_idx); + for (uint32_t i = 0; i < hparams.n_vocab; i++) { std::string word = gguf_get_arr_str(gguf_ctx, token_idx, i); @@ -654,7 +657,7 @@ struct gguf_file_loader { auto & tok_score = vocab.id_to_token[i]; tok_score.tok = std::move(word); - tok_score.score = gguf_get_arr_f32(gguf_ctx, score_idx, i); + tok_score.score = scores[i]; } } From 6c3f824697530deb5fdf4399f7aadcd2f9139283 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 20:53:53 +0300 Subject: [PATCH 19/29] llama : simplify gguf_file_loader --- gguf-llama.cpp | 41 ++++++++++++++--------------------------- 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 9bf6cc9a95fa5..895fc98bd9984 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -590,24 +590,6 @@ struct gguf_file_loader { read_tensor_metadata(tensors_map); } - uint32_t read_u32(const char * key) const { - int i = gguf_find_key(gguf_ctx, key); - if (i == -1) { - throw std::runtime_error(format("cannot find param with key %s\n", key)); - } - - return gguf_get_val_u32(gguf_ctx, i); - } - - float read_f32(const char * key) const { - int i = gguf_find_key(gguf_ctx, key); - if (i == -1) { - throw std::runtime_error(format("cannot find param with key %s\n", key)); - } - - return gguf_get_val_f32(gguf_ctx, i); - } - int read_n_vocab() const { int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); if (i == -1) { @@ -622,17 +604,22 @@ struct gguf_file_loader { // TODO: read all hparams from file hparams.n_vocab = read_n_vocab(); - hparams.n_ctx = read_u32("llama.context_length"); - hparams.n_embd = read_u32("llama.embedding_length"); - hparams.n_ff = read_u32("llama.feed_forward_length"); - hparams.n_head = 
read_u32("llama.attention.head_count"); - hparams.n_layer = read_u32("llama.block_count"); - hparams.n_rot = read_u32("llama.rope.dimension_count"); - hparams.f_rms_norm_eps = read_f32("llama.attention.layer_norm_rms_epsilon"); + hparams.n_ctx = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.context_length")); + hparams.n_embd = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.embedding_length")); + hparams.n_ff = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.feed_forward_length")); + hparams.n_head = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.attention.head_count")); + hparams.n_layer = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.block_count")); + hparams.n_rot = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.rope.dimension_count")); + hparams.f_rms_norm_eps = gguf_get_val_f32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.rms_norm_epsilon")); // n_head_kv default to n_head - hparams.n_head_kv = gguf_find_key(gguf_ctx, "llama.attention.head_count_kv") == -1 ? hparams.n_head : read_u32("llama.attention.head_count_kv"); - + hparams.n_head_kv = hparams.n_head; + { + const int idx = gguf_find_key(gguf_ctx, "llama.attention.head_count_kv"); + if (idx >= 0) { + hparams.n_head_kv = gguf_get_val_u32(gguf_ctx, idx); + } + } } void read_vocab() { From a02b809a2ec183228a411d282a5f5d1cd0b1abd0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 21:09:27 +0300 Subject: [PATCH 20/29] llama : move hparams and vocab from gguf_file_loader to llama_model_loader --- gguf-llama.cpp | 151 ++++++++++++++++++++++--------------------------- gguf-llama.h | 21 ++++--- 2 files changed, 78 insertions(+), 94 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 895fc98bd9984..c62840e887a46 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -367,6 +367,7 @@ struct llama_model { e_model type = MODEL_UNKNOWN; llama_hparams hparams; + llama_vocab vocab; struct ggml_tensor * tok_embeddings; @@ -395,8 +396,6 @@ struct llama_model { int64_t t_load_us = 0; int64_t t_start_us = 0; - llama_vocab vocab; - ~llama_model() { if (ctx) { ggml_free(ctx); @@ -567,10 +566,8 @@ enum gguf_file_version { struct gguf_file_loader { gguf_file file; - gguf_context * gguf_ctx; + gguf_context * ctx_gguf; gguf_file_version file_version; - llama_hparams hparams; - llama_vocab vocab; struct ggml_context * ctx_data = NULL; @@ -582,78 +579,18 @@ struct gguf_file_loader { /*.ctx = */ &ctx_data, }; - gguf_ctx = gguf_init_from_file(fname, params); - file_version = (enum gguf_file_version) gguf_get_version(gguf_ctx); + ctx_gguf = gguf_init_from_file(fname, params); + file_version = (enum gguf_file_version) gguf_get_version(ctx_gguf); - read_hparams(); - read_vocab(); read_tensor_metadata(tensors_map); } - int read_n_vocab() const { - int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); - if (i == -1) { - throw std::runtime_error("cannot find token list in GGUF file\n"); - } - - return gguf_get_arr_n(gguf_ctx, i); - } - - void read_hparams() { - // TODO define keys as constants in header - // TODO: read all hparams from file - - hparams.n_vocab = read_n_vocab(); - hparams.n_ctx = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.context_length")); - hparams.n_embd = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.embedding_length")); - hparams.n_ff = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.feed_forward_length")); - hparams.n_head = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.attention.head_count")); - 
hparams.n_layer = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.block_count")); - hparams.n_rot = gguf_get_val_u32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.rope.dimension_count")); - hparams.f_rms_norm_eps = gguf_get_val_f32(gguf_ctx, gguf_find_key(gguf_ctx, "llama.rms_norm_epsilon")); - - // n_head_kv default to n_head - hparams.n_head_kv = hparams.n_head; - { - const int idx = gguf_find_key(gguf_ctx, "llama.attention.head_count_kv"); - if (idx >= 0) { - hparams.n_head_kv = gguf_get_val_u32(gguf_ctx, idx); - } - } - } - - void read_vocab() { - vocab.id_to_token.resize(hparams.n_vocab); - - const int token_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); - if (token_idx == -1) { - throw std::runtime_error("cannot find token list in GGUF file\n"); - } - - const int score_idx = gguf_find_key(gguf_ctx, "tokenizer.ggml.scores"); - if (score_idx == -1) { - throw std::runtime_error("cannot find token scores list in GGUF file\n"); - } - - const float * scores = (const float * ) gguf_get_arr_data(gguf_ctx, score_idx); - - for (uint32_t i = 0; i < hparams.n_vocab; i++) { - std::string word = gguf_get_arr_str(gguf_ctx, token_idx, i); - - vocab.token_to_id[word] = i; - - auto & tok_score = vocab.id_to_token[i]; - tok_score.tok = std::move(word); - tok_score.score = scores[i]; - } - } - void read_tensor_metadata(gguf_load_tensors_map & tensors_map) const { - const int n_tensors = gguf_get_n_tensors(gguf_ctx); + const int n_tensors = gguf_get_n_tensors(ctx_gguf); for (int i = 0; i < n_tensors; ++i) { gguf_load_tensor tensor; - const char * name = gguf_get_tensor_name(gguf_ctx, i); + const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); @@ -688,7 +625,7 @@ struct gguf_file_loader { } } - tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i); + tensor.file_off = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i); tensor.name = name; tensor.size = ggml_nbytes(cur); @@ -929,15 +866,15 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { return result; } -int llama_max_devices() { +int llama_max_devices(void) { return LLAMA_MAX_DEVICES; } -bool llama_mmap_supported() { +bool llama_mmap_supported(void) { return gguf_mmap::SUPPORTED; } -bool llama_mlock_supported() { +bool llama_mlock_supported(void) { return gguf_mlock::SUPPORTED; } @@ -960,13 +897,13 @@ void llama_backend_init(bool numa) { #endif } -void llama_backend_free() { +void llama_backend_free(void) { #ifdef GGML_USE_MPI ggml_mpi_backend_free(); #endif } -int64_t llama_time_us() { +int64_t llama_time_us(void) { return ggml_time_us(); } @@ -1044,14 +981,33 @@ static void llama_model_load_internal( std::unique_ptr ml(new llama_model_loader(fname, use_mmap)); - vocab = std::move(ml->file_loader->vocab); - model.hparams = ml->file_loader->hparams; model.n_gpu_layers = n_gpu_layers; gguf_file_version file_version = ml->file_loader->file_version; auto & hparams = model.hparams; + // read hparams { + struct gguf_context * ctx = ml->file_loader->ctx_gguf; + + hparams.n_vocab = gguf_get_arr_n (ctx, gguf_find_key(ctx, "tokenizer.ggml.tokens")); + hparams.n_ctx = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.context_length")); + hparams.n_embd = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.embedding_length")); + hparams.n_ff = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.feed_forward_length")); + hparams.n_head = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.attention.head_count")); + 
hparams.n_layer = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.block_count")); + hparams.n_rot = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.rope.dimension_count")); + hparams.f_rms_norm_eps = gguf_get_val_f32(ctx, gguf_find_key(ctx, "llama.rms_norm_epsilon")); + + // n_head_kv default to n_head + hparams.n_head_kv = hparams.n_head; + { + const int idx = gguf_find_key(ctx, "llama.attention.head_count_kv"); + if (idx >= 0) { + hparams.n_head_kv = gguf_get_val_u32(ctx, idx); + } + } + switch (hparams.n_layer) { case 26: model.type = e_model::MODEL_3B; break; case 32: model.type = e_model::MODEL_7B; break; @@ -1083,7 +1039,34 @@ static void llama_model_load_internal( hparams.rope_freq_scale = rope_freq_scale; } - const uint32_t n_ff = hparams.n_ff; + // read vocab + { + struct gguf_context * ctx = ml->file_loader->ctx_gguf; + + vocab.id_to_token.resize(hparams.n_vocab); + + const int token_idx = gguf_find_key(ctx, "tokenizer.ggml.tokens"); + if (token_idx == -1) { + throw std::runtime_error("cannot find token list in GGUF file\n"); + } + + const int score_idx = gguf_find_key(ctx, "tokenizer.ggml.scores"); + if (score_idx == -1) { + throw std::runtime_error("cannot find token scores list in GGUF file\n"); + } + + const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx); + + for (uint32_t i = 0; i < hparams.n_vocab; i++) { + std::string word = gguf_get_arr_str(ctx, token_idx, i); + + vocab.token_to_id[word] = i; + + auto & tok_score = vocab.id_to_token[i]; + tok_score.tok = std::move(word); + tok_score.score = scores[i]; + } + } { LLAMA_LOG_INFO("%s: format = %s\n", __func__, gguf_file_version_name(file_version)); @@ -1096,7 +1079,7 @@ static void llama_model_load_internal( LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); - LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff); + LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); @@ -1193,6 +1176,8 @@ static void llama_model_load_internal( } } + const uint32_t n_ff = hparams.n_ff; + const int i_gpu_start = n_layer - n_gpu_layers; model.layers.resize(n_layer); @@ -3087,7 +3072,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s struct gguf_context * ctx_out = gguf_init_empty(); // copy the KV pairs from the input file - gguf_set_kv(ctx_out, model_loader->file_loader->gguf_ctx); + gguf_set_kv(ctx_out, model_loader->file_loader->ctx_gguf); gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); #ifdef GGML_USE_K_QUANTS @@ -4460,15 +4445,15 @@ std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token return std::string(result.data(), result.size()); } -llama_token llama_token_bos() { +llama_token llama_token_bos(void) { return 1; } -llama_token llama_token_eos() { +llama_token llama_token_eos(void) { return 2; } -llama_token llama_token_nl() { +llama_token llama_token_nl(void) { return 13; } diff --git a/gguf-llama.h b/gguf-llama.h index f342a534c3ccb..62e48b13c94e8 100644 --- a/gguf-llama.h +++ b/gguf-llama.h @@ -194,13 +194,12 @@ extern "C" { // If this is not called, or NULL is supplied, everything is output on stderr. LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); - LLAMA_API int llama_max_devices(); + LLAMA_API struct llama_context_params llama_context_default_params(void); + LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); - LLAMA_API struct llama_context_params llama_context_default_params(); - LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(); - - LLAMA_API bool llama_mmap_supported(); - LLAMA_API bool llama_mlock_supported(); + LLAMA_API int llama_max_devices(void); + LLAMA_API bool llama_mmap_supported(void); + LLAMA_API bool llama_mlock_supported(void); // TODO: not great API - very likely to change // Initialize the llama + ggml backend @@ -208,9 +207,9 @@ extern "C" { // Call once at the start of the program LLAMA_API void llama_backend_init(bool numa); // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(); + LLAMA_API void llama_backend_free(void); - LLAMA_API int64_t llama_time_us(); + LLAMA_API int64_t llama_time_us(void); LLAMA_API struct llama_model * llama_load_model_from_file( const char * path_model, @@ -377,9 +376,9 @@ extern "C" { char * str, int length); // Special tokens - LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence - LLAMA_API llama_token llama_token_eos(); // end-of-sentence - LLAMA_API llama_token llama_token_nl(); // next-line + LLAMA_API llama_token llama_token_bos(void); // beginning-of-sentence + LLAMA_API llama_token llama_token_eos(void); // end-of-sentence + LLAMA_API llama_token llama_token_nl(void); // next-line // Grammar // From afd135a64cf9f375949f3f634517498382ca27e7 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 22:09:56 +0300 
Subject: [PATCH 21/29] llama : merge gguf-util.h in llama.cpp --- Makefile | 2 +- examples/gguf/gguf.cpp | 53 +---- gguf-llama.cpp | 495 ++++++++++++++++++++++++++++++++++++----- gguf-util.h | 397 --------------------------------- 4 files changed, 448 insertions(+), 499 deletions(-) delete mode 100644 gguf-util.h diff --git a/Makefile b/Makefile index 5e50f46919b4d..ce36fcf6b730c 100644 --- a/Makefile +++ b/Makefile @@ -332,7 +332,7 @@ OBJS += ggml-alloc.o llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h llama-util.h $(CXX) $(CXXFLAGS) -c $< -o $@ -gguf-llama.o: gguf-llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h gguf-llama.h gguf-util.h +gguf-llama.o: gguf-llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h gguf-llama.h $(CXX) $(CXXFLAGS) -c $< -o $@ common.o: examples/common.cpp examples/common.h diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index 74a447c07d900..ad212d75280a9 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -1,5 +1,4 @@ #include "ggml.h" -#include "gguf-util.h" #include "gguf-llama.h" #include @@ -195,6 +194,15 @@ bool gguf_ex_read_1(const std::string & fname) { fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, cur->n_dims, cur->name, cur->data); + // print first 10 elements + const float * data = (const float *) cur->data; + + printf("%s data[:10] : ", name); + for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) { + printf("%f ", data[j]); + } + printf("\n\n"); + // check data { const float * data = (const float *) cur->data; @@ -216,48 +224,6 @@ bool gguf_ex_read_1(const std::string & fname) { return true; } -// read just the tensor info and mmap the data in user code -bool gguf_ex_read_2(const std::string & fname) { - struct ggml_context * ctx_data = NULL; - - struct gguf_init_params params = { - /*.no_alloc = */ true, - /*.ctx = */ &ctx_data, - }; - - struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params); - - struct gguf_file file(fname.c_str(), "rb"); - gguf_mmap data_mmap(&file, 0, false); - - const int n_tensors = gguf_get_n_tensors(ctx); - - for (int i = 0; i < n_tensors; ++i) { - const char * name = gguf_get_tensor_name(ctx, i); - const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i); - - struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); - - cur->data = static_cast(data_mmap.addr) + offset; - - // print first 10 elements - const float * data = (const float *) cur->data; - - printf("%s data[:10] : ", name); - for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) { - printf("%f ", data[j]); - } - printf("\n\n"); - } - - fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data)); - - ggml_free(ctx_data); - gguf_free(ctx); - - return true; -} - int main(int argc, char ** argv) { if (argc < 3) { fprintf(stdout, "usage: %s data.gguf r|w\n", argv[0]); @@ -274,7 +240,6 @@ int main(int argc, char ** argv) { } else if (mode == "r") { GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file"); GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file"); - GGML_ASSERT(gguf_ex_read_2(fname) && "failed to read gguf file"); } else if (mode == "q") { llama_model_quantize_params params = llama_model_quantize_default_params(); llama_model_quantize(fname.c_str(), "quant.gguf", ¶ms); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index c62840e887a46..b3de2671fd263 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -6,31 +6,61 @@ #include #endif -#include "gguf-util.h" #define LLAMA_API_CPP 
// TODO: eliminate me #include "gguf-llama.h" #include "ggml.h" + #ifdef GGML_USE_CUBLAS -#include "ggml-cuda.h" +# include "ggml-cuda.h" #elif defined(GGML_USE_CLBLAST) -#include "ggml-opencl.h" +# include "ggml-opencl.h" #endif #ifdef GGML_USE_METAL -#include "ggml-metal.h" +# include "ggml-metal.h" #endif #ifdef GGML_USE_MPI -#include "ggml-mpi.h" +# include "ggml-mpi.h" #endif #ifdef GGML_USE_K_QUANTS -#ifndef QK_K -#ifdef GGML_QKK_64 -#define QK_K 64 +# ifndef QK_K +# ifdef GGML_QKK_64 +# define QK_K 64 +# else +# define QK_K 256 +# endif +# endif +#endif + +#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) +# include "ggml-alloc.h" +# define LLAMA_USE_ALLOCATOR #else -#define QK_K 256 +# define LLAMA_USE_SCRATCH +# define LLAMA_MAX_SCRATCH_BUFFERS 16 #endif + +#ifdef __has_include + #if __has_include() + #include + #if defined(_POSIX_MAPPED_FILES) + #include + #endif + #if defined(_POSIX_MEMLOCK_RANGE) + #include + #endif + #endif #endif + +#if defined(_WIN32) + #define WIN32_LEAN_AND_MEAN + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #include + #include // for _fseeki64 #endif #include @@ -68,7 +98,11 @@ #define TN_FFN_NORM "blk.%d.ffn_norm.weight" #define TN_FFN_GATE "blk.%d.ffn_gate.weight" #define TN_FFN_DOWN "blk.%d.ffn_down.weight" -#define TN_FFN_UP "blk.%d.ffn_up.weight" +#define TN_FFN_UP "blk.%d.ffn_up.weight" + +// +// logging +// static void llama_log_internal(llama_log_level level, const char* format, ...); static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); @@ -76,6 +110,10 @@ static void llama_log_callback_default(llama_log_level level, const char * text, #define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__) #define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) +// +// helpers +// + template static std::string to_string(const T & val) { std::stringstream ss; @@ -90,25 +128,69 @@ static void zeros(std::ofstream & file, size_t n) { } } -#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) -#include "ggml-alloc.h" -#define LLAMA_USE_ALLOCATOR +#ifdef __GNUC__ +#ifdef __MINGW32__ +__attribute__((format(gnu_printf, 1, 2))) #else -#define LLAMA_USE_SCRATCH -#define LLAMA_MAX_SCRATCH_BUFFERS 16 +__attribute__((format(printf, 1, 2))) +#endif #endif +static std::string format(const char * fmt, ...) 
{ + va_list ap, ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), size); +} + +// +// ggml helpers +// -#define UNUSED GGML_UNUSED +static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { + struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); + + if (plan.work_size > 0) { + buf.resize(plan.work_size); + plan.work_data = buf.data(); + } + + ggml_graph_compute(graph, &plan); +} + +// +// llama helpers +// #ifdef GGML_USE_CUBLAS -#define llama_host_malloc(n) ggml_cuda_host_malloc(n) -#define llama_host_free(data) ggml_cuda_host_free(data) +# define llama_host_malloc(n) ggml_cuda_host_malloc(n) +# define llama_host_free(data) ggml_cuda_host_free(data) #elif GGML_USE_METAL -#define llama_host_malloc(n) ggml_metal_host_malloc(n) -#define llama_host_free(data) ggml_metal_host_free(data) +# define llama_host_malloc(n) ggml_metal_host_malloc(n) +# define llama_host_free(data) ggml_metal_host_free(data) #else -#define llama_host_malloc(n) malloc(n) -#define llama_host_free(data) free(data) +# define llama_host_malloc(n) malloc(n) +# define llama_host_free(data) free(data) +#endif + +#if defined(_WIN32) +static std::string llama_format_win_err(DWORD err) { + LPSTR buf; + size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + if (!size) { + return "FormatMessageA failed"; + } + std::string ret(buf, size); + LocalFree(buf); + return ret; +} #endif struct llama_buffer { @@ -147,25 +229,326 @@ struct llama_buffer { } }; -typedef void (*offload_func_t)(struct ggml_tensor * tensor); +struct llama_file { + // use FILE * so we don't have to re-open the file to mmap + FILE * fp; + size_t size; -void llama_nop(struct ggml_tensor * tensor) { // don't offload by default - (void) tensor; -} + llama_file(const char * fname, const char * mode) { + fp = std::fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } -// -// ggml helpers -// + size_t tell() const { +#ifdef _WIN32 + __int64 ret = _ftelli64(fp); +#else + long ret = std::ftell(fp); +#endif + GGML_ASSERT(ret != -1); // this really shouldn't fail + return (size_t) ret; + } -static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { - struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); + void seek(size_t offset, int whence) { +#ifdef _WIN32 + int ret = _fseeki64(fp, (__int64) offset, whence); +#else + int ret = std::fseek(fp, (long) offset, whence); +#endif + GGML_ASSERT(ret == 0); // same + } - if (plan.work_size > 0) { - buf.resize(plan.work_size); - plan.work_data = buf.data(); + void read_raw(void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + std::size_t ret = std::fread(ptr, len, 1, fp); + if (ferror(fp)) { + throw std::runtime_error(format("read error: %s", strerror(errno))); + } + if (ret != 1) { + throw std::runtime_error(std::string("unexpectedly reached end of file")); + } } - ggml_graph_compute(graph, &plan); + void write_raw(const void * ptr, size_t len) const { + if (len == 0) { + return; + } + 
errno = 0; + size_t ret = std::fwrite(ptr, len, 1, fp); + if (ret != 1) { + throw std::runtime_error(format("write error: %s", strerror(errno))); + } + } + + ~llama_file() { + if (fp) { + std::fclose(fp); + } + } +}; + +struct llama_mmap { + void * addr; + size_t size; + + llama_mmap(const llama_mmap &) = delete; + +#ifdef _POSIX_MAPPED_FILES + static constexpr bool SUPPORTED = true; + + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) { + size = file->size; + int fd = fileno(file->fp); + int flags = MAP_SHARED; + // prefetch/readahead impairs performance on NUMA systems + if (numa) { prefetch = 0; } +#ifdef __linux__ + if (prefetch) { flags |= MAP_POPULATE; } +#endif + addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); + if (addr == MAP_FAILED) { + throw std::runtime_error(format("mmap failed: %s", strerror(errno))); + } + + if (prefetch > 0) { + // Advise the kernel to preload the mapped memory + if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) { + fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", + strerror(errno)); + } + } + if (numa) { + // advise the kernel not to use readahead + // (because the next page might not belong on the same node) + if (madvise(addr, file->size, MADV_RANDOM)) { + fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n", + strerror(errno)); + } + } + } + + ~llama_mmap() { + munmap(addr, size); + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { + (void) numa; + + size = file->size; + + HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + DWORD error = GetLastError(); + + if (hMapping == NULL) { + throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); + } + + addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + error = GetLastError(); + CloseHandle(hMapping); + + if (addr == NULL) { + throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); + } + + #if _WIN32_WINNT >= _WIN32_WINNT_WIN8 + if (prefetch) { + // Advise the kernel to preload the mapped memory + WIN32_MEMORY_RANGE_ENTRY range; + range.VirtualAddress = addr; + range.NumberOfBytes = (SIZE_T)size; + if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { + fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } + #else + #pragma message("warning: You are building for pre-Windows 8; prefetch not supported") + #endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8 + } + + ~llama_mmap() { + if (!UnmapViewOfFile(addr)) { + fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { + (void) file; + (void) prefetch; + (void) numa; + + throw std::runtime_error(std::string("mmap not supported")); + } +#endif +}; + +// Represents some region of memory being locked using mlock or VirtualLock; +// will automatically unlock on destruction. 
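+//
+// Usage sketch (illustrative only; assumes a llama_mmap named `mapping` is in scope
+// whose pages should stay resident):
+//
+//   llama_mlock lock;
+//   lock.init(mapping->addr);      // remember the base address of the region
+//   lock.grow_to(mapping->size);   // mlock/VirtualLock at least `size` bytes, rounded to page granularity
+//   // the locked range is released automatically when `lock` goes out of scope
+//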
+struct llama_mlock { + void * addr = NULL; + size_t size = 0; + + bool failed_already = false; + + llama_mlock() {} + llama_mlock(const llama_mlock &) = delete; + + ~llama_mlock() { + if (size) { + raw_unlock(addr, size); + } + } + + void init(void * ptr) { + GGML_ASSERT(addr == NULL && size == 0); + addr = ptr; + } + + void grow_to(size_t target_size) { + GGML_ASSERT(addr); + if (failed_already) { + return; + } + size_t granularity = lock_granularity(); + target_size = (target_size + granularity - 1) & ~(granularity - 1); + if (target_size > size) { + if (raw_lock((uint8_t *) addr + size, target_size - size)) { + size = target_size; + } else { + failed_already = true; + } + } + } + +#ifdef _POSIX_MEMLOCK_RANGE + static constexpr bool SUPPORTED = true; + + size_t lock_granularity() { + return (size_t) sysconf(_SC_PAGESIZE); + } + + #ifdef __APPLE__ + #define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n" + #else + #define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" + #endif + + bool raw_lock(const void * addr, size_t size) { + if (!mlock(addr, size)) { + return true; + } else { + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); + + // Check if the resource limit is fine after all + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) + suggest = false; + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) + suggest = false; + + fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; + } + } + + #undef MLOCK_SUGGESTION + + void raw_unlock(void * addr, size_t size) { + if (munlock(addr, size)) { + fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); + } + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + size_t lock_granularity() { + SYSTEM_INFO si; + GetSystemInfo(&si); + return (size_t) si.dwPageSize; + } + + bool raw_lock(void * ptr, size_t len) { + for (int tries = 1; ; tries++) { + if (VirtualLock(ptr, len)) { + return true; + } + if (tries == 2) { + fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", + len, size, llama_format_win_err(GetLastError()).c_str()); + return false; + } + + // It failed but this was only the first try; increase the working + // set size and try again. + SIZE_T min_ws_size, max_ws_size; + if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { + fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", + gguf_format_win_err(GetLastError()).c_str()); + return false; + } + // Per MSDN: "The maximum number of pages that a process can lock + // is equal to the number of pages in its minimum working set minus + // a small overhead." 
+ // Hopefully a megabyte is enough overhead: + size_t increment = len + 1048576; + // The minimum must be <= the maximum, so we need to increase both: + min_ws_size += increment; + max_ws_size += increment; + if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { + fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", + gguf_format_win_err(GetLastError()).c_str()); + return false; + } + } + } + + void raw_unlock(void * ptr, size_t len) { + if (!VirtualUnlock(ptr, len)) { + fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", + gguf_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + size_t lock_granularity() { + return (size_t) 65536; + } + + bool raw_lock(const void * addr, size_t len) { + fprintf(stderr, "warning: mlock not supported on this system\n"); + return false; + } + + void raw_unlock(const void * addr, size_t len) {} +#endif +}; + +typedef void (*offload_func_t)(struct ggml_tensor * tensor); + +void llama_nop(struct ggml_tensor * tensor) { // don't offload by default + (void) tensor; } // @@ -384,11 +767,11 @@ struct llama_model { llama_buffer buf; // model memory mapped file - std::unique_ptr mapping; + std::unique_ptr mapping; // objects representing data potentially being locked in memory - gguf_mlock mlock_buf; - gguf_mlock mlock_mmap; + llama_mlock mlock_buf; + llama_mlock mlock_mmap; // for quantize-stats only std::vector> tensors_by_name; @@ -565,7 +948,7 @@ enum gguf_file_version { }; struct gguf_file_loader { - gguf_file file; + llama_file file; gguf_context * ctx_gguf; gguf_file_version file_version; @@ -643,11 +1026,11 @@ struct llama_model_loader { bool use_mmap; size_t num_ggml_tensors_created = 0; struct ggml_context * ggml_ctx = NULL; - std::unique_ptr mapping; + std::unique_ptr mapping; llama_model_loader(const std::string & fname_base, bool use_mmap) { file_loader = std::unique_ptr(new gguf_file_loader(fname_base.c_str(), tensors_map)); - if (!gguf_mmap::SUPPORTED) { + if (!llama_mmap::SUPPORTED) { use_mmap = false; } this->use_mmap = use_mmap; @@ -707,13 +1090,13 @@ struct llama_model_loader { if (use_mmap) { lt.data = (uint8_t *) mapping->addr + lt.file_off; } else { - gguf_file & file = file_loader->file; + llama_file & file = file_loader->file; file.seek(lt.file_off, SEEK_SET); file.read_raw(lt.data, lt.size); } } - void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, gguf_mlock * lmlock) { + void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { size_t data_size = 0; size_t prefetch_size = 0; size_t lock_size = 0; @@ -726,7 +1109,7 @@ struct llama_model_loader { } if (use_mmap) { - mapping.reset(new gguf_mmap(&file_loader->file, prefetch_size, ggml_is_numa())); + mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa())); if (lmlock) { lmlock->init(mapping->addr); } @@ -748,7 +1131,7 @@ struct llama_model_loader { load_data_for(lt); - switch(lt.ggml_tensor->backend) { + switch (lt.ggml_tensor->backend) { case GGML_BACKEND_CPU: lt.ggml_tensor->data = lt.data; if (use_mmap && lmlock) { @@ -871,11 +1254,11 @@ int llama_max_devices(void) { } bool llama_mmap_supported(void) { - return gguf_mmap::SUPPORTED; + return llama_mmap::SUPPORTED; } bool llama_mlock_supported(void) { - return gguf_mlock::SUPPORTED; + return llama_mlock::SUPPORTED; } void llama_backend_init(bool numa) { @@ -982,7 +1365,6 @@ static void 
llama_model_load_internal( std::unique_ptr ml(new llama_model_loader(fname, use_mmap)); model.n_gpu_layers = n_gpu_layers; - gguf_file_version file_version = ml->file_loader->file_version; auto & hparams = model.hparams; @@ -1069,7 +1451,7 @@ static void llama_model_load_internal( } { - LLAMA_LOG_INFO("%s: format = %s\n", __func__, gguf_file_version_name(file_version)); + LLAMA_LOG_INFO("%s: format = %s\n", __func__, gguf_file_version_name(ml->file_loader->file_version)); LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); @@ -1935,15 +2317,15 @@ static bool llama_is_eos_token(const llama_vocab & vocab, llama_token token) { } static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token token) { - UNUSED(vocab); - UNUSED(token); + GGML_UNUSED(vocab); + GGML_UNUSED(token); // TODO: improve? return false; } static bool llama_is_unused_token(const llama_vocab & vocab, llama_token token) { - UNUSED(vocab); - UNUSED(token); + GGML_UNUSED(vocab); + GGML_UNUSED(token); // TODO: improve? return false; } @@ -2598,7 +2980,6 @@ void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * } } - void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) { // Reference implementation: // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr @@ -3615,7 +3996,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const // maybe this should in llama_model_loader if (model_loader->use_mmap) { - model_loader->mapping.reset(new gguf_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa())); + model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa())); } } @@ -4143,7 +4524,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { } static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { - gguf_file file(path_session, "rb"); + llama_file file(path_session, "rb"); GGML_UNUSED(ctx); GGML_UNUSED(path_session); GGML_UNUSED(tokens_out); @@ -4164,7 +4545,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi } bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { - gguf_file file(path_session, "wb"); + llama_file file(path_session, "wb"); GGML_UNUSED(ctx); GGML_UNUSED(tokens); GGML_UNUSED(n_token_count); diff --git a/gguf-util.h b/gguf-util.h deleted file mode 100644 index 15fbc603696d8..0000000000000 --- a/gguf-util.h +++ /dev/null @@ -1,397 +0,0 @@ -// GGUF counterpart of llama-util.h. -// we may consider making it a part of ggml.c once GGUF work is complete. -// this will require extra work to migrate this to pure C. -// Contains wrappers around OS interfaces. 
- -#ifndef GGUF_UTIL_H -#define GGUF_UTIL_H - -#include "ggml.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef __has_include - #if __has_include() - #include - #if defined(_POSIX_MAPPED_FILES) - #include - #endif - #if defined(_POSIX_MEMLOCK_RANGE) - #include - #endif - #endif -#endif - -#if defined(_WIN32) - #define WIN32_LEAN_AND_MEAN - #ifndef NOMINMAX - #define NOMINMAX - #endif - #include - #include - #include // for _fseeki64 -#endif - -#ifdef __GNUC__ -#ifdef __MINGW32__ -__attribute__((format(gnu_printf, 1, 2))) -#else -__attribute__((format(printf, 1, 2))) -#endif -#endif -static std::string format(const char * fmt, ...) { - va_list ap, ap2; - va_start(ap, fmt); - va_copy(ap2, ap); - int size = vsnprintf(NULL, 0, fmt, ap); - GGML_ASSERT(size >= 0 && size < INT_MAX); - std::vector buf(size + 1); - int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); - GGML_ASSERT(size2 == size); - va_end(ap2); - va_end(ap); - return std::string(buf.data(), size); -} - -// TODO: can we merge this one and gguf_context? -struct gguf_file { - // use FILE * so we don't have to re-open the file to mmap - FILE * fp; - size_t size; - - gguf_file(const char * fname, const char * mode) { - fp = std::fopen(fname, mode); - if (fp == NULL) { - throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); - } - seek(0, SEEK_END); - size = tell(); - seek(0, SEEK_SET); - } - - size_t tell() const { -#ifdef _WIN32 - __int64 ret = _ftelli64(fp); -#else - long ret = std::ftell(fp); -#endif - GGML_ASSERT(ret != -1); // this really shouldn't fail - return (size_t) ret; - } - - void seek(size_t offset, int whence) { -#ifdef _WIN32 - int ret = _fseeki64(fp, (__int64) offset, whence); -#else - int ret = std::fseek(fp, (long) offset, whence); -#endif - GGML_ASSERT(ret == 0); // same - } - - void read_raw(void * ptr, size_t len) const { - if (len == 0) { - return; - } - errno = 0; - std::size_t ret = std::fread(ptr, len, 1, fp); - if (ferror(fp)) { - throw std::runtime_error(format("read error: %s", strerror(errno))); - } - if (ret != 1) { - throw std::runtime_error(std::string("unexpectedly reached end of file")); - } - } - - void write_raw(const void * ptr, size_t len) const { - if (len == 0) { - return; - } - errno = 0; - size_t ret = std::fwrite(ptr, len, 1, fp); - if (ret != 1) { - throw std::runtime_error(format("write error: %s", strerror(errno))); - } - } - - ~gguf_file() { - if (fp) { - std::fclose(fp); - } - } -}; - -#if defined(_WIN32) -static std::string gguf_format_win_err(DWORD err) { - LPSTR buf; - size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); - if (!size) { - return "FormatMessageA failed"; - } - std::string ret(buf, size); - LocalFree(buf); - return ret; -} -#endif - -struct gguf_mmap { - void * addr; - size_t size; - - gguf_mmap(const gguf_mmap &) = delete; - -#ifdef _POSIX_MAPPED_FILES - static constexpr bool SUPPORTED = true; - - gguf_mmap(struct gguf_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) { - size = file->size; - int fd = fileno(file->fp); - int flags = MAP_SHARED; - // prefetch/readahead impairs performance on NUMA systems - if (numa) { prefetch = 0; } -#ifdef __linux__ - if (prefetch) { flags |= MAP_POPULATE; } -#endif - addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); - if (addr == MAP_FAILED) { - 
throw std::runtime_error(format("mmap failed: %s", strerror(errno))); - } - - if (prefetch > 0) { - // Advise the kernel to preload the mapped memory - if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) { - fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", - strerror(errno)); - } - } - if (numa) { - // advise the kernel not to use readahead - // (because the next page might not belong on the same node) - if (madvise(addr, file->size, MADV_RANDOM)) { - fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n", - strerror(errno)); - } - } - } - - ~gguf_mmap() { - munmap(addr, size); - } -#elif defined(_WIN32) - static constexpr bool SUPPORTED = true; - - gguf_mmap(struct gguf_file * file, bool prefetch = true, bool numa = false) { - (void) numa; - - size = file->size; - - HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); - - HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); - DWORD error = GetLastError(); - - if (hMapping == NULL) { - throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); - } - - addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); - error = GetLastError(); - CloseHandle(hMapping); - - if (addr == NULL) { - throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); - } - - #if _WIN32_WINNT >= _WIN32_WINNT_WIN8 - if (prefetch) { - // Advise the kernel to preload the mapped memory - WIN32_MEMORY_RANGE_ENTRY range; - range.VirtualAddress = addr; - range.NumberOfBytes = (SIZE_T)size; - if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { - fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", - gguf_format_win_err(GetLastError()).c_str()); - } - } - #else - #pragma message("warning: You are building for pre-Windows 8; prefetch not supported") - #endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8 - } - - ~gguf_mmap() { - if (!UnmapViewOfFile(addr)) { - fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", - llama_format_win_err(GetLastError()).c_str()); - } - } -#else - static constexpr bool SUPPORTED = false; - - gguf_mmap(struct gguf_file * file, bool prefetch = true, bool numa = false) { - (void) file; - (void) prefetch; - (void) numa; - - throw std::runtime_error(std::string("mmap not supported")); - } -#endif -}; - -// Represents some region of memory being locked using mlock or VirtualLock; -// will automatically unlock on destruction. -struct gguf_mlock { - void * addr = NULL; - size_t size = 0; - bool failed_already = false; - - gguf_mlock() {} - gguf_mlock(const gguf_mlock &) = delete; - - ~gguf_mlock() { - if (size) { - raw_unlock(addr, size); - } - } - - void init(void * ptr) { - GGML_ASSERT(addr == NULL && size == 0); - addr = ptr; - } - - void grow_to(size_t target_size) { - GGML_ASSERT(addr); - if (failed_already) { - return; - } - size_t granularity = lock_granularity(); - target_size = (target_size + granularity - 1) & ~(granularity - 1); - if (target_size > size) { - if (raw_lock((uint8_t *) addr + size, target_size - size)) { - size = target_size; - } else { - failed_already = true; - } - } - } - -#ifdef _POSIX_MEMLOCK_RANGE - static constexpr bool SUPPORTED = true; - - size_t lock_granularity() { - return (size_t) sysconf(_SC_PAGESIZE); - } - - #ifdef __APPLE__ - #define MLOCK_SUGGESTION \ - "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ - "decreasing 'vm.global_no_user_wire_amount'. 
Also try increasing RLIMIT_MLOCK (ulimit -l).\n" - #else - #define MLOCK_SUGGESTION \ - "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" - #endif - - bool raw_lock(const void * addr, size_t size) { - if (!mlock(addr, size)) { - return true; - } else { - char* errmsg = std::strerror(errno); - bool suggest = (errno == ENOMEM); - - // Check if the resource limit is fine after all - struct rlimit lock_limit; - if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) - suggest = false; - if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) - suggest = false; - - fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", - size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); - return false; - } - } - - #undef MLOCK_SUGGESTION - - void raw_unlock(void * addr, size_t size) { - if (munlock(addr, size)) { - fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); - } - } -#elif defined(_WIN32) - static constexpr bool SUPPORTED = true; - - size_t lock_granularity() { - SYSTEM_INFO si; - GetSystemInfo(&si); - return (size_t) si.dwPageSize; - } - - bool raw_lock(void * ptr, size_t len) { - for (int tries = 1; ; tries++) { - if (VirtualLock(ptr, len)) { - return true; - } - if (tries == 2) { - fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", - len, size, llama_format_win_err(GetLastError()).c_str()); - return false; - } - - // It failed but this was only the first try; increase the working - // set size and try again. - SIZE_T min_ws_size, max_ws_size; - if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { - fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", - gguf_format_win_err(GetLastError()).c_str()); - return false; - } - // Per MSDN: "The maximum number of pages that a process can lock - // is equal to the number of pages in its minimum working set minus - // a small overhead." 
- // Hopefully a megabyte is enough overhead: - size_t increment = len + 1048576; - // The minimum must be <= the maximum, so we need to increase both: - min_ws_size += increment; - max_ws_size += increment; - if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { - fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", - gguf_format_win_err(GetLastError()).c_str()); - return false; - } - } - } - - void raw_unlock(void * ptr, size_t len) { - if (!VirtualUnlock(ptr, len)) { - fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", - gguf_format_win_err(GetLastError()).c_str()); - } - } -#else - static constexpr bool SUPPORTED = false; - - size_t lock_granularity() { - return (size_t) 65536; - } - - bool raw_lock(const void * addr, size_t len) { - fprintf(stderr, "warning: mlock not supported on this system\n"); - return false; - } - - void raw_unlock(const void * addr, size_t len) {} -#endif -}; - -#endif From f477fb069b4e56588caec611b5644dd468663aaf Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 22:29:56 +0300 Subject: [PATCH 22/29] llama : reorder definitions in .cpp to match .h --- gguf-llama.cpp | 878 +++++++++++++++++++++++++------------------------ gguf-llama.h | 9 +- 2 files changed, 446 insertions(+), 441 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index b3de2671fd263..d08c64fd70d33 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -11,6 +11,14 @@ #include "ggml.h" +#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) +# include "ggml-alloc.h" +# define LLAMA_USE_ALLOCATOR +#else +# define LLAMA_USE_SCRATCH +# define LLAMA_MAX_SCRATCH_BUFFERS 16 +#endif + #ifdef GGML_USE_CUBLAS # include "ggml-cuda.h" #elif defined(GGML_USE_CLBLAST) @@ -33,14 +41,6 @@ # endif #endif -#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) -# include "ggml-alloc.h" -# define LLAMA_USE_ALLOCATOR -#else -# define LLAMA_USE_SCRATCH -# define LLAMA_MAX_SCRATCH_BUFFERS 16 -#endif - #ifdef __has_include #if __has_include() #include @@ -254,7 +254,7 @@ struct llama_file { return (size_t) ret; } - void seek(size_t offset, int whence) { + void seek(size_t offset, int whence) const { #ifdef _WIN32 int ret = _fseeki64(fp, (__int64) offset, whence); #else @@ -416,7 +416,7 @@ struct llama_mlock { } void init(void * ptr) { - GGML_ASSERT(addr == NULL && size == 0); + GGML_ASSERT(addr == NULL && size == 0); // NOLINT addr = ptr; } @@ -439,7 +439,7 @@ struct llama_mlock { #ifdef _POSIX_MEMLOCK_RANGE static constexpr bool SUPPORTED = true; - size_t lock_granularity() { + static size_t lock_granularity() { return (size_t) sysconf(_SC_PAGESIZE); } @@ -452,29 +452,31 @@ struct llama_mlock { "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" #endif - bool raw_lock(const void * addr, size_t size) { + bool raw_lock(const void * addr, size_t size) const { if (!mlock(addr, size)) { return true; - } else { - char* errmsg = std::strerror(errno); - bool suggest = (errno == ENOMEM); + } - // Check if the resource limit is fine after all - struct rlimit lock_limit; - if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) - suggest = false; - if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) - suggest = false; + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); - fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", - size, this->size, errmsg, suggest ? 
MLOCK_SUGGESTION : ""); - return false; + // Check if the resource limit is fine after all + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) { + suggest = false; } + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) { + suggest = false; + } + + fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; } #undef MLOCK_SUGGESTION - void raw_unlock(void * addr, size_t size) { + static void raw_unlock(void * addr, size_t size) { if (munlock(addr, size)) { fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); } @@ -482,13 +484,13 @@ struct llama_mlock { #elif defined(_WIN32) static constexpr bool SUPPORTED = true; - size_t lock_granularity() { + static size_t lock_granularity() { SYSTEM_INFO si; GetSystemInfo(&si); return (size_t) si.dwPageSize; } - bool raw_lock(void * ptr, size_t len) { + bool raw_lock(void * ptr, size_t len) const { for (int tries = 1; ; tries++) { if (VirtualLock(ptr, len)) { return true; @@ -504,7 +506,7 @@ struct llama_mlock { SIZE_T min_ws_size, max_ws_size; if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", - gguf_format_win_err(GetLastError()).c_str()); + llama_format_win_err(GetLastError()).c_str()); return false; } // Per MSDN: "The maximum number of pages that a process can lock @@ -517,31 +519,31 @@ struct llama_mlock { max_ws_size += increment; if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", - gguf_format_win_err(GetLastError()).c_str()); + llama_format_win_err(GetLastError()).c_str()); return false; } } } - void raw_unlock(void * ptr, size_t len) { + static void raw_unlock(void * ptr, size_t len) { if (!VirtualUnlock(ptr, len)) { fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", - gguf_format_win_err(GetLastError()).c_str()); + llama_format_win_err(GetLastError()).c_str()); } } #else static constexpr bool SUPPORTED = false; - size_t lock_granularity() { + static size_t lock_granularity() { return (size_t) 65536; } - bool raw_lock(const void * addr, size_t len) { + bool raw_lock(const void * addr, size_t len) const { fprintf(stderr, "warning: mlock not supported on this system\n"); return false; } - void raw_unlock(const void * addr, size_t len) {} + static void raw_unlock(const void * addr, size_t len) {} #endif }; @@ -551,6 +553,18 @@ void llama_nop(struct ggml_tensor * tensor) { // don't offload by default (void) tensor; } +// +// globals +// + +struct llama_state { + // We save the log callback globally + llama_log_callback log_callback = llama_log_callback_default; + void * log_callback_user_data = nullptr; +}; + +static llama_state g_state; + // // memory sizes (calculated for n_batch == 512) // @@ -729,10 +743,10 @@ struct llama_kv_cache { }; struct llama_vocab { - // TODO: convert to this gguf_vocab - // add a vector of merges - // add members for bos/eos/pad/sep tokens - // so that we can pass it to different types of tokenizers with a common interface + // TODO: + // - add a vector of merges + // - add members for bos/eos/pad/sep tokens + // so that we can pass it to different types of tokenizers with a common interface using id = int32_t; using token = std::string; @@ -905,19 +919,71 @@ struct llama_context { } }; -struct llama_state { - // We 
save the log callback globally - llama_log_callback log_callback = llama_log_callback_default; - void * log_callback_user_data = nullptr; -}; +// +// kv cache helpers +// -// global state -static llama_state g_state; +static bool llama_kv_cache_init( + const struct llama_hparams & hparams, + struct llama_kv_cache & cache, + ggml_type wtype, + int n_ctx, + int n_gpu_layers) { + const int n_embd = hparams.n_embd_gqa(); + const int n_layer = hparams.n_layer; + + const int64_t n_mem = n_layer*n_ctx; + const int64_t n_elements = n_embd*n_mem; + + cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB); + cache.n = 0; + + struct ggml_init_params params; + params.mem_size = cache.buf.size; + params.mem_buffer = cache.buf.data; + params.no_alloc = false; + + cache.ctx = ggml_init(params); + + if (!cache.ctx) { + LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__); + return false; + } + + cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); + ggml_set_name(cache.k, "cache_k"); + ggml_set_name(cache.v, "cache_v"); + + (void) n_gpu_layers; +#ifdef GGML_USE_CUBLAS + if (n_gpu_layers > n_layer + 1) { + ggml_cuda_assign_buffers_no_scratch(cache.v); + } + if (n_gpu_layers > n_layer + 2) { + ggml_cuda_assign_buffers_no_scratch(cache.k); + } +#endif // GGML_USE_CUBLAS + + return true; +} // // model loading and saving // +enum llama_file_version { + GGUF_FILE_VERSION_V1 = 1, +}; + +static const char * llama_file_version_name(llama_file_version version) { + switch (version) { + case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)"; + } + + return "unknown"; +} + static std::string llama_format_tensor_shape(const std::vector & ne) { char buf[256]; snprintf(buf, sizeof(buf), "%5u", ne.at(0)); @@ -927,7 +993,7 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { return buf; } -struct gguf_load_tensor { +struct llama_load_tensor { std::string name; enum ggml_type type = GGML_TYPE_F32; std::vector ne; @@ -937,24 +1003,20 @@ struct gguf_load_tensor { uint8_t * data; }; -struct gguf_load_tensors_map { +struct llama_load_tensors_map { // tensors is kept in a separate vector to preserve file order - std::vector tensors; + std::vector tensors; std::unordered_map name_to_idx; }; -enum gguf_file_version { - GGUF_FILE_VERSION_V1 = 1, -}; - -struct gguf_file_loader { +struct llama_file_loader { llama_file file; gguf_context * ctx_gguf; - gguf_file_version file_version; + llama_file_version file_version; struct ggml_context * ctx_data = NULL; - gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map) : file(fname, "rb") { + llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map) : file(fname, "rb") { fprintf(stderr, "llama.cpp: loading model from %s\n", fname); struct gguf_init_params params = { @@ -963,16 +1025,16 @@ struct gguf_file_loader { }; ctx_gguf = gguf_init_from_file(fname, params); - file_version = (enum gguf_file_version) gguf_get_version(ctx_gguf); + file_version = (enum llama_file_version) gguf_get_version(ctx_gguf); read_tensor_metadata(tensors_map); } - void read_tensor_metadata(gguf_load_tensors_map & tensors_map) const { + void read_tensor_metadata(llama_load_tensors_map & tensors_map) const { const int n_tensors = gguf_get_n_tensors(ctx_gguf); for (int i = 0; i < n_tensors; ++i) { - gguf_load_tensor tensor; + llama_load_tensor tensor; const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); @@ 
-1021,15 +1083,15 @@ struct gguf_file_loader { }; struct llama_model_loader { - std::unique_ptr file_loader; - gguf_load_tensors_map tensors_map; + std::unique_ptr file_loader; + llama_load_tensors_map tensors_map; bool use_mmap; size_t num_ggml_tensors_created = 0; struct ggml_context * ggml_ctx = NULL; std::unique_ptr mapping; llama_model_loader(const std::string & fname_base, bool use_mmap) { - file_loader = std::unique_ptr(new gguf_file_loader(fname_base.c_str(), tensors_map)); + file_loader = std::unique_ptr(new llama_file_loader(fname_base.c_str(), tensors_map)); if (!llama_mmap::SUPPORTED) { use_mmap = false; } @@ -1038,13 +1100,13 @@ struct llama_model_loader { void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { *ctx_size_p = *mmapped_size_p = 0; - for (const gguf_load_tensor & lt : tensors_map.tensors) { + for (const llama_load_tensor & lt : tensors_map.tensors) { *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16; } } - struct ggml_tensor * get_tensor_for(gguf_load_tensor & lt, ggml_backend backend) { + struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) { struct ggml_tensor * tensor; if (backend != GGML_BACKEND_CPU) { ggml_set_no_alloc(ggml_ctx, true); @@ -1071,7 +1133,7 @@ struct llama_model_loader { if (it == tensors_map.name_to_idx.end()) { throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); } - gguf_load_tensor & lt = tensors_map.tensors.at(it->second); + llama_load_tensor & lt = tensors_map.tensors.at(it->second); if (lt.ne != ne) { throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); @@ -1086,7 +1148,7 @@ struct llama_model_loader { } } - void load_data_for(gguf_load_tensor & lt) const { + void load_data_for(llama_load_tensor & lt) const { if (use_mmap) { lt.data = (uint8_t *) mapping->addr + lt.file_off; } else { @@ -1101,7 +1163,7 @@ struct llama_model_loader { size_t prefetch_size = 0; size_t lock_size = 0; - for (const gguf_load_tensor & lt : tensors_map.tensors) { + for (const llama_load_tensor & lt : tensors_map.tensors) { data_size += lt.size; if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { prefetch_size += lt.size; @@ -1116,7 +1178,7 @@ struct llama_model_loader { } size_t done_size = 0; - for (gguf_load_tensor & lt : tensors_map.tensors) { + for (llama_load_tensor & lt : tensors_map.tensors) { if (progress_callback) { progress_callback((float) done_size / data_size, progress_callback_user_data); } @@ -1164,144 +1226,10 @@ struct llama_model_loader { } }; -// -// kv cache -// - -static bool kv_cache_init( - const struct llama_hparams & hparams, - struct llama_kv_cache & cache, - ggml_type wtype, - int n_ctx, - int n_gpu_layers) { - const int n_embd = hparams.n_embd_gqa(); - const int n_layer = hparams.n_layer; - - const int64_t n_mem = n_layer*n_ctx; - const int64_t n_elements = n_embd*n_mem; - - cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB); - cache.n = 0; - - struct ggml_init_params params; - params.mem_size = cache.buf.size; - params.mem_buffer = cache.buf.data; - params.no_alloc = false; - - cache.ctx = ggml_init(params); - - if (!cache.ctx) { - LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__); - return false; - } - - cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); - cache.v = 
ggml_new_tensor_1d(cache.ctx, wtype, n_elements); - ggml_set_name(cache.k, "cache_k"); - ggml_set_name(cache.v, "cache_v"); - - (void) n_gpu_layers; -#ifdef GGML_USE_CUBLAS - if (n_gpu_layers > n_layer + 1) { - ggml_cuda_assign_buffers_no_scratch(cache.v); - } - if (n_gpu_layers > n_layer + 2) { - ggml_cuda_assign_buffers_no_scratch(cache.k); - } -#endif // GGML_USE_CUBLAS - - return true; -} - -struct llama_context_params llama_context_default_params() { - struct llama_context_params result = { - /*.seed =*/ LLAMA_DEFAULT_SEED, - /*.n_ctx =*/ 512, - /*.n_batch =*/ 512, - /*.gpu_layers =*/ 0, - /*.main_gpu =*/ 0, - /*.tensor_split =*/ nullptr, - /*.rope_freq_base =*/ 10000.0f, - /*.rope_freq_scale =*/ 1.0f, - /*.progress_callback =*/ nullptr, - /*.progress_callback_user_data =*/ nullptr, - /*.low_vram =*/ false, - /*.mul_mat_q =*/ false, - /*.f16_kv =*/ true, - /*.logits_all =*/ false, - /*.vocab_only =*/ false, - /*.use_mmap =*/ true, - /*.use_mlock =*/ false, - /*.embedding =*/ false, - }; - - return result; -} - -struct llama_model_quantize_params llama_model_quantize_default_params() { - struct llama_model_quantize_params result = { - /*.nthread =*/ 0, - /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, - /*.allow_requantize =*/ false, - /*.quantize_output_tensor =*/ true, - }; - - return result; -} - -int llama_max_devices(void) { - return LLAMA_MAX_DEVICES; -} - -bool llama_mmap_supported(void) { - return llama_mmap::SUPPORTED; -} - -bool llama_mlock_supported(void) { - return llama_mlock::SUPPORTED; -} - -void llama_backend_init(bool numa) { - ggml_time_init(); - - // needed to initialize f16 tables - { - struct ggml_init_params params = { 0, NULL, false }; - struct ggml_context * ctx = ggml_init(params); - ggml_free(ctx); - } - - if (numa) { - ggml_numa_init(); - } - -#ifdef GGML_USE_MPI - ggml_mpi_backend_init(); -#endif -} - -void llama_backend_free(void) { -#ifdef GGML_USE_MPI - ggml_mpi_backend_free(); -#endif -} - -int64_t llama_time_us(void) { - return ggml_time_us(); -} - // // load LLaMA models // -static const char * gguf_file_version_name(gguf_file_version version) { - switch (version) { - case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)"; - } - - return "unknown"; -} - static const char * llama_ftype_name(enum llama_ftype ftype) { switch (ftype) { case LLAMA_FTYPE_ALL_F32: return "all F32"; @@ -1451,7 +1379,7 @@ static void llama_model_load_internal( } { - LLAMA_LOG_INFO("%s: format = %s\n", __func__, gguf_file_version_name(ml->file_loader->file_version)); + LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->file_loader->file_version)); LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); @@ -1677,7 +1605,7 @@ static void llama_model_load_internal( } // populate `tensors_by_name` - for (gguf_load_tensor & lt : ml->tensors_map.tensors) { + for (llama_load_tensor & lt : ml->tensors_map.tensors) { model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); } @@ -3357,7 +3285,7 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar // quantization // -static void llama_convert_tensor_internal(const gguf_load_tensor & tensor, std::vector & output, const size_t nelements, const int nthread) { +static void llama_convert_tensor_internal(const llama_load_tensor & tensor, std::vector & output, const size_t nelements, const int nthread) { if (output.size() < nelements) { output.resize(nelements); } @@ 
-3488,7 +3416,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::vector read_data; std::vector work; - for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { + for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { gguf_add_tensor(ctx_out, tensor.ggml_tensor); } @@ -3501,7 +3429,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s // placeholder for the meta data ::zeros(fout, meta_size); - for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { + for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { read_data.resize(tensor.size); tensor.data = read_data.data(); model_loader->load_data_for(tensor); @@ -3701,237 +3629,15 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } -// -// interface implementation -// - -struct llama_model * llama_load_model_from_file( - const char * path_model, - struct llama_context_params params) { - ggml_time_init(); - - llama_model * model = new llama_model; +int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { + LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); - ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; + const int64_t t_start_lora_us = ggml_time_us(); - if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers, - params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale, - params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only, - params.progress_callback, params.progress_callback_user_data)) { - LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); - delete model; - return nullptr; - } - - return model; -} - -void llama_free_model(struct llama_model * model) { - delete model; -} - -struct llama_context * llama_new_context_with_model( - struct llama_model * model, - struct llama_context_params params) { - - if (!model) { - return nullptr; - } - - llama_context * ctx = new llama_context(*model); - - if (params.seed == LLAMA_DEFAULT_SEED) { - params.seed = time(NULL); - } - - unsigned cur_percentage = 0; - if (params.progress_callback == NULL) { - params.progress_callback_user_data = &cur_percentage; - params.progress_callback = [](float progress, void * ctx) { - unsigned * cur_percentage_p = (unsigned *) ctx; - unsigned percentage = (unsigned) (100 * progress); - while (percentage > *cur_percentage_p) { - *cur_percentage_p = percentage; - LLAMA_LOG_INFO("."); - if (percentage >= 100) { - LLAMA_LOG_INFO("\n"); - } - } - }; - } - - ctx->rng = std::mt19937(params.seed); - ctx->logits_all = params.logits_all; - - ggml_type memory_type = params.f16_kv ? 
GGML_TYPE_F16 : GGML_TYPE_F32; - - // reserve memory for context buffers - if (!params.vocab_only) { - if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { - LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__); - llama_free(ctx); - return nullptr; - } - - { - const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); - LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); - } - - const auto & hparams = ctx->model.hparams; - - // resized during inference - if (params.logits_all) { - ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); - } else { - ctx->logits.reserve(hparams.n_vocab); - } - - if (params.embedding){ - ctx->embedding.resize(hparams.n_embd); - } - -#ifdef LLAMA_USE_ALLOCATOR - { - static const size_t tensor_alignment = 32; - // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data - ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); - - // create measure allocator - ctx->alloc = ggml_allocr_new_measure(tensor_alignment); - - // build worst-case graph - int n_tokens = std::min((int)hparams.n_ctx, params.n_batch); - int n_past = hparams.n_ctx - n_tokens; - llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); - - // measure memory requirements for the graph - size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; - - LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); - - // debug - for comparison with scratch buffer - //size_t prev_req = - // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) + - // MEM_REQ_SCRATCH1().at(ctx->model.type) + - // MEM_REQ_EVAL().at(ctx->model.type); - //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0); - - // recreate allocator with exact memory requirements - ggml_allocr_free(ctx->alloc); - - ctx->buf_alloc.resize(alloc_size); - ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment); - } -#else - ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); -#endif - -#ifdef LLAMA_USE_SCRATCH - ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); - ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); -#endif - } - -#ifdef GGML_USE_METAL - if (params.n_gpu_layers > 0) { - // this allocates all Metal resources and memory buffers - ctx->ctx_metal = ggml_metal_init(1); - - if (!ctx->ctx_metal) { - LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); - llama_free(ctx); - return NULL; - } - - void * data_ptr = NULL; - size_t data_size = 0; - - if (params.use_mmap) { - data_ptr = ctx->model.mapping->addr; - data_size = ctx->model.mapping->size; - } else { - data_ptr = ggml_get_mem_buffer(ctx->model.ctx); - data_size = ggml_get_mem_size (ctx->model.ctx); - } - - const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); - - LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); - -#define LLAMA_METAL_CHECK_BUF(result) \ - if (!(result)) { \ - LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ - llama_free(ctx); \ - 
return NULL; \ - } - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].data, ctx->buf_scratch[0].size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].data, ctx->buf_scratch[1].size, 0)); -#undef LLAMA_METAL_CHECK_BUF - } -#endif - -#ifdef GGML_USE_MPI - ctx->ctx_mpi = ggml_mpi_init(); - - if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { - // Enter a blocking eval loop with dummy input, letting rank=0 drive the process - const std::vector tmp(ctx->model.hparams.n_ctx, llama_token_bos()); - while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; - llama_backend_free(); - exit(1); - } -#endif - - return ctx; -} - -struct llama_context * llama_init_from_file( - const char * path_model, - struct llama_context_params params) { - - struct llama_model * model = llama_load_model_from_file(path_model, params); - if (!model) { - return nullptr; - } - struct llama_context * ctx = llama_new_context_with_model(model, params); - ctx->model_owner = true; - return ctx; -} - -void llama_free(struct llama_context * ctx) { - delete ctx; -} - -int llama_model_quantize( - const char * fname_inp, - const char * fname_out, - const llama_model_quantize_params * params) { - try { - llama_model_quantize_internal(fname_inp, fname_out, params); - return 0; - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what()); - return 1; - } -} - -int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { - LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); - - const int64_t t_start_lora_us = ggml_time_us(); - - auto fin = std::ifstream(path_lora, std::ios::binary); - if (!fin) { - LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora); - return 1; + auto fin = std::ifstream(path_lora, std::ios::binary); + if (!fin) { + LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora); + return 1; } // verify magic and version @@ -4107,7 +3813,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const return 1; } size_t idx = model_loader->tensors_map.name_to_idx[base_name]; - gguf_load_tensor & lt = model_loader->tensors_map.tensors[idx]; + llama_load_tensor & lt = model_loader->tensors_map.tensors[idx]; base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); lt.data = (uint8_t *) lt.ggml_tensor->data; model_loader->load_data_for(lt); @@ -4197,6 +3903,305 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const return 0; } +// +// interface implementation +// + +struct llama_context_params llama_context_default_params() { + struct llama_context_params result = { + /*.seed =*/ LLAMA_DEFAULT_SEED, + /*.n_ctx =*/ 512, + /*.n_batch =*/ 512, + /*.gpu_layers =*/ 0, + /*.main_gpu =*/ 0, + /*.tensor_split =*/ nullptr, + /*.rope_freq_base =*/ 10000.0f, + /*.rope_freq_scale =*/ 1.0f, + /*.progress_callback =*/ nullptr, + /*.progress_callback_user_data =*/ nullptr, + /*.low_vram =*/ false, + /*.mul_mat_q =*/ false, 
+ /*.f16_kv =*/ true, + /*.logits_all =*/ false, + /*.vocab_only =*/ false, + /*.use_mmap =*/ true, + /*.use_mlock =*/ false, + /*.embedding =*/ false, + }; + + return result; +} + +struct llama_model_quantize_params llama_model_quantize_default_params() { + struct llama_model_quantize_params result = { + /*.nthread =*/ 0, + /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, + /*.allow_requantize =*/ false, + /*.quantize_output_tensor =*/ true, + }; + + return result; +} + +int llama_max_devices(void) { + return LLAMA_MAX_DEVICES; +} + +bool llama_mmap_supported(void) { + return llama_mmap::SUPPORTED; +} + +bool llama_mlock_supported(void) { + return llama_mlock::SUPPORTED; +} + +void llama_backend_init(bool numa) { + ggml_time_init(); + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL, false }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + if (numa) { + ggml_numa_init(); + } + +#ifdef GGML_USE_MPI + ggml_mpi_backend_init(); +#endif +} + +void llama_backend_free(void) { +#ifdef GGML_USE_MPI + ggml_mpi_backend_free(); +#endif +} + +int64_t llama_time_us(void) { + return ggml_time_us(); +} + +struct llama_model * llama_load_model_from_file( + const char * path_model, + struct llama_context_params params) { + ggml_time_init(); + + llama_model * model = new llama_model; + + ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; + + if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers, + params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale, + params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only, + params.progress_callback, params.progress_callback_user_data)) { + LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); + delete model; + return nullptr; + } + + return model; +} + +void llama_free_model(struct llama_model * model) { + delete model; +} + +struct llama_context * llama_new_context_with_model( + struct llama_model * model, + struct llama_context_params params) { + + if (!model) { + return nullptr; + } + + llama_context * ctx = new llama_context(*model); + + if (params.seed == LLAMA_DEFAULT_SEED) { + params.seed = time(NULL); + } + + unsigned cur_percentage = 0; + if (params.progress_callback == NULL) { + params.progress_callback_user_data = &cur_percentage; + params.progress_callback = [](float progress, void * ctx) { + unsigned * cur_percentage_p = (unsigned *) ctx; + unsigned percentage = (unsigned) (100 * progress); + while (percentage > *cur_percentage_p) { + *cur_percentage_p = percentage; + LLAMA_LOG_INFO("."); + if (percentage >= 100) { + LLAMA_LOG_INFO("\n"); + } + } + }; + } + + ctx->rng = std::mt19937(params.seed); + ctx->logits_all = params.logits_all; + + ggml_type memory_type = params.f16_kv ? 
GGML_TYPE_F16 : GGML_TYPE_F32; + + // reserve memory for context buffers + if (!params.vocab_only) { + if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { + LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); + llama_free(ctx); + return nullptr; + } + + { + const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); + LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); + } + + const auto & hparams = ctx->model.hparams; + + // resized during inference + if (params.logits_all) { + ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); + } else { + ctx->logits.reserve(hparams.n_vocab); + } + + if (params.embedding){ + ctx->embedding.resize(hparams.n_embd); + } + +#ifdef LLAMA_USE_ALLOCATOR + { + static const size_t tensor_alignment = 32; + // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data + ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); + + // create measure allocator + ctx->alloc = ggml_allocr_new_measure(tensor_alignment); + + // build worst-case graph + int n_tokens = std::min((int)hparams.n_ctx, params.n_batch); + int n_past = hparams.n_ctx - n_tokens; + llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); + + // measure memory requirements for the graph + size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; + + LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); + + // debug - for comparison with scratch buffer + //size_t prev_req = + // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) + + // MEM_REQ_SCRATCH1().at(ctx->model.type) + + // MEM_REQ_EVAL().at(ctx->model.type); + //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0); + + // recreate allocator with exact memory requirements + ggml_allocr_free(ctx->alloc); + + ctx->buf_alloc.resize(alloc_size); + ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment); + } +#else + ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); +#endif + +#ifdef LLAMA_USE_SCRATCH + ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); + ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); +#endif + } + +#ifdef GGML_USE_METAL + if (params.n_gpu_layers > 0) { + // this allocates all Metal resources and memory buffers + ctx->ctx_metal = ggml_metal_init(1); + + if (!ctx->ctx_metal) { + LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); + llama_free(ctx); + return NULL; + } + + void * data_ptr = NULL; + size_t data_size = 0; + + if (params.use_mmap) { + data_ptr = ctx->model.mapping->addr; + data_size = ctx->model.mapping->size; + } else { + data_ptr = ggml_get_mem_buffer(ctx->model.ctx); + data_size = ggml_get_mem_size (ctx->model.ctx); + } + + const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); + + LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); + +#define LLAMA_METAL_CHECK_BUF(result) \ + if (!(result)) { \ + LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ + 
llama_free(ctx); \ + return NULL; \ + } + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].data, ctx->buf_scratch[0].size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].data, ctx->buf_scratch[1].size, 0)); +#undef LLAMA_METAL_CHECK_BUF + } +#endif + +#ifdef GGML_USE_MPI + ctx->ctx_mpi = ggml_mpi_init(); + + if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { + // Enter a blocking eval loop with dummy input, letting rank=0 drive the process + const std::vector tmp(ctx->model.hparams.n_ctx, llama_token_bos()); + while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; + llama_backend_free(); + exit(1); + } +#endif + + return ctx; +} + +struct llama_context * llama_init_from_file( + const char * path_model, + struct llama_context_params params) { + + struct llama_model * model = llama_load_model_from_file(path_model, params); + if (!model) { + return nullptr; + } + struct llama_context * ctx = llama_new_context_with_model(model, params); + ctx->model_owner = true; + return ctx; +} + +void llama_free(struct llama_context * ctx) { + delete ctx; +} + +int llama_model_quantize( + const char * fname_inp, + const char * fname_out, + const llama_model_quantize_params * params) { + try { + llama_model_quantize_internal(fname_inp, fname_out, params); + return 0; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what()); + return 1; + } +} + int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { try { return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads); @@ -4903,7 +4908,6 @@ const std::vector>& llama_internal_ return ctx->model.tensors_by_name; } - void llama_log_set(llama_log_callback log_callback, void * user_data) { g_state.log_callback = log_callback ? log_callback : llama_log_callback_default; g_state.log_callback_user_data = user_data; diff --git a/gguf-llama.h b/gguf-llama.h index 62e48b13c94e8..d81a1b5defd89 100644 --- a/gguf-llama.h +++ b/gguf-llama.h @@ -111,6 +111,7 @@ extern "C" { bool use_mlock; // force system to keep model in RAM bool embedding; // embedding mode only }; + // model file types enum llama_ftype { LLAMA_FTYPE_ALL_F32 = 0, @@ -190,10 +191,6 @@ extern "C" { int32_t n_eval; }; - // Set callback for all future logging events. - // If this is not called, or NULL is supplied, everything is output on stderr. - LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); - LLAMA_API struct llama_context_params llama_context_default_params(void); LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); @@ -458,6 +455,10 @@ extern "C" { // Print system information LLAMA_API const char * llama_print_system_info(void); + // Set callback for all future logging events. + // If this is not called, or NULL is supplied, everything is output on stderr. 
+ LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); + #ifdef __cplusplus } #endif From 23248d7d32ba599dbded42b3c8ef35b1733e1247 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 15 Aug 2023 22:41:55 +0300 Subject: [PATCH 23/29] llama : minor simplifications --- gguf-llama.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index d08c64fd70d33..079959ab12925 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -1098,11 +1098,11 @@ struct llama_model_loader { this->use_mmap = use_mmap; } - void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { - *ctx_size_p = *mmapped_size_p = 0; + void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const { + ctx_size_p = mmapped_size_p = 0; for (const llama_load_tensor & lt : tensors_map.tensors) { - *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; - *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16; + ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; + (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(lt.ggml_tensor); } } @@ -1159,19 +1159,19 @@ struct llama_model_loader { } void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { - size_t data_size = 0; - size_t prefetch_size = 0; - size_t lock_size = 0; + size_t data_size = 0; + size_t lock_size = 0; + size_t pref_size = 0; // prefetch for (const llama_load_tensor & lt : tensors_map.tensors) { data_size += lt.size; if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { - prefetch_size += lt.size; + pref_size += lt.size; } } if (use_mmap) { - mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa())); + mapping.reset(new llama_mmap(&file_loader->file, pref_size, ggml_is_numa())); if (lmlock) { lmlock->init(mapping->addr); } @@ -1404,7 +1404,7 @@ static void llama_model_load_internal( size_t ctx_size; size_t mmapped_size; - ml->calc_sizes(&ctx_size, &mmapped_size); + ml->calc_sizes(ctx_size, mmapped_size); LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); // create the ggml context @@ -3688,7 +3688,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const size_t ctx_size; size_t mmapped_size; - model_loader->calc_sizes(&ctx_size, &mmapped_size); + model_loader->calc_sizes(ctx_size, mmapped_size); base_buf.resize(ctx_size); ggml_init_params base_params; From 5339b859ec790f3ee16d024f210c88e1aced5ac5 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 00:02:25 +0300 Subject: [PATCH 24/29] llama : refactor llama_model_loader (WIP) wip : remove ggml_ctx from llama_model_loader wip : merge gguf_file_loader in llama_model_loader --- ggml.c | 8 - ggml.h | 1 - gguf-llama.cpp | 448 +++++++++++++++++++++++-------------------------- 3 files changed, 208 insertions(+), 249 deletions(-) diff --git a/ggml.c b/ggml.c index 7c90f44ecf5e8..261695216b50f 100644 --- a/ggml.c +++ b/ggml.c @@ -19065,14 +19065,6 @@ enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i) { return ctx->kv[i].value.arr.type; } -int32_t gguf_get_arr_i32(struct gguf_context * ctx, int key_id, int i) { - return ((int32_t *) ctx->kv[key_id].value.arr.data)[i]; -} - -float gguf_get_arr_f32(struct gguf_context * ctx, int key_id, int i) { - return ((float *) ctx->kv[key_id].value.arr.data)[i]; -} - const void * gguf_get_arr_data(struct gguf_context * ctx, int i) { return ctx->kv[i].value.arr.data; } diff 
--git a/ggml.h b/ggml.h index 4dc3ff977cfa0..8a1661cfbd319 100644 --- a/ggml.h +++ b/ggml.h @@ -1499,7 +1499,6 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * tensor); - GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 079959ab12925..4f6de19f77e9a 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -993,227 +993,189 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { return buf; } -struct llama_load_tensor { - std::string name; - enum ggml_type type = GGML_TYPE_F32; - std::vector ne; - size_t file_off; - size_t size; - struct ggml_tensor * ggml_tensor = NULL; - uint8_t * data; -}; - -struct llama_load_tensors_map { - // tensors is kept in a separate vector to preserve file order - std::vector tensors; - std::unordered_map name_to_idx; -}; +struct llama_model_loader { + int n_tensors = 0; + int n_created = 0; + bool use_mmap = false; -struct llama_file_loader { llama_file file; - gguf_context * ctx_gguf; llama_file_version file_version; - struct ggml_context * ctx_data = NULL; + std::unique_ptr mapping; - llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map) : file(fname, "rb") { - fprintf(stderr, "llama.cpp: loading model from %s\n", fname); + struct gguf_context * ctx_gguf = NULL; + struct ggml_context * ctx_meta = NULL; + llama_model_loader(const std::string & fname, bool use_mmap) : file(fname.c_str(), "rb") { struct gguf_init_params params = { /*.no_alloc = */ true, - /*.ctx = */ &ctx_data, + /*.ctx = */ &ctx_meta, }; - ctx_gguf = gguf_init_from_file(fname, params); - file_version = (enum llama_file_version) gguf_get_version(ctx_gguf); - - read_tensor_metadata(tensors_map); - } - - void read_tensor_metadata(llama_load_tensors_map & tensors_map) const { - const int n_tensors = gguf_get_n_tensors(ctx_gguf); - - for (int i = 0; i < n_tensors; ++i) { - llama_load_tensor tensor; - const char * name = gguf_get_tensor_name(ctx_gguf, i); - - struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); + ctx_gguf = gguf_init_from_file(fname.c_str(), params); - const uint32_t n_dims = cur->n_dims; - tensor.type = cur->type; - tensor.ne.resize(n_dims); - - for (uint32_t j = 0; j < n_dims; ++j) { - tensor.ne[j] = cur->ne[j]; - } - - if (n_dims < 1 || n_dims > 2) { - throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims)); - } - - switch (tensor.type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - break; - default: { - throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type)); - } - } - - tensor.file_off = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i); + n_tensors = gguf_get_n_tensors(ctx_gguf); + file_version = (enum llama_file_version) gguf_get_version(ctx_gguf); - tensor.name = name; - tensor.size = ggml_nbytes(cur); - tensor.ggml_tensor = cur; + LLAMA_LOG_INFO("%s: loaded %d tensors from %s (version %s)\n", + __func__, n_tensors, fname.c_str(), llama_file_version_name(file_version)); - tensors_map.tensors.push_back(tensor); - tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1; - } - } -}; - -struct llama_model_loader { - 
std::unique_ptr file_loader; - llama_load_tensors_map tensors_map; - bool use_mmap; - size_t num_ggml_tensors_created = 0; - struct ggml_context * ggml_ctx = NULL; - std::unique_ptr mapping; - - llama_model_loader(const std::string & fname_base, bool use_mmap) { - file_loader = std::unique_ptr(new llama_file_loader(fname_base.c_str(), tensors_map)); if (!llama_mmap::SUPPORTED) { + LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); use_mmap = false; } + this->use_mmap = use_mmap; } + const char * get_tensor_name(int i) const { + return gguf_get_tensor_name(ctx_gguf, i); + } + + struct ggml_tensor * get_tensor_meta(int i) const { + return ggml_get_tensor(ctx_meta, get_tensor_name(i)); + } + void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const { - ctx_size_p = mmapped_size_p = 0; - for (const llama_load_tensor & lt : tensors_map.tensors) { + ctx_size_p = 0; + mmapped_size_p = 0; + + for (int i = 0; i < n_tensors; i++) { + struct ggml_tensor * meta = get_tensor_meta(i); ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; - (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(lt.ggml_tensor); + (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta); } } - struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) { - struct ggml_tensor * tensor; + struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend backend) { if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ggml_ctx, true); + ggml_set_no_alloc(ctx, true); } - if (lt.ne.size() == 2) { - tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); - } else { - GGML_ASSERT(lt.ne.size() == 1); - tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); - } - ggml_set_name(tensor, lt.name.c_str()); + + struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta); + tensor->backend = backend; // TODO: ggml_set_backend + ggml_set_name(tensor, ggml_get_name(meta)); if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ggml_ctx, use_mmap); + ggml_set_no_alloc(ctx, use_mmap); } - tensor->backend = backend; - lt.ggml_tensor = tensor; - num_ggml_tensors_created++; + + n_created++; + return tensor; } - struct ggml_tensor * get_tensor(const std::string & name, const std::vector & ne, ggml_backend backend) { - auto it = tensors_map.name_to_idx.find(name); - if (it == tensors_map.name_to_idx.end()) { - throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); - } - llama_load_tensor & lt = tensors_map.tensors.at(it->second); - if (lt.ne != ne) { - throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", - name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); + struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector & ne, ggml_backend backend) { + struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); + + // TODO: simplify + { + bool is_ok = true; + for (size_t i = 0; i < ne.size(); ++i) { + if (ne[i] != cur->ne[i]) { + is_ok = false; + break; + } + } + if (!is_ok) { + throw std::runtime_error( + format("%s: tensor '%s' has wrong shape; expected [%d, %d, %d, %d], got [%d, %d, %d, %d]", + __func__, name.c_str(), ne[0], ne[1], ne[2], ne[3], + (int) cur->ne[0], (int) cur->ne[1], (int) cur->ne[2], (int) cur->ne[3])); + } } - return get_tensor_for(lt, backend); + return create_tensor_for(ctx, cur, backend); } 
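// [editorial sketch, not part of the patch] The shape check inside create_tensor() above
// compares only the dimensions the caller passes in `ne`; in ggml, unused trailing
// dimensions are 1. A minimal, self-contained helper with the same comparison, under the
// assumption that ggml.h is available; the name llama_tensor_shape_matches is hypothetical:

#include <cstdint>
#include <vector>

#include "ggml.h"

static bool llama_tensor_shape_matches(const struct ggml_tensor * cur, const std::vector<uint32_t> & ne) {
    // compare only the requested dimensions, exactly as the loop in create_tensor() does
    for (size_t i = 0; i < ne.size(); ++i) {
        if ((int64_t) ne[i] != cur->ne[i]) {
            return false;
        }
    }
    return true;
}

// usage sketch: create_tensor() could call
//     if (!llama_tensor_shape_matches(cur, ne)) { throw std::runtime_error(...); }
// before handing the metadata tensor to create_tensor_for()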
void done_getting_tensors() const { - if (num_ggml_tensors_created != tensors_map.tensors.size()) { - throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected")); + if (n_created != n_tensors) { + throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created)); + } + } + + size_t file_offset(const char * name) const { + const int idx = gguf_find_tensor(ctx_gguf, name); + + if (idx < 0) { + throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name)); } + + return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx); } - void load_data_for(llama_load_tensor & lt) const { + void load_data_for(struct ggml_tensor * cur) const { + const size_t offs = file_offset(ggml_get_name(cur)); + if (use_mmap) { - lt.data = (uint8_t *) mapping->addr + lt.file_off; + cur->data = (uint8_t *) mapping->addr + offs; } else { - llama_file & file = file_loader->file; - file.seek(lt.file_off, SEEK_SET); - file.read_raw(lt.data, lt.size); + file.seek(offs, SEEK_SET); + file.read_raw(cur->data, ggml_nbytes(cur)); } } - void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { - size_t data_size = 0; - size_t lock_size = 0; - size_t pref_size = 0; // prefetch + void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { + size_t size_data = 0; + size_t size_lock = 0; + size_t size_pref = 0; // prefetch - for (const llama_load_tensor & lt : tensors_map.tensors) { - data_size += lt.size; - if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) { - pref_size += lt.size; + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + size_data += ggml_nbytes(cur); + if (cur->backend == GGML_BACKEND_CPU) { + size_pref += ggml_nbytes(cur); } } if (use_mmap) { - mapping.reset(new llama_mmap(&file_loader->file, pref_size, ggml_is_numa())); + mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa())); if (lmlock) { lmlock->init(mapping->addr); } } size_t done_size = 0; - for (llama_load_tensor & lt : tensors_map.tensors) { + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + GGML_ASSERT(cur); // unused tensors should have been caught by load_data already + if (progress_callback) { - progress_callback((float) done_size / data_size, progress_callback_user_data); + progress_callback((float) done_size / size_data, progress_callback_user_data); } - GGML_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already - lt.data = (uint8_t *) lt.ggml_tensor->data; // allocate temp buffer if not using mmap - if (!use_mmap && lt.data == NULL) { - GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU); - lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor)); + if (!use_mmap && cur->data == NULL) { + GGML_ASSERT(cur->backend != GGML_BACKEND_CPU); + cur->data = malloc(ggml_nbytes(cur)); } - load_data_for(lt); + load_data_for(cur); - switch (lt.ggml_tensor->backend) { + switch (cur->backend) { case GGML_BACKEND_CPU: - lt.ggml_tensor->data = lt.data; if (use_mmap && lmlock) { - lock_size += lt.size; - lmlock->grow_to(lock_size); + size_lock += ggml_nbytes(cur); + lmlock->grow_to(size_lock); } break; #if defined(GGML_USE_CUBLAS) case GGML_BACKEND_GPU: case 
GGML_BACKEND_GPU_SPLIT: - ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); + // old code: + //ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); + + // TODO: test if this works !! + ggml_cuda_transform_tensor(cur->data, cur); if (!use_mmap) { - free(lt.data); + free(cur->data); } break; #elif defined(GGML_USE_CLBLAST) case GGML_BACKEND_GPU: - ggml_cl_transform_tensor(lt.data, lt.ggml_tensor); + ggml_cl_transform_tensor(cur->data, cur); if (!use_mmap) { - free(lt.data); + free(cur->data); } break; #endif @@ -1221,7 +1183,7 @@ struct llama_model_loader { continue; } - done_size += lt.size; + done_size += ggml_nbytes(cur); } } }; @@ -1298,7 +1260,7 @@ static void llama_model_load_internal( // read hparams { - struct gguf_context * ctx = ml->file_loader->ctx_gguf; + struct gguf_context * ctx = ml->ctx_gguf; hparams.n_vocab = gguf_get_arr_n (ctx, gguf_find_key(ctx, "tokenizer.ggml.tokens")); hparams.n_ctx = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.context_length")); @@ -1351,7 +1313,7 @@ static void llama_model_load_internal( // read vocab { - struct gguf_context * ctx = ml->file_loader->ctx_gguf; + struct gguf_context * ctx = ml->ctx_gguf; vocab.id_to_token.resize(hparams.n_vocab); @@ -1379,7 +1341,7 @@ static void llama_model_load_internal( } { - LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->file_loader->file_version)); + LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->file_version)); LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); @@ -1453,9 +1415,7 @@ static void llama_model_load_internal( const uint32_t n_layer = hparams.n_layer; const uint32_t n_vocab = hparams.n_vocab; - ml->ggml_ctx = ctx; - - model.tok_embeddings = ml->get_tensor(TN_TOKEN_EMBD, {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embeddings = ml->create_tensor(ctx, TN_TOKEN_EMBD, {n_embd, n_vocab}, GGML_BACKEND_CPU); // "output" tensor { @@ -1476,8 +1436,8 @@ static void llama_model_load_internal( backend_output = GGML_BACKEND_CPU; } - model.norm = ml->get_tensor(TN_OUTPUT_NORM, {n_embd}, backend_norm); - model.output = ml->get_tensor(TN_OUTPUT, {n_embd, n_vocab}, backend_output); + model.norm = ml->create_tensor(ctx, TN_OUTPUT_NORM, {n_embd}, backend_norm); + model.output = ml->create_tensor(ctx, TN_OUTPUT, {n_embd, n_vocab}, backend_output); if (backend_norm == GGML_BACKEND_GPU) { vram_weights += ggml_nbytes(model.norm); } @@ -1496,18 +1456,18 @@ static void llama_model_load_internal( const ggml_backend backend_split = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT auto & layer = model.layers[i]; - layer.attention_norm = ml->get_tensor(format(TN_ATTN_NORM, i), {n_embd}, backend); + layer.attention_norm = ml->create_tensor(ctx, format(TN_ATTN_NORM, i), {n_embd}, backend); - layer.wq = ml->get_tensor(format(TN_ATTN_Q, i), {n_embd, n_embd}, backend_split); - layer.wk = ml->get_tensor(format(TN_ATTN_K, i), {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml->get_tensor(format(TN_ATTN_V, i), {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml->get_tensor(format(TN_ATTN_OUTPUT, i), {n_embd, n_embd}, backend_split); + layer.wq = ml->create_tensor(ctx, format(TN_ATTN_Q, i), {n_embd, n_embd}, backend_split); + layer.wk = ml->create_tensor(ctx, format(TN_ATTN_K, i), {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml->create_tensor(ctx, format(TN_ATTN_V, i), {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml->create_tensor(ctx, format(TN_ATTN_OUTPUT, i), {n_embd, n_embd}, backend_split); - layer.ffn_norm = ml->get_tensor(format(TN_FFN_NORM, i), {n_embd}, backend); + layer.ffn_norm = ml->create_tensor(ctx, format(TN_FFN_NORM, i), {n_embd}, backend); - layer.w1 = ml->get_tensor(format(TN_FFN_GATE, i), {n_embd, n_ff}, backend_split); - layer.w2 = ml->get_tensor(format(TN_FFN_DOWN, i), { n_ff, n_embd}, backend_split); - layer.w3 = ml->get_tensor(format(TN_FFN_UP, i), {n_embd, n_ff}, backend_split); + layer.w1 = ml->create_tensor(ctx, format(TN_FFN_GATE, i), {n_embd, n_ff}, backend_split); + layer.w2 = ml->create_tensor(ctx, format(TN_FFN_DOWN, i), { n_ff, n_embd}, backend_split); + layer.w3 = ml->create_tensor(ctx, format(TN_FFN_UP, i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -1605,8 +1565,9 @@ static void llama_model_load_internal( } // populate `tensors_by_name` - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); + for (int i = 0; i < ml->n_tensors; ++i) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, ml->get_tensor_name(i)); + model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); } (void) tensor_split; @@ -1616,7 +1577,7 @@ static void llama_model_load_internal( } #endif - ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL); + ml->load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? 
&model.mlock_mmap : NULL); if (progress_callback) { progress_callback(1.0f, progress_callback_user_data); @@ -1666,7 +1627,7 @@ static struct ggml_cgraph * llama_build_graph( int n_tokens, int n_past) { - GGML_ASSERT((!tokens && embd) || (tokens && !embd)); + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT const int N = n_tokens; @@ -1696,7 +1657,6 @@ static struct ggml_cgraph * llama_build_graph( auto & mem_per_token = lctx.mem_per_token; auto & buf_compute = lctx.buf_compute; - struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size, /*.mem_buffer =*/ buf_compute.data, @@ -2049,7 +2009,7 @@ static bool llama_eval_internal( int n_threads, const char * cgraph_fname) { - GGML_ASSERT((!tokens && embd) || (tokens && !embd)); + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT const int64_t t_start_us = ggml_time_us(); @@ -2526,8 +2486,8 @@ std::vector decode_utf8(const char * src) { // returns true iff pos points to the end of one of the definitions of a rule static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) { switch (pos->type) { - case LLAMA_GRETYPE_END: return true; - case LLAMA_GRETYPE_ALT: return true; + case LLAMA_GRETYPE_END: return true; // NOLINT + case LLAMA_GRETYPE_ALT: return true; // NOLINT default: return false; } } @@ -2540,7 +2500,7 @@ static std::pair llama_grammar_match_char( bool found = false; bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR; - GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); + GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT do { if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) { @@ -2675,7 +2635,7 @@ static std::vector llama_grammar_reject_candidates_for_ } } - auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second; + const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second; // update top of stack to next element, if any std::vector stack_after(stack.begin(), stack.end() - 1); @@ -3285,35 +3245,35 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar // quantization // -static void llama_convert_tensor_internal(const llama_load_tensor & tensor, std::vector & output, const size_t nelements, const int nthread) { +static void llama_convert_tensor_internal(struct ggml_tensor * tensor, std::vector & output, const size_t nelements, const int nthread) { if (output.size() < nelements) { output.resize(nelements); } float * f32_output = (float *) output.data(); ggml_type_traits_t qtype; - if (ggml_is_quantized(tensor.type)) { - qtype = ggml_internal_get_type_traits(tensor.type); + if (ggml_is_quantized(tensor->type)) { + qtype = ggml_internal_get_type_traits(tensor->type); if (qtype.to_float == NULL) { - throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type))); + throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type))); } - } else if (tensor.type != GGML_TYPE_F16) { - throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type))); + } else if (tensor->type != GGML_TYPE_F16) { + throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type))); } if (nthread < 2) { - if (tensor.type == GGML_TYPE_F16) { - ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements); - } else if (ggml_is_quantized(tensor.type)) { - 
qtype.to_float(tensor.data, f32_output, nelements); + if (tensor->type == GGML_TYPE_F16) { + ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements); + } else if (ggml_is_quantized(tensor->type)) { + qtype.to_float(tensor->data, f32_output, nelements); } else { GGML_ASSERT(false); // unreachable } return; } - auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type); - auto block_size_bytes = ggml_type_size(tensor.type); + auto block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type); + auto block_size_bytes = ggml_type_size(tensor->type); GGML_ASSERT(nelements % block_size == 0); auto nblocks = nelements / block_size; @@ -3333,7 +3293,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, std: qtype.to_float(inbuf, outbuf, nels); } }; - workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems)); + workers.push_back(std::thread(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems)); in_buff_offs += thr_block_bytes; out_buff_offs += thr_elems; } @@ -3381,17 +3341,22 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s struct gguf_context * ctx_out = gguf_init_empty(); // copy the KV pairs from the input file - gguf_set_kv(ctx_out, model_loader->file_loader->ctx_gguf); + gguf_set_kv (ctx_out, model_loader->ctx_gguf); gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); #ifdef GGML_USE_K_QUANTS int n_attention_wv = 0; int n_feed_forward_w2 = 0; - for (auto& tensor : model_loader->tensors_map.tensors) { - if (tensor.name.find("attn_v.weight") != std::string::npos) { + + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * meta = model_loader->get_tensor_meta(i); + + const std::string name = ggml_get_name(meta); + + if (name.find("attn_v.weight") != std::string::npos) { ++n_attention_wv; } - else if (tensor.name.find("ffn_down.weight") != std::string::npos) { + else if (name.find("ffn_down.weight") != std::string::npos) { ++n_feed_forward_w2; } } @@ -3416,8 +3381,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s std::vector read_data; std::vector work; - for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { - gguf_add_tensor(ctx_out, tensor.ggml_tensor); + // populate the original tensors so we get an initial meta data + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * meta = model_loader->get_tensor_meta(i); + gguf_add_tensor(ctx_out, meta); } std::ofstream fout(fname_out, std::ios::binary); @@ -3429,43 +3396,47 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s // placeholder for the meta data ::zeros(fout, meta_size); - for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { - read_data.resize(tensor.size); - tensor.data = read_data.data(); + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * tensor = model_loader->get_tensor_meta(i); + + const std::string name = ggml_get_name(tensor); + + read_data.resize(ggml_nbytes(tensor)); + tensor->data = read_data.data(); model_loader->load_data_for(tensor); - LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ", - ++idx, model_loader->tensors_map.tensors.size(), - tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(), - ggml_type_name(tensor.type)); + LLAMA_LOG_INFO("[%4zu/%4zu] %36s - [%5d, %5d], type 
= %6s, ", + ++idx, model_loader->n_tensors, + ggml_get_name(tensor), (int) tensor->ne[0], (int) tensor->ne[1], + ggml_type_name(tensor->type)); // This used to be a regex, but has an extreme cost to compile times. - bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'? + bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'? // quantize only 2D tensors - quantize &= (tensor.ne.size() == 2); - quantize &= params->quantize_output_tensor || tensor.name != "output.weight"; - quantize &= quantized_type != tensor.type; + quantize &= (tensor->n_dims == 2); + quantize &= params->quantize_output_tensor || name != "output.weight"; + quantize &= quantized_type != tensor->type; enum ggml_type new_type; void * new_data; size_t new_size; if (!quantize) { - new_type = tensor.type; - new_data = tensor.data; - new_size = tensor.size; - LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0); + new_type = tensor->type; + new_data = tensor->data; + new_size = ggml_nbytes(tensor); + LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0); } else { new_type = quantized_type; #ifdef GGML_USE_K_QUANTS - if (tensor.name == TN_OUTPUT) { - int nx = tensor.ne.at(0); - int ny = tensor.ne.at(1); + if (name == TN_OUTPUT) { + int nx = tensor->ne[0]; + int ny = tensor->ne[1]; if (nx % QK_K == 0 && ny % QK_K == 0) { new_type = GGML_TYPE_Q6_K; } - } else if (tensor.name.find("attn_v.weight") != std::string::npos) { + } else if (name.find("attn_v.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && @@ -3473,32 +3444,32 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) && (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K; ++i_attention_wv; - } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + } else if (name.find("feed_forward.w2.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K; ++i_feed_forward_w2; - } else if (tensor.name.find("attn_output.weight") != std::string::npos) { + } else if (name.find("attn_output.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; } bool convert_incompatible_tensor = false; if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) { - int nx = tensor.ne.at(0); - int ny = tensor.ne.at(1); + int nx = tensor->ne[0]; + int ny = tensor->ne[1]; if (nx % QK_K != 0 || ny % QK_K != 0) { LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K); 
convert_incompatible_tensor = true; } } if (convert_incompatible_tensor) { - if (tensor.name == TN_OUTPUT) { + if (name == TN_OUTPUT) { new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n"); - } else if (tensor.name == TN_TOKEN_EMBD) { + } else if (name == TN_TOKEN_EMBD) { new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n"); } else { @@ -3507,15 +3478,15 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } #endif - const size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); + const size_t nelements = ggml_nelements(tensor); float * f32_data; std::vector f32_conv_buf; - if (tensor.type == GGML_TYPE_F32) { - f32_data = (float *) tensor.data; - } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) { - throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type))); + if (tensor->type == GGML_TYPE_F32) { + f32_data = (float *) tensor->data; + } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) { + throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type))); } else { llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread); f32_data = (float *) f32_conv_buf.data(); @@ -3571,7 +3542,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } - LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0); + LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0); int64_t tot_count = 0; for (size_t i = 0; i < hist_cur.size(); i++) { hist_all[i] += hist_cur[i]; @@ -3585,12 +3556,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } LLAMA_LOG_INFO("\n"); } - total_size_org += tensor.size; + total_size_org += ggml_nbytes(tensor); total_size_new += new_size; // update the gguf meta data as we go - gguf_set_tensor_type(ctx_out, tensor.name.c_str(), new_type); - gguf_set_tensor_data(ctx_out, tensor.name.c_str(), new_data, new_size); + gguf_set_tensor_type(ctx_out, name.c_str(), new_type); + gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size); // write tensor data + padding fout.write((const char *) new_data, new_size); @@ -3674,7 +3645,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const // create a name -> tensor map of the model to accelerate lookups std::unordered_map model_tensors; - for (const auto & kv: model.tensors_by_name) { + for (const auto & kv : model.tensors_by_name) { model_tensors.insert(kv); } @@ -3698,11 +3669,9 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const base_ctx = ggml_init(base_params); - model_loader->ggml_ctx = base_ctx; - // maybe this should in llama_model_loader if (model_loader->use_mmap) { - model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa())); + model_loader->mapping.reset(new llama_mmap(&model_loader->file, /* prefetch */ 0, ggml_is_numa())); } } @@ -3807,19 +3776,18 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const ggml_tensor * base_t; if (model_loader) { + struct gguf_context * ctx_gguf = model_loader->ctx_gguf; + // load from base model - if (model_loader->tensors_map.name_to_idx.find(base_name) == 
model_loader->tensors_map.name_to_idx.end()) { + if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) { LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); return 1; } - size_t idx = model_loader->tensors_map.name_to_idx[base_name]; - llama_load_tensor & lt = model_loader->tensors_map.tensors[idx]; - base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); - lt.data = (uint8_t *) lt.ggml_tensor->data; - model_loader->load_data_for(lt); - lt.ggml_tensor->data = lt.data; - } - else { + + // TODO: not tested!! maybe not working! + base_t = model_loader->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); + model_loader->load_data_for(base_t); + } else { base_t = dest_t; } @@ -4767,7 +4735,7 @@ int llama_token_to_str_with_model(const struct llama_model * model, llama_token } strncpy(str, result.c_str(), result.length()); return result.length(); - } else if (llama_is_unknown_token(model->vocab, token)) { + } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT if (length < 3) { return -3; } From 31fb56e1d3ddb789fdf32780b9720b5afcc0c1d2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 11:38:17 +0300 Subject: [PATCH 25/29] llama : fix shape prints --- gguf-llama.cpp | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 4f6de19f77e9a..60e968153259f 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -993,9 +993,19 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { return buf; } +static std::string llama_format_tensor_shape(const struct ggml_tensor * t) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); + for (int i = 1; i < t->n_dims; i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5" PRId64, t->ne[i]); + } + return buf; +} + struct llama_model_loader { - int n_tensors = 0; + int n_tensors = 0; int n_created = 0; + bool use_mmap = false; llama_file file; @@ -1068,7 +1078,6 @@ struct llama_model_loader { struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector & ne, ggml_backend backend) { struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); - // TODO: simplify { bool is_ok = true; for (size_t i = 0; i < ne.size(); ++i) { @@ -1079,9 +1088,10 @@ struct llama_model_loader { } if (!is_ok) { throw std::runtime_error( - format("%s: tensor '%s' has wrong shape; expected [%d, %d, %d, %d], got [%d, %d, %d, %d]", - __func__, name.c_str(), ne[0], ne[1], ne[2], ne[3], - (int) cur->ne[0], (int) cur->ne[1], (int) cur->ne[2], (int) cur->ne[3])); + format("%s: tensor '%s' has wrong shape; expected %s, got %s", + __func__, name.c_str(), + llama_format_tensor_shape(ne).c_str(), + llama_format_tensor_shape(cur).c_str())); } } @@ -3405,9 +3415,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s tensor->data = read_data.data(); model_loader->load_data_for(tensor); - LLAMA_LOG_INFO("[%4zu/%4zu] %36s - [%5d, %5d], type = %6s, ", + LLAMA_LOG_INFO("[%4zu/%4zu] %36s - [%s], type = %6s, ", ++idx, model_loader->n_tensors, - ggml_get_name(tensor), (int) tensor->ne[0], (int) tensor->ne[1], + ggml_get_name(tensor), + llama_format_tensor_shape(tensor).c_str(), ggml_type_name(tensor->type)); // This used to be a regex, but has an extreme cost to compile times. 
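The llama_format_tensor_shape(const struct ggml_tensor *) overload added in the patch above is a small snprintf-append loop that feeds the improved "expected %s, got %s" error message. A minimal standalone sketch of the same pattern follows; it is not part of the patch series, the helper name and the example shapes are made up for illustration, and a later patch in this series switches the separator from " x " to ", ".

// standalone sketch of the snprintf-append shape formatting (illustrative only)
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

static std::string format_shape_sketch(const std::vector<int64_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5" PRId64, ne[0]);
    for (size_t i = 1; i < ne.size(); i++) {
        // append the remaining dimensions with the same fixed field width
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5" PRId64, ne[i]);
    }
    return buf;
}

int main() {
    // prints: tensor 'output.weight' has wrong shape; expected  4096 x  4096, got  4096 x 32000
    printf("tensor '%s' has wrong shape; expected %s, got %s\n",
           "output.weight",
           format_shape_sketch({4096, 4096}).c_str(),
           format_shape_sketch({4096, 32000}).c_str());
    return 0;
}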
From c1fe0aba72a8412808bb45a5899aaf5a55a6a400 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 13:09:43 +0300 Subject: [PATCH 26/29] llama : fix Windows build + fix norm_rms_eps key --- gguf-llama.cpp | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 60e968153259f..cebe53d10ebf6 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -63,24 +63,25 @@ #include // for _fseeki64 #endif +#include #include -#include +#include #include +#include +#include +#include +#include #include -#include +#include #include -#include -#include -#include -#include -#include #include -#include -#include -#include #include -#include #include +#include +#include +#include +#include +#include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data @@ -136,11 +137,12 @@ __attribute__((format(printf, 1, 2))) #endif #endif static std::string format(const char * fmt, ...) { - va_list ap, ap2; + va_list ap; + va_list ap2; va_start(ap, fmt); va_copy(ap2, ap); int size = vsnprintf(NULL, 0, fmt, ap); - GGML_ASSERT(size >= 0 && size < INT_MAX); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT std::vector buf(size + 1); int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); GGML_ASSERT(size2 == size); @@ -668,7 +670,7 @@ struct llama_hparams { uint32_t n_rot = 64; uint32_t n_ff = 11008; - float f_rms_norm_eps = 1e-5; + float f_norm_rms_eps = 1e-5; float rope_freq_base = 10000.0f; float rope_freq_scale = 1.0f; @@ -1279,7 +1281,7 @@ static void llama_model_load_internal( hparams.n_head = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.attention.head_count")); hparams.n_layer = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.block_count")); hparams.n_rot = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.rope.dimension_count")); - hparams.f_rms_norm_eps = gguf_get_val_f32(ctx, gguf_find_key(ctx, "llama.rms_norm_epsilon")); + hparams.f_norm_rms_eps = gguf_get_val_f32(ctx, gguf_find_key(ctx, "llama.attention.layer_norm_rms_epsilon")); // n_head_kv default to n_head hparams.n_head_kv = hparams.n_head; @@ -1360,7 +1362,7 @@ static void llama_model_load_internal( LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); - LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); + LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_rms_eps); LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); @@ -1658,9 +1660,9 @@ static struct ggml_cgraph * llama_build_graph( GGML_ASSERT(n_embd_head == hparams.n_rot); - const float freq_base = hparams.rope_freq_base; - const float freq_scale = hparams.rope_freq_scale; - const float rms_norm_eps = hparams.f_rms_norm_eps; + const float freq_base = hparams.rope_freq_base; + const float freq_scale = hparams.rope_freq_scale; + const float norm_rms_eps = hparams.f_norm_rms_eps; const int n_gpu_layers = model.n_gpu_layers; @@ -1767,7 +1769,7 @@ static struct ggml_cgraph * llama_build_graph( // norm { - cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); offload_func(cur); ggml_set_name(cur, "rms_norm_0"); @@ -1912,7 +1914,7 @@ static struct ggml_cgraph * llama_build_graph( { // norm { - cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps); offload_func(cur); ggml_set_name(cur, "rms_norm_1"); @@ -1962,7 +1964,7 @@ static struct ggml_cgraph * llama_build_graph( // norm { - cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); offload_func_nr(cur); ggml_set_name(cur, "rms_norm_2"); From f634b292c946a7944d88d0e9b6b4186c5f348737 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 13:44:35 +0300 Subject: [PATCH 27/29] llama : throw error on missing KV paris in model meta data --- ggml.h | 1 + gguf-llama.cpp | 43 +++++++++++++++++++++++++++---------------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/ggml.h b/ggml.h index 8a1661cfbd319..48ce71ecdaa96 100644 --- a/ggml.h +++ b/ggml.h @@ -1752,6 +1752,7 @@ extern "C" { GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); + // results are undefined if the wrong type is used for the key GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i); GGML_API uint16_t gguf_get_val_u16 (struct gguf_context * ctx, int i); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index cebe53d10ebf6..ec64ef8dcd886 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -107,6 +107,7 @@ static void llama_log_internal(llama_log_level level, const char* format, ...); static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); + #define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) #define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__) #define LLAMA_LOG_ERROR(...) 
llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) @@ -1274,23 +1275,33 @@ static void llama_model_load_internal( { struct gguf_context * ctx = ml->ctx_gguf; - hparams.n_vocab = gguf_get_arr_n (ctx, gguf_find_key(ctx, "tokenizer.ggml.tokens")); - hparams.n_ctx = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.context_length")); - hparams.n_embd = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.embedding_length")); - hparams.n_ff = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.feed_forward_length")); - hparams.n_head = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.attention.head_count")); - hparams.n_layer = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.block_count")); - hparams.n_rot = gguf_get_val_u32(ctx, gguf_find_key(ctx, "llama.rope.dimension_count")); - hparams.f_norm_rms_eps = gguf_get_val_f32(ctx, gguf_find_key(ctx, "llama.attention.layer_norm_rms_epsilon")); - - // n_head_kv default to n_head +#define GGUF_GET(dst, func, type, req, key) \ + { \ + const int kid = gguf_find_key(ctx, key); \ + if (kid >= 0) { \ + enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \ + if (ktype != (type)) { \ + throw std::runtime_error(format("key %s has wrong type: %d", key, ktype)); \ + } \ + (dst) = func(ctx, kid); \ + } else if (req) { \ + throw std::runtime_error(format("key not found in model: %s", key)); \ + } \ + } + + GGUF_GET(hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, "tokenizer.ggml.tokens"); + GGUF_GET(hparams.n_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length"); + GGUF_GET(hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.embedding_length"); + GGUF_GET(hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.feed_forward_length"); + GGUF_GET(hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.attention.head_count"); + GGUF_GET(hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.block_count"); + GGUF_GET(hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.rope.dimension_count"); + GGUF_GET(hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, "llama.attention.layer_norm_rms_epsilon"); + + // n_head_kv is optional, default to n_head hparams.n_head_kv = hparams.n_head; - { - const int idx = gguf_find_key(ctx, "llama.attention.head_count_kv"); - if (idx >= 0) { - hparams.n_head_kv = gguf_get_val_u32(ctx, idx); - } - } + GGUF_GET(hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.attention.head_count_kv"); +#undef GGUF_GET switch (hparams.n_layer) { case 26: model.type = e_model::MODEL_3B; break; From e524750a6c23dbe238649ea528f2b9a949b3c499 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 14:24:04 +0300 Subject: [PATCH 28/29] llama : improve printing + log meta data --- ggml.c | 17 ++++++++++++++ ggml.h | 2 ++ gguf-llama.cpp | 61 +++++++++++++++++++++++++++++++++++--------------- 3 files changed, 62 insertions(+), 18 deletions(-) diff --git a/ggml.c b/ggml.c index 261695216b50f..77f57a3fdaa41 100644 --- a/ggml.c +++ b/ggml.c @@ -18583,6 +18583,19 @@ static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = { }; static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10"); +static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = { + [GGUF_TYPE_UINT8] = "uint8", + [GGUF_TYPE_INT8] = "int8", + [GGUF_TYPE_UINT16] = "uint16", + [GGUF_TYPE_INT16] = "int16", + [GGUF_TYPE_UINT32] = "uint32", + [GGUF_TYPE_INT32] = "int32", + [GGUF_TYPE_FLOAT32] = "float32", + [GGUF_TYPE_BOOL] = "bool", + [GGUF_TYPE_STRING] = "string", + [GGUF_TYPE_ARRAY] = "array", +}; + 
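// NOTE (not part of this patch, added for illustration): the designated initializers above
// keep each printable name keyed to its gguf_type value regardless of ordering; an enum
// value added without a matching entry would be zero-initialized to NULL, which the
// gguf_type_name() helper introduced further down in this patch returns unchecked.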
union gguf_value { uint8_t uint8; int8_t int8; @@ -19017,6 +19030,10 @@ void gguf_free(struct gguf_context * ctx) { GGML_ALIGNED_FREE(ctx); } +const char * gguf_type_name(enum gguf_type type) { + return GGUF_TYPE_NAME[type]; +} + int gguf_get_version(struct gguf_context * ctx) { return ctx->header.version; } diff --git a/ggml.h b/ggml.h index 48ce71ecdaa96..ad12c133ee12d 100644 --- a/ggml.h +++ b/ggml.h @@ -1740,6 +1740,8 @@ extern "C" { GGML_API void gguf_free(struct gguf_context * ctx); + GGML_API const char * gguf_type_name(enum gguf_type type); + GGML_API int gguf_get_version (struct gguf_context * ctx); GGML_API size_t gguf_get_alignment (struct gguf_context * ctx); GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx); diff --git a/gguf-llama.cpp b/gguf-llama.cpp index ec64ef8dcd886..2b197a2367904 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -101,11 +101,21 @@ #define TN_FFN_DOWN "blk.%d.ffn_down.weight" #define TN_FFN_UP "blk.%d.ffn_up.weight" +#ifdef __GNUC__ +#ifdef __MINGW32__ +#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +#else +#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) +#endif +#else +#define LLAMA_ATTRIBUTE_FORMAT(...) +#endif + // // logging // - -static void llama_log_internal(llama_log_level level, const char* format, ...); +LLAMA_ATTRIBUTE_FORMAT(2, 3) +static void llama_log_internal (llama_log_level level, const char* format, ...); static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); #define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) @@ -130,13 +140,7 @@ static void zeros(std::ofstream & file, size_t n) { } } -#ifdef __GNUC__ -#ifdef __MINGW32__ -__attribute__((format(gnu_printf, 1, 2))) -#else -__attribute__((format(printf, 1, 2))) -#endif -#endif +LLAMA_ATTRIBUTE_FORMAT(1, 2) static std::string format(const char * fmt, ...) 
{ va_list ap; va_list ap2; @@ -991,7 +995,7 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { char buf[256]; snprintf(buf, sizeof(buf), "%5u", ne.at(0)); for (size_t i = 1; i < ne.size(); i++) { - snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i)); + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5u", ne.at(i)); } return buf; } @@ -999,13 +1003,14 @@ static std::string llama_format_tensor_shape(const std::vector & ne) { static std::string llama_format_tensor_shape(const struct ggml_tensor * t) { char buf[256]; snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); - for (int i = 1; i < t->n_dims; i++) { - snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5" PRId64, t->ne[i]); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]); } return buf; } struct llama_model_loader { + int n_kv = 0; int n_tensors = 0; int n_created = 0; @@ -1027,11 +1032,31 @@ struct llama_model_loader { ctx_gguf = gguf_init_from_file(fname.c_str(), params); + n_kv = gguf_get_n_kv(ctx_gguf); n_tensors = gguf_get_n_tensors(ctx_gguf); + file_version = (enum llama_file_version) gguf_get_version(ctx_gguf); - LLAMA_LOG_INFO("%s: loaded %d tensors from %s (version %s)\n", - __func__, n_tensors, fname.c_str(), llama_file_version_name(file_version)); + // print meta data + // TODO: make optional + { + LLAMA_LOG_INFO("%s: loaded meta data with %d key-value paris and %d tensors from %s (version %s)\n", + __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(file_version)); + + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(ctx_gguf, i); + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + LLAMA_LOG_INFO("%s: - %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type)); + } + + for (int i = 0; i < n_tensors; i++) { + const char * name = gguf_get_tensor_name(ctx_gguf, i); + struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name); + + LLAMA_LOG_INFO("%s: - %3d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + } + } if (!llama_mmap::SUPPORTED) { LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); @@ -1281,7 +1306,7 @@ static void llama_model_load_internal( if (kid >= 0) { \ enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \ if (ktype != (type)) { \ - throw std::runtime_error(format("key %s has wrong type: %d", key, ktype)); \ + throw std::runtime_error(format("key %s has wrong type: %s", key, gguf_type_name(ktype))); \ } \ (dst) = func(ctx, kid); \ } else if (req) { \ @@ -1325,7 +1350,7 @@ static void llama_model_load_internal( const auto n_gqa = hparams.n_gqa(); if (model.type == e_model::MODEL_65B && n_gqa == 8) { - fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa); + LLAMA_LOG_WARN("%s: assuming 70B model based on GQA == %d\n", __func__, n_gqa); model.type = e_model::MODEL_70B; } } @@ -3399,7 +3424,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2; }; - size_t idx = 0; + int idx = 0; std::vector read_data; std::vector work; @@ -3428,7 +3453,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s tensor->data = read_data.data(); model_loader->load_data_for(tensor); - LLAMA_LOG_INFO("[%4zu/%4zu] %36s - [%s], type = %6s, ", + LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], 
type = %6s, ", ++idx, model_loader->n_tensors, ggml_get_name(tensor), llama_format_tensor_shape(tensor).c_str(), From 6823899f2d2914258cd8a1640a78ebc1574ef1b2 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 16 Aug 2023 14:32:59 +0300 Subject: [PATCH 29/29] llama : switch print order of meta data --- gguf-llama.cpp | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 2b197a2367904..d99d752ecf291 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -1043,18 +1043,18 @@ struct llama_model_loader { LLAMA_LOG_INFO("%s: loaded meta data with %d key-value paris and %d tensors from %s (version %s)\n", __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(file_version)); - for (int i = 0; i < n_kv; i++) { - const char * name = gguf_get_key(ctx_gguf, i); - const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); - - LLAMA_LOG_INFO("%s: - %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type)); - } - for (int i = 0; i < n_tensors; i++) { const char * name = gguf_get_tensor_name(ctx_gguf, i); struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name); - LLAMA_LOG_INFO("%s: - %3d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + LLAMA_LOG_INFO("%s: - tensor %3d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + } + + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(ctx_gguf, i); + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + LLAMA_LOG_INFO("%s: - kv %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type)); } } @@ -1414,7 +1414,9 @@ static void llama_model_load_internal( size_t ctx_size; size_t mmapped_size; + ml->calc_sizes(ctx_size, mmapped_size); + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); // create the ggml context