Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit f46d26f

Browse files
goerch authored and pkrmf committed
Fixing the last deviations from sentencepiece indicated by test-tokenizer-1 (ggml-org#3170)
* Fix für ggml-org#2721 * Reenable tokenizer test for LLaMa * Add `console.cpp` dependency * Fix dependency to `common` * Fixing wrong fix. * Make console usage platform specific Work on compiler warnings. * Adapting makefile * Remove trailing whitespace * Adapting the other parts of the makefile * Fix typo. * Fixing the last deviations from sentencepiece indicated by test-tokenizer-1 * Simplify logic * Add missing change... * Fix ugly compiler warning * llama_tokenize should accept strings containing NUL now * Adding huichen's test case
1 parent 50cf679 commit f46d26f

File tree

6 files changed

+17
-14
lines changed

6 files changed

+17
-14
lines changed

common/common.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -801,10 +801,10 @@ std::vector<llama_token> llama_tokenize(
801801
// upper limit for the number of tokens
802802
int n_tokens = text.length() + add_bos;
803803
std::vector<llama_token> result(n_tokens);
804-
n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
804+
n_tokens = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
805805
if (n_tokens < 0) {
806806
result.resize(-n_tokens);
807-
int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
807+
int check = llama_tokenize(ctx, text.data(), text.length(), result.data(), result.size(), add_bos);
808808
GGML_ASSERT(check == -n_tokens);
809809
} else {
810810
result.resize(n_tokens);

examples/train-text-from-scratch/train-text-from-scratch.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -965,10 +965,10 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vecto
965965

966966
buf[size] = '\0';
967967

968-
int n_tokens = llama_tokenize(lctx, buf.data(), out.data(), out.size(), false);
968+
int n_tokens = llama_tokenize(lctx, buf.data(), buf.size(), out.data(), out.size(), false);
969969
if (n_tokens < 0) {
970970
out.resize(-n_tokens);
971-
n_tokens = llama_tokenize(lctx, buf.data(), out.data(), out.size(), false);
971+
n_tokens = llama_tokenize(lctx, buf.data(), buf.size(), out.data(), out.size(), false);
972972
}
973973
GGML_ASSERT(n_tokens >= 0);
974974
out.resize(n_tokens);

llama.cpp

+4-2
Original file line numberDiff line numberDiff line change
@@ -7032,19 +7032,21 @@ llama_token llama_token_nl(const struct llama_context * ctx) {
70327032
int llama_tokenize(
70337033
struct llama_context * ctx,
70347034
const char * text,
7035+
int text_len,
70357036
llama_token * tokens,
70367037
int n_max_tokens,
70377038
bool add_bos) {
7038-
return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
7039+
return llama_tokenize_with_model(&ctx->model, text, text_len, tokens, n_max_tokens, add_bos);
70397040
}
70407041

70417042
int llama_tokenize_with_model(
70427043
const struct llama_model * model,
70437044
const char * text,
7045+
int text_len,
70447046
llama_token * tokens,
70457047
int n_max_tokens,
70467048
bool add_bos) {
7047-
auto res = llama_tokenize_internal(model->vocab, text, add_bos);
7049+
auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos);
70487050

70497051
if (n_max_tokens < (int) res.size()) {
70507052
// LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);

llama.h

+2
Original file line numberDiff line numberDiff line change
@@ -374,13 +374,15 @@ extern "C" {
374374
LLAMA_API int llama_tokenize(
375375
struct llama_context * ctx,
376376
const char * text,
377+
int text_len,
377378
llama_token * tokens,
378379
int n_max_tokens,
379380
bool add_bos);
380381

381382
LLAMA_API int llama_tokenize_with_model(
382383
const struct llama_model * model,
383384
const char * text,
385+
int text_len,
384386
llama_token * tokens,
385387
int n_max_tokens,
386388
bool add_bos);

tests/test-tokenizer-0-llama.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ static const std::map<std::string, std::vector<llama_token>> & k_tests() {
3636
{ " Hello" , { 1678, 15043, }, },
3737
{ " Hello" , { 268, 15043, }, },
3838
{ " Hello\n Hello" , { 268, 15043, 13, 1678, 15043, }, },
39+
{ " (" , { 29871, 313, }, },
3940
};
4041

4142
return _k_tests;

tests/test-tokenizer-1-llama.cpp

+6-8
Original file line numberDiff line numberDiff line change
@@ -87,10 +87,9 @@ int main(int argc, char **argv) {
8787
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
8888
std::string check = llama_detokenize_spm(ctx, tokens);
8989
if (check != str) {
90-
fprintf(stderr, "%s : error: token %d detokenizes to >%s<(%llu) but tokenization of this detokenizes to >%s<(%llu)\n",
90+
fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
9191
__func__, i, str.c_str(), str.length(), check.c_str(), check.length());
92-
if(i != 3)
93-
return 2;
92+
return 2;
9493
}
9594
}
9695

@@ -99,11 +98,10 @@ int main(int argc, char **argv) {
9998
std::string str = codepoint_to_utf8(cp);
10099
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
101100
std::string check = llama_detokenize_spm(ctx, tokens);
102-
if (str != check) {
103-
fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
101+
if (cp != 9601 && str != check) {
102+
fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
104103
__func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
105-
if(cp != 0 && cp != 9601)
106-
return 3;
104+
return 3;
107105
}
108106
}
109107
}
@@ -112,7 +110,7 @@ int main(int argc, char **argv) {
112110
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
113111
std::string check = llama_detokenize_spm(ctx, tokens);
114112
if (str != check) {
115-
fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
113+
fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
116114
__func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
117115
return 4;
118116
}

0 commit comments

Comments (0)