```diff
@@ -2414,8 +2414,8 @@ struct llama_vocab {
     bool tokenizer_add_bos                    = false;
     bool tokenizer_add_eos                    = false;
     bool tokenizer_ignore_merges              = false;
-    bool tokenizer_remove_extra_whitespaces = false;
-    bool tokenizer_escape_whitespaces       = true;
+    bool tokenizer_remove_extra_whitespaces   = false;
+    bool tokenizer_escape_whitespaces         = true;
     bool tokenizer_treat_whitespace_as_suffix = false;

     uint32_t n_precompiled_charsmap = 0;
```
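The two realigned flags appear to mirror SentencePiece's normalizer options of the same names: `tokenizer_remove_extra_whitespaces` collapses runs of spaces, and `tokenizer_escape_whitespaces` replaces each space with U+2581 ("▁") before tokenization. A minimal sketch of that behavior (the helper names are hypothetical, not taken from this change):

```cpp
#include <string>

// Collapse consecutive spaces into one
// (roughly what tokenizer_remove_extra_whitespaces toggles).
static std::string remove_extra_whitespaces(const std::string & text) {
    std::string out;
    for (char c : text) {
        if (c == ' ' && !out.empty() && out.back() == ' ') {
            continue; // skip the repeated space
        }
        out += c;
    }
    return out;
}

// Replace each space with U+2581 "LOWER ONE EIGHTH BLOCK"
// (roughly what tokenizer_escape_whitespaces toggles).
static std::string escape_whitespaces(const std::string & text) {
    std::string out;
    for (char c : text) {
        if (c == ' ') {
            out += "\xe2\x96\x81"; // UTF-8 bytes of U+2581
        } else {
            out += c;
        }
    }
    return out;
}
```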
```diff
@@ -4840,8 +4840,8 @@ static void llm_load_hparams(
             case 24:
                 switch (hparams.n_ff) {
                     case  4096: model.type = e_model::MODEL_770M; break;
-                    case 16384: model.type = e_model::MODEL_3B; break;
-                    case 65536: model.type = e_model::MODEL_11B; break;
+                    case 16384: model.type = e_model::MODEL_3B;   break;
+                    case 65536: model.type = e_model::MODEL_11B;  break;
                     default: model.type = e_model::MODEL_UNKNOWN;
                 } break;
             default: model.type = e_model::MODEL_UNKNOWN;
```
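For `n_layer == 24`, the feed-forward width alone is enough to tell the sizes apart; the values match the published T5 configurations (t5-large/770M has n_ff = 4096, t5-3b 16384, t5-11b 65536). The same heuristic as a standalone sketch, with a stand-in for llama.cpp's `e_model` enum:

```cpp
#include <cstdint>

enum class e_model { MODEL_UNKNOWN, MODEL_770M, MODEL_3B, MODEL_11B };

// Resolve the model size from the feed-forward width, as in the
// n_layer == 24 branch of llm_load_hparams above.
static e_model model_type_from_n_ff(uint32_t n_ff) {
    switch (n_ff) {
        case  4096: return e_model::MODEL_770M; // t5-large
        case 16384: return e_model::MODEL_3B;   // t5-3b
        case 65536: return e_model::MODEL_11B;  // t5-11b
        default:    return e_model::MODEL_UNKNOWN;
    }
}
```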
```diff
@@ -7680,8 +7680,8 @@ struct llm_build_context {
         lctx.inp_s_copy        = nullptr;
         lctx.inp_s_mask        = nullptr;
         lctx.inp_s_seq         = nullptr;
-        lctx.inp_pos_bucket = nullptr;
-        lctx.inp_enc_output = nullptr;
+        lctx.inp_pos_bucket    = nullptr;
+        lctx.inp_enc_output    = nullptr;
         lctx.inp_cross_KQ_mask = nullptr;
     }

```
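`inp_pos_bucket` and `inp_enc_output` are the new encoder-decoder inputs being reset here: the first presumably carries relative position buckets for the attention bias, the second the encoder's output for cross-attention. A sketch of T5-style relative position bucketing, which is what such a tensor is typically filled with (adapted from the original T5 formulation, not copied from this diff):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>

// Map a signed token distance to a bucket index. Small distances get
// exact buckets; larger ones are binned logarithmically up to max_distance.
static int32_t relative_position_bucket(int32_t relative_position,
                                        bool bidirectional,
                                        int32_t num_buckets,
                                        int32_t max_distance) {
    int32_t bucket = 0;
    if (bidirectional) {
        // encoder: split buckets between negative and positive offsets
        num_buckets /= 2;
        if (relative_position > 0) {
            bucket += num_buckets;
        }
        relative_position = std::abs(relative_position);
    } else {
        // decoder: only attend to the past, clamp future offsets to 0
        relative_position = -std::min<int32_t>(relative_position, 0);
    }
    const int32_t max_exact = num_buckets / 2;
    if (relative_position < max_exact) {
        bucket += relative_position;
    } else {
        const int32_t large = max_exact + (int32_t)(
            std::log((float) relative_position / max_exact) /
            std::log((float) max_distance / max_exact) *
            (num_buckets - max_exact));
        bucket += std::min<int32_t>(large, num_buckets - 1);
    }
    return bucket;
}
```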