Commit 5bfeaf8

Merge branch 'pr/8917' into specify
2 parents: f010b77 + 98136ec

File tree: 3 files changed, +146, -12 lines


examples/quantize/quantize.cpp (+69, -4)
@@ -54,6 +54,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "F16",  LLAMA_FTYPE_MOSTLY_F16,  "14.00G, +0.0020 ppl @ Mistral-7B", },
     { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
     { "F32",  LLAMA_FTYPE_ALL_F32,     "26.00G @ 7B", },
+    { "CQS",  LLAMA_FTYPE_CQS,         "Custom Quantization Scheme", },
     // Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
     { "COPY", LLAMA_FTYPE_ALL_F32,     "only copy tensors, no quantizing", },
 };
@@ -107,19 +108,35 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--attn-q-type] [--attn-k-type] [--attn-v-type] [--attn-qkv-type] [--attn-output-type] [--ffn-gate-type] [--ffn-down-type] [--ffn-up-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
     printf("  --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
     printf("  --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
-    printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
-    printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
+    printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
+    printf("  --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+    printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS' (default is Q2_K):\n");
+    printf("  --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
+    printf("  --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
+    printf("  --attn-v-type ggml_type: use this ggml_type for the attn_v.weight tensor.\n");
+    printf("  --attn-qkv-type ggml_type: use this ggml_type for the attn_qkv.weight tensor.\n");
+    printf("  --attn-output-type ggml_type: use this ggml_type for the attn_output.weight tensor.\n");
+    printf("  --ffn-gate-type ggml_type: use this ggml_type for the ffn_gate tensor.\n");
+    printf("  --ffn-down-type ggml_type: use this ggml_type for the ffn_down tensor.\n");
+    printf("  --ffn-up-type ggml_type: use this ggml_type for the ffn_up tensor.\n\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
-    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
+    printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n\n");
     printf("Note: --include-weights and --exclude-weights cannot be used together\n");
+    printf("Note: The token embeddings tensor is loaded in system RAM, even in case of full GPU/VRAM offload.\n");
+    printf("Note: The recommended type for the output tensor is q6_K for the ffn types > iq3_xxs and < q8_0.\n\n");
+    printf("Note for the Custom Quant Scheme FTYPE:\n");
+    printf("  Write the specific tensor legacy quants as qN_N, the K-Quants as qN_K, the IQ-Quants as iqN_xx.\n");
+    printf("  Usually, attn-q-type can be one type below the chosen ffn type, and attn-v-type should be one type above.\n");
+    printf("  attn-qkv-type replaces the types attn-q, attn-k and attn-v on some models.\n");
+    // TODO: - eventually - harmonize the CAPS writing of the FTYPEs, and the non-CAPS writing of the GGML_TYPEs.
     printf("\nAllowed quantization types:\n");
     for (auto & it : QUANT_OPTIONS) {
         if (it.name != "COPY") {
@@ -279,6 +296,54 @@ int main(int argc, char ** argv) {
             } else {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--attn-q-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_q_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-k-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_k_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-v-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_v_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-qkv-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_qkv_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--attn-output-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.attn_output_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-gate-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_gate_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-down-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_down_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
+        } else if (strcmp(argv[arg_idx], "--ffn-up-type") == 0) {
+            if (arg_idx < argc-1) {
+                params.ffn_up_type = parse_ggml_type(argv[++arg_idx]);
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
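
For orientation, a hypothetical invocation of the CQS path added above (the binary name, file names and the chosen ggml_types are illustrative assumptions, not part of this commit; any per-tensor flag left out falls back to the CQS default, Q2_K):

    ./llama-quantize --imatrix imatrix.dat \
        --token-embedding-type q4_K --output-tensor-type q6_K \
        --attn-q-type q3_K --attn-k-type q4_K --attn-v-type q5_K --attn-output-type q4_K \
        --ffn-gate-type q4_K --ffn-down-type q4_K --ffn-up-type q4_K \
        model-f32.gguf model-cqs.gguf CQS 8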

include/llama.h (+9)
@@ -175,6 +175,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_TQ1_0    = 36, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_TQ2_0    = 37, // except 1d tensors
+        LLAMA_FTYPE_CQS             = 99, // except 1d tensors

         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
@@ -360,6 +361,14 @@ extern "C" {
         enum llama_ftype ftype;              // quantize to this llama_ftype
         enum ggml_type output_tensor_type;   // output tensor type
         enum ggml_type token_embedding_type; // token embeddings tensor type
+        enum ggml_type attn_q_type;          // attention query tensor type
+        enum ggml_type attn_k_type;          // attention key tensor type
+        enum ggml_type attn_v_type;          // attention value tensor type
+        enum ggml_type attn_qkv_type;        // attention query-key-value tensor type
+        enum ggml_type attn_output_type;     // attention output tensor type
+        enum ggml_type ffn_gate_type;        // feedforward network gate type
+        enum ggml_type ffn_down_type;        // feedforward network down type
+        enum ggml_type ffn_up_type;          // feedforward network up type
         bool allow_requantize;               // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor;         // quantize output.weight
         bool only_copy;                      // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
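
A minimal sketch of driving the new fields programmatically through the existing C API (assuming the usual llama_model_quantize() entry point; the chosen types and file names are illustrative only, not part of this commit):

    #include "llama.h"

    int main() {
        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.ftype       = LLAMA_FTYPE_CQS;  // enable the custom quantization scheme
        params.attn_q_type = GGML_TYPE_Q3_K;   // per-tensor overrides; fields left at the
        params.attn_v_type = GGML_TYPE_Q5_K;   // GGML_TYPE_COUNT sentinel keep the default
        params.ffn_up_type = GGML_TYPE_Q4_K;   // heuristics (Q2_K for CQS)
        return llama_model_quantize("model-f32.gguf", "model-cqs.gguf", &params);
    }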

src/llama.cpp (+68, -8)
@@ -5289,6 +5289,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
         case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
         case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8";
+        case LLAMA_FTYPE_CQS:             return "Custom Quantization Scheme";

         default: return "unknown, may not work";
     }
@@ -18090,7 +18091,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
            }
        }
    } else if (name.find("attn_v.weight") != std::string::npos) {
-       if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_v_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->attn_v_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
@@ -18128,7 +18132,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
        }
        ++qs.i_attention_wv;
    } else if (name.find("attn_k.weight") != std::string::npos) {
-       if (qs.model.hparams.n_expert == 8) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_k_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->attn_k_type;
+       }
+       else if (qs.model.hparams.n_expert == 8) {
            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
            // TODO: explore better strategies
            new_type = GGML_TYPE_Q8_0;
@@ -18140,7 +18147,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
            new_type = GGML_TYPE_IQ2_S;
        }
    } else if (name.find("attn_q.weight") != std::string::npos) {
-       if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_q_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->attn_q_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
@@ -18149,7 +18159,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
    } else if (name.find("ffn_down") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
        int i_layer = info.first, n_layer = info.second;
-       if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_down_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->ffn_down_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
        }
@@ -18192,7 +18205,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
        }
        ++qs.i_ffn_down;
    } else if (name.find("attn_output.weight") != std::string::npos) {
-       if (arch != LLM_ARCH_FALCON) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_output_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->attn_output_type;
+       }
+       else if (arch != LLM_ARCH_FALCON) {
            if (qs.model.hparams.n_expert == 8) {
                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS  || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
@@ -18212,7 +18228,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
        }
    }
    else if (name.find("attn_qkv.weight") != std::string::npos) {
-       if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->attn_qkv_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->attn_qkv_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
            new_type = GGML_TYPE_Q4_K;
        }
        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
@@ -18221,15 +18240,21 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
    else if (name.find("ffn_gate") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
        int i_layer = info.first, n_layer = info.second;
-       if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_gate_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->ffn_gate_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        ++qs.i_ffn_gate;
    }
    else if (name.find("ffn_up") != std::string::npos) {
        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
        int i_layer = info.first, n_layer = info.second;
-       if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+       if (ftype == LLAMA_FTYPE_CQS && qs.params->ffn_up_type < GGML_TYPE_COUNT) {
+           new_type = qs.params->ffn_up_type;
+       }
+       else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
            new_type = GGML_TYPE_IQ3_XXS;
        }
        ++qs.i_ffn_up;
@@ -18387,6 +18412,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
        case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
        case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break;

+       // Custom Quantization Scheme
+       case LLAMA_FTYPE_CQS: default_type = GGML_TYPE_Q2_K; break;
+
        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
    }

@@ -18645,6 +18673,30 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
        if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
            new_type = params->output_tensor_type;
        }
+       if (params->attn_q_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_q.weight") == 0) {
+           new_type = params->attn_q_type;
+       }
+       if (params->attn_k_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_k.weight") == 0) {
+           new_type = params->attn_k_type;
+       }
+       if (params->attn_v_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_v.weight") == 0) {
+           new_type = params->attn_v_type;
+       }
+       if (params->attn_qkv_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_qkv.weight") == 0) {
+           new_type = params->attn_qkv_type;
+       }
+       if (params->attn_output_type < GGML_TYPE_COUNT && strcmp(tensor->name, "attn_output.weight") == 0) {
+           new_type = params->attn_output_type;
+       }
+       if (params->ffn_gate_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_gate") == 0) {
+           new_type = params->ffn_gate_type;
+       }
+       if (params->ffn_down_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_down") == 0) {
+           new_type = params->ffn_down_type;
+       }
+       if (params->ffn_up_type < GGML_TYPE_COUNT && strcmp(tensor->name, "ffn_up") == 0) {
+           new_type = params->ffn_up_type;
+       }

        // If we've decided to quantize to the same type the tensor is already
        // in then there's nothing to do.
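
Spelled out, the precedence these hunks establish is: when ftype is LLAMA_FTYPE_CQS, a per-tensor type from the params wins only if it was explicitly set (GGML_TYPE_COUNT serves as the "unset" sentinel); otherwise the tensor keeps whatever the existing heuristics pick on top of the Q2_K default. A standalone sketch of that rule, with a helper name made up for illustration:

    #include "ggml.h"

    // Hypothetical helper, not in the commit: take the CQS override when it is set,
    // otherwise keep the type chosen by the regular ftype heuristics.
    static ggml_type cqs_or_default(ggml_type cqs_type, ggml_type heuristic_type) {
        return cqs_type < GGML_TYPE_COUNT ? cqs_type : heuristic_type;
    }

    // e.g. for attn_v.weight: new_type = cqs_or_default(params->attn_v_type, new_type);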
@@ -19055,6 +19107,14 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
        /*.ftype                 =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
        /*.output_tensor_type    =*/ GGML_TYPE_COUNT,
        /*.token_embedding_type  =*/ GGML_TYPE_COUNT,
+       /*.attn_q_type           =*/ GGML_TYPE_COUNT,
+       /*.attn_k_type           =*/ GGML_TYPE_COUNT,
+       /*.attn_v_type           =*/ GGML_TYPE_COUNT,
+       /*.attn_qkv_type         =*/ GGML_TYPE_COUNT,
+       /*.attn_output_type      =*/ GGML_TYPE_COUNT,
+       /*.ffn_gate_type         =*/ GGML_TYPE_COUNT,
+       /*.ffn_down_type         =*/ GGML_TYPE_COUNT,
+       /*.ffn_up_type           =*/ GGML_TYPE_COUNT,
        /*.allow_requantize      =*/ false,
        /*.quantize_output_tensor =*/ true,
        /*.only_copy             =*/ false,
