@@ -255,7 +255,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             && qs.model.hparams.n_gqa() >= 2)
             new_type = GGML_TYPE_Q5_K;
         else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 2) {
-            new_type = GGML_TYPE_Q5_K;
+            new_type = GGML_TYPE_Q6_K;
         }
         //else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
         //    use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
@@ -360,13 +360,13 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         //    if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
         //}
         //}
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q5_K;
+        //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q5_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
         else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS)
                 && !qs.has_imatrix && use_more_bits(i_layer, n_layer)) {
             new_type = GGML_TYPE_Q5_K;
         }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
+        //else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
             new_type = GGML_TYPE_Q5_K;
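For context on the two branches this commit comments out: both gate a promotion to a wider quant type on use_more_bits(i_layer, n_layer), which selects roughly the first eighth and last eighth of the layers plus every third layer in between. Below is a minimal standalone sketch of that selection pattern; the predicate body is assumed to match the upstream llama.cpp helper, and the driver loop and layer count are illustrative only, not part of this commit.

#include <cstdio>

// Assumed to mirror the upstream use_more_bits() helper: promote the first
// n/8 and last n/8 layers, plus every third layer in the middle span.
static bool use_more_bits(int i_layer, int n_layer) {
    return i_layer < n_layer/8 || i_layer >= 7*n_layer/8 || (i_layer - n_layer/8)%3 == 2;
}

int main() {
    const int n_layer = 32; // illustrative layer count, e.g. a 7B-class model
    for (int i_layer = 0; i_layer < n_layer; ++i_layer) {
        std::printf("layer %2d -> %s\n", i_layer,
                    use_more_bits(i_layer, n_layer) ? "promoted (e.g. Q5_K/Q6_K)" : "base type");
    }
    return 0;
}

With the two branches disabled, Q4_K_S and Q5_K_S no longer get this per-layer promotion for ffn_down tensors, while the IQ4_NL/IQ4_XS attn_v case is bumped from Q5_K to Q6_K whenever n_gqa() >= 2.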