Skip to content

Commit a7e8bf6

Browse files
committed
refactor: adapt gguf library to project
- remove comments
- remove argparse help text
1 parent f7f9a45 commit a7e8bf6

File tree

3 files changed

+1029
-931
lines changed

3 files changed

+1029
-931
lines changed

src/convert_hf_to_gguf.py

+15-17
Original file line numberDiff line numberDiff line change
@@ -4403,83 +4403,81 @@ def __torch_function__(cls, func, types, args=(), kwargs=None):
44034403

44044404

44054405
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the HF-to-GGUF conversion script.

    Returns:
        argparse.Namespace with the parsed options; ``model`` is the only
        required (positional) argument.
    """
    # Restore the description/help strings that were stripped by the
    # "remove argparse help text" commit: empty help makes `--help` useless
    # while costing nothing at runtime.
    parser = argparse.ArgumentParser(
        description="Convert a huggingface model to a GGML compatible file"
    )
    parser.add_argument(
        "--vocab-only",
        action="store_true",
        help="extract only the vocab",
    )
    parser.add_argument(
        "--outfile",
        type=Path,
        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
    )
    parser.add_argument(
        "--outtype",
        type=str,
        choices=["f32", "f16", "bf16", "q8_0", "auto"],
        default="f16",
        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
    )
    parser.add_argument(
        "--bigendian",
        action="store_true",
        help="model is executed on big endian machine",
    )
    parser.add_argument(
        "model",
        type=Path,
        help="directory containing model file",
    )
    parser.add_argument(
        "--use-temp-file",
        action="store_true",
        help="use the tempfile library while processing (helpful when running out of memory, process killed)",
    )
    parser.add_argument(
        "--no-lazy",
        action="store_true",
        help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)",
    )
    parser.add_argument(
        "--model-name",
        type=str,
        default=None,
        help="name of the model",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--split-max-tensors",
        type=int,
        default=0,
        help="max tensors in each split",
    )
    parser.add_argument(
        "--split-max-size",
        type=str,
        default="0",
        help="max size per split N(M|G)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="only print out a split plan and exit, without writing any new files",
    )
    parser.add_argument(
        "--no-tensor-first-split",
        action="store_true",
        help="do not add tensors to the first split (disabled by default)",
    )
    parser.add_argument(
        "--metadata",
        type=Path,
        help="Specify the path for an authorship metadata override file",
    )

    return parser.parse_args()

0 commit comments

Comments
 (0)