
Commit 83237b8

Merge branch 'OpenAccess-AI-Collective:main' into logging_enhancement
2 parents 9234b75 + 168a7a0 commit 83237b8

2 files changed: +9 -0 lines changed

Diff for: README.md (+5)

@@ -305,6 +305,8 @@ base_model_ignore_patterns:
 # if the base_model repo on hf hub doesn't include configuration .json files,
 # you can set that here, or leave this empty to default to base_model
 base_model_config: ./llama-7b-hf
+# you can specify to choose a specific model revision from huggingface hub
+model_revision:
 # Optional tokenizer configuration override in case you want to use a different tokenizer
 # than the one defined in the base model
 tokenizer_config:

@@ -411,6 +413,9 @@ logging_steps:
 save_steps:
 eval_steps:

+# save model as safetensors (require safetensors package)
+save_safetensors:
+
 # whether to mask out or include the human's prompt from the training labels
 train_on_inputs: false
 # don't use this, leads to wonky training (according to someone on the internet)
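
The README hunk above documents two new config keys. Only `save_safetensors` is wired up in trainer.py below; a `model_revision` value would ordinarily be passed through as the `revision` argument of the transformers `from_pretrained` loaders. A minimal sketch of that usage, with the model id and revision purely illustrative:

from transformers import AutoModelForCausalLM

# `revision` accepts a branch name, tag, or commit SHA on the HF hub;
# "huggyllama/llama-7b" and "main" are placeholder values
model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-7b",
    revision="main",
)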

Diff for: src/axolotl/utils/trainer.py (+4)

@@ -183,6 +183,10 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.hub_model_id:
         training_arguments_kwargs["hub_model_id"] = cfg.hub_model_id
         training_arguments_kwargs["push_to_hub"] = True
+        training_arguments_kwargs["hub_private_repo"] = True
+
+    if cfg.save_safetensors:
+        training_arguments_kwargs["save_safetensors"] = cfg.save_safetensors

     training_args = AxolotlTrainingArguments(
         per_device_train_batch_size=cfg.micro_batch_size,
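
For context, `AxolotlTrainingArguments` is built on the transformers `TrainingArguments`, so these kwargs land on stock transformers options. A minimal sketch of the equivalent direct call, with `output_dir` and `hub_model_id` purely illustrative:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./out",              # placeholder path
    push_to_hub=True,
    hub_model_id="my-org/my-model",  # placeholder repo id
    hub_private_repo=True,           # pushed hub repo is created private
    save_safetensors=True,           # write checkpoints as .safetensors
)

Note that the diff sets `hub_private_repo` unconditionally whenever `hub_model_id` is configured, so every pushed fine-tune starts out private.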
