Pretrain multipack (#2278)
* fix for pretrain with packing

* fix model name and loss expected

* make sure to check with micro batch size for pretraining

* change loss thresholds based on parametrization

* make tests smaller for CI

* fix pretrain packing

* fix pretrain packing test

* address pr feedback
winglian authored Jan 24, 2025
1 parent 6086162 commit 2062077
Showing 5 changed files with 42 additions and 21 deletions.
2 changes: 2 additions & 0 deletions src/axolotl/core/trainer_builder.py
```diff
@@ -1880,6 +1880,8 @@ def build_collator(
         if training_args.pretraining:
             if self.cfg.pretraining_sample_concatenation is False:
                 return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
+            if self.cfg.micro_batch_size > 1:
+                return DataCollatorForSeq2Seq(self.tokenizer, **kwargs)
             return None

         if self.cfg.model_config_type == "mamba":
```
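Note on the change above: `DataCollatorForSeq2Seq` is now also used during pretraining when `micro_batch_size` is greater than 1, because two pretraining rows are generally not the same length and must be padded before they can be stacked into one tensor. Below is a minimal sketch of that collation behavior using the Hugging Face collator directly; the tokenizer choice is illustrative and not part of this commit.

```python
# Minimal sketch (not the axolotl code path): why a micro batch size above 1 needs
# a padding collator. Rows of different lengths cannot be stacked without padding.
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

# assumption: any tokenizer works here; SmolLM2 is only used because the e2e test uses it
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

collator = DataCollatorForSeq2Seq(tokenizer, return_tensors="pt", padding=True)

features = [
    {"input_ids": [1, 2, 3, 4, 5], "labels": [1, 2, 3, 4, 5]},
    {"input_ids": [1, 2, 3], "labels": [1, 2, 3]},
]
batch = collator(features)
print(batch["input_ids"].shape)  # torch.Size([2, 5]) -- shorter row padded with the pad token
print(batch["labels"][1])        # tensor([   1,    2,    3, -100, -100]) -- label padding ignored by the loss
```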
13 changes: 5 additions & 8 deletions src/axolotl/utils/data/pretraining.py
```diff
@@ -191,7 +191,7 @@ def wrap_pretraining_dataset(
             tokenizer,
             return_tensors="pt",
             padding=True,
-            pad_to_multiple_of=max_tokens * batch_size,
+            pad_to_multiple_of=max_tokens,
             multipack_attn=cfg.pretrain_multipack_attn,
         )
         encode = functools.partial(
@@ -201,8 +201,6 @@
             max_seq_length=max_tokens,
             batch_size=batch_size,
             multipack_attn=cfg.pretrain_multipack_attn,
-            group_size=cfg.sample_packing_group_size,
-            bin_size=cfg.sample_packing_bin_size,
         )
         # set this to 1 so downstream data_loader doesn't try to increase the batch again
         cfg.micro_batch_size = 1
```
```diff
@@ -247,9 +245,7 @@ def encode_packed_pretraining(
     examples: Dict[str, List],
     max_seq_length: int = 2048,
     batch_size: int = 4,
-    multipack_attn: Optional[bool] = False,
-    group_size: int = 100000,
-    bin_size: int = 200,
+    multipack_attn: Optional[bool] = True,
 ) -> Dict[str, List]:
     # pylint: disable=duplicate-code
     # tokenize all the examples
```
```diff
@@ -260,15 +256,16 @@
         train_dataset,
         max_seq_length,
         skip_position_ids=not multipack_attn,
+        # FIXME using attention mask unpad/pad with trainer and packed pretraining is broken atm
+        # workaround by using the position id logic for now in trainer
+        drop_attention_mask=multipack_attn,
     )

     sampler = MultipackBatchSampler(
         sampler=RandomSampler(train_dataset),
         lengths=get_dataset_lengths(train_dataset),
         batch_size=1,
         batch_max_len=batch_size * max_seq_length,
-        group_size=group_size,
-        bin_size=bin_size,
         drop_last=True,
     )
```
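With `group_size` and `bin_size` no longer passed through here, the sampler presumably falls back to `MultipackBatchSampler`'s own defaults, and each packed batch is still bounded by `batch_max_len = batch_size * max_seq_length` tokens. The toy sketch below illustrates only that token-budget idea; it is not the actual bin-packing sampler.

```python
# Toy greedy illustration of the batch_max_len budget (not MultipackBatchSampler):
# group sample indices so that each group's total token count fits the budget.
from typing import List


def greedy_pack(lengths: List[int], batch_max_len: int) -> List[List[int]]:
    """Greedily group sample indices so each group's total length <= batch_max_len."""
    batches, current, used = [], [], 0
    for idx, length in enumerate(lengths):
        if used + length > batch_max_len and current:
            batches.append(current)
            current, used = [], 0
        current.append(idx)
        used += length
    if current:
        batches.append(current)
    return batches


# e.g. batch_size=4, max_seq_length=2048 -> a budget of 8192 tokens per packed batch
print(greedy_pack([2048, 1500, 4096, 900, 3000], batch_max_len=4 * 2048))
# [[0, 1, 2], [3, 4]]
```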
7 changes: 5 additions & 2 deletions src/axolotl/utils/trainer.py
```diff
@@ -310,19 +310,22 @@ def drop_no_trainable_tokens(sample):


 def process_pretraining_datasets_for_packing(
-    train_dataset, sequence_len, skip_position_ids=True
+    train_dataset, sequence_len, skip_position_ids=True, drop_attention_mask=False
 ):
     drop_long = partial(drop_long_seq, sequence_len=sequence_len)

     train_dataset = train_dataset.filter(
         drop_long,
         desc="Dropping Long Sequences",
         load_from_cache_file=False,
     )
-    if skip_position_ids:
+    if not skip_position_ids:
         train_dataset = train_dataset.map(
             add_position_ids,
             desc="Add position_id column (Pretraining Sample Packing)",
         )
+    if drop_attention_mask:
+        train_dataset = train_dataset.remove_columns("attention_mask")

     return train_dataset
```

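The reworked helper above now adds `position_ids` only when they are not being skipped, and drops the `attention_mask` column when `drop_attention_mask` is set (the workaround referenced by the FIXME in pretraining.py). Below is a hedged sketch of those two post-processing steps on a toy `datasets.Dataset`; the `add_position_ids` function here is a simplified stand-in, not the axolotl helper.

```python
# Hedged sketch of the column handling above, on a toy dataset.
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "input_ids": [[1, 2, 3], [4, 5]],
        "attention_mask": [[1, 1, 1], [1, 1]],
        "labels": [[1, 2, 3], [4, 5]],
    }
)


def add_position_ids(sample):
    # simplified stand-in: one position id per token, restarting at 0 for each sample
    sample["position_ids"] = list(range(len(sample["input_ids"])))
    return sample


multipack_attn = True  # mirrors pretrain_multipack_attn=True

# skip_position_ids=not multipack_attn -> False, so position_ids are added
ds = ds.map(add_position_ids, desc="Add position_id column (Pretraining Sample Packing)")
# drop_attention_mask=multipack_attn -> True, so the column is removed as a workaround
ds = ds.remove_columns("attention_mask")

print(ds.column_names)  # ['input_ids', 'labels', 'position_ids']
```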
32 changes: 24 additions & 8 deletions tests/e2e/test_llama_pretrain.py
```diff
@@ -13,7 +13,7 @@
 from axolotl.utils.config import normalize_config
 from axolotl.utils.dict import DictDefault

-from .utils import check_model_output_exists
+from .utils import check_model_output_exists, check_tensorboard

 LOG = logging.getLogger("axolotl.tests.e2e")
 os.environ["WANDB_DISABLED"] = "true"
```
```diff
@@ -28,19 +28,25 @@ class TestPretrainLlama:
         "sample_packing",
         [True, False],
     )
-    def test_pretrain(self, temp_dir, sample_packing):
+    @pytest.mark.parametrize(
+        "pretrain_multipack_attn",
+        [True, False],
+    )
+    def test_pretrain(self, temp_dir, sample_packing, pretrain_multipack_attn):
+        if not sample_packing and pretrain_multipack_attn:
+            return
+
         # pylint: disable=duplicate-code
         cfg = DictDefault(
             {
-                "base_model": "JackFram/llama-68m",
-                "tokenizer_type": "LlamaTokenizer",
+                "base_model": "HuggingFaceTB/SmolLM2-135M",
                 "flash_attention": True,
                 "sequence_len": 1024,
                 "sample_packing": sample_packing,
+                "pretrain_multipack_attn": pretrain_multipack_attn,
+                "dataset_processes": 1,
                 "special_tokens": {
-                    "unk_token": "<unk>",
-                    "bos_token": "<s>",
-                    "eos_token": "</s>",
+                    "pad_token": "<|endoftext|>",
                 },
                 "pretraining_dataset": [
                     {
```
```diff
@@ -51,7 +57,7 @@ def test_pretrain(self, temp_dir, sample_packing):
                 ],
                 "max_steps": 5,
                 "num_epochs": 1,
-                "micro_batch_size": 1,
+                "micro_batch_size": 2,
                 "gradient_accumulation_steps": 1,
                 "val_set_size": 0.0,
                 "output_dir": temp_dir,
```
```diff
@@ -60,6 +66,7 @@
                 "lr_scheduler": "cosine",
                 "save_safetensors": True,
                 "bf16": "auto",
+                "use_tensorboard": True,
             }
         )
         normalize_config(cfg)
```
```diff
@@ -68,3 +75,12 @@ def test_pretrain(self, temp_dir, sample_packing):

         train(cfg=cfg, dataset_meta=dataset_meta)
         check_model_output_exists(temp_dir, cfg)
+        loss_threshold = 3.5
+        if sample_packing and not pretrain_multipack_attn:
+            loss_threshold = 6.5
+        check_tensorboard(
+            temp_dir + "/runs",
+            "train/train_loss",
+            loss_threshold,
+            "Train Loss is too high",
+        )
```
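The test now enables `use_tensorboard` and checks the logged `train/train_loss` against a threshold that depends on the parametrization (6.5 when sample packing runs without multipack attention, 3.5 otherwise). The sketch below shows one way such a check can be written; it is not the actual `check_tensorboard` helper from tests/e2e/utils.

```python
# Hedged sketch of a tensorboard loss check (assumes the tensorboard package is installed).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def assert_scalar_below(log_dir: str, tag: str, threshold: float, message: str) -> None:
    acc = EventAccumulator(log_dir)
    acc.Reload()  # parse the event files under log_dir
    values = [event.value for event in acc.Scalars(tag)]
    assert values, f"no '{tag}' scalars found in {log_dir}"
    assert values[-1] < threshold, f"{message}: {values[-1]}"


# mirroring the call in the test above:
# assert_scalar_below(temp_dir + "/runs", "train/train_loss", loss_threshold, "Train Loss is too high")
```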
9 changes: 6 additions & 3 deletions tests/test_packed_pretraining.py
```diff
@@ -41,6 +41,7 @@ def test_packing_stream_dataset(self):
                     }
                 ],
                 "sample_packing": True,
+                "pretrain_multipack_attn": True,
                 "pad_to_sequence_len": True,
                 "sequence_len": 2048,
                 "micro_batch_size": 2,
```
```diff
@@ -87,9 +88,11 @@ def test_packing_stream_dataset(self):
             assert data["labels"].shape == torch.Size(
                 [1, original_bsz * cfg.sequence_len]
             )
-            assert data["attention_mask"].shape == torch.Size(
-                [1, original_bsz * cfg.sequence_len]
-            )
+            assert "attention_mask" not in data
+            # FIXME add back once we fix packing unpad/pad with attention mask
+            # assert data["attention_mask"].shape == torch.Size(
+            #     [1, original_bsz * cfg.sequence_len]
+            # )
             idx += 1
```
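For reference, the shape asserted above comes from packing the whole micro batch into one row: `original_bsz` samples of `sequence_len` tokens become a single sequence of `original_bsz * sequence_len` tokens. A toy illustration of just that shape (an assumption about the resulting layout, not the collator's logic):

```python
import torch

original_bsz, sequence_len = 2, 2048
unpacked = torch.randint(0, 32000, (original_bsz, sequence_len))
packed = unpacked.reshape(1, original_bsz * sequence_len)  # one packed row per batch
assert packed.shape == torch.Size([1, original_bsz * sequence_len])
```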

