Skip to content

Commit 3593de8

Browse files
committed
Formatting
1 parent d060127 commit 3593de8

File tree

1 file changed

+4
-3
lines changed

1 file changed

+4
-3
lines changed

vllm/model_executor/models/llama.py

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -257,8 +257,8 @@ def __init__(
257257
(lora_config.max_loras or 1)) if lora_config else 0
258258
self.vocab_size = config.vocab_size + lora_vocab
259259
self.org_vocab_size = config.vocab_size
260-
if get_pp_group().is_first_rank or (config.tie_word_embeddings and
261-
get_pp_group().is_last_rank):
260+
if get_pp_group().is_first_rank or (config.tie_word_embeddings
261+
and get_pp_group().is_last_rank):
262262
self.embed_tokens = VocabParallelEmbedding(
263263
self.vocab_size,
264264
config.hidden_size,
@@ -386,7 +386,8 @@ def __init__(
386386

387387
logit_scale = getattr(config, "logit_scale", 1.0)
388388
self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
389-
config.vocab_size, logit_scale)
389+
config.vocab_size,
390+
logit_scale)
390391
self.sampler = Sampler()
391392
else:
392393
self.lm_head = PPMissingLayer()

0 commit comments

Comments (0)