Skip to content

Commit ebfb375

Browse files
authored
Lint
1 parent be20a80 commit ebfb375

File tree

1 file changed

+8
-8
lines changed

1 file changed

+8
-8
lines changed

examples/low_level_api/low_level_api_chat_cpp.py

Lines changed: 8 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -445,15 +445,15 @@ def generate(self):
445445
],
446446
)
447447
candidates_p = llama_cpp.ctypes.pointer(
448-
llama_cpp.llama_token_data_array(_arr, len(_arr), False)
448+
llama_cpp.llama_token_data_array(_arr, len(_arr), False),
449449
)
450450

451451
# Apply penalties
452452
nl_logit = logits[llama_cpp.llama_token_nl(self.ctx)]
453453
last_n_repeat = min(len(self.last_n_tokens), repeat_last_n, self.n_ctx)
454454

455455
_arr = (llama_cpp.llama_token * last_n_repeat)(
456-
*self.last_n_tokens[len(self.last_n_tokens) - last_n_repeat :]
456+
*self.last_n_tokens[len(self.last_n_tokens) - last_n_repeat :],
457457
)
458458
llama_cpp.llama_sample_repetition_penalties(
459459
ctx=self.ctx,
@@ -480,7 +480,7 @@ def generate(self):
480480
mirostat_mu = 2.0 * self.params.mirostat_tau
481481
mirostat_m = 100
482482
llama_cpp.llama_sample_temperature(
483-
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp)
483+
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp),
484484
)
485485
id = llama_cpp.llama_sample_token_mirostat(
486486
self.ctx,
@@ -493,7 +493,7 @@ def generate(self):
493493
elif self.params.mirostat == 2:
494494
mirostat_mu = 2.0 * self.params.mirostat_tau
495495
llama_cpp.llama_sample_temperature(
496-
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp)
496+
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp),
497497
)
498498
id = llama_cpp.llama_sample_token_mirostat_v2(
499499
self.ctx,
@@ -529,7 +529,7 @@ def generate(self):
529529
min_keep=llama_cpp.c_size_t(1),
530530
)
531531
llama_cpp.llama_sample_temperature(
532-
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp)
532+
self.ctx, candidates_p, llama_cpp.c_float(self.params.temp),
533533
)
534534
id = llama_cpp.llama_sample_token(self.ctx, candidates_p)
535535
# print("`{}`".format(candidates_p.size))
@@ -600,7 +600,7 @@ def generate(self):
600600

601601
# end of text token
602602
if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(
603-
self.ctx
603+
self.ctx,
604604
):
605605
if not self.params.instruct:
606606
for i in self.llama_token_eot:
@@ -636,7 +636,7 @@ def token_to_str(self, token_id: int) -> bytes:
636636
size = 32
637637
buffer = (ctypes.c_char * size)()
638638
n = llama_cpp.llama_token_to_piece(
639-
self.model, llama_cpp.llama_token(token_id), buffer, size
639+
self.model, llama_cpp.llama_token(token_id), buffer, size,
640640
)
641641
assert n <= size
642642
return bytes(buffer[:n])
@@ -709,7 +709,7 @@ def interact(self):
709709
else:
710710
print(self.params.input_prefix, end="")
711711
self.input(
712-
f"{self.params.input_prefix}{self.read_input()}{self.params.input_suffix}"
712+
f"{self.params.input_prefix}{self.read_input()}{self.params.input_suffix}",
713713
)
714714
print(self.params.input_suffix, end="")
715715
self.set_color(util.CONSOLE_COLOR_DEFAULT)

0 commit comments

Comments (0)