We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 5575fed · commit 478a770 (Copy full SHA: 478a770)
llama_cpp/llama.py
@@ -1409,15 +1409,15 @@ def logit_bias_processor(

         if stream:
             remaining_tokens = completion_tokens[returned_tokens:]
-            all_text = self.detokenize(
+            remaining_text = self.detokenize(
                 remaining_tokens,
                 prev_tokens=prompt_tokens + completion_tokens[:returned_tokens],
             )
-            any_stop = [s for s in stop_sequences if s in all_text]
+            any_stop = [s for s in stop_sequences if s in remaining_text]
             if len(any_stop) > 0:
-                end = min(all_text.index(stop) for stop in any_stop)
+                end = min(remaining_text.index(stop) for stop in any_stop)
             else:
-                end = len(all_text)
+                end = len(remaining_text)

             token_end_position = 0
             for token in remaining_tokens:
0 commit comments