Skip to content

Commit eba058a

Browse files
authored
Actually use all CPUs
1 parent 4a900cb commit eba058a

File tree

1 file changed

+1
-3
lines changed

1 file changed

+1
-3
lines changed

llama_cpp/llama.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -262,9 +262,7 @@ def __init__(
         self.n_batch = min(n_ctx, n_batch)  # ???
         self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1)
-        self.n_threads_batch = n_threads_batch or max(
-            multiprocessing.cpu_count(), 1
-        )
+        self.n_threads_batch = n_threads_batch or multiprocessing.cpu_count()

         # Context Params
         self.context_params = llama_cpp.llama_context_default_params()

0 commit comments

Comments (0)