
Commit 2a9979f

feat: Update llama.cpp
1 parent c50d330 commit 2a9979f

2 files changed: +10 / -4 lines changed


llama_cpp/llama_cpp.py

Lines changed: 9 additions & 3 deletions
@@ -1167,13 +1167,19 @@ def llama_n_seq_max(ctx: llama_context_p, /) -> int:
     ...
 
 
-# LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);
+# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
+@ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int)
+def llama_pooling_type(ctx: llama_context_p, /) -> int:
+    ...
+
+
+# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
 @ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int)
 def llama_vocab_type(model: llama_model_p, /) -> int:
     ...
 
 
-# LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
+# LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
 @ctypes_function("llama_rope_type", [llama_model_p_ctypes], ctypes.c_int)
 def llama_rope_type(model: llama_model_p, /) -> int:
     ...
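The first hunk adds a ctypes binding for llama_pooling_type, which takes a context pointer and returns the pooling enum as a plain int. Below is a minimal, hedged sketch of querying it through the low-level API; the model path is a placeholder, and the setup/teardown calls (llama_backend_init, llama_load_model_from_file, llama_new_context_with_model, llama_free, llama_free_model, llama_backend_free) are assumed to match this version of the bindings.

import llama_cpp

llama_cpp.llama_backend_init()

# Placeholder path; any GGUF model works for this query.
model_params = llama_cpp.llama_model_default_params()
model = llama_cpp.llama_load_model_from_file(b"./model.gguf", model_params)

ctx_params = llama_cpp.llama_context_default_params()
ctx = llama_cpp.llama_new_context_with_model(model, ctx_params)

# The new binding returns the enum value as an int (0 corresponds to "none" pooling in llama.cpp).
print("pooling type:", llama_cpp.llama_pooling_type(ctx))

llama_cpp.llama_free(ctx)
llama_cpp.llama_free_model(model)
llama_cpp.llama_backend_free()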
@@ -3091,7 +3097,7 @@ def llama_sample_token_greedy(
     ...
 
 
-# /// @details Randomly selects a token from the candidates based on their probabilities.
+# /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
 # LLAMA_API llama_token llama_sample_token(
 #     struct llama_context * ctx,
 #     llama_token_data_array * candidates);
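The second hunk only rewords the header comment for llama_sample_token to note that sampling draws from the RNG stored in the context. As a rough sketch (not part of this diff), the binding could be driven as follows, assuming llama_get_logits, llama_token_data, llama_token_data_array, and llama_sample_token keep the low-level shapes defined in this module and that llama_decode has already run on ctx:

import ctypes
import llama_cpp

def sample_last_token(ctx, n_vocab: int) -> int:
    # Logits for the last decoded token, exposed as a ctypes float pointer.
    logits = llama_cpp.llama_get_logits(ctx)
    data = (llama_cpp.llama_token_data * n_vocab)(
        *(llama_cpp.llama_token_data(id=i, logit=logits[i], p=0.0) for i in range(n_vocab))
    )
    candidates = llama_cpp.llama_token_data_array(data=data, size=n_vocab, sorted=False)
    # Samples a token id using the RNG of ctx, as the updated comment states.
    return llama_cpp.llama_sample_token(ctx, ctypes.byref(candidates))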

vendor/llama.cpp: submodule pointer updated (1 addition & 1 deletion)
