Skip to content

Commit e42cf39

Browse files
committed
fix(types): correct llama_set_adapters_lora LoRA adapter ctypes signature and use pointer for scales
- change `scale: float` to `float*` (`ctypes.POINTER(ctypes.c_float)`)
- make `adapters` and `scales` optional arrays to match the C API

Signed-off-by: JamePeng <jame_peng@sina.com>
1 parent 87157f3 commit e42cf39

1 file changed

Lines changed: 17 additions & 4 deletions

File tree

llama_cpp/llama_cpp.py

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1911,6 +1911,10 @@ def llama_model_quantize(
19111911
...
19121912

19131913

1914+
# //
1915+
# // Adapters
1916+
# //
1917+
19141918
# // Load a LoRA adapter from file
19151919
# // The adapter is valid as long as the associated model is not freed
19161920
# LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init(
@@ -2058,14 +2062,23 @@ def llama_adapter_get_alora_invocation_tokens(adapter: llama_adapter_lora_p, /)
20582062
# float * scales);
20592063
@ctypes_function(
20602064
"llama_set_adapters_lora",
2061-
[llama_context_p_ctypes, ctypes.POINTER(llama_adapter_lora_p_ctypes), ctypes.c_size_t, ctypes.c_float],
2065+
[llama_context_p_ctypes,
2066+
ctypes.POINTER(llama_adapter_lora_p_ctypes),
2067+
ctypes.c_size_t,
2068+
ctypes.POINTER(ctypes.c_float)
2069+
],
20622070
ctypes.c_int32,
20632071
)
20642072
def llama_set_adapters_lora(
2065-
ctx: llama_context_p, adapters: CtypesArray[llama_adapter_lora_p], n_adapters: ctypes.c_size_t, scale: float, /
2073+
ctx: llama_context_p,
2074+
adapters: Optional[CtypesArray[llama_adapter_lora_p]],
2075+
n_adapters: ctypes.c_size_t,
2076+
scales: Optional[CtypesArray[ctypes.c_float]], /
20662077
) -> int:
2067-
"""Set LoRa adapters on the context.
2068-
Will only modify if the adapters currently in context are different."""
2078+
"""
2079+
Set LoRa adapters on the context.
2080+
Will only modify if the adapters currently in context are different.
2081+
"""
20692082
...
20702083

20712084

0 commit comments

Comments (0)