Skip to content

Commit dee7f8d

Browse files
MasterYi1024 and MasterYi authored
Correct free memory and total memory. (ggml-org#6630)
Co-authored-by: MasterYi <zouxiaoyi@kylinos.cn>
1 parent 81da18e commit dee7f8d

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

llama.cpp

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -1638,17 +1638,17 @@ static size_t llama_get_device_memory(int device) {
16381638
#if defined(GGML_USE_CUDA)
16391639
size_t total;
16401640
size_t free;
1641-
ggml_backend_cuda_get_device_memory(device, &total, &free);
1641+
ggml_backend_cuda_get_device_memory(device, &free, &total);
16421642
return free;
16431643
#elif defined(GGML_USE_SYCL)
16441644
size_t total;
16451645
size_t free;
1646-
ggml_backend_sycl_get_device_memory(device, &total, &free);
1646+
ggml_backend_sycl_get_device_memory(device, &free, &total);
16471647
return free;
16481648
#elif defined(GGML_USE_VULKAN)
16491649
size_t total;
16501650
size_t free;
1651-
ggml_backend_vk_get_device_memory(device, &total, &free);
1651+
ggml_backend_vk_get_device_memory(device, &free, &total);
16521652
return free;
16531653
#else
16541654
return 1;

0 commit comments

Comments (0)