1
1
import json
2
2
import re
3
3
import shutil
4
- from datetime import datetime
5
4
6
- import psutil
7
- import requests
8
5
from functools import partial
9
6
from PySide6 .QtCore import *
10
7
from PySide6 .QtGui import *
11
8
from PySide6 .QtWidgets import *
12
9
13
- from DownloadThread import DownloadThread
14
- from GPUMonitor import GPUMonitor
15
- from KVOverrideEntry import KVOverrideEntry
16
- from Logger import Logger
17
- from ModelInfoDialog import ModelInfoDialog
18
- from QuantizationThread import QuantizationThread
19
- from TaskListItem import TaskListItem
20
- from error_handling import show_error , handle_error
21
- from imports_and_globals import ensure_directory , open_file_safe , resource_path
22
- from localizations import *
23
- from ui_update import *
10
+ from src .GPUMonitor import GPUMonitor
11
+ from src .KVOverrideEntry import KVOverrideEntry
12
+ from src .Logger import Logger
13
+ from src .ModelInfoDialog import ModelInfoDialog
14
+ from src .imports_and_globals import (
15
+ open_file_safe ,
16
+ resource_path ,
17
+ show_about ,
18
+ ensure_directory ,
19
+ )
20
+ from src .localizations import *
21
+ import src .ui_update
22
+ import src .lora_conversion
23
+ import src .utils
24
24
25
25
26
26
class AutoGGUF (QMainWindow ):
@@ -37,17 +37,35 @@ def __init__(self):
37
37
ensure_directory (os .path .abspath ("models" ))
38
38
39
39
# References
40
- self .update_base_model_visibility = partial (update_base_model_visibility , self )
41
- self .update_assets = update_assets .__get__ (self )
42
- self .update_cuda_option = update_cuda_option .__get__ (self )
43
- self .update_cuda_backends = update_cuda_backends .__get__ (self )
44
- self .update_threads_spinbox = partial (update_threads_spinbox , self )
45
- self .update_threads_slider = partial (update_threads_slider , self )
46
- self .update_gpu_offload_spinbox = partial (update_gpu_offload_spinbox , self )
47
- self .update_gpu_offload_slider = partial (update_gpu_offload_slider , self )
48
- self .update_model_info = partial (update_model_info , self .logger , self )
49
- self .update_system_info = partial (update_system_info , self )
50
- self .update_download_progress = partial (update_download_progress , self )
40
+ self .update_base_model_visibility = partial (
41
+ src .ui_update .update_base_model_visibility , self
42
+ )
43
+ self .update_assets = src .ui_update .update_assets .__get__ (self )
44
+ self .update_cuda_option = src .ui_update .update_cuda_option .__get__ (self )
45
+ self .update_cuda_backends = src .ui_update .update_cuda_backends .__get__ (self )
46
+ self .download_llama_cpp = src .utils .download_llama_cpp .__get__ (self )
47
+ self .refresh_releases = src .utils .refresh_releases .__get__ (self )
48
+ self .browse_lora_input = src .utils .browse_lora_input .__get__ (self )
49
+ self .browse_lora_output = src .utils .browse_lora_output .__get__ (self )
50
+ self .convert_lora = src .lora_conversion .convert_lora .__get__ (self )
51
+ self .show_about = show_about .__get__ (self )
52
+ self .update_threads_spinbox = partial (
53
+ src .ui_update .update_threads_spinbox , self
54
+ )
55
+ self .update_threads_slider = partial (src .ui_update .update_threads_slider , self )
56
+ self .update_gpu_offload_spinbox = partial (
57
+ src .ui_update .update_gpu_offload_spinbox , self
58
+ )
59
+ self .update_gpu_offload_slider = partial (
60
+ src .ui_update .update_gpu_offload_slider , self
61
+ )
62
+ self .update_model_info = partial (
63
+ src .ui_update .update_model_info , self .logger , self
64
+ )
65
+ self .update_system_info = partial (src .ui_update .update_system_info , self )
66
+ self .update_download_progress = partial (
67
+ src .ui_update .update_download_progress , self
68
+ )
51
69
52
70
# Create a central widget and main layout
53
71
central_widget = QWidget ()
@@ -711,14 +729,6 @@ def refresh_backends(self):
711
729
self .backend_combo .setEnabled (False )
712
730
self .logger .info (FOUND_VALID_BACKENDS .format (self .backend_combo .count ()))
713
731
714
def show_about(self):
    """Show the About dialog with the application name, version, and blurb."""
    parts = [
        "AutoGGUF\n\n",
        f"Version: {AUTOGGUF_VERSION}\n\n",
        "A tool for managing and converting GGUF models.",
    ]
    QMessageBox.about(self, "About AutoGGUF", "".join(parts))
722
732
def save_preset (self ):
723
733
self .logger .info (SAVING_PRESET )
724
734
preset = {
@@ -1060,87 +1070,6 @@ def restart_task(self, task_item):
1060
1070
task_item .update_status (IN_PROGRESS )
1061
1071
break
1062
1072
1063
def browse_lora_input(self):
    """Ask the user for a LoRA input directory and record its absolute path.

    Does nothing if the dialog is cancelled; otherwise the chosen directory
    is created if missing and its absolute path placed in the input field.
    """
    self.logger.info(BROWSING_FOR_LORA_INPUT_DIRECTORY)
    chosen_dir = QFileDialog.getExistingDirectory(
        self, SELECT_LORA_INPUT_DIRECTORY
    )
    if not chosen_dir:
        return  # dialog cancelled — leave the field untouched
    self.lora_input.setText(os.path.abspath(chosen_dir))
    ensure_directory(chosen_dir)
1072
def browse_lora_output(self):
    """Ask the user for a LoRA output file and record its absolute path.

    Does nothing if the save dialog is cancelled.
    """
    self.logger.info(BROWSING_FOR_LORA_OUTPUT_FILE)
    chosen_file, _selected_filter = QFileDialog.getSaveFileName(
        self, SELECT_LORA_OUTPUT_FILE, "", GGUF_AND_BIN_FILES
    )
    if not chosen_file:
        return  # dialog cancelled — leave the field untouched
    self.lora_output.setText(os.path.abspath(chosen_file))
def convert_lora(self):
    """Validate the LoRA form fields and launch conversion as a background task.

    Builds a ``python src/convert_lora_to_gguf.py`` (or ``..._to_ggml.py``)
    command from the UI inputs, runs it on a QuantizationThread, and adds a
    TaskListItem to the task list so status and errors surface in the UI.
    Validation failures and startup errors are reported via show_error
    rather than raised to the caller.
    """
    self.logger.info(STARTING_LORA_CONVERSION)
    try:
        lora_input_path = self.lora_input.text()
        lora_output_path = self.lora_output.text()
        lora_output_type = self.lora_output_type_combo.currentText()

        # Both paths are mandatory regardless of output format.
        if not lora_input_path:
            raise ValueError(LORA_INPUT_PATH_REQUIRED)
        if not lora_output_path:
            raise ValueError(LORA_OUTPUT_PATH_REQUIRED)

        if lora_output_type == "GGUF":  # Use new file and parameters for GGUF
            command = [
                "python",
                "src/convert_lora_to_gguf.py",
                "--outfile",
                lora_output_path,
                lora_input_path,
            ]
            # GGUF conversion additionally requires the base model path.
            base_model_path = self.base_model_path.text()
            if not base_model_path:
                raise ValueError(BASE_MODEL_PATH_REQUIRED)
            command.extend(["--base", base_model_path])
        else:  # Use old GGML parameters for GGML
            command = ["python", "src/convert_lora_to_ggml.py", lora_input_path]

        logs_path = self.logs_input.text()
        ensure_directory(logs_path)

        # Timestamped log file name so concurrent conversions don't collide.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        log_file = os.path.join(logs_path, f"lora_conversion_{timestamp}.log")

        command_str = " ".join(command)
        self.logger.info(f"{LORA_CONVERSION_COMMAND}: {command_str}")

        thread = QuantizationThread(command, os.getcwd(), log_file)
        # Keep a reference so the QThread is not garbage-collected mid-run.
        self.quant_threads.append(thread)

        task_name = LORA_CONVERSION_FROM_TO.format(
            os.path.basename(lora_input_path), os.path.basename(lora_output_path)
        )
        task_item = TaskListItem(task_name, log_file, show_progress_bar=False)
        list_item = QListWidgetItem(self.task_list)
        list_item.setSizeHint(task_item.sizeHint())
        self.task_list.addItem(list_item)
        self.task_list.setItemWidget(list_item, task_item)

        thread.status_signal.connect(task_item.update_status)
        # Lambdas capture thread and both paths for the completion callback.
        thread.finished_signal.connect(
            lambda: self.lora_conversion_finished(
                thread, lora_input_path, lora_output_path
            )
        )
        thread.error_signal.connect(
            lambda err: handle_error(self.logger, err, task_item)
        )
        thread.start()
        self.logger.info(LORA_CONVERSION_TASK_STARTED)
    except ValueError as e:
        # Expected validation failures — show the message as-is.
        show_error(self.logger, str(e))
    except Exception as e:
        # Unexpected startup failures — wrap in the generic error message.
        show_error(self.logger, ERROR_STARTING_LORA_CONVERSION.format(str(e)))
1144
1073
def lora_conversion_finished (self , thread , input_path , output_path ):
1145
1074
self .logger .info (LORA_CONVERSION_FINISHED )
1146
1075
if thread in self .quant_threads :
@@ -1194,43 +1123,6 @@ def download_finished(self, extract_dir):
1194
1123
if index >= 0 :
1195
1124
self .backend_combo .setCurrentIndex (index )
1196
1125
1197
def refresh_releases(self):
    """Fetch llama.cpp releases from the GitHub API and fill the release combo.

    Repopulates self.release_combo with one entry per release (tag name as
    text, full release dict as userData) and refreshes the asset list for the
    current selection. Network or HTTP failures are reported via show_error
    instead of propagating.
    """
    self.logger.info(REFRESHING_LLAMACPP_RELEASES)
    try:
        response = requests.get(
            "https://api.github.com/repos/ggerganov/llama.cpp/releases",
            timeout=30,  # without a timeout a stalled connection hangs the UI forever
        )
        response.raise_for_status()  # Raise an exception for bad status codes
        releases = response.json()
        self.release_combo.clear()
        for release in releases:
            self.release_combo.addItem(release["tag_name"], userData=release)
        # Disconnect before reconnecting: connecting on every refresh stacks
        # duplicate connections, making update_assets fire once per refresh
        # for every index change.
        try:
            self.release_combo.currentIndexChanged.disconnect(self.update_assets)
        except (TypeError, RuntimeError):
            pass  # not connected yet (first refresh)
        self.release_combo.currentIndexChanged.connect(self.update_assets)
        self.update_assets()
    except requests.exceptions.RequestException as e:
        show_error(self.logger, ERROR_FETCHING_RELEASES.format(str(e)))
1213
def download_llama_cpp(self):
    """Start downloading the selected llama.cpp release asset in the background.

    Shows an error and returns if no asset is selected; otherwise spawns a
    DownloadThread writing into ./llama_bin and wires its signals to the
    progress/finished/error handlers.
    """
    self.logger.info(STARTING_LLAMACPP_DOWNLOAD)

    selected_asset = self.asset_combo.currentData()
    if not selected_asset:
        show_error(self.logger, NO_ASSET_SELECTED)
        return

    # Downloads always land in ./llama_bin, created on demand.
    target_dir = os.path.abspath("llama_bin")
    os.makedirs(target_dir, exist_ok=True)
    destination = os.path.join(target_dir, selected_asset["name"])

    worker = DownloadThread(selected_asset["browser_download_url"], destination)
    # Keep a reference on self so the QThread outlives this call.
    self.download_thread = worker
    worker.progress_signal.connect(self.update_download_progress)
    worker.finished_signal.connect(self.download_finished)
    worker.error_signal.connect(self.download_error)
    worker.start()

    # Lock the button and reset the bar while the transfer runs.
    self.download_button.setEnabled(False)
    self.download_progress.setValue(0)
1234
1126
def download_finished (self , extract_dir ):
1235
1127
self .download_button .setEnabled (True )
1236
1128
self .download_progress .setValue (100 )
0 commit comments