@@ -36,16 +36,17 @@ def worker():
     import time
     import shared
     import random
+    import copy
     import modules.default_pipeline as pipeline
     import modules.path
     import modules.patch
     import fooocus_version

     from modules.resolutions import get_resolution_string, resolutions
-    from modules.sdxl_styles import apply_style_negative, apply_style_positive
+    from modules.sdxl_styles import apply_style
     from modules.private_logger import log
     from modules.expansion import safe_str
-    from modules.util import join_prompts
+    from modules.util import join_prompts, remove_empty_str

     try:
         async_gradio_app = shared.gradio_root
@@ -56,20 +57,29 @@ def worker():
     except Exception as e:
         print(e)

+
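+    # Push a preview progress update (number, message, no image) onto the outputs queue read by the UI.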
+    def progressbar(number, text):
+        outputs.append(['preview', (number, text, None)])
+
+
     def handler(task):
-        prompt, negative_prompt, style, performance, resolution, image_number, image_seed, \
+        prompt, negative_prompt, style_selections, performance, resolution, image_number, image_seed, \
             sharpness, sampler_name, scheduler, custom_steps, custom_switch, cfg, \
             base_model_name, refiner_model_name, base_clip_skip, refiner_clip_skip, \
             l1, w1, l2, w2, l3, w3, l4, w4, l5, w5, save_metadata_json, save_metadata_image, \
             img2img_mode, img2img_start_step, img2img_denoise, \
             revision_mode, positive_prompt_strength, negative_prompt_strength, revision_strength_1, revision_strength_2, \
             revision_strength_3, revision_strength_4, same_seed_for_all, output_format, \
             control_lora_canny, canny_edge_low, canny_edge_high, canny_start, canny_stop, canny_strength, canny_model, \
-            control_lora_depth, depth_start, depth_stop, depth_strength, depth_model, prompt_expansion, \
+            control_lora_depth, depth_start, depth_stop, depth_strength, depth_model, use_expansion, \
             input_gallery, revision_gallery, keep_input_names = task

         loras = [(l1, w1), (l2, w2), (l3, w3), (l4, w4), (l5, w5)]

+        raw_style_selections = copy.deepcopy(style_selections)
+
+        use_style = len(style_selections) > 0
+
         modules.patch.sharpness = sharpness

         input_gallery_size = len(input_gallery)
@@ -84,10 +94,19 @@ def handler(task):
             revision_mode = False


-        outputs.append(['preview', (1, 'Initializing ...', None)])
+        progressbar(1, 'Initializing ...')
+
+        raw_prompt = prompt
+        raw_negative_prompt = negative_prompt
+
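+        # Split each multi-line prompt field: the first non-empty line is the main prompt, the rest become extra workloads.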
+        prompts = remove_empty_str([safe_str(p) for p in prompt.split('\n')], default='')
+        negative_prompts = remove_empty_str([safe_str(p) for p in negative_prompt.split('\n')], default='')
+
+        prompt = prompts[0]
+        negative_prompt = negative_prompts[0]

-        prompt = safe_str(prompt)
-        negative_prompt = safe_str(negative_prompt)
+        extra_positive_prompts = prompts[1:] if len(prompts) > 1 else []
+        extra_negative_prompts = negative_prompts[1:] if len(negative_prompts) > 1 else []

         try:
             seed = int(image_seed)
@@ -97,10 +116,11 @@ def handler(task):
             seed = random.randint(constants.MIN_SEED, constants.MAX_SEED)


-        outputs.append(['preview', (3, 'Loading models ...', None)])
+        progressbar(3, 'Loading models ...')
         pipeline.refresh_base_model(base_model_name)
         pipeline.refresh_refiner_model(refiner_model_name)
         pipeline.refresh_loras(loras)
+        pipeline.set_clip_skips(base_clip_skip, refiner_clip_skip)
         if revision_mode:
             pipeline.refresh_clip_vision()
         if control_lora_canny:
@@ -115,7 +135,7 @@ def handler(task):
             revision_images_filenames = list(map(lambda path: os.path.basename(path), revision_images_paths))
             revision_strengths = [revision_strength_1, revision_strength_2, revision_strength_3, revision_strength_4]
             for i in range(revision_gallery_size):
-                outputs.append(['preview', (4, f'Revision for image {i + 1} ...', None)])
+                progressbar(4, f'Revision for image {i + 1} ...')
                 print(f'Revision for image {i + 1} started')
                 if revision_strengths[i % 4] != 0:
                     revision_image = get_image(revision_images_paths[i])
@@ -128,60 +148,78 @@ def handler(task):
             revision_strengths = []


-        tasks = []
-        if not prompt_expansion:
-            outputs.append(['preview', (5, 'Encoding negative text ...', None)])
-            n_txt = apply_style_negative(style, negative_prompt)
-            n_cond = pipeline.process_prompt(n_txt, base_clip_skip, refiner_clip_skip, negative_prompt_strength)
-
-            outputs.append(['preview', (9, 'Encoding positive text ...', None)])
-            p_txt = apply_style_positive(style, prompt)
-            p_cond = pipeline.process_prompt(p_txt, base_clip_skip, refiner_clip_skip, positive_prompt_strength, revision_mode, revision_strengths, clip_vision_outputs)
-
-            for i in range(image_number):
-                current_seed = seed if same_seed_for_all else seed + i
-                tasks.append(dict(
-                    prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    seed=current_seed,
-                    n_cond=n_cond,
-                    p_cond=p_cond,
-                    real_positive_prompt=p_txt,
-                    real_negative_prompt=n_txt
-                ))
+        pipeline.clear_all_caches()
+
+        progressbar(5, 'Processing prompts ...')
+
+        positive_basic_workloads = []
+        negative_basic_workloads = []
+
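+        # Each selected style expands the main prompt into a (positive, negative) workload pair.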
+        if use_style:
+            for s in style_selections:
+                p, n = apply_style(s, positive=prompt)
+                positive_basic_workloads.append(p)
+                negative_basic_workloads.append(n)
         else:
-            for i in range(image_number):
-                outputs.append(['preview', (5, f'Preparing positive text #{i + 1} ...', None)])
-                current_seed = seed if same_seed_for_all else seed + i
+            positive_basic_workloads.append(prompt)

-                expansion_weight = 0.1
+        negative_basic_workloads.append(negative_prompt)  # Always use independent workload for negative.

-                suffix = pipeline.expansion(prompt, current_seed)
-                suffix = f'({suffix}:{expansion_weight})'
-                print(f'[Prompt Expansion] New suffix: {suffix}')
+        positive_basic_workloads = positive_basic_workloads + extra_positive_prompts
+        negative_basic_workloads = negative_basic_workloads + extra_negative_prompts

-                p_txt = apply_style_positive(style, prompt)
-                p_txt = safe_str(p_txt)
+        positive_basic_workloads = remove_empty_str(positive_basic_workloads, default=prompt)
+        negative_basic_workloads = remove_empty_str(negative_basic_workloads, default=negative_prompt)

-                p_txt = join_prompts(p_txt, suffix)
+        positive_top_k = len(positive_basic_workloads)
+        negative_top_k = len(negative_basic_workloads)

-                tasks.append(dict(
-                    prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    seed=current_seed,
-                    real_positive_prompt=p_txt,
-                ))
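+        # Build one task per image: its own seed, the shared prompt workloads, and c/uc slots for [base, refiner] conditioning.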
+        tasks = [dict(
+            task_seed=seed if same_seed_for_all else seed + i,
+            positive=positive_basic_workloads,
+            negative=negative_basic_workloads,
+            expansion='',
+            c=[None, None],
+            uc=[None, None],
+        ) for i in range(image_number)]

-            outputs.append(['preview', (9, 'Encoding negative text ...', None)])
-            n_txt = apply_style_negative(style, negative_prompt)
-            n_cond = pipeline.process_prompt(n_txt, base_clip_skip, refiner_clip_skip, negative_prompt_strength)
+        if use_expansion:
+            for i, t in enumerate(tasks):
+                progressbar(5, f'Preparing Fooocus text #{i + 1} ...')
+                expansion = pipeline.expansion(prompt, t['task_seed'])
+                print(f'[Prompt Expansion] New suffix: {expansion}')
+                t['expansion'] = expansion
+                t['positive'] = copy.deepcopy(t['positive']) + [join_prompts(prompt, expansion)]  # Deep copy.
+
+        for i, t in enumerate(tasks):
+            progressbar(7, f'Encoding base positive #{i + 1} ...')
+            t['c'][0] = pipeline.clip_encode(sd=pipeline.xl_base_patched, texts=t['positive'],
+                                             pool_top_k=positive_top_k)
+
+        for i, t in enumerate(tasks):
+            progressbar(9, f'Encoding base negative #{i + 1} ...')
+            t['uc'][0] = pipeline.clip_encode(sd=pipeline.xl_base_patched, texts=t['negative'],
+                                              pool_top_k=negative_top_k)
+
+        if pipeline.xl_refiner is not None:
+            for i, t in enumerate(tasks):
+                progressbar(11, f'Encoding refiner positive #{i + 1} ...')
+                t['c'][1] = pipeline.clip_encode(sd=pipeline.xl_refiner, texts=t['positive'],
+                                                 pool_top_k=positive_top_k)

         for i, t in enumerate(tasks):
-            outputs.append(['preview', (12, f'Encoding positive text #{i + 1} ...', None)])
-            t['p_cond'] = pipeline.process_prompt(t['real_positive_prompt'], base_clip_skip, refiner_clip_skip,
-                                                  positive_prompt_strength, revision_mode, revision_strengths, clip_vision_outputs)
-            t['real_negative_prompt'] = n_txt
-            t['n_cond'] = n_cond
+            progressbar(13, f'Encoding refiner negative #{i + 1} ...')
+            t['uc'][1] = pipeline.clip_encode(sd=pipeline.xl_refiner, texts=t['negative'],
+                                              pool_top_k=negative_top_k)
+
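+        # Post-process the encoded conditioning: apply prompt strengths, then Revision (CLIP Vision) to the base positive.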
+        for i, t in enumerate(tasks):
+            progressbar(13, f'Applying prompt strengths #{i + 1} ...')
+            t['c'][0], t['c'][1] = pipeline.apply_prompt_strength(t['c'][0], t['c'][1], positive_prompt_strength)
+            t['uc'][0], t['uc'][1] = pipeline.apply_prompt_strength(t['uc'][0], t['uc'][1], negative_prompt_strength)
+
+        for i, t in enumerate(tasks):
+            progressbar(13, f'Applying Revision #{i + 1} ...')
+            t['c'][0] = pipeline.apply_revision(t['c'][0], revision_mode, revision_strengths, clip_vision_outputs)


         if performance == 'Speed':
@@ -203,6 +241,7 @@ def handler(task):
             resolution = default_settings['resolution']
         width, height = string_to_dimensions(resolution)

+        pipeline.clear_all_caches()  # save memory

         results = []
         metadata_strings = []
@@ -241,8 +280,8 @@ def callback(step, x0, x, total_steps, y):

             execution_start_time = time.perf_counter()
             try:
-                imgs = pipeline.process_diffusion(task['p_cond'], task['n_cond'], steps, switch, width, height, task['seed'], sampler_name, scheduler,
-                                                  cfg, img2img_mode, input_image, start_step, denoise, revision_mode, clip_vision_outputs, revision_strengths,
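+                # Conditioning now travels as [base, refiner] pairs; prompt strengths and Revision were applied earlier.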
+                imgs = pipeline.process_diffusion(task['c'], task['uc'], steps, switch, width, height, task['task_seed'],
+                                                  sampler_name, scheduler, cfg, img2img_mode, input_image, start_step, denoise,
                                                   control_lora_canny, canny_edge_low, canny_edge_high, canny_start, canny_stop, canny_strength,
                                                   control_lora_depth, depth_start, depth_stop, depth_strength, callback=callback)
             except InterruptProcessingException as iex:
@@ -253,8 +292,8 @@ def callback(step, x0, x, total_steps, y):
             print(f'Prompt executed in {execution_time:.2f} seconds')

             metadata = {
-                'prompt': prompt, 'negative_prompt': negative_prompt, 'style': style,
-                'seed': seed, 'width': width, 'height': height, 'p_txt': p_txt, 'n_txt': n_txt,
+                'prompt': raw_prompt, 'negative_prompt': raw_negative_prompt, 'styles': raw_style_selections,
+                'seed': task['task_seed'], 'width': width, 'height': height,
                 'sampler': sampler_name, 'scheduler': scheduler, 'performance': performance,
                 'steps': steps, 'switch': switch, 'sharpness': sharpness, 'cfg': cfg,
                 'base_clip_skip': base_clip_skip, 'refiner_clip_skip': refiner_clip_skip,
@@ -263,7 +302,7 @@ def callback(step, x0, x, total_steps, y):
                 'l4': l4, 'w4': w4, 'l5': l5, 'w5': w5, 'img2img': img2img_mode, 'revision': revision_mode,
                 'positive_prompt_strength': positive_prompt_strength, 'negative_prompt_strength': negative_prompt_strength,
                 'control_lora_canny': control_lora_canny, 'control_lora_depth': control_lora_depth,
-                'prompt_expansion': prompt_expansion
+                'prompt_expansion': task['expansion']
             }
             if img2img_mode:
                 metadata |= {
@@ -291,13 +330,11 @@ def callback(step, x0, x, total_steps, y):

             for x in imgs:
                 d = [
-                    ('Prompt', task['prompt']),
-                    ('Negative Prompt', task['negative_prompt']),
-                    ('Real Positive Prompt', task['real_positive_prompt']),
-                    ('Real Negative Prompt', task['real_negative_prompt']),
-                    ('Prompt Expansion', str(prompt_expansion)),
-                    ('Style', style),
-                    ('Seed', task['seed']),
+                    ('Prompt', raw_prompt),
+                    ('Negative Prompt', raw_negative_prompt),
+                    ('Fooocus V2 (Prompt Expansion)', task['expansion']),
+                    ('Styles', str(raw_style_selections)),
+                    ('Seed', task['task_seed']),
                     ('Resolution', get_resolution_string(width, height)),
                     ('Performance', (performance, steps, switch)),
                     ('Sampler & Scheduler', (sampler_name, scheduler)),
@@ -318,7 +355,7 @@ def callback(step, x0, x, total_steps, y):
                     d.append((f'LoRA [{n}] weight', w))
                 d.append(('Software', fooocus_version.full_version))
                 d.append(('Execution Time', f'{execution_time:.2f} seconds'))
-                log(x, d, metadata_string, save_metadata_json, save_metadata_image, keep_input_names, input_image_filename, output_format)
+                log(x, d, 3, metadata_string, save_metadata_json, save_metadata_image, keep_input_names, input_image_filename, output_format)

             results += imgs
