Commit e9fd8e8
Merge pull request #373 from mrhan1993/dev
Dev
2 parents ca56962 + 2110422 commit e9fd8e8

21 files changed: +454 -125 lines changed

fooocus_api_version.py (+1 -1)

@@ -1 +1 @@
-version = '0.4.1.0'
+version = '0.4.1.1'

fooocusapi/worker.py (+27 -7)

@@ -103,7 +103,8 @@ def process_generate(async_task: QueueTask):
     remove_empty_str, HWC3, resize_image,
     get_image_shape_ceil, set_image_shape_ceil,
     get_shape_ceil, resample_image, erode_or_dilate,
-    get_enabled_loras, parse_lora_references_from_prompt, apply_wildcards
+    get_enabled_loras, parse_lora_references_from_prompt, apply_wildcards,
+    remove_performance_lora
 )

 from modules.upscaler import perform_upscale

@@ -540,8 +541,10 @@ def yield_result(_, images, tasks, extension='png',
         extra_negative_prompts = negative_prompts[1:] if len(negative_prompts) > 1 else []

         progressbar(async_task, 3, 'Loading models ...')
-        loras, prompt = parse_lora_references_from_prompt(prompt, loras, config.default_max_lora_number)
+        lora_filenames = remove_performance_lora(config.lora_filenames, performance_selection)
+        loras, prompt = parse_lora_references_from_prompt(prompt, loras, config.default_max_lora_number, lora_filenames=lora_filenames)
         loras += performance_loras
+
         pipeline.refresh_everything(
             refiner_model_name=refiner_model_name,
             base_model_name=base_model_name,
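Note on the change above: the new lora_filenames keyword lets the prompt parser ignore performance LoRAs, which are appended separately via performance_loras. For illustration only, a minimal standalone parser for the <lora:name:weight> prompt syntax that parse_lora_references_from_prompt handles; the regex and helper below are a hypothetical sketch, not the repo's implementation, which also deduplicates and validates names against lora_filenames:

import re

# Hypothetical minimal parser for `<lora:name:weight>` prompt tags;
# illustrative sketch only, not the repo's code.
LORA_TAG = re.compile(r"<lora:([^:>]+):([0-9.+-]+)>")

def extract_loras(prompt, limit=5):
    loras = [(name, float(weight)) for name, weight in LORA_TAG.findall(prompt)][:limit]
    cleaned = " ".join(LORA_TAG.sub("", prompt).split())
    return loras, cleaned

loras, cleaned = extract_loras("a cat <lora:detail_slider:0.8> in a hat")
assert loras == [("detail_slider", 0.8)]
assert cleaned == "a cat in a hat"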
@@ -926,16 +929,33 @@ def yield_result(_, images, tasks, extension='png',

         if scheduler_name in ['lcm', 'tcd']:
             final_scheduler_name = 'sgm_uniform'
-            if pipeline.final_unet is not None:
-                pipeline.final_unet = core.opModelSamplingDiscrete.patch(
+
+            def patch_discrete(unet):
+                return core.opModelSamplingDiscrete.patch(
                     pipeline.final_unet,
                     sampling=scheduler_name,
                     zsnr=False)[0]
+
+            if pipeline.final_unet is not None:
+                pipeline.final_unet = patch_discrete(pipeline.final_unet)
             if pipeline.final_refiner_unet is not None:
-                pipeline.final_refiner_unet = core.opModelSamplingDiscrete.patch(
-                    pipeline.final_refiner_unet,
+                pipeline.final_refiner_unet = patch_discrete(pipeline.final_refiner_unet)
+            logger.std_info(f'[Fooocus] Using {scheduler_name} scheduler.')
+        elif scheduler_name == 'edm_playground_v2.5':
+            final_scheduler_name = 'karras'
+
+            def patch_edm(unet):
+                return core.opModelSamplingContinuousEDM.patch(
+                    unet,
                     sampling=scheduler_name,
-                    zsnr=False)[0]
+                    sigma_max=120.0,
+                    sigma_min=0.002)[0]
+
+            if pipeline.final_unet is not None:
+                pipeline.final_unet = patch_edm(pipeline.final_unet)
+            if pipeline.final_refiner_unet is not None:
+                pipeline.final_refiner_unet = patch_edm(pipeline.final_refiner_unet)
+
             logger.std_info(f'[Fooocus] Using {scheduler_name} scheduler.')

         outputs.append(['preview', (13, 'Moving model to GPU ...', None)])
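Summary of the branch logic above: 'lcm'/'tcd' keep an sgm_uniform schedule and patch discrete model sampling, while the new 'edm_playground_v2.5' scheduler maps to 'karras' and patches continuous-EDM sampling with sigma_max=120.0 and sigma_min=0.002. A standalone sketch of that mapping (names taken from the diff; the table itself is illustrative, not repo code):

# scheduler name -> (final scheduler, patch op applied to the UNets)
SCHEDULER_PATCHES = {
    "lcm": ("sgm_uniform", "opModelSamplingDiscrete"),
    "tcd": ("sgm_uniform", "opModelSamplingDiscrete"),
    "edm_playground_v2.5": ("karras", "opModelSamplingContinuousEDM"),
}
final_scheduler_name, patch_op = SCHEDULER_PATCHES["edm_playground_v2.5"]
assert (final_scheduler_name, patch_op) == ("karras", "opModelSamplingContinuousEDM")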

repositories/Fooocus/args_manager.py (-3)

@@ -1,7 +1,4 @@
 import ldm_patched.modules.args_parser as args_parser
-import os
-
-from tempfile import gettempdir

 args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")

repositories/Fooocus/fooocus_version.py (+1 -1)

@@ -1 +1 @@
-version = '2.4.1'
+version = '2.4.3'

repositories/Fooocus/ldm_patched/contrib/external_custom_sampler.py (+1 -2)

@@ -107,8 +107,7 @@ def INPUT_TYPES(s):
     def get_sigmas(self, model, steps, denoise):
         start_step = 10 - int(10 * denoise)
         timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
-        ldm_patched.modules.model_management.load_models_gpu([model])
-        sigmas = model.model.model_sampling.sigma(timesteps)
+        sigmas = model.model_sampling.sigma(timesteps)
         sigmas = torch.cat([sigmas, sigmas.new_zeros([1])])
         return (sigmas, )

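For reference, the timestep selection in get_sigmas (unchanged by this commit, which only drops the GPU preload and shortens the model_sampling lookup) can be checked in isolation; with denoise = 0.5 and steps = 5 it keeps the last five of the ten discrete timesteps:

import torch

denoise, steps = 0.5, 5
start_step = 10 - int(10 * denoise)
timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps]
assert timesteps.tolist() == [499, 399, 299, 199, 99]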

repositories/Fooocus/ldm_patched/contrib/external_model_advanced.py (+10 -2)

@@ -108,7 +108,7 @@ class ModelSamplingContinuousEDM:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "model": ("MODEL",),
-                              "sampling": (["v_prediction", "eps"],),
+                              "sampling": (["v_prediction", "edm_playground_v2.5", "eps"],),
                              "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                              "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
                              }}

@@ -121,17 +121,25 @@ def INPUT_TYPES(s):
     def patch(self, model, sampling, sigma_max, sigma_min):
         m = model.clone()

+        latent_format = None
+        sigma_data = 1.0
         if sampling == "eps":
             sampling_type = ldm_patched.modules.model_sampling.EPS
         elif sampling == "v_prediction":
             sampling_type = ldm_patched.modules.model_sampling.V_PREDICTION
+        elif sampling == "edm_playground_v2.5":
+            sampling_type = ldm_patched.modules.model_sampling.EDM
+            sigma_data = 0.5
+            latent_format = ldm_patched.modules.latent_formats.SDXL_Playground_2_5()

         class ModelSamplingAdvanced(ldm_patched.modules.model_sampling.ModelSamplingContinuousEDM, sampling_type):
             pass

         model_sampling = ModelSamplingAdvanced(model.model.model_config)
-        model_sampling.set_sigma_range(sigma_min, sigma_max)
+        model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
         m.add_object_patch("model_sampling", model_sampling)
+        if latent_format is not None:
+            m.add_object_patch("latent_format", latent_format)
         return (m, )

 class RescaleCFG:
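The new branch ties three pieces together for Playground v2.5: the EDM denoiser, sigma_data = 0.5, and the SDXL_Playground_2_5 latent format, the latter two added elsewhere in this commit. A standalone sketch of the dispatch (values from the diff; the table is illustrative, not repo code):

# sampling option -> (model_sampling class, sigma_data, latent_format patch)
SAMPLING_DISPATCH = {
    "eps": ("EPS", 1.0, None),
    "v_prediction": ("V_PREDICTION", 1.0, None),
    "edm_playground_v2.5": ("EDM", 0.5, "SDXL_Playground_2_5"),
}
sampling_type, sigma_data, latent_format = SAMPLING_DISPATCH["edm_playground_v2.5"]
assert sigma_data == 0.5 and latent_format == "SDXL_Playground_2_5"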

repositories/Fooocus/ldm_patched/k_diffusion/sampling.py (+2)

@@ -832,5 +832,7 @@ def sample_tcd(model, x, sigmas, extra_args=None, callback=None, disable=None, n
         if eta > 0 and sigmas[i + 1] > 0:
             noise = noise_sampler(sigmas[i], sigmas[i + 1])
             x = x / alpha_prod_s[i+1].sqrt() + noise * (sigmas[i+1]**2 + 1 - 1/alpha_prod_s[i+1]).sqrt()
+        else:
+            x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2)

     return x
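The added else branch rescales x by sqrt(1 + sigma^2) on steps where no fresh noise is drawn (eta == 0 or the final step); this is the same factor the new EPS.noise_scaling helper in model_sampling.py below applies when max_denoise is set. A quick numeric check of the factor, assuming unit-variance noise:

import torch

torch.manual_seed(0)
sigma = 3.0
noise = torch.randn(1_000_000)
scaled = noise * torch.sqrt(torch.tensor(1.0 + sigma ** 2))
# Empirical std should approach sqrt(1 + sigma^2) ~= 3.1623
assert abs(scaled.std().item() - (1.0 + sigma ** 2) ** 0.5) < 0.02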

repositories/Fooocus/ldm_patched/modules/latent_formats.py (+65)

@@ -1,3 +1,4 @@
+import torch

 class LatentFormat:
     scale_factor = 1.0

@@ -34,6 +35,70 @@ def __init__(self):
         ]
         self.taesd_decoder_name = "taesdxl_decoder"

+class SDXL_Playground_2_5(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 0.5
+        self.latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
+        self.latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
+
+        self.latent_rgb_factors = [
+            #   R        G        B
+            [ 0.3920,  0.4054,  0.4549],
+            [-0.2634, -0.0196,  0.0653],
+            [ 0.0568,  0.1687, -0.0755],
+            [-0.3112, -0.2359, -0.2076]
+        ]
+        self.taesd_decoder_name = "taesdxl_decoder"
+
+    def process_in(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return (latent - latents_mean) * self.scale_factor / latents_std
+
+    def process_out(self, latent):
+        latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+        latents_std = self.latents_std.to(latent.device, latent.dtype)
+        return latent * latents_std / self.scale_factor + latents_mean
+
+
 class SD_X4(LatentFormat):
     def __init__(self):
         self.scale_factor = 0.08333
+        self.latent_rgb_factors = [
+            [-0.2340, -0.3863, -0.3257],
+            [ 0.0994,  0.0885, -0.0908],
+            [-0.2833, -0.2349, -0.3741],
+            [ 0.2523, -0.0055, -0.1651]
+        ]
+
+class SC_Prior(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0
+        self.latent_rgb_factors = [
+            [-0.0326, -0.0204, -0.0127],
+            [-0.1592, -0.0427,  0.0216],
+            [ 0.0873,  0.0638, -0.0020],
+            [-0.0602,  0.0442,  0.1304],
+            [ 0.0800, -0.0313, -0.1796],
+            [-0.0810, -0.0638, -0.1581],
+            [ 0.1791,  0.1180,  0.0967],
+            [ 0.0740,  0.1416,  0.0432],
+            [-0.1745, -0.1888, -0.1373],
+            [ 0.2412,  0.1577,  0.0928],
+            [ 0.1908,  0.0998,  0.0682],
+            [ 0.0209,  0.0365, -0.0092],
+            [ 0.0448, -0.0650, -0.1728],
+            [-0.1658, -0.1045, -0.1308],
+            [ 0.0542,  0.1545,  0.1325],
+            [-0.0352, -0.1672, -0.2541]
+        ]
+
+class SC_B(LatentFormat):
+    def __init__(self):
+        self.scale_factor = 1.0 / 0.43
+        self.latent_rgb_factors = [
+            [ 0.1121,  0.2006,  0.1023],
+            [-0.2093, -0.0222, -0.0195],
+            [-0.3087, -0.1535,  0.0366],
+            [ 0.0290, -0.1574, -0.4078]
+        ]
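SDXL_Playground_2_5 replaces the base class's plain scale_factor multiply with a per-channel affine map in process_in/process_out. A runnable round-trip check built from the constants in the diff, confirming process_out inverts process_in:

import torch

scale_factor = 0.5
latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)

latent = torch.randn(1, 4, 128, 128)
normalized = (latent - latents_mean) * scale_factor / latents_std   # process_in
restored = normalized * latents_std / scale_factor + latents_mean   # process_out
assert torch.allclose(restored, latent, atol=1e-5)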

repositories/Fooocus/ldm_patched/modules/model_sampling.py (+82 -9)

@@ -1,7 +1,7 @@
 import torch
-import numpy as np
 from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
 import math
+import numpy as np

 class EPS:
     def calculate_input(self, sigma, noise):

@@ -12,12 +12,28 @@ def calculate_denoised(self, sigma, model_output, model_input):
         sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
         return model_input - model_output * sigma

+    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
+        if max_denoise:
+            noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
+        else:
+            noise = noise * sigma
+
+        noise += latent_image
+        return noise
+
+    def inverse_noise_scaling(self, sigma, latent):
+        return latent

 class V_PREDICTION(EPS):
     def calculate_denoised(self, sigma, model_output, model_input):
         sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
         return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

+class EDM(V_PREDICTION):
+    def calculate_denoised(self, sigma, model_output, model_input):
+        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
+        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
+

 class ModelSamplingDiscrete(torch.nn.Module):
     def __init__(self, model_config=None):
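The new EDM class differs from V_PREDICTION only in the sign of the model-output term: it is the Karras-style preconditioning D(x; sigma) = c_skip * x + c_out * F(x), with c_skip = sigma_data^2 / (sigma^2 + sigma_data^2) and c_out = sigma * sigma_data / sqrt(sigma^2 + sigma_data^2). A runnable restatement of that relationship (F stands in for the network output; shapes are arbitrary):

import torch

sigma_data = 0.5                      # Playground v2.5 value from this commit
sigma = torch.tensor(2.0)
x = torch.randn(1, 4, 8, 8)           # model_input
F = torch.randn_like(x)               # model_output stand-in

c_skip = sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2)
c_out = sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5
edm = x * c_skip + F * c_out          # EDM.calculate_denoised
v_pred = x * c_skip - F * c_out       # V_PREDICTION.calculate_denoised
assert torch.allclose(edm - v_pred, 2 * c_out * F, atol=1e-6)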
@@ -42,21 +58,25 @@ def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps
         else:
             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
         alphas = 1. - betas
-        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
-        # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+        alphas_cumprod = torch.cumprod(alphas, dim=0)

         timesteps, = betas.shape
         self.num_timesteps = int(timesteps)
         self.linear_start = linear_start
         self.linear_end = linear_end

+        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
+        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
+        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
+
         sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
+        alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
         self.set_sigmas(sigmas)
         self.set_alphas_cumprod(alphas_cumprod.float())

     def set_sigmas(self, sigmas):
-        self.register_buffer('sigmas', sigmas)
-        self.register_buffer('log_sigmas', sigmas.log())
+        self.register_buffer('sigmas', sigmas.float())
+        self.register_buffer('log_sigmas', sigmas.log().float())

     def set_alphas_cumprod(self, alphas_cumprod):
         self.register_buffer("alphas_cumprod", alphas_cumprod.float())
@@ -94,18 +114,18 @@ def percent_to_sigma(self, percent):
 class ModelSamplingContinuousEDM(torch.nn.Module):
     def __init__(self, model_config=None):
         super().__init__()
-        self.sigma_data = 1.0
-
         if model_config is not None:
             sampling_settings = model_config.sampling_settings
         else:
             sampling_settings = {}

         sigma_min = sampling_settings.get("sigma_min", 0.002)
         sigma_max = sampling_settings.get("sigma_max", 120.0)
-        self.set_sigma_range(sigma_min, sigma_max)
+        sigma_data = sampling_settings.get("sigma_data", 1.0)
+        self.set_parameters(sigma_min, sigma_max, sigma_data)

-    def set_sigma_range(self, sigma_min, sigma_max):
+    def set_parameters(self, sigma_min, sigma_max, sigma_data):
+        self.sigma_data = sigma_data
         sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()

         self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers
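set_parameters keeps the old set_sigma_range behavior (a log-uniform table of 1000 sigmas) and additionally stores sigma_data. A runnable check that the table is geometrically spaced between the defaults shown in the diff:

import math
import torch

sigma_min, sigma_max = 0.002, 120.0
sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()
assert math.isclose(sigmas[0].item(), sigma_min, rel_tol=1e-5)
assert math.isclose(sigmas[-1].item(), sigma_max, rel_tol=1e-5)
ratios = sigmas[1:] / sigmas[:-1]
assert torch.allclose(ratios, ratios[0].expand_as(ratios), rtol=1e-4)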
@@ -134,3 +154,56 @@ def percent_to_sigma(self, percent):

         log_sigma_min = math.log(self.sigma_min)
         return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
+
+class StableCascadeSampling(ModelSamplingDiscrete):
+    def __init__(self, model_config=None):
+        super().__init__()
+
+        if model_config is not None:
+            sampling_settings = model_config.sampling_settings
+        else:
+            sampling_settings = {}
+
+        self.set_parameters(sampling_settings.get("shift", 1.0))
+
+    def set_parameters(self, shift=1.0, cosine_s=8e-3):
+        self.shift = shift
+        self.cosine_s = torch.tensor(cosine_s)
+        self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2
+
+        #This part is just for compatibility with some schedulers in the codebase
+        self.num_timesteps = 10000
+        sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
+        for x in range(self.num_timesteps):
+            t = (x + 1) / self.num_timesteps
+            sigmas[x] = self.sigma(t)
+
+        self.set_sigmas(sigmas)
+
+    def sigma(self, timestep):
+        alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)
+
+        if self.shift != 1.0:
+            var = alpha_cumprod
+            logSNR = (var/(1-var)).log()
+            logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
+            alpha_cumprod = logSNR.sigmoid()
+
+        alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
+        return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5
+
+    def timestep(self, sigma):
+        var = 1 / ((sigma * sigma) + 1)
+        var = var.clamp(0, 1.0)
+        s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
+        t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
+        return t
+
+    def percent_to_sigma(self, percent):
+        if percent <= 0.0:
+            return 999999999.9
+        if percent >= 1.0:
+            return 0.0
+
+        percent = 1.0 - percent
+        return self.sigma(torch.tensor(percent))
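When shift == 1.0, timestep() is the analytic inverse of sigma(): var = 1/(sigma^2 + 1) recovers alpha_cumprod, sqrt(var * _init_alpha_cumprod) recovers the cosine term, and acos unwinds it back to t. A runnable round-trip check using the same formulas (constants copied from set_parameters above):

import torch

cosine_s = torch.tensor(8e-3)
init_alpha_cumprod = torch.cos(cosine_s / (1 + cosine_s) * torch.pi * 0.5) ** 2

def sigma(t):
    a = torch.cos((t + cosine_s) / (1 + cosine_s) * torch.pi * 0.5) ** 2 / init_alpha_cumprod
    a = a.clamp(0.0001, 0.9999)
    return ((1 - a) / a) ** 0.5

def timestep(s):
    var = (1 / (s * s + 1)).clamp(0, 1.0)
    return (((var * init_alpha_cumprod) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + cosine_s) - cosine_s

t = torch.tensor(0.5)
assert torch.allclose(timestep(sigma(t)), t, atol=1e-4)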

repositories/Fooocus/ldm_patched/modules/samplers.py (+1 -1)

@@ -523,7 +523,7 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N

 KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
                   "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd"]
+                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "tcd", "edm_playground_v2.5"]

 class KSAMPLER(Sampler):
     def __init__(self, sampler_function, extra_options={}, inpaint_options={}):
