Hello! I'm not sure whether this is a ZLUDA issue (it probably is), but FreeU doesn't seem to work at all with it. Whenever I try to generate an image with FreeU enabled, it fails with the following error:
*** Error completing request
*** Arguments: ('task(4ctd2qw8925ax5d)', <gradio.routes.Request object at 0x0000020FAF26F460>, "a test to showcase that freeU doesn't work", '', [], 20, 'Euler a', 1, 1, 7, 1152, 768, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, 'MultiDiffusion', False, True, 1024, 1024, 96, 96, 48, 4, 'None', 2, False, 10, 1, 1, 64, False, False, False, False, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 0.4, 0.4, 0.2, 0.2, '', '', 'Background', 0.2, -1.0, False, 'DemoFusion', True, 128, 64, 4, 2, False, 10, 1, 1, 64, False, True, 3, 1, 1, False, 3072, 192, True, True, True, False, True, 0, 1, 0, 'Version 2', 1.2, 0.9, 0, 0.5, 0, 1, 1.4, 0.2, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
Traceback (most recent call last):
File "F:\SD-Zluda\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "F:\SD-Zluda\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "F:\SD-Zluda\modules\txt2img.py", line 110, in txt2img
processed = processing.process_images(p)
File "F:\SD-Zluda\modules\processing.py", line 787, in process_images
res = process_images_inner(p)
File "F:\SD-Zluda\modules\processing.py", line 1015, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "F:\SD-Zluda\modules\processing.py", line 1351, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "F:\SD-Zluda\modules\sd_samplers_kdiffusion.py", line 239, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\SD-Zluda\modules\sd_samplers_common.py", line 261, in launch_sampling
return func()
File "F:\SD-Zluda\modules\sd_samplers_kdiffusion.py", line 239, in <lambda>
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
File "F:\SD-Zluda\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\SD-Zluda\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "F:\SD-Zluda\modules\sd_samplers_cfg_denoiser.py", line 237, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "F:\SD-Zluda\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "F:\SD-Zluda\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "F:\SD-Zluda\modules\sd_models_xl.py", line 44, in apply_model
return self.model(x, t, cond)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1561, in _call_impl
result = forward_call(*args, **kwargs)
File "F:\SD-Zluda\modules\sd_hijack_utils.py", line 18, in <lambda>
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "F:\SD-Zluda\modules\sd_hijack_utils.py", line 32, in __call__
return self.__orig_func(*args, **kwargs)
File "F:\SD-Zluda\repositories\generative-models\sgm\modules\diffusionmodules\wrappers.py", line 28, in forward
return self.diffusion_model(
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "F:\SD-Zluda\venv\lib\site-packages\torch\nn\modules\module.py", line 1561, in _call_impl
result = forward_call(*args, **kwargs)
File "F:\SD-Zluda\modules\sd_unet.py", line 91, in UNetModel_forward
return original_forward(self, x, timesteps, context, *args, **kwargs)
File "F:\SD-Zluda\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 997, in forward
h = th.cat([h, hs.pop()], dim=1)
File "F:\SD-Zluda\extensions\sd-webui-freeu\lib_free_u\unet.py", line 67, in free_u_cat_hijack
h_skip = filter_skip(
File "F:\SD-Zluda\extensions\sd-webui-freeu\lib_free_u\unet.py", line 99, in filter_skip
x_freq = torch.fft.fftn(x.to(fft_device).float(), dim=(-2, -1))
RuntimeError: cuFFT error: CUFFT_INTERNAL_ERROR
I'm running the DirectML fork of Automatic1111 with ZLUDA installed on top, on an AMD GPU (RX 6800), if that helps. I'm not sure what other details I can provide to help troubleshoot the issue, so feel free to ask :)