I am running into the following NotImplementedError while trying to run the provided notebooks (calling run_and_display with run_standard_sd=True). What could be the possible reasons for this?
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
Cell In[7], line 5
3 prompts = [prompt]
4 controller = AttentionStore()
----> 5 image = run_and_display(prompts=prompts,
6 controller=controller,
7 indices_to_alter=token_indices,
8 generator=g,
9 run_standard_sd=True,
10 display_output=True)
11 vis_utils.show_cross_attention(attention_store=controller,
12 prompt=prompt,
13 tokenizer=tokenizer,
(...)
16 indices_to_alter=token_indices,
17 orig_image=image)
Cell In[4], line 19, in run_and_display(prompts, controller, indices_to_alter, generator, run_standard_sd, scale_factor, thresholds, max_iter_to_alter, display_output)
5 def run_and_display(prompts: List[str],
6 controller: AttentionStore,
7 indices_to_alter: List[int],
(...)
12 max_iter_to_alter: int = 25,
13 display_output: bool = False):
14 config = RunConfig(prompt=prompts[0],
15 run_standard_sd=run_standard_sd,
16 scale_factor=scale_factor,
17 thresholds=thresholds,
18 max_iter_to_alter=max_iter_to_alter)
---> 19 image = run_on_prompt(model=stable,
20 prompt=prompts,
21 controller=controller,
22 token_indices=indices_to_alter,
23 seed=generator,
24 config=config)
25 if display_output:
26 display(image)
File ~/ptp_sd_exps/Attend-and-Excite/notebooks/../run.py:45, in run_on_prompt(prompt, model, controller, token_indices, seed, config)
42 ptp_utils.register_attention_control(model, controller)
44 print("Inside run_on_prompt function")
---> 45 outputs = model(prompt=prompt,
46 attention_store=controller,
47 indices_to_alter=token_indices,
48 attention_res=config.attention_res,
49 guidance_scale=config.guidance_scale,
50 generator=seed,
51 num_inference_steps=config.n_inference_steps,
52 max_iter_to_alter=config.max_iter_to_alter,
53 run_standard_sd=config.run_standard_sd,
54 thresholds=config.thresholds,
55 scale_factor=config.scale_factor,
56 scale_range=config.scale_range,
57 smooth_attentions=config.smooth_attentions,
58 sigma=config.sigma,
59 kernel_size=config.kernel_size)
60 image = outputs.images[0]
61 return image
File /opt/conda/lib/python3.8/site-packages/torch/autograd/grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
24 @functools.wraps(func)
25 def decorate_context(*args, **kwargs):
26 with self.clone():
---> 27 return func(*args, **kwargs)
File ~/ptp_sd_exps/Attend-and-Excite/notebooks/../pipeline_attend_and_excite.py:207, in AttendAndExcitePipeline.__call__(self, prompt, attention_store, indices_to_alter, attention_res, height, width, num_inference_steps, guidance_scale, eta, generator, latents, output_type, return_dict, max_iter_to_alter, run_standard_sd, thresholds, scale_factor, scale_range, smooth_attentions, sigma, kernel_size, **kwargs)
205 # Forward pass of denoising with text conditioning
206 print("calling unet inside __call__")
--> 207 noise_pred_text = self.unet(latents, t, encoder_hidden_states=text_embeddings[1].unsqueeze(0)).sample
208 print("exiting unet")
209 self.unet.zero_grad()
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.8/site-packages/diffusers/models/unet_2d_condition.py:381, in UNet2DConditionModel.forward(self, sample, timestep, encoder_hidden_states, class_labels, return_dict)
379 for downsample_block in self.down_blocks:
380 if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
--> 381 sample, res_samples = downsample_block(
382 hidden_states=sample,
383 temb=emb,
384 encoder_hidden_states=encoder_hidden_states,
385 )
386 else:
387 sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.8/site-packages/diffusers/models/unet_2d_blocks.py:612, in CrossAttnDownBlock2D.forward(self, hidden_states, temb, encoder_hidden_states)
610 else:
611 hidden_states = resnet(hidden_states, temb)
--> 612 hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
614 output_states += (hidden_states,)
616 if self.downsamplers is not None:
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.8/site-packages/diffusers/models/attention.py:217, in Transformer2DModel.forward(self, hidden_states, encoder_hidden_states, timestep, return_dict)
215 # 2. Blocks
216 for block in self.transformer_blocks:
--> 217 hidden_states = block(hidden_states, context=encoder_hidden_states, timestep=timestep)
219 # 3. Output
220 if self.is_input_continuous:
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.8/site-packages/diffusers/models/attention.py:495, in BasicTransformerBlock.forward(self, hidden_states, context, timestep)
493 hidden_states = self.attn1(norm_hidden_states, context) + hidden_states
494 else:
--> 495 hidden_states = self.attn1(norm_hidden_states) + hidden_states
497 if self.attn2 is not None:
498 # 2. Cross-Attention
499 norm_hidden_states = (
500 self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
501 )
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File ~/ptp_sd_exps/Attend-and-Excite/notebooks/../utils/ptp_utils.py:85, in register_attention_control.<locals>.ca_forward.<locals>.forward(x, context, mask)
83 out = torch.einsum("b i j, b j d -> b i d", attn, v)
84 out = self.reshape_batch_dim_to_heads(out)
---> 85 return self.to_out(out)
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py:246, in _forward_unimplemented(self, *input)
235 def _forward_unimplemented(self, *input: Any) -> None:
236 r"""Defines the computation performed at every call.
237
238 Should be overridden by all subclasses.
(...)
244 registered hooks while the latter silently ignores them.
245 """
--> 246 raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"forward\" function")
NotImplementedError: Module [ModuleList] is missing the required "forward" function
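
For what it's worth, the last two frames suggest the failure happens when the patched attention forward in ptp_utils.py calls self.to_out(out) and to_out turns out to be an nn.ModuleList in the installed diffusers version. Below is a minimal, self-contained sketch that reproduces the same NotImplementedError; the layer sizes and the contents of the ModuleList are just illustrative assumptions on my part, not taken from the actual model:

```python
import torch
import torch.nn as nn

# Calling an nn.ModuleList directly raises NotImplementedError, because
# ModuleList (unlike nn.Sequential) does not define a forward() method.
to_out = nn.ModuleList([nn.Linear(64, 64), nn.Dropout(0.0)])  # illustrative sizes, not the real layer
x = torch.randn(2, 16, 64)

out = to_out(x)  # NotImplementedError: Module [ModuleList] is missing the required "forward" function
```

If it helps, I can also print the actual type of to_out on the attention modules of the loaded pipeline (e.g. by iterating over stable.unet.named_modules()) and report my exact diffusers/torch versions.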