Error in the section "actually do the run"
After adding both an init image and a target image, I get an out-of-memory error. This happens even on a fresh setup, after factory-resetting the runtime and restoring everything.
```
Cleared Accounted PIDs for GPU 00000000:00:04.0.
All done.
Using device: cuda:0
Using seed: 1
Working with z of shape (1, 256, 16, 16) = 65536 dimensions.
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.cache/torch/hub/checkpoints/vgg16-397923af.pth
100% 528M/528M [00:05<00:00, 98.3MB/s]
Downloading vgg_lpips model from https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1 to taming/modules/autoencoder/lpips/vgg.pth
8.19kB [00:00, 488kB/s]
loaded pretrained LPIPS loss from taming/modules/autoencoder/lpips/vgg.pth
VQLPIPSWithDiscriminator running with hinge loss.
Restored from vqgan_imagenet_f16_16384.ckpt
100%|███████████████████████████████████████| 338M/338M [00:05<00:00, 59.1MiB/s]
text_prompts: ['Ape: 1.0', 'Cryptopunk: 0.0']
angle: 0.0 zoom: 1.0 translation_x: 0.0 translation_y: 0.0 iterations_per_frame: 10
0/? [00:02<?, ?it/s]
RuntimeError Traceback (most recent call last)
in ()
231 train(i, save=True, suffix=suffix)
232 else:
--> 233 train(i, save=False, suffix=suffix)
234 j += 1
235 pbar.update()
14 frames
in train(i, save, suffix)
210 def train(i, save=True, suffix=None):
211 opt.zero_grad()
--> 212 lossAll = ascend_txt(i, save=save, suffix=suffix)
213 if i % args.display_freq == 0 and save:
214 checkin(i, lossAll)
in ascend_txt(i, save, suffix)
193 def ascend_txt(i, save=True, suffix=None):
194 out = synth(z)
--> 195 iii = perceptor.encode_image(normalize(make_cutouts(out))).float()
196
197 result = []
/content/CLIP/clip/model.py in encode_image(self, image)
335
336 def encode_image(self, image):
--> 337 return self.visual(image.type(self.dtype))
338
339 def encode_text(self, text):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/content/CLIP/clip/model.py in forward(self, x)
226
227 x = x.permute(1, 0, 2) # NLD -> LND
--> 228 x = self.transformer(x)
229 x = x.permute(1, 0, 2) # LND -> NLD
230
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/content/CLIP/clip/model.py in forward(self, x)
197
198 def forward(self, x: torch.Tensor):
--> 199 return self.resblocks(x)
200
201
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/container.py in forward(self, input)
137 def forward(self, input):
138 for module in self:
--> 139 input = module(input)
140 return input
141
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/content/CLIP/clip/model.py in forward(self, x)
184
185 def forward(self, x: torch.Tensor):
--> 186 x = x + self.attention(self.ln_1(x))
187 x = x + self.mlp(self.ln_2(x))
188 return x
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/content/CLIP/clip/model.py in forward(self, x)
156 def forward(self, x: torch.Tensor):
157 orig_type = x.dtype
--> 158 ret = super().forward(x.type(torch.float32))
159 return ret.type(orig_type)
160
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/normalization.py in forward(self, input)
172 def forward(self, input: Tensor) -> Tensor:
173 return F.layer_norm(
--> 174 input, self.normalized_shape, self.weight, self.bias, self.eps)
175
176 def extra_repr(self) -> str:
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in layer_norm(input, normalized_shape, weight, bias, eps)
2344 layer_norm, (input,), input, normalized_shape, weight=weight, bias=bias, eps=eps
2345 )
-> 2346 return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
2347
2348
RuntimeError: CUDA out of memory. Tried to allocate 20.00 MiB (GPU 0; 11.17 GiB total capacity; 10.55 GiB already allocated; 9.81 MiB free; 10.68 GiB reserved in total by PyTorch)
```
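The failed allocation is only 20 MiB; PyTorch has already reserved ~10.7 GiB of the K80's 11.17 GiB, so the card is essentially full by the time CLIP's image encoder runs. As a quick check from inside the notebook, this minimal sketch (standard `torch.cuda` calls only) reports what the allocator is holding and releases its cache before a retry:

```python
import torch

device = torch.device("cuda:0")

# What the allocator is holding versus the card's total capacity.
total = torch.cuda.get_device_properties(device).total_memory
allocated = torch.cuda.memory_allocated(device)  # tensors currently alive
reserved = torch.cuda.memory_reserved(device)    # cached blocks held by PyTorch
print(f"total:     {total / 2**30:.2f} GiB")
print(f"allocated: {allocated / 2**30:.2f} GiB")
print(f"reserved:  {reserved / 2**30:.2f} GiB")

# Hand cached (but unused) blocks back to the driver before retrying.
torch.cuda.empty_cache()
```

If `allocated` stays near 10.5 GiB after `empty_cache()`, live Python variables still reference those tensors and only a runtime restart will free them.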
The GPU log:
```
Fri Sep 17 20:04:09 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 470.63.01 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |
| N/A 31C P8 29W / 149W | 0MiB / 11441MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
```
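Note that this nvidia-smi snapshot was taken while the GPU was idle (0 MiB used, no processes), so nothing stale is occupying the card; the memory is consumed by the run itself once the init and target images are added. One plausible culprit in VQGAN+CLIP notebooks is encoding the static images with gradients enabled, which keeps the whole CLIP graph alive. The sketch below is a guess at the fix, reusing `perceptor`, `normalize`, and `make_cutouts` from the traceback; `target_image_tensor` is a hypothetical name for the loaded target image:

```python
import torch

# Encode the fixed target image once, with no gradient graph attached.
# `perceptor`, `normalize`, and `make_cutouts` appear in the traceback above;
# `target_image_tensor` is a hypothetical placeholder for the loaded image.
with torch.no_grad():
    target_embed = perceptor.encode_image(
        normalize(make_cutouts(target_image_tensor))
    ).float()
```

If that doesn't apply, reducing the number of cutouts per step (often a `cutn` setting in these notebooks, an assumption here) shrinks the batch that goes through CLIP's image encoder and is usually the cheapest memory lever.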