First of all, thank you for your great work.
I just tried to reproduce the test example (aeroplane), but I received the following error:
66 image = self.im_transform(image).unsqueeze(0).to(self.device)
---> 67 mask = self.seg_transform((mask>127).astype(np.uint8)*255).unsqueeze(0).to(self.device)
68 if len(mask.shape) < 4:
69 mask = mask.unsqueeze(0)
~/Documents/Programming/VirtualEnvironments/python3_venv/lib/python3.7/site-packages/torchvision/transforms/transforms.py in __call__(self, img)
47 def __call__(self, img):
48 for t in self.transforms:
---> 49 img = t(img)
50 return img
51
~/Documents/Programming/VirtualEnvironments/python3_venv/lib/python3.7/site-packages/torchvision/transforms/transforms.py in __call__(self, pic)
74 Tensor: Converted image.
75 """
---> 76 return F.to_tensor(pic)
77
78 def __repr__(self):
~/Documents/Programming/VirtualEnvironments/python3_venv/lib/python3.7/site-packages/torchvision/transforms/functional.py in to_tensor(pic)
46 if isinstance(pic, np.ndarray):
47 # handle numpy array
---> 48 img = torch.from_numpy(pic.transpose((2, 0, 1)))
49 # backward compatibility
50 if isinstance(img, torch.ByteTensor):
ValueError: axes don't match array
refiner = refine.Refiner(device='cpu')
image = cv2.imread('cascade/aeroplane.jpg')
mask = cv2.imread('cascade/aeroplane.png', cv2.IMREAD_GRAYSCALE)
output = refiner.refine(image, mask, fast=True, L=900)