Datasets:
import torch
import torchvision
from torchvision import transforms
from torch import Tensor
import pandas as pd
from skimage import io
import matplotlib.pyplot as plt
from pathlib import Path
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
class MORRIS(torch.utils.data.Dataset):
    """Image dataset indexed by morris.csv; each item is a dict of the CSV row plus the loaded image."""
    _storage_csv = 'morris.csv'
    _storage_jpg = 'jpgs'

    def __init__(self, root=Path(__file__).parent, transform=None):
        self.storage = Path(root)
        self.transform = transform
        self.index = pd.read_csv(self.storage / self._storage_csv)

    def __len__(self):
        return len(self.index)

    def __getitem__(self, idx):
        item = self.index.iloc[idx].to_dict()
        image = io.imread(self.storage / self._storage_jpg / self.index.iloc[idx].filename)
        if self.transform:
            image = self.transform(image)
        item['image'] = image
        return item

    def showitem(self, idx):
        plt.imshow(transforms.ToPILImage()(self.__getitem__(idx)['image']))

    def _self_validate(self):
        """Try loading each image in the dataset."""
        allgood = True
        for idx in range(len(self)):
            try:
                self[idx]
            except Exception:
                allgood = False
                print(f"couldn't load {self.index.iloc[idx].filename}")
        if allgood:
            print(f"All Good! Loaded {len(self)} images.")
class Deframe(object):
    """Check for uniform-color borders on the edges of the input and crop them away."""

    def __init__(self, aggressive=False, maxPixelFrame=20):
        # alpha loosens the variance threshold; a larger value crops more readily
        self.alpha = 0.1 if aggressive else 0.01
        self.maxPixelFrame = maxPixelFrame

    def _map2idx(self, frameMap):
        # index of the first non-uniform row/column, or maxPixelFrame if all are uniform
        try:
            return frameMap.tolist().index(False)
        except ValueError:
            return self.maxPixelFrame

    def _Border(self, img: Tensor):
        """Take a greyscale HxW Tensor;
        return the (top, bottom, right, left) border sizes identified."""
        # expected per-column and per-row image variance
        hvar, wvar = torch.mean(torch.var(img, dim=0)), torch.mean(torch.var(img, dim=1))
        # use image variance and alpha to identify too-uniform frame borders,
        # always scanning from the edge inward (hence the flip for bottom and right)
        top = torch.var(img[:self.maxPixelFrame, :], dim=1) < wvar * (1 + self.alpha)
        top = self._map2idx(top)
        bottom = torch.var(img[-self.maxPixelFrame:, :], dim=1) < wvar * (1 + self.alpha)
        bottom = self._map2idx(bottom.flip(0))
        left = torch.var(img[:, :self.maxPixelFrame], dim=0) < hvar * (1 + self.alpha)
        left = self._map2idx(left)
        right = torch.var(img[:, -self.maxPixelFrame:], dim=0) < hvar * (1 + self.alpha)
        right = self._map2idx(right.flip(0))
        return (top, bottom, right, left)

    def __call__(self, img: Tensor):
        # img is expected to be a CxHxW float tensor (e.g. after ToTensor)
        top, bottom, right, left = self._Border(torchvision.transforms.Grayscale()(img)[0])
        height = img.shape[1] - (top + bottom)
        width = img.shape[2] - (left + right)
        print(f"t{top} b{bottom} l{left} r{right}")
        return torchvision.transforms.functional.crop(img, top, left, height, width)
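A minimal usage sketch (not part of the file above) showing how the two classes are meant to fit together: Deframe is a tensor-to-tensor transform, so it is composed after ToTensor. The batch size and the aggressive flag below are illustrative choices, not taken from the source; since deframed images differ in size, the default collate function only works with batch_size=1 (or a custom collate_fn).

from torch.utils.data import DataLoader
from torchvision import transforms

# Assumes morris.csv and the jpgs/ directory sit next to this module (the defaults above).
dataset = MORRIS(transform=transforms.Compose([
    transforms.ToTensor(),    # HxWxC uint8 ndarray -> CxHxW float tensor
    Deframe(aggressive=True)  # crop away uniform borders, up to 20 px per edge
]))

dataset._self_validate()      # confirm every file listed in morris.csv can be read
dataset.showitem(0)           # display the first (deframed) image

# Images vary in size after deframing, so keep batch_size=1 here.
loader = DataLoader(dataset, batch_size=1, shuffle=True)
for item in loader:
    image = item['image']     # 1xCxHxW tensor; the remaining CSV columns ride along in the dict
    break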