# morris/__init__.py
import torch
import torchvision
from torchvision import transforms
from torch import Tensor
import pandas as pd
from skimage import io
import matplotlib.pyplot as plt
from pathlib import Path
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
plt.ion() # interactive mode


class MORRIS(torch.utils.data.Dataset):
    """Image dataset indexed by a local CSV manifest (morris.csv),
    with the image files stored under jpgs/."""

    _storage_csv = 'morris.csv'
    _storage_jpg = 'jpgs'

    def __init__(self, root=Path(__file__).parent, transform=None):
        self.storage = Path(root)
        self.transform = transform
        self.index = pd.read_csv(self.storage / self._storage_csv)

    def __len__(self):
        return len(self.index)

    def __getitem__(self, idx):
        # Return the CSV row as a dict, with the (optionally transformed) image added under 'image'.
        item = self.index.iloc[idx].to_dict()
        image = io.imread(self.storage / self._storage_jpg / self.index.iloc[idx].filename)
        if self.transform:
            image = self.transform(image)
        item['image'] = image
        return item

    def showitem(self, idx):
        plt.imshow(transforms.ToPILImage()(self.__getitem__(idx)['image']))

    def _self_validate(self):
        """Try loading every image in the dataset and report any failures."""
        allgood = True
        for idx in range(len(self)):
            try:
                self[idx]
            except Exception:
                allgood = False
                print(f"couldn't load {self.index.iloc[idx].filename}")
        if allgood:
            print(f"All Good! Loaded {len(self)} images.")
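
# Example usage for MORRIS (a minimal sketch; assumes morris.csv and jpgs/ sit
# next to this file, as the class defaults above imply):
#
#   dataset = MORRIS()
#   item = dataset[0]        # dict of the CSV row's columns plus an 'image' ndarray
#   dataset.showitem(0)      # quick visual check of a single image
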

class Deframe(object):
    """Check for uniform color boundaries on the edges of the input and crop them away."""

    def __init__(self, aggressive=False, maxPixelFrame=20):
        self.alpha = 0.1 if aggressive else 0.01
        self.maxPixelFrame = maxPixelFrame

    def _map2idx(self, frameMap):
        # Index of the first row/column that is no longer uniform, i.e. where the
        # frame ends; if every row/column looks uniform, report the full maxPixelFrame.
        try:
            return frameMap.tolist().index(False)
        except ValueError:
            return self.maxPixelFrame

    def _Border(self, img: Tensor):
        """Take a greyscale (H, W) Tensor and
        return the (top, bottom, right, left) border sizes identified."""
        # expected image variance along each axis
        hvar, wvar = torch.mean(torch.var(img, dim=0)), torch.mean(torch.var(img, dim=1))
        # use image variance and alpha to identify too-uniform frame borders
        top = torch.var(img[:self.maxPixelFrame, :], dim=1) < wvar * (1 + self.alpha)
        top = self._map2idx(top)
        # for the bottom and right edges, reverse the variance profile so that
        # index 0 is the outermost row/column and the scan runs inward
        bottom = torch.var(img[-self.maxPixelFrame:, :], dim=1).flip(0) < wvar * (1 + self.alpha)
        bottom = self._map2idx(bottom)
        left = torch.var(img[:, :self.maxPixelFrame], dim=0) < hvar * (1 + self.alpha)
        left = self._map2idx(left)
        right = torch.var(img[:, -self.maxPixelFrame:], dim=0).flip(0) < hvar * (1 + self.alpha)
        right = self._map2idx(right)
        return (top, bottom, right, left)

    def __call__(self, img: Tensor):
        top, bottom, right, left = self._Border(torchvision.transforms.Grayscale()(img)[0])
        height = img.shape[1] - (top + bottom)
        width = img.shape[2] - (left + right)
        print(f"t{top} b{bottom} l{left} r{right}")
        return torchvision.transforms.functional.crop(img, top, left, height, width)
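

# A minimal usage sketch, assuming the morris.csv manifest and jpgs/ directory sit
# next to this file (as the MORRIS defaults imply); the `pipeline` and `sample`
# names below are illustrative, not part of the dataset API.
if __name__ == "__main__":
    pipeline = transforms.Compose([
        transforms.ToTensor(),      # HWC uint8 ndarray -> CHW float tensor in [0, 1]
        Deframe(aggressive=False),  # crop away uniform frame borders
    ])
    dataset = MORRIS(transform=pipeline)
    sample = dataset[0]
    print(sample['image'].shape)    # CHW tensor after deframing
    dataset.showitem(0)             # display the deframed image
    # dataset._self_validate()      # optional: confirm every image loads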