File size: 5,325 Bytes
5fe3c1e
bd00799
 
 
 
 
5fe3c1e
 
bd00799
 
5fe3c1e
bd00799
 
 
5fe3c1e
 
bd00799
 
5fe3c1e
bd00799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fe3c1e
 
bd00799
5fe3c1e
 
bd00799
 
5fe3c1e
 
bd00799
5fe3c1e
 
bd00799
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fe3c1e
 
 
 
 
 
 
 
 
 
bd00799
5fe3c1e
 
 
bd00799
 
5fe3c1e
 
 
bd00799
5fe3c1e
 
 
 
 
bd00799
5fe3c1e
 
 
bd00799
5fe3c1e
bd00799
5fe3c1e
 
bd00799
5fe3c1e
 
 
bd00799
5fe3c1e
 
bd00799
5fe3c1e
 
bd00799
5fe3c1e
 
bd00799
5fe3c1e
bd00799
5fe3c1e
bd00799
5fe3c1e
bd00799
5fe3c1e
 
bd00799
5fe3c1e
bd00799
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154

import pandas as _pd
from skimage import io as _io
import matplotlib.pyplot as _plt
from pathlib import Path as _Path
from PIL import Image as _Image

# Silence all warnings globally so dataset browsing stays quiet.
import warnings as _warnings
_warnings.filterwarnings("ignore")

_plt.ion()   # interactive mode: figures render immediately without plt.show()

class MORRIS():
    """Image dataset backed by a CSV index and a directory of JPEGs.

    Expects ``morris.csv`` and a ``jpgs/`` folder under *root*; each CSV
    row must provide at least ``name``, ``year`` and ``filename`` columns
    (``filename`` is resolved inside ``jpgs/``) -- inferred from the column
    accesses below, TODO confirm against the actual CSV.
    """

    _storage_csv = 'morris.csv'  # name of the index CSV inside the storage root
    _storage_jpg = 'jpgs'        # sub-directory containing the image files

    def __init__(self, root=_Path(__file__).parent, transform=False):
        """Load the index CSV found under *root*.

        root      -- dataset directory (defaults to the directory of this file)
        transform -- optional callable applied to each image; falsy means none
        """
        self.storage = _Path(root)
        self.transform = transform
        self.index = _pd.read_csv(self.storage / self._storage_csv)

    def torch(self):
        """Return a torch-flavoured view of this dataset.

        torch/torchvision are imported lazily so the plain class stays
        usable without them installed.
        """
        import torch
        from torchvision import transforms

        class MORRISTORCH(torch.utils.data.Dataset, MORRIS):
            """torch Dataset adapter: items are [image_tensor, name, year]."""

            def __init__(self, root=_Path(__file__).parent, transform=False):
                super().__init__(root, transform)

            def show(self, idx):
                """Display an item by integer index, by name, or show a raw CHW tensor."""
                name = None
                if isinstance(idx, str):
                    if idx in self.index.name.values:
                        # resolve the name to its positional index
                        idx = self.index.index[self.index.name == idx][0]
                        idx = int(idx)
                        name = self.index.name[idx]
                        print(f"found item {idx} by name {name}")
                    else:
                        raise ValueError('item name not found')
                if isinstance(idx, int):
                    name = self.index.name[idx]
                    _plt.title(name)
                    _plt.imshow(transforms.ToPILImage()(self.__getitem__(idx)[0]))
                else:
                    # not an index at all: assume idx is already an image tensor
                    _plt.imshow(transforms.ToPILImage()(idx))

            def __getitem__(self, idx):
                """Return [CHW tensor (optionally transformed), name, year]."""
                item = self.index.iloc[idx].to_dict()
                image = _io.imread(self.storage / self._storage_jpg / self.index.iloc[idx].filename)
                image = torch.tensor(image).permute(2, 0, 1)  # HWC -> CHW
                if self.transform:
                    image = self.transform(image)

                item = [image, item['name'], item['year']]
                return item

        return MORRISTORCH(str(self.storage), self.transform)

    def __len__(self):
        """Number of rows in the index (= number of images)."""
        return len(self.index)

    def __getitem__(self, idx):
        """Return CSV row *idx* as a dict with the loaded image under 'image'."""
        item = self.index.iloc[idx].to_dict()
        image = _io.imread(self.storage / self._storage_jpg / self.index.iloc[idx].filename)

        if self.transform:
            image = self.transform(image)

        item['image'] = image
        return item

    def show(self, idx):
        """Display an item by integer index, by name, or show a raw array/tensor."""
        if isinstance(idx, str):
            if idx in self.index.name.values:
                # resolve the name to its positional index
                idx = self.index.index[self.index.name == idx][0]
                idx = int(idx)
                name = self.index.name[idx]
                print(f"found item {idx} by name {name}")
            else:
                raise ValueError('item name not found')
        if isinstance(idx, int):
            _item = self.__getitem__(idx)
            image = _item['image']
            name  = _item['name']
            _plt.title(name)
            _plt.imshow(_Image.fromarray(image))
        else:
            try:
                _plt.imshow(_Image.fromarray(idx))
            except AttributeError:
                # torch tensors are CHW; convert to HWC numpy for PIL
                _plt.imshow(_Image.fromarray(idx.permute(1, 2, 0).numpy()))

    def _self_validate(self):
        """Try loading every image; report failures instead of raising.

        Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        still propagate while load errors stay best-effort.
        """
        allgood = True
        for idx in range(len(self)):
            try:
                self[idx]
            except Exception:
                allgood = False
                print(f"couldn't load {self.index.iloc[idx].filename}")
        if allgood:
            print(f"All good. {len(self)} images loadable.")

class Deframe(object):
    """check for uniform color boundaries on edges of input and crop them away"""
    from torch import Tensor

    def __init__(self,aggressive=False,maxPixelFrame=20):
        self.alpha = 0.1 if aggressive else 0.01
        self.maxPixelFrame = maxPixelFrame

    def _map2idx(self,frameMap):
        try:
            return frameMap.tolist().index(False)
        except ValueError:
            return self.maxPixelFrame

    def _Border(self,img: Tensor):
        """ take greyscale Tensor
            return left,right,top,bottom border size identified """
        import torch
        top = left = right = bottom = 0

        # expected image variance
        hvar,wvar = torch.mean(torch.var(img,dim=0)), torch.mean(torch.var(img,dim=1))

        # use image variance and alpha to identify too-uniform frame borders
        top = torch.var(img[:self.maxPixelFrame,:],dim=1) < wvar*(1+self.alpha)
        top = self._map2idx(top)

        bottom = torch.var(img[-self.maxPixelFrame:,:],dim=1) < wvar*(1+self.alpha)
        bottom = self._map2idx(bottom)

        left = torch.var(img[:,:self.maxPixelFrame],dim=0) < hvar*(1+self.alpha)
        left = self._map2idx(left)

        right = torch.var(img[:,-self.maxPixelFrame:],dim=0) < hvar*(1+self.alpha)
        right = self._map2idx(right)

        return (top,bottom,right,left)

    def __call__(self,img: Tensor):
        import torchvision
        top,bottom,right,left = self._Border(torchvision.transforms.Grayscale()(img)[0])

        height = img.shape[1]-(top+bottom)
        width  = img.shape[2]-(left+right)

        print(f"t{top} b{bottom} l{left} r{right}")

        return torchvision.transforms.functional.crop(img,top,left,height,width)