# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" 
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
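# A minimal sketch of the two output locations described above; the filenames
# are purely illustrative, and the lines are left commented out:
# with open('/kaggle/working/notes.txt', 'w') as f:
#     f.write('kept as notebook output after "Save & Run All"\n')
# with open('/kaggle/temp/scratch.txt', 'w') as f:
#     f.write('discarded when the session ends\n')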


from fastai.vision.all import *
from fastcore.all import *

import fastai
print(fastai.__version__)

# Raise PIL's maximum image size to 10 billion pixels so very large images
# can be opened without triggering the decompression-bomb check
Image.MAX_IMAGE_PIXELS = 10000000000

# Enable PyTorch's MPS-to-CPU fallback for unsupported ops (only relevant when
# running locally on Apple silicon - slower, but it works)
#os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'

# Use the provided thumbnails for now to work around out-of-memory problems
# with the full-size images
path = Path('/kaggle/input/UBC-OCEAN/train_thumbnails')

# Check how many thumbnail images are available
print(len(get_image_files(path)))

df = pd.read_csv("/kaggle/input/UBC-OCEAN/train.csv")
df.head(10)

def get_x(r):
    # Map a DataFrame row to its thumbnail path; fall back to a placeholder image
    # when no thumbnail exists for this image_id
    filename = f"{r['image_id']}.png"
    if os.path.exists(path/filename):
        return str(path/filename)
    return "/kaggle/input/UBC-OCEAN/test_images/41.png"

def get_y(r): return r['label']
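
# Quick sanity check (a sketch): run the mappers above on the first row of the
# training DataFrame to confirm the resolved path and label look reasonable.
print(get_x(df.iloc[0]), get_y(df.iloc[0]))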

ovarianCancerDataBlock = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_x=get_x,
    get_y=get_y,
    item_tfms=Resize(460),  # presize each image individually on the CPU
    batch_tfms=[*aug_transforms(size=224, min_scale=0.75),
                Normalize.from_stats(*imagenet_stats)],  # augment and normalize per batch on the GPU
)

dls = ovarianCancerDataBlock.dataloaders(df, bs=2, num_workers=4)
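
# Optional sanity checks (a sketch): list the class labels fastai discovered and,
# if running interactively, preview a few augmented samples before training.
print(dls.vocab)
# dls.show_batch(max_n=4)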

from fastai.callback.fp16 import *
# Make the pretrained weights available to fastai by copying them into PyTorch's checkpoint cache.
# See https://forums.fast.ai/t/how-can-i-load-a-pretrained-model-on-kaggle-using-fastai/13941/24?page=2 for more info.
if not os.path.exists('/root/.cache/torch/hub/checkpoints/'):
    os.makedirs('/root/.cache/torch/hub/checkpoints/')
#!cp '/kaggle/input/torchvision-resnet-pretrained/resnet50-0676ba61.pth' '/root/.cache/torch/hub/checkpoints/resnet50-0676ba61.pth'
#!cp '/kaggle/input/torchvision-resnet-pretrained/resnet18-f37072fd.pth' '/root/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth'

learn = vision_learner(dls, resnet18, metrics=[accuracy, error_rate]).to_fp16()
learn.model.cuda()  # force the model onto the GPU
learn.fine_tune(5, freeze_epochs=3)