# Try this with PyTorch directly
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torchvision import transforms
from torch.utils.data import DataLoader
from PIL import Image as PILImage
from pathlib import Path
import pandas as pd
import os

# Define the path to the train images
trainPath = Path('/kaggle/input/UBC-OCEAN/train_images')

# Define the transform to apply to each image: resize, center-crop to 224x224,
# convert to a tensor, and normalize with the ImageNet mean and std
transform = transforms.Compose([
    transforms.Resize(460),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
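
# Sanity check (a minimal sketch, not part of the original script): confirm the
# transform pipeline yields a 3 x 224 x 224 tensor from a dummy RGB image.
_dummy = PILImage.new('RGB', (600, 400))
assert transform(_dummy).shape == (3, 224, 224)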


# Define a function to load an image from a file and apply the transform
def load_image(filename):
    # Force RGB in case the PNG carries an alpha channel
    image = PILImage.open(filename).convert('RGB')
    image = transform(image)
    return image

# Define a function to load the train dataset
def load_train_dataset():
    # Load the train CSV file
    df = pd.read_csv("/kaggle/input/UBC-OCEAN/train.csv")
    # Map the string subtype labels to integer class indices for CrossEntropyLoss
    class_names = sorted(df['label'].unique())
    label_to_idx = {name: idx for idx, name in enumerate(class_names)}
    # Create a list of tuples containing the image path and integer label
    image_list = [(os.path.join(trainPath, f"{row['image_id']}.png"), label_to_idx[row['label']])
                  for _, row in df.iterrows()]
    # Eagerly decode every image into memory (acceptable for small images)
    dataset = [(load_image(f), l) for f, l in image_list]
    return dataset, class_names

# Load the train dataset and the ordered list of class names
train_dataset, class_names = load_train_dataset()

# Create a dataloader for the train dataset
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
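
# Alternative data pipeline (a hedged sketch, not in the original script): the eager list
# comprehension in load_train_dataset decodes every image up front, which can exhaust RAM
# on larger image sets. A torch.utils.data.Dataset defers decoding to __getitem__, so only
# the current batch of images is held in memory. 'LazyImageDataset' is a name introduced
# here purely for illustration.
from torch.utils.data import Dataset

class LazyImageDataset(Dataset):
    def __init__(self, samples):
        # samples: list of (image_path, integer_label) tuples
        self.samples = samples

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        # Decode and transform the image only when this sample is requested
        return load_image(path), label

# Usage would mirror the eager version (assuming the (path, label) list built inside
# load_train_dataset is made available here):
# train_dataloader = DataLoader(LazyImageDataset(image_list), batch_size=16,
#                               shuffle=True, num_workers=2)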

# Define the model architecture: ImageNet-pretrained ResNet-18 with a new head
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
# Replace the final layer with a classifier over the dataset's label set
model.fc = nn.Linear(num_ftrs, len(class_names))

# Move the model to the GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Train the model
model.train()
for epoch in range(5):
    running_loss = 0.0
    for i, batch in enumerate(train_dataloader):
        # Get the inputs and labels
        inputs, labels = batch
        inputs, labels = inputs.to(device), labels.to(device)

        # Zero the parameter gradients
        optimizer.zero_grad()

        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        # Backward pass and optimization
        loss.backward()
        optimizer.step()

        # Print statistics
        running_loss += loss.item()
        if i % 100 == 99:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0

print('Finished training')

# Save the model
torch.save(model.state_dict(), 'ovarianCancerModel.pth')
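
# Inference smoke test (a hedged sketch, not part of the original script): reload the
# saved weights into a fresh ResNet-18 and classify one training image to confirm the
# checkpoint round-trips correctly.
inference_model = models.resnet18(pretrained=False)
inference_model.fc = nn.Linear(num_ftrs, len(class_names))
inference_model.load_state_dict(torch.load('ovarianCancerModel.pth', map_location=device))
inference_model = inference_model.to(device)
inference_model.eval()

with torch.no_grad():
    sample_image, sample_label = train_dataset[0]
    logits = inference_model(sample_image.unsqueeze(0).to(device))
    predicted = class_names[logits.argmax(dim=1).item()]
    print(f'Predicted: {predicted}, actual: {class_names[sample_label]}')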