Sebastiano Maesano committed on
Commit
1c13d92
1 Parent(s): 558fdd9

initial commit

__pycache__/definition.cpython-311.pyc ADDED
Binary file (2.5 kB)
definition.py ADDED
@@ -0,0 +1,21 @@
+ import torch.nn as nn
+
+ class FlowersImagesDetectionModel(nn.Module):
+     def __init__(self, num_classes):
+         super(FlowersImagesDetectionModel, self).__init__()
+         self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
+         self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
+         self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
+         self.fc1 = nn.Linear(128 * 28 * 28, 512)  # Adjust the input size according to your image size after resizing
+         self.fc2 = nn.Linear(512, num_classes)
+         self.relu = nn.ReLU()
+         self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
+
+     def forward(self, x):
+         x = self.pool(self.relu(self.conv1(x)))
+         x = self.pool(self.relu(self.conv2(x)))
+         x = self.pool(self.relu(self.conv3(x)))
+         x = x.view(-1, 128 * 28 * 28)  # Adjust this according to the output size of the convolutional layers
+         x = self.relu(self.fc1(x))
+         x = self.fc2(x)
+         return x
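
For reference, a minimal shape check for the model above, assuming 224x224 RGB inputs (the size train.py resizes to): three stride-2 poolings shrink 224 to 112, 56, and then 28, which is why fc1 expects 128 * 28 * 28 input features.

import torch
from definition import FlowersImagesDetectionModel

model = FlowersImagesDetectionModel(num_classes=102)  # 102 flower classes, as in train.py
dummy = torch.randn(1, 3, 224, 224)                   # fake batch of one RGB image
logits = model(dummy)
print(logits.shape)                                   # expected: torch.Size([1, 102])
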
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2945bcacceb16ec477c7f9f127ddb1131150583f7abba6af412bada17e83dcf6
+ size 206109068
train.py ADDED
@@ -0,0 +1,58 @@
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from definition import FlowersImagesDetectionModel
+ from torch.utils.data import DataLoader
+ from datasets import load_dataset
+ from torchvision.transforms import ToTensor, Resize
+ from torch.utils.data.dataset import TensorDataset
+
+ flowerTypesNumber = 102
+
+ model = FlowersImagesDetectionModel(flowerTypesNumber)
+
+ # Optimizer and loss function
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+ criterion = nn.CrossEntropyLoss()
+
+ # Load the dataset
+ originalDataset = load_dataset("nelorth/oxford-flowers", split="train")
+
+ tensorImages = []
+ tensorLabels = []
+
+ # Convert the images to PyTorch tensors and resize them
+ for imageData, label in zip(originalDataset['image'], originalDataset['label']):
+     tensorImage = ToTensor()(Resize((224, 224))(imageData))  # Resize the images
+     tensorImages.append(tensorImage)
+     tensorLabels.append(label)
+
+ # Stack the lists of tensors into a single tensor
+ imagesTensor = torch.stack(tensorImages)
+ labelsTensor = torch.tensor(tensorLabels)
+
+ # Create a dataset
+ dataset = TensorDataset(imagesTensor, labelsTensor)
+
+ # Create a DataLoader
+ dataLoader = DataLoader(dataset, batch_size=64, shuffle=True)
+
+ # Training
+ model.train()
+ for epoch in range(2):
+     running_loss = 0.0
+
+     for i, (inputs, labels) in enumerate(dataLoader, 0):
+         optimizer.zero_grad()
+         outputs = model(inputs)
+         loss = criterion(outputs, labels)
+         loss.backward()
+         optimizer.step()
+         running_loss += loss.item()
+
+         if i % 100 == 99:
+             print('[%d, %5d] loss: %.3f' %
+                   (epoch + 1, i + 1, running_loss / 100))
+             running_loss = 0.0
+
+ torch.save(model.state_dict(), 'pytorch_model.bin')
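
For reference, a minimal inference sketch that reloads the saved weights and classifies a single image, assuming the same 224x224 preprocessing as train.py; the path flower.jpg is a placeholder.

import torch
from PIL import Image
from torchvision.transforms import ToTensor, Resize
from definition import FlowersImagesDetectionModel

model = FlowersImagesDetectionModel(102)
model.load_state_dict(torch.load('pytorch_model.bin', map_location='cpu'))
model.eval()

image = Image.open('flower.jpg').convert('RGB')          # placeholder image path
x = ToTensor()(Resize((224, 224))(image)).unsqueeze(0)   # same preprocessing as train.py
with torch.no_grad():
    predicted_class = model(x).argmax(dim=1).item()
print(predicted_class)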