import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Resize
import cv2

class ImageDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        self.data_dir = data_dir
        self.transform = transform
        self.image_paths = []
        self.labels = []
        self.load_data()

    def load_data(self):
        # class 0: the gray one
        # class 1: the blue one
        # class 2: pedestrian 
        for filename in os.listdir(self.data_dir):
            if filename.startswith('0_') or filename.startswith('1_') or  filename.startswith('2_'):
                label = int(filename[0])
                self.labels.append(label)
                self.image_paths.append(os.path.join(self.data_dir, filename))

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        image = cv2.imread(self.image_paths[idx])
        image = cv2.resize(image, (30, 40))
        image = torch.from_numpy(image).permute(2, 0, 1).float()  # float32 Tensor
        if self.transform:
            image = self.transform(image)
        label = self.labels[idx]
        return image, label

class CNN(nn.Module):
    """Two-conv-layer classifier for 3x40x30 (CHW) images, 3 output classes."""

    def __init__(self):
        super(CNN, self).__init__()
        # Two conv/pool stages: 3x40x30 -> 16x20x15 -> 32x10x7
        self.conv1 = nn.Conv2d(3, 16, 3, 1, 1)   # padding=1 keeps spatial size
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, 1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(32 * 7 * 10, 128)   # 32 channels * 10 * 7 after pooling
        self.fc2 = nn.Linear(128, 3)             # raw logits for 3 classes

    def forward(self, x):
        x = self.pool1(nn.functional.relu(self.conv1(x)))
        x = self.pool2(nn.functional.relu(self.conv2(x)))
        # Flatten each sample separately; unlike x.view(-1, N), torch.flatten
        # keeps the batch dimension intact and raises on a spatial-size
        # mismatch instead of silently remixing samples across the batch.
        x = torch.flatten(x, 1)
        x = nn.functional.relu(self.fc1(x))
        return self.fc2(x)

if __name__ == '__main__':
    # Train the classifier on labeled images in ./train and save the weights.
    data_dir = './train'
    dataset = ImageDataset(data_dir, transform=None)
    train_loader = DataLoader(dataset, batch_size=16, shuffle=True)

    model = CNN()
    criterion = nn.CrossEntropyLoss()                     # expects raw logits
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    num_epochs = 500

    model.train()  # explicit training mode (matters if dropout/BN are added)
    for epoch in range(num_epochs):
        running_loss = 0.0
        for images, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        # BUG FIX: running_loss sums per-batch *mean* losses (CrossEntropyLoss
        # default reduction), so the correct denominator is the number of
        # batches, not the number of samples.
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss / len(train_loader)}')

    torch.save(model.state_dict(), 'cnn_model.pth')