from cnn import CNN
from res_net import ResNet18
from vgg_net import VGG
from alex_net import AlexNet
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision import transforms, utils
import numpy as np
import torch
import torch.nn as nn

# Run on the first CUDA GPU when one is present, otherwise fall back to CPU.
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')


def default_loader(path):
    """Load one sample from *path* as a NumPy array (samples are stored as .npy files)."""
    sample = np.load(path)
    return sample


# Transform pipeline applied per-sample in MyDataset.__getitem__.
# Currently empty: samples from np.load pass through unchanged (the
# training loop below does the FloatTensor cast itself).
train_transforms = transforms.Compose([
    # transforms.ToTensor(),
])


class MyDataset(Dataset):
    """Dataset built from a whitespace-separated index file.

    Each non-blank line of *txt* is expected to look like
    ``<sample-path> <int-label>``.  Samples are loaded lazily via
    *loader* (by default :func:`default_loader`, i.e. ``np.load``).
    """

    def __init__(self, txt, transform=None, target_transform=None, loader=default_loader):
        super(MyDataset, self).__init__()
        imgs = []
        # BUG FIX: the original opened the file without ever closing it;
        # a context manager guarantees the handle is released even if a
        # line fails to parse.
        with open(txt, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                if not words:
                    # Skip blank lines instead of raising IndexError on words[0].
                    continue
                imgs.append((words[0], int(words[1])))

        self.imgs = imgs                        # list of (path, label) pairs
        self.transform = transform              # applied to the loaded sample
        self.target_transform = target_transform  # kept for API compatibility (unused here)
        self.loader = loader                    # path -> sample callable

    def __getitem__(self, index):
        """Return ``(sample, label)`` for *index*, applying *transform* when set."""
        fn, label = self.imgs[index]
        img = self.loader(fn)
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        """Number of (path, label) entries parsed from the index file."""
        return len(self.imgs)


BATCH_SIZE = 1
NUM_WORKERS = 4

train_data = MyDataset(txt='./train.txt', transform=None)
test_data = MyDataset(txt='./test.txt', transform=train_transforms)

train_dataloader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False, num_workers=NUM_WORKERS)

print('train dataset len: {}'.format(len(train_dataloader.dataset)))
print('test dataset len: {}'.format(len(test_dataloader.dataset)))

#  start make net
# Alternative architectures kept for quick swapping:
# model = AlexNet().to(device)
# model = VGG(10).to(device)
model = ResNet18().to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
total_step = len(train_dataloader)
num_epochs = 30

for epoch in range(num_epochs):
    # BUG FIX: model.eval() at the end of the previous epoch switched
    # batch-norm/dropout into inference mode and nothing ever switched
    # them back, so every epoch after the first trained in eval mode.
    model.train()
    for i, (videos, labels) in enumerate(train_dataloader):
        data = videos[0].type(torch.FloatTensor).to(device)
        label = labels.type(torch.FloatTensor).to(device)
        outputs = model(data)

        # NOTE(review): the single label is repeated 100 times, which implies
        # the model emits 100 predictions per sample (e.g. one per frame/clip)
        # — confirm against the ResNet18 definition.
        loss = criterion(outputs, label.repeat(100).long())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 20 == 0:
            print('Epoch: [{}/{}],Step: [{}/{}],Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step,
                                                                     loss.item()))
    # Evaluate on the held-out set after every epoch.
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_dataloader:
            images = images[0].type(torch.FloatTensor).to(device)
            labels = labels.type(torch.FloatTensor).to(device)

            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        # BUG FIX: the original printed the raw fraction with a '%' suffix;
        # scale to an actual percentage.
        print('test accuracy {}%'.format(100 * correct / total))
torch.save(model.state_dict(), 'Model2.kpl')