import os.path
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from torchsummary import summary
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import csv

# --- Configuration ---
BATCH_SIZE = 128  # images per mini-batch
INPUT_SIZE = 150  # images are cropped to INPUT_SIZE x INPUT_SIZE
TRAIN_SAMPLE_NUM = 20000  # samples drawn per training epoch
TEST_SAMPLE_NUM = 5000  # samples drawn per evaluation pass
# the full image folder is assumed to hold 25000 images (see the
# 20000/5000 split below) — the two sample counts must fit inside it
assert TRAIN_SAMPLE_NUM + TEST_SAMPLE_NUM <= 25000
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # GPU support

# Create transformer: compose several transforms together
transform = transforms.Compose([
    transforms.RandomResizedCrop(INPUT_SIZE),  # random crop, then resize to 150*150
    transforms.ToTensor(),  # convert the PIL image to a [0, 1] float tensor
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # per channel: (x - 0.5) / 0.5 -> [-1, 1]
])

# Fetch dataset. ImageFolder treats each sub-directory of `root` as one
# class (cat/dog), yielding (image_tensor, class_index) samples.
fullSet = datasets.ImageFolder(root='dataset/kaggle/train/train', transform=transform)
# Split using the configured constant instead of a hard-coded 20000/5000
# (keeps the split consistent with TRAIN_SAMPLE_NUM / TEST_SAMPLE_NUM and
# with the assert at the top of the file), and call random_split through
# its public location, data.random_split, not the private data.dataset.
trainSet, testSet = data.random_split(
    fullSet, [TRAIN_SAMPLE_NUM, len(fullSet) - TRAIN_SAMPLE_NUM])

# Each epoch draws a fixed number of samples without replacement.
# NOTE(review): some older torch releases reject num_samples together with
# replacement=False — confirm against the pinned torch version.
trainLoader = data.DataLoader(
    dataset=trainSet,
    batch_size=BATCH_SIZE,
    sampler=data.RandomSampler(
        data_source=trainSet,  # the sampler only uses len(data_source)
        replacement=False,
        num_samples=TRAIN_SAMPLE_NUM
    )
)
testLoader = data.DataLoader(
    dataset=testSet,
    batch_size=BATCH_SIZE,
    sampler=data.RandomSampler(
        data_source=testSet,
        replacement=False,
        num_samples=TEST_SAMPLE_NUM
    )
)


# Complex net, abandoned
class NetFake(nn.Module):
    """Deeper VGG-style experiment (unused): three conv stages with batch
    norm and dropout, global average pooling, and a softmax over the two
    class scores.
    """

    def __init__(self):
        super(NetFake, self).__init__()
        # stage 1: 3 -> 64 -> 64 channels
        self.conv1 = nn.Conv2d(3, 64, (3, 3), padding=1)
        self.conv2 = nn.Conv2d(64, 64, (3, 3), padding=1)
        # stage 2: 64 -> 128 -> 128 channels
        self.conv3 = nn.Conv2d(64, 128, (3, 3), padding=1)
        self.conv4 = nn.Conv2d(128, 128, (3, 3), padding=1)
        # stage 3: 128 -> 256 -> 256 channels
        self.conv5 = nn.Conv2d(128, 256, (3, 3), padding=1)
        self.conv6 = nn.Conv2d(256, 256, (3, 3), padding=1)
        self.maxPool = nn.MaxPool2d(2, 2)
        self.avgPool = nn.AvgPool2d(2, 2)
        self.globalAvgPool = nn.AvgPool2d(8, 8)
        # NOTE(review): each bn module below is applied after TWO different
        # conv layers in forward(), so the pair shares one set of affine
        # params and running statistics.
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm2d(256)
        self.dropout50 = nn.Dropout(0.5)
        self.dropout10 = nn.Dropout(0.1)
        self.fc = nn.Linear(4096, 2)

    def forward(self, x):
        # stage 1: two ReLU convs through the shared 64-channel batch norm
        x = self.bn1(F.relu(self.conv1(x)))
        x = self.bn1(F.relu(self.conv2(x)))
        x = self.dropout10(self.maxPool(x))
        # stage 2
        x = self.bn2(F.relu(self.conv3(x)))
        x = self.bn2(F.relu(self.conv4(x)))
        x = self.dropout10(self.avgPool(x))
        # stage 3
        x = self.bn3(F.relu(self.conv5(x)))
        x = self.bn3(F.relu(self.conv6(x)))
        x = self.dropout50(self.globalAvgPool(x))
        # flatten and classify (4096 = 256 channels * 4 * 4 for 150x150 input)
        flat = x.view(x.size(0), -1)
        return F.softmax(self.fc(flat), dim=1)


# Simple net, abandoned
class OriginalNet(nn.Module):
    """Baseline two-conv CNN (unused): conv/pool twice, then two fully
    connected layers with a softmax over the two class scores.
    """

    def __init__(self):
        super(OriginalNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, (3, 3))
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, (3, 3))
        self.max_pool2 = nn.MaxPool2d(2)
        # 82944 = 64 channels * 36 * 36 spatial for 150x150 inputs
        self.fc1 = nn.Linear(82944, 512)
        self.fc2 = nn.Linear(512, 2)

    def forward(self, x):
        batch = x.size(0)
        x = self.max_pool1(F.relu(self.conv1(x)))
        x = self.max_pool2(F.relu(self.conv2(x)))
        hidden = F.relu(self.fc1(x.view(batch, -1)))
        return F.softmax(self.fc2(hidden), dim=1)


class Net(nn.Module):
    """Active model: three conv/pool stages (32 -> 64 -> 64 channels, the
    last one batch-normalized) followed by two fully connected layers.

    forward() returns RAW LOGITS of shape (N, 2).  The trailing softmax
    was removed as a bug fix: this output is fed to nn.CrossEntropyLoss,
    which expects unnormalized logits and applies log_softmax internally,
    so the old softmax produced a "double softmax" that compresses the
    loss range and weakens gradients.  argmax-based predictions and the
    state_dict layout (checkpoint compatibility) are unchanged.  Apply
    F.softmax(logits, dim=1) explicitly if probabilities are needed.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3))
        self.max_pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
        self.max_pool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3, 3))
        self.max_pool3 = nn.MaxPool2d(2)
        self.bn = nn.BatchNorm2d(64)
        # 18496 = 64 channels * 17 * 17 spatial for 150x150 inputs
        self.fc1 = nn.Linear(18496, 512)
        self.fc2 = nn.Linear(512, 2)

    def forward(self, x):
        in_size = x.size(0)
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = F.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = self.bn(F.relu(self.conv3(x)))
        x = self.max_pool3(x)
        x = x.view(in_size, -1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)  # logits; CrossEntropyLoss applies log_softmax itself


def train(trainRound):
    """Train the model for ``trainRound`` epochs (currently must be 1),
    resuming from 'checkpoint/ckpt.pth' when it exists and saving a new
    checkpoint afterwards.

    Returns:
        (mean_train_loss_per_sample, train_accuracy) over the epoch.

    Raises:
        ValueError: if trainRound != 1 (the bookkeeping below assumes a
            single epoch per call).
    """
    net = Net().to(device)

    # activate training mode (enables dropout / batch-norm batch statistics)
    net.train()

    # show model summary
    # summary(net, (3, INPUT_SIZE, INPUT_SIZE), batch_size=BATCH_SIZE)

    # cross entropy over the 2 class scores; SGD with momentum
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # resume from checkpoint when one is present
    start_epoch = -1
    path_checkpoint = 'checkpoint/ckpt.pth'
    if os.path.exists(path_checkpoint):
        checkpoint = torch.load(path_checkpoint)  # load the saved checkpoint
        net.load_state_dict(checkpoint['net'])  # load model params
        optimizer.load_state_dict(checkpoint['optimizer'])  # load optimizer params
        start_epoch = checkpoint['epoch']  # load trained rounds

    # explicit check instead of assert (asserts vanish under `python -O`)
    if trainRound != 1:
        raise ValueError('train() currently supports exactly one epoch per call')
    train_loss, total_samples, train_correct = 0.0, 0, 0.0

    for epoch in range(start_epoch + 1, trainRound):
        # progress bar over the mini-batches
        loop = tqdm(enumerate(trainLoader), total=len(trainLoader), leave=True)

        # fetch BATCH_SIZE images per step
        for step, (batch_img, batch_label) in loop:
            # transfer relevant data to GPU
            batch_img, batch_label = batch_img.to(device), batch_label.to(device)
            optimizer.zero_grad()
            output = net(batch_img)

            loss = loss_function(output, batch_label)
            loss.backward()
            optimizer.step()

            # loss.item() is the batch MEAN; weight it by the batch size so
            # dividing by the sample count below yields a true per-sample
            # average (the original summed batch means but divided by the
            # number of samples, deflating the value by ~1/BATCH_SIZE)
            batch_size = batch_label.size(0)
            train_loss += loss.item() * batch_size
            pred = output.argmax(dim=1)
            total_samples += batch_size
            train_correct += (pred == batch_label).sum().item()

            loop.set_description(f'TRAIN Epoch [{epoch}/{trainRound}]')
            loop.set_postfix(loss=float(train_loss / total_samples),
                             acc=float(train_correct / total_samples))

    # save params after training; make sure the directory exists first
    os.makedirs('checkpoint', exist_ok=True)
    torch.save({
        'net': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': -1  # deliberately reset so the next call trains again
    }, path_checkpoint)

    return float(train_loss / total_samples), float(train_correct / total_samples)


def test():
    """Evaluate the checkpointed model on the test loader.

    Returns:
        (mean_test_loss_per_sample, test_accuracy), or None when no
        checkpoint exists yet.  NOTE(review): callers that unpack the
        result (e.g. train_and_test) will crash on the None path — they
        must train first.
    """
    net = Net().to(device)
    net.eval()  # disable dropout, use running batch-norm statistics
    loss_function = nn.CrossEntropyLoss()
    # summary(net, (3, INPUT_SIZE, INPUT_SIZE), batch_size=BATCH_SIZE)

    # load trained params
    path_checkpoint = 'checkpoint/ckpt.pth'
    if not os.path.exists(path_checkpoint):
        print('Please train the model first.')
        return
    checkpoint = torch.load(path_checkpoint)  # load the saved checkpoint
    net.load_state_dict(checkpoint['net'])  # load model params

    # disable gradient calculation, reducing memory consumption
    with torch.no_grad():
        test_loss, test_correct, total_samples = 0.0, 0.0, 0
        loop = tqdm(enumerate(testLoader), total=len(testLoader), leave=True)
        for step, (batch_img, batch_label) in loop:
            batch_img, batch_label = batch_img.to(device), batch_label.to(device)
            output = net(batch_img)
            loss = loss_function(output, batch_label)
            # loss.item() is the batch MEAN; weight by batch size so the
            # division by total_samples gives a true per-sample average
            # (the original deflated the value by ~1/BATCH_SIZE)
            batch_size = batch_label.size(0)
            test_loss += loss.item() * batch_size
            pred = output.argmax(dim=1)
            test_correct += (pred == batch_label).sum().item()
            total_samples += batch_size

            loop.set_description('TEST')
            loop.set_postfix(loss=float(test_loss / total_samples),
                             acc=float(test_correct / total_samples))
    test_loss, test_acc = float(test_loss / total_samples), float(test_correct / total_samples)
    print('Accuracy on the %d test images: %f %%' % (TEST_SAMPLE_NUM, test_acc * 100))
    return test_loss, test_acc


def train_and_test(epoch):
    """Run ``epoch`` rounds of train-then-test, appending one metrics row
    per round to 'log/loss_acc.csv'.
    """
    fieldnames = ['train_loss', 'train_acc', 'test_loss', 'test_acc']
    for _ in range(epoch):
        train_metrics = train(1)
        test_metrics = test()
        row = dict(zip(fieldnames, train_metrics + test_metrics))
        # append-mode open so earlier rounds' rows are preserved
        with open('log/loss_acc.csv', 'a', newline='') as f:
            csv.DictWriter(f, fieldnames=fieldnames).writerow(row)


def draw_curves():
    """Plot the train/test loss and accuracy curves recorded in
    'log/loss_acc.csv' (one figure per metric).
    """
    columns = {'train_loss': [], 'train_acc': [], 'test_loss': [], 'test_acc': []}
    with open('log/loss_acc.csv', 'r') as f:
        for row in csv.DictReader(f):
            for key, series in columns.items():
                series.append(float(row[key]))
    epochs = torch.arange(1, len(columns['train_loss']) + 1)

    # loss curves
    plt.plot(epochs, columns['train_loss'], label='train loss')
    plt.plot(epochs, columns['test_loss'], label='test loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.title('Train/Test Loss')
    plt.show()

    # accuracy curves
    plt.plot(epochs, columns['train_acc'], label='train accuracy')
    plt.plot(epochs, columns['test_acc'], label='test accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.title('Train/Test Accuracy')
    plt.show()


def initialize():
    """Delete any previous checkpoint and metrics log, then write a fresh
    CSV header so train_and_test() can append rows.
    """
    def delete_file(path):
        # remove the file if present; missing files are fine
        if os.path.exists(path):
            os.remove(path)

    delete_file('checkpoint/ckpt.pth')
    delete_file('log/loss_acc.csv')
    # ensure the log directory exists before writing the header — the
    # original open() raised FileNotFoundError on a fresh checkout
    os.makedirs('log', exist_ok=True)
    with open('log/loss_acc.csv', 'a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['train_loss', 'train_acc', 'test_loss', 'test_acc'])
        writer.writeheader()


# show some examples of the cropped samples
def show_pic_examples():
    """Display a 4x4 grid of transformed training samples; the chosen
    indices (i * 1600) land in the cat half for i < 8 and the dog half
    after — presumably because ImageFolder orders classes alphabetically
    (verify against the dataset layout).
    """
    plt.figure(figsize=(10, 10))
    to_image = transforms.ToPILImage()
    for idx in range(16):
        plt.subplot(4, 4, idx + 1)
        plt.xticks([])
        plt.yticks([])
        # undo the (0.5, 0.5) normalization before converting for display
        denormalized = fullSet[idx * 1600][0] * 0.5 + 0.5
        plt.imshow(to_image(denormalized))
        plt.xlabel('cat' if idx < 8 else 'dog')
    plt.show()


if __name__ == '__main__':
    # Typical workflow: initialize() once to reset checkpoint/log, then
    # train_and_test(...) to train and record metrics, then draw_curves()
    # to visualize them.
    # initialize()
    # train_and_test(epoch=100)
    # draw_curves()

    # evaluate the current checkpoint only
    test()

    # summary(Net().to(device), (3, INPUT_SIZE, INPUT_SIZE), batch_size=BATCH_SIZE)

    # show_pic_examples()
