from torchvision.datasets import FashionMNIST
from torchvision import transforms
import torch.utils.data as data
import torch
import copy
import os
import pandas as pd
import time
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt


def train(model, train_loader, test_loader, epoches, lr, read_model_path=None, batch_debug=False, batch_debug_num=10):
    """Train `model` with Adam + cross-entropy, evaluating on `test_loader` each epoch.

    Args:
        model: nn.Module to train (modified in place and moved to GPU if available).
        train_loader: DataLoader yielding (inputs, integer labels) for training.
        test_loader: DataLoader yielding (inputs, integer labels) for evaluation.
        epoches: number of epochs to run.
        lr: Adam learning rate.
        read_model_path: optional path of a state_dict to load before training.
        batch_debug: when True, print in-epoch batch progress markers.
        batch_debug_num: approximate number of progress markers per epoch.

    Returns:
        (best_model_wts, train_process): a deep-copied state_dict of the epoch
        with the highest test accuracy, and a DataFrame with per-epoch
        train/test loss and accuracy.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if read_model_path is not None:
        # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
        model.load_state_dict(torch.load(read_model_path, map_location=device))

    # Move the model BEFORE building the optimizer so Adam is created over the
    # on-device parameters.
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    train_loss_list = []
    test_loss_list = []
    train_acc_list = []
    test_acc_list = []

    since = time.time()
    # Guard the progress-marker stride: the plain integer division could be 0
    # (loader shorter than batch_debug_num, silently disabling the markers) or
    # raise ZeroDivisionError for batch_debug_num == 0.
    delta_train_batch = max(1, len(train_loader) // max(1, batch_debug_num))
    delta_test_batch = max(1, len(test_loader) // max(1, batch_debug_num))
    for epoch in range(epoches):
        print('-' * 30)
        print('Epoch {}/{}'.format(epoch + 1, epoches))

        train_loss = 0.0
        train_correct = 0
        test_loss = 0.0
        test_correct = 0
        train_num = 0
        test_num = 0

        # Switch mode once per phase, not once per batch.
        model.train()
        bn = delta_train_batch
        for i, (bx, by) in enumerate(train_loader):
            bx = bx.to(device)
            by = by.to(device)

            output = model(bx)
            pre_label = torch.argmax(output, dim=1)
            loss = criterion(output, by)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Weight the mean batch loss by batch size so the epoch average is
            # exact even when the last batch is smaller.
            train_loss += loss.item() * bx.size(0)
            train_correct += torch.sum(pre_label == by).item()
            train_num += bx.size(0)
            if batch_debug and i == bn - 1:
                print('\r\ttrain Batch Debug: {}/{}'.format(bn, len(train_loader)), end='')
                bn += delta_train_batch
                if bn > len(train_loader):
                    bn = len(train_loader)
        print()

        model.eval()
        bn = delta_test_batch
        # Evaluation needs no gradients: skip autograd bookkeeping entirely.
        with torch.no_grad():
            for i, (bx, by) in enumerate(test_loader):
                bx = bx.to(device)
                by = by.to(device)

                output = model(bx)
                pre_label = torch.argmax(output, dim=1)
                loss = criterion(output, by)

                test_loss += loss.item() * bx.size(0)
                test_correct += torch.sum(pre_label == by).item()
                test_num += bx.size(0)

                if batch_debug and i == bn - 1:
                    print('\r\tTest Batch Debug: {}/{}'.format(bn, len(test_loader)), end='')
                    bn += delta_test_batch
                    if bn > len(test_loader):
                        bn = len(test_loader)

        train_loss_list.append(train_loss / train_num)
        test_loss_list.append(test_loss / test_num)
        train_acc_list.append(float(train_correct) / train_num)
        test_acc_list.append(float(test_correct) / test_num)
        print()
        print('Train Loss: {:.4f} Acc: {:.4f} '.format(train_loss_list[-1], train_acc_list[-1]))
        print('Test Loss: {:.4f} Acc: {:.4f} '.format(test_loss_list[-1], test_acc_list[-1]))

        # Track the weights of the best-performing epoch on the test set.
        if test_acc_list[-1] > best_acc:
            best_acc = test_acc_list[-1]
            best_model_wts = copy.deepcopy(model.state_dict())

        time_used = time.time() - since
        print('All time used: {}h {}m {:.2f}s'.format(int(time_used // 3600), int((time_used % 3600) // 60),
                                                      time_used % 60))

    train_process = pd.DataFrame(
        {'epoch': range(1, epoches + 1), 'train_loss': train_loss_list, 'train_acc': train_acc_list,
         'test_loss': test_loss_list, 'test_acc': test_acc_list})

    return best_model_wts, train_process


def save_best_model(best_model_wts, path):
    """Serialize the best model weights (a state_dict) to `path` via torch.save."""
    torch.save(best_model_wts, path)


def plot_train_process(train_process):
    """Plot per-epoch train/eval loss (left) and accuracy (right) curves.

    `train_process` is the DataFrame returned by `train`; the figure title is
    the name of the current working directory.
    """
    plot_name = os.path.basename(os.getcwd())
    epochs = train_process['epoch']

    plt.figure(figsize=(12, 4))
    # Two identical panels that differ only in which metric they show.
    for panel, key, ylabel in ((1, 'loss', 'loss'), (2, 'acc', 'accuracy')):
        plt.subplot(1, 2, panel)
        plt.plot(epochs, train_process['train_' + key], 'ro-', label='train_' + key)
        plt.plot(epochs, train_process['test_' + key], 'bo-', label='eval_' + key)
        plt.xlabel('epoch')
        plt.ylabel(ylabel)
        plt.legend()
        plt.title(plot_name)
    plt.show()


if __name__ == '__main__':
    # Intentionally empty: this module is meant to be imported for its
    # train/save/plot helpers; no standalone entry point is defined yet.
    pass
