import torch as pt
import torchvision as ptv
import numpy as np


def sep(label=''):
    """Print a section separator: 32 dashes, the label, 32 dashes."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')


class Resnet34Clf(pt.nn.Module):
    """ImageNet-pretrained ResNet-34 backbone with a fresh linear head.

    The attribute names ``resnet`` and ``fc`` are kept as-is so that
    previously saved state_dict checkpoints still load.
    """

    def __init__(self, n_cls, **kwargs):
        """Build the classifier.

        :param n_cls: number of output classes for the new head.
        """
        super().__init__(**kwargs)

        base = ptv.models.resnet34(pretrained=True)
        feat_dim = base.fc.in_features

        # Keep everything up to (and including) the global average pool;
        # drop only the original 1000-way classification layer.
        layers = list(base.children())[:-1]
        self.resnet = pt.nn.Sequential(*layers)
        self.fc = pt.nn.Linear(feat_dim, n_cls)

    def forward(self, x):
        """Return raw class logits, shape [batch, n_cls]."""
        feats = self.resnet(x)
        # The backbone ends in a global avgpool: [B, C, 1, 1] -> [B, C].
        feats = feats.squeeze(3).squeeze(2)
        return self.fc(feats)


# Cross-entropy over raw logits; targets must be integer class indices.
criterion = pt.nn.CrossEntropyLoss()


def accuracy(y, pred):
    """Fraction of rows in `pred` (logits, shape [N, C]) whose argmax equals `y`.

    :param y: true labels, any numeric dtype (cast to long internally).
    :param pred: per-class scores/logits, argmax taken over dim 1.
    :return: scalar tensor with the mean accuracy in [0, 1].
    """
    chosen = pred.argmax(dim=1)
    hits = (y.long() == chosen).float()
    return hits.mean()


if '__main__' == __name__:
    # Script-only imports: heavy/visual dependencies are not needed when this
    # module is imported for its model/metric definitions.
    # NOTE(review): `sys` and `pd` appear unused in this file — confirm before
    # removing, the file may be a chunk of something larger.
    import os
    import sys
    import matplotlib.pyplot as plt
    import cv2 as cv
    from sklearn.model_selection import train_test_split
    from torch.utils.data import TensorDataset, DataLoader
    import pandas as pd
    import seaborn as sns
    from sklearn.metrics import recall_score, precision_score, f1_score, confusion_matrix

    sep('Arguments')
    # Hyper-parameters and filesystem layout.
    VER = 'v1.2'
    BATCH_SIZE = 32
    N_EPOCHS = 2
    # Adam learning rate.
    ALPHA = 1e-5
    # Square input size expected by the ResNet-34 backbone.
    IMG_H, IMG_W = 224, 224
    # Presumably each directory holds one subfolder per class (cats/dogs) —
    # see load_dir below; paths are relative to this script's location.
    train_dir = '../../../../../large_data/DL1/_many_files/cats_and_dogs_filtered/train_fast'
    test_dir = '../../../../../large_data/DL1/_many_files/cats_and_dogs_filtered/validation_fast'
    FILE_NAME = os.path.basename(__file__)
    # Checkpoints are versioned per script name and VER string.
    SAVE_DIR = os.path.join('_save', FILE_NAME, VER)
    os.makedirs(SAVE_DIR, exist_ok=True)
    SAVE_PATH = os.path.join(SAVE_DIR, f'weight.{N_EPOCHS}.pth')

    sep('Model')
    # Prefer the first GPU when available, otherwise run on CPU.
    device = 'cuda:0' if pt.cuda.is_available() else 'cpu'
    print('device', device)
    device = pt.device(device)

    # Two-class classifier (cats vs dogs).
    model = Resnet34Clf(2).to(device)
    optim = pt.optim.Adam(model.parameters(), lr=ALPHA)

    sep('Test model')

    # Smoke-test the forward pass with a dummy batch of 4 images.
    x = pt.zeros([4, 3, 224, 224]).to(device)
    pred = model(x)
    print('pred', pred.size())

    sep('Load data')

    def load_dir(dir, label2idxArg=None):
        """Load a class-per-subdirectory image tree into numpy arrays.

        Each subdirectory of `dir` is one class. Every image is zero-padded
        on its short side to a square (preserving aspect ratio), then resized
        to IMG_H x IMG_W.

        :param dir: root directory containing one subdirectory per class.
        :param label2idxArg: optional existing {class_name: index} mapping
            (e.g. from the training set) to reuse for labeling; when None, a
            fresh mapping is built from the subdirectory iteration order.
        :return: (x uint8 [N, IMG_H, IMG_W, 3], y int64 [N], label2idx dict)
        :raises IOError: if an image file cannot be decoded.
        """
        # NOTE(review): os.listdir order is arbitrary, so a freshly built
        # label mapping is not guaranteed stable across filesystems; consider
        # sorted(os.listdir(...)) if reproducible labels matter.
        yv = 0
        x = []
        y = []
        label2idx = {}
        for dir_name in os.listdir(dir):
            label2idx[dir_name] = yv
            dir_path = os.path.join(dir, dir_name)
            for file_name in os.listdir(dir_path):
                file_path = os.path.join(dir_path, file_name)
                img = cv.imread(file_path, cv.IMREAD_COLOR)
                if img is None:
                    # cv.imread silently returns None on unreadable/corrupt
                    # files; fail with a clear message instead of crashing
                    # with AttributeError on img.shape below.
                    raise IOError(f'cannot read image: {file_path}')
                H, W = img.shape[:2]
                # Pad the short side with black pixels to make the image
                # square, so the resize below does not distort aspect ratio.
                if H > W:
                    img = np.concatenate([img, np.zeros([H, H - W, 3], dtype=np.uint8)], axis=1)
                elif H < W:
                    img = np.concatenate([img, np.zeros([W - H, W, 3], dtype=np.uint8)], axis=0)
                # Fix: cv.resize takes dsize as (width, height); the original
                # passed (IMG_H, IMG_W), which only worked because both
                # happen to be 224.
                img = cv.resize(img, (IMG_W, IMG_H), interpolation=cv.INTER_CUBIC)
                x.append(img)
                if label2idxArg is None:
                    y.append(yv)
                else:
                    y.append(label2idxArg[dir_name])
            yv += 1
        if label2idxArg is not None:
            label2idx = label2idxArg
        x = np.uint8(x)
        y = np.int64(y)
        return x, y, label2idx


    def trans_np_data_for_pt(x):
        """Convert a uint8 NHWC image batch to float32 NCHW scaled to [-1, 1]."""
        # Channels-last -> channels-first, as PyTorch conv layers expect.
        nchw = x.transpose([0, 3, 1, 2])
        # Same arithmetic order as before: /255 then *2 then -1.
        return nchw.astype(np.float32) / 255. * 2. - 1.


    # Load and preprocess the training images; keep the label mapping so the
    # test set can reuse it.
    x_train, y_train, label2idx = load_dir(train_dir)
    x_train = trans_np_data_for_pt(x_train)
    x_train = pt.Tensor(x_train)
    # NOTE: pt.Tensor() yields float32 labels; process_data casts them back
    # to long per batch before the loss.
    y_train = pt.Tensor(y_train)
    print('x_train', x_train.size())
    print('y_train', y_train.size())

    print('Splitting ...')
    # 90/10 train/validation split; fixed random_state for reproducibility.
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.9, random_state=1, shuffle=True)
    print('x_train', x_train.size())
    print('y_train', y_train.size())
    print('x_val', x_val.size())
    print('y_val', y_val.size())

    # Reuse the training label mapping so test labels index identically.
    x_test, y_test, _ = load_dir(test_dir, label2idx)
    x_test = trans_np_data_for_pt(x_test)
    x_test = pt.Tensor(x_test)
    y_test = pt.Tensor(y_test)
    print('x_test', x_test.size())
    print('y_test', y_test.size())

    ds_train = TensorDataset(x_train, y_train)
    ds_test = TensorDataset(x_test, y_test)
    ds_val = TensorDataset(x_val, y_val)

    # NOTE(review): no shuffle=True on the training loader — batches keep
    # dataset order every epoch; confirm whether shuffling was intended.
    dl_train = DataLoader(ds_train, BATCH_SIZE)
    dl_test = DataLoader(ds_test, BATCH_SIZE)
    dl_val = DataLoader(ds_val, BATCH_SIZE)

    sep('Define: Process data routine')


    def process_data(dl, label, is_train, epoch=1, return_data=False):
        """Run one full pass of the model over a DataLoader.

        Trains the module-level `model` with `optim` when `is_train` is true,
        otherwise only evaluates it. Progress is printed roughly every 10% of
        the batches.

        :param dl: DataLoader yielding (inputs, integer labels) batches.
        :param label: tag used in the progress print-outs.
        :param is_train: whether to backprop and step the optimizer.
        :param epoch: 1-based epoch number, for logging only.
        :param return_data: also return concatenated true/predicted labels.
        :return: (avg_loss, avg_acc) or (avg_loss, avg_acc, y_true, y_pred).
        """
        total_loss = 0.
        total_acc = 0.
        n_batches = len(dl)
        # Print roughly 10 progress lines per pass.
        report_every = int(np.ceil(n_batches / 10))
        y_true = None
        y_pred = None
        for i, (bx, by) in enumerate(dl):
            bx = bx.float().to(device)
            by = by.long().to(device)
            if is_train:
                model.train(True)
                optim.zero_grad()
                pred = model(bx)
                loss = criterion(pred, by)
                loss.backward()
                optim.step()
                # Leave the model in eval mode between batches.
                model.train(False)
            else:
                model.train(False)
                pred = model(bx)
                loss = criterion(pred, by)
            acc = accuracy(by, pred)
            loss = loss.detach().cpu().numpy()
            acc = acc.detach().cpu().numpy()
            total_loss += loss
            total_acc += acc
            if i % report_every == 0 or i == n_batches - 1:
                print(f'{label}: epoch#{epoch}: #{i + 1}: loss = {loss}, acc = {acc}')

            if return_data:
                batch_true = by.detach().cpu().numpy()
                batch_pred = pred.argmax(dim=1).detach().cpu().numpy()
                if y_true is None:
                    y_true, y_pred = batch_true, batch_pred
                else:
                    y_true = np.concatenate([y_true, batch_true], axis=0, dtype=np.int64)
                    y_pred = np.concatenate([y_pred, batch_pred], axis=0, dtype=np.int64)

        total_loss /= i + 1
        total_acc /= i + 1
        if return_data:
            return total_loss, total_acc, y_true, y_pred
        return total_loss, total_acc


    sep('Train or load')
    # Reuse a saved checkpoint when present; otherwise train from scratch.
    if os.path.exists(SAVE_PATH):
        print('Loading ...')
        # map_location makes a GPU-saved checkpoint loadable on a CPU-only
        # host. (Local also renamed from `dict`, which shadowed the builtin.)
        state_dict = pt.load(SAVE_PATH, map_location=device)
        model.load_state_dict(state_dict)
        print('Loaded')
    else:
        loss_his, acc_his, loss_his_val, acc_his_val = [], [], [], []
        for epoch in range(N_EPOCHS):
            sep(epoch + 1)
            # One training pass ...
            avg_loss, avg_acc = process_data(dl_train, 'train', True, epoch + 1)
            print(f'Train: epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}')
            loss_his.append(avg_loss)
            acc_his.append(avg_acc)
            # ... then one validation pass (no gradient updates).
            avg_loss_val, avg_acc_val = process_data(dl_val, 'val', False, epoch + 1)
            print(f'Train: epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')
            loss_his_val.append(avg_loss_val)
            acc_his_val.append(avg_acc_val)
        print('Saving ...')
        state_dict = model.state_dict()
        pt.save(state_dict, SAVE_PATH)
        print('Saved')

        print('Plotting ... (Check and close the plotting window to continue.)')
        # 1x2 grid: loss curves on the left, accuracy curves on the right.
        spr = 1
        spc = 2
        spn = 0
        plt.figure(figsize=[12, 6])

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('loss')
        plt.plot(loss_his, label='train')
        plt.plot(loss_his_val, label='val')
        plt.legend()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('accuracy')
        plt.plot(acc_his, label='train')
        plt.plot(acc_his_val, label='val')
        plt.legend()

        plt.show()

    sep('Test')
    # Final evaluation on the held-out test set; return_data=True also gives
    # the concatenated true/predicted label arrays for the metrics below.
    avg_loss_test, avg_acc_test, y_true, y_pred = process_data(dl_test, 'test', False, 1, True)
    print(f'Tested: loss = {avg_loss_test}, acc = {avg_acc_test}')

    sep('Confusion matrix and metrics')
    mat = confusion_matrix(y_true, y_pred)

    # 2x2 grid of diagnostic plots. Each pie shows one metric against its
    # complement (metric, 1 - metric).
    spr = 2
    spc = 2
    spn = 0
    plt.figure(figsize=[12, 6])

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('Confusion matrix (lib)')
    sns.heatmap(mat, annot=True)

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('recall (lib)')
    # sklearn's default (binary) averaging is valid here because the model is
    # built with exactly 2 classes (Resnet34Clf(2) above).
    recall = recall_score(y_true, y_pred)
    plt.pie([recall, 1 - recall], labels=['Recall', ''], autopct='%.2f%%')

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('precision (lib)')
    precision = precision_score(y_true, y_pred)
    plt.pie([precision, 1 - precision], labels=['Precision', ''], autopct='%.2f%%', explode=[0.1, 0])

    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('F1 score (lib)')
    f1 = f1_score(y_true, y_pred)
    plt.pie([f1, 1 - f1], labels=['F1', ''], autopct='%.2f%%')

    print('Please check and close the plotting window to finish.')
    plt.show()
    print('Over')
