import torch
import torch.nn as nn
import os
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from gen_dataset import GenData, MyData, transform
from model import ResNet18_PRO, ResNet18, VGG13, GoogLeNet, SVM, HingeLoss, LabelSmoothingSVM, SDTModel
from utils import LabelSmoothing
import torch.autograd as autograd
from show_result import show_result
from DTs import *
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, fbeta_score


def get_k_fold_data(k, i, x, y):
    """Split (x, y) into k folds and return fold i as validation, the rest as training.

    Args:
        k: number of folds, must be > 1.
        i: index of the validation fold, 0 <= i < k.
        x: feature array; axis 0 is the sample axis.
        y: label array aligned with x along axis 0.

    Returns:
        (x_train, y_train, x_valid, y_valid) numpy arrays.
    """
    assert k > 1
    assert 0 <= i < k, "validation fold index out of range"
    fold_size = x.shape[0] // k
    train_x_parts, train_y_parts = [], []
    x_valid = y_valid = None
    for j in range(k):
        # The last fold absorbs the remainder so no sample is silently dropped
        # (the original sliced exactly k*fold_size samples and lost the rest).
        stop = x.shape[0] if j == k - 1 else (j + 1) * fold_size
        idx = slice(j * fold_size, stop)
        if j == i:
            x_valid, y_valid = x[idx], y[idx]
        else:
            train_x_parts.append(x[idx])
            train_y_parts.append(y[idx])
    x_train = np.concatenate(train_x_parts, axis=0)
    y_train = np.concatenate(train_y_parts, axis=0)
    return x_train, y_train, x_valid, y_valid


def _accumulate(store, values, folds):
    """Fold this run's per-epoch curve (scaled by 1/folds) into the running average."""
    contrib = np.array(values) / folds
    return contrib if store is None else store + contrib


def k_fold_training(netname, num_class, k, batch_size, x_data, y_data, epochs, learning_rate, device, ckpt_dir,
                    label_smooth, lr_scheduler):
    """Train `netname` with (optionally) k-fold cross validation, averaging curves over folds.

    Returns ten values: the train/val loss arrays (per-epoch), then the
    train acc/precision/recall/f1 and val acc/precision/recall/f1 dicts, each
    keyed by defect name with a per-epoch numpy array as value.
    """
    defects = ["total", "hole", "crack", "normal"]
    # Loss curves are plain per-epoch arrays; the metric curves are dicts keyed
    # by defect name. (The original initialized the losses as dicts and then
    # overwrote them with arrays, which hid an over-counting bug below.)
    total_train_loss = None
    total_val_loss = None
    total_train_acc = {d: None for d in defects}
    total_train_precision = {d: None for d in defects}
    total_train_recall = {d: None for d in defects}
    total_train_f1 = {d: None for d in defects}
    total_val_acc = {d: None for d in defects}
    total_val_precision = {d: None for d in defects}
    total_val_recall = {d: None for d in defects}
    total_val_f1 = {d: None for d in defects}
    # folds = k enables full k-fold cross validation; folds = 1 trains a single
    # model while still holding out 1/k of the data as the validation set.
    folds = 1
    for count in range(folds):
        # get the ith fold data
        x_train, y_train, x_val, y_val = get_k_fold_data(k, count, x_data, y_data)
        # wrap the numpy arrays into dataloader iterators
        train_dataset = MyData(x_train, y_train, 416, 416, transform=transform)
        train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
        # NOTE(review): the validation dataset is built WITHOUT the 416, 416
        # size arguments passed for training above -- confirm MyData's defaults
        # make the two calls equivalent.
        val_dataset = MyData(x_val, y_val, transform=transform)
        val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, drop_last=True)
        model = get_model(device, netname, num_class)
        train_loss, val_loss, train_acc_dict, train_precision_dict, train_recall_dict, train_f1_dict, \
        val_acc_dict, val_precision_dict, val_recall_dict, val_f1_dict = train(netname, count, model, train_dataloader,
                                                                               val_dataloader, epochs, learning_rate,
                                                                               device, ckpt_dir, num_class, label_smooth,
                                                                               lr_scheduler)
        print(val_acc_dict)
        # Accumulate the loss curves ONCE per fold. The original re-added them
        # once per defect inside the loop below, over-counting the losses 4x
        # per fold whenever folds > 1.
        total_train_loss = _accumulate(total_train_loss, train_loss, folds)
        total_val_loss = _accumulate(total_val_loss, val_loss, folds)
        # accumulate the per-defect metric curves for this fold
        for defect in defects:
            total_train_acc[defect] = _accumulate(total_train_acc[defect], train_acc_dict[defect], folds)
            total_train_precision[defect] = _accumulate(total_train_precision[defect], train_precision_dict[defect], folds)
            total_train_recall[defect] = _accumulate(total_train_recall[defect], train_recall_dict[defect], folds)
            total_train_f1[defect] = _accumulate(total_train_f1[defect], train_f1_dict[defect], folds)
            total_val_acc[defect] = _accumulate(total_val_acc[defect], val_acc_dict[defect], folds)
            total_val_precision[defect] = _accumulate(total_val_precision[defect], val_precision_dict[defect], folds)
            total_val_recall[defect] = _accumulate(total_val_recall[defect], val_recall_dict[defect], folds)
            total_val_f1[defect] = _accumulate(total_val_f1[defect], val_f1_dict[defect], folds)
    # return the average per-epoch loss and metric curves over all folds
    return total_train_loss, total_val_loss, total_train_acc, total_train_precision, total_train_recall, total_train_f1, \
           total_val_acc, total_val_precision, total_val_recall, total_val_f1


def _update_metrics(metric_result_list, element_list, labels, predicted, num_class):
    """Accumulate accuracy/precision/recall/f1 for one batch into the per-defect dicts.

    Entry 0 of metric_result_list is the "total" (all classes, macro-averaged)
    result; entry c > 0 restricts precision/recall/f1 to the single class c - 1.
    """
    for c, metric_result in enumerate(metric_result_list):
        for idx, element in enumerate(metric_result.keys()):
            if idx == 0:
                # accuracy has no per-class restriction
                metric_result[element] += element_list[idx](labels, predicted)
            else:
                # FIX: the original hard-coded [0,1,2,3,4,5] here on the train
                # path and [0,1,2] on the val path; use the real class count
                # consistently for both.
                if c == 0:
                    labels_pos = list(range(num_class))
                else:
                    labels_pos = [c - 1]
                metric_result[element] += element_list[idx](labels, predicted, labels=labels_pos,
                                                            average="macro")


def train(netname, count, model, train_dataloader, val_dataloader, epochs, learning_rate, device, ckpt_dir, num_class,
          label_smooth, lr_scheduler):
    """Train `model` for `epochs` epochs on one fold, validating after each epoch.

    The latest weights are saved to ckpt_dir/last.pt every epoch, and a named
    checkpoint is written whenever the total validation F1 reaches a new best.

    Args:
        netname: model name; selects the loss (SVM -> hinge, SoftDTs -> the
            model's own loss + SGD, otherwise cross entropy).
        count: fold index (logging only).
        lr_scheduler: if True, apply exponential LR decay (gamma=0.95) per epoch.

    Returns:
        train_loss, val_loss lists plus the train/val acc, precision, recall
        and f1 dicts (per defect name, one entry per epoch).
    """
    train_loss = []
    val_loss = []

    train_acc_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    train_precision_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    train_recall_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    train_f1_dict = {"total": [], "hole": [], "crack": [], "normal": []}

    val_acc_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    val_precision_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    val_recall_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    val_f1_dict = {"total": [], "hole": [], "crack": [], "normal": []}
    val_f1_list = []  # total validation F1 per epoch; drives best-checkpoint saving

    ################################################################################
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate,
                                 weight_decay=0.0005)

    if netname == "SVM":
        if label_smooth:
            criterion = LabelSmoothingSVM(0.1, num_class)
        else:
            criterion = HingeLoss()
    elif netname == "SoftDTs":
        # NOTE(review): get_model() builds this model under the name "SDT",
        # not "SoftDTs" -- confirm which spelling callers actually pass.
        criterion = model.cal_loss
        # FIX: the original referenced an undefined `params_dict` here, which
        # raised NameError on this path. Use the configured learning rate;
        # momentum 0.9 is a common default -- TODO confirm the intended value.
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    else:
        criterion = nn.CrossEntropyLoss()
        if label_smooth:
            criterion = LabelSmoothing(0.1, num_class)

    my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.95)
    # metric functions and the defect names their results are reported under
    element_list = [accuracy_score, precision_score, recall_score, f1_score]
    data_name = ["total", "hole", "crack", "normal"]
    for epoch in range(0, epochs):
        print('\nfold: %d  Epoch: %d' % (count + 1, epoch + 1))
        model.train()
        sum_loss = 0.0
        total_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
        hole_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
        crack_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
        normal_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
        metric_result_list = [total_metrics, hole_metrics, crack_metrics, normal_metrics]
        n = len(train_dataloader)
        m = len(val_dataloader)
        with tqdm(total=n, desc='Train', leave=True, ncols=100, unit='img', unit_scale=True)as pbar:
            for batch_idx, (images, labels) in enumerate(train_dataloader):
                length = len(train_dataloader)
                images, labels = images.to(device), labels.to(device)
                labels = labels.long()
                optimizer.zero_grad()

                ################################################################################
                if netname == "SoftDTs":
                    # the soft decision tree returns loss and logits in one call
                    loss, outputs = criterion(images, labels)
                    outputs = outputs.to(device)
                else:
                    if netname == "GoogLeNet":
                        # GoogLeNet also returns two auxiliary heads; only the main output is scored
                        outputs, _, _ = model(images)
                    else:
                        outputs = model(images)
                    loss = criterion(outputs, labels)
                ################################################################################
                loss.backward()
                optimizer.step()
                sum_loss += loss.item()
                _, predicted = torch.max(outputs.data, dim=1)

                labels = labels.cpu()
                predicted = predicted.cpu()
                _update_metrics(metric_result_list, element_list, labels, predicted, num_class)
                pbar.update()
            if lr_scheduler:
                my_lr_scheduler.step()

        train_loss.append(sum_loss / n)
        # per-batch metric sums -> per-epoch averages, recorded per defect
        for idx, metric_result in enumerate(metric_result_list):
            average_acc = metric_result["accuracy"] / n
            average_precision = metric_result["precision"] / n
            average_recall = metric_result["recall"] / n
            average_f1 = metric_result["f1"] / n

            train_acc_dict[data_name[idx]].append(average_acc)
            train_precision_dict[data_name[idx]].append(average_precision)
            train_recall_dict[data_name[idx]].append(average_recall)
            train_f1_dict[data_name[idx]].append(average_f1)
            print(
                'Train [epoch:%d, iter:%d] [%s] : Loss: %.04f | Acc: %.04f | Precision: %.04f | Recall: %.04f | F1_score: %.04f'
                % (epoch + 1, (batch_idx + 1 + epoch * length), data_name[idx], sum_loss / n,
                   average_acc, average_precision, average_recall, average_f1))

        # evaluate on the validation fold after each training epoch
        model.eval()  # FIX: hoisted out of the batch loop (was re-set every batch)
        with torch.no_grad():
            sum_loss = 0.0
            total_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
            hole_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
            crack_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
            normal_metrics = {"accuracy": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0}
            metric_result_list = [total_metrics, hole_metrics, crack_metrics, normal_metrics]
            with tqdm(total=m, desc='Val', leave=True, ncols=100, unit='img', unit_scale=True)as pbar:
                for batch_idx, (images, labels) in enumerate(val_dataloader):
                    images, labels = images.to(device), labels.to(device)
                    labels = labels.long()
                    outputs = model(images)
                    loss = criterion(outputs, labels)
                    sum_loss += loss.item()
                    _, predicted = torch.max(outputs.data, dim=1)

                    labels = labels.cpu()
                    predicted = predicted.cpu()
                    _update_metrics(metric_result_list, element_list, labels, predicted, num_class)
                    pbar.update()
            val_loss.append(sum_loss / m)
            for idx, metric_result in enumerate(metric_result_list):
                average_acc = metric_result["accuracy"] / m
                average_precision = metric_result["precision"] / m
                average_recall = metric_result["recall"] / m
                average_f1 = metric_result["f1"] / m

                val_acc_dict[data_name[idx]].append(average_acc)
                val_precision_dict[data_name[idx]].append(average_precision)
                val_recall_dict[data_name[idx]].append(average_recall)
                val_f1_dict[data_name[idx]].append(average_f1)
                print(
                    'Val [epoch:%d, iter:%d] [%s] : Loss: %.04f | Acc: %.04f | Precision: %.04f | Recall: %.04f | F1_score: %.04f'
                    % (epoch + 1, (batch_idx + 1 + epoch * length), data_name[idx], sum_loss / m,
                       average_acc, average_precision, average_recall, average_f1))
        total_f1 = val_f1_dict["total"][-1]
        val_f1_list.append(total_f1)
        # always keep the newest weights, plus a named copy when val F1 improves
        torch.save(model.state_dict(), ckpt_dir + "last.pt")
        if total_f1 == max(val_f1_list):
            torch.save(model.state_dict(), ckpt_dir + f"{netname}_val{total_f1:.4f}.pt")
            print(f"model:{netname} f1_val: {total_f1:.4f} save model...")
    return train_loss, val_loss, train_acc_dict, train_precision_dict, train_recall_dict, train_f1_dict, \
           val_acc_dict, val_precision_dict, val_recall_dict, val_f1_dict


def get_model(device, netname=None, num_class=2,test_pth_path=None, Trans_Learning=False, Freeze=False):
    """Factory: build the network identified by `netname`.

    All constructors share the same positional signature
    (device, num_class, test_pth_path, Trans_Learning, Freeze).
    Returns None for an unknown (or omitted) netname.
    """
    ctor_args = (device, num_class, test_pth_path, Trans_Learning, Freeze)
    if netname == "ResNet18":
        return ResNet18(*ctor_args)
    if netname == "ResNet18_PRO":
        return ResNet18_PRO(*ctor_args)
    if netname == "VGG13":
        return VGG13(*ctor_args)
    if netname == "GoogLeNet":
        return GoogLeNet(*ctor_args)
    if netname == "SVM":
        return SVM(*ctor_args)
    if netname == "SDT":
        return SDTModel(*ctor_args)
    return None


def main():
    """Entry point: load the dataset, then train and plot each model in net_list."""
    # 1. load dataset
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    data_path = "train_data"
    train_data, train_label = GenData(data_path, h=416, w=416, a=859, b=1715)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # 2. prepare hyper-parameters
    batch_size = 32
    base_learning_rate = 0.001
    epochs = 100
    k = 5  # to set the ratio ==> train dataset / val dataset = (k-1)/1
    num_class = 3  # the number of defect types
    label_smooth = False  # using label smoothing
    lr_scheduler = True  # using exponential learning-rate decay
    # Per-net learning-rate overrides, resolved fresh for each net. FIX: the
    # original mutated `learning_rate` in place inside the loop, so an SDT/SVM
    # override leaked into every model trained after it in net_list.
    lr_overrides = {"SDT": 1e-6, "SVM": 1e-5}
    # 3. train
    net_list = ["ResNet18_PRO"]  # the models which need to be trained
    for netname in net_list:
        result_path = f"result/{netname}"
        ckpt_dir = f"{result_path}/checkpoints/"
        os.makedirs(ckpt_dir, exist_ok=True)

        learning_rate = lr_overrides.get(netname, base_learning_rate)
        print(f"\n\n######### Start Training {netname} model ######### ")
        kfold_train_loss, kfold_val_loss, kfold_train_acc, kfold_train_precision, kfold_train_recall, kfold_train_f1, \
        kfold_val_acc, kfold_val_precision, kfold_val_recall, kfold_val_f1 = \
            k_fold_training(netname, num_class, k, batch_size, train_data, train_label, epochs, learning_rate, device,
                            ckpt_dir, label_smooth, lr_scheduler)
        show_result(result_path, kfold_train_loss, kfold_val_loss, kfold_train_acc, kfold_train_precision, kfold_train_recall,
                    kfold_train_f1, kfold_val_acc, kfold_val_precision, kfold_val_recall, kfold_val_f1, epochs)


# Run the training pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()