# -*- coding: utf-8 -*-
"""
@Time:Created on 2021/7/
@author: Qichang Zhao
"""
import random
import os
import time
from models import AlexNet,VGG,cfgs,make_layers
from dataset import CustomDataSet, collater
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
from tqdm import tqdm
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import accuracy_score, f1_score

def show_result(save_path, dataset, Loss_List, Accuracy_List, F1_score_List):
    """Print and append to ``results.txt`` the mean(std) of the per-fold metrics.

    Args:
        save_path: directory (with trailing separator) where results.txt lives.
        dataset: label for the split being reported (e.g. "Trainset").
        Loss_List / Accuracy_List / F1_score_List: per-fold metric values.
    """
    # np.std is the population standard deviation, identical to sqrt(var).
    Loss_mean, Loss_std = np.mean(Loss_List), np.std(Loss_List)
    Accuracy_mean, Accuracy_std = np.mean(Accuracy_List), np.std(Accuracy_List)
    F1_score_mean, F1_score_std = np.mean(F1_score_List), np.std(F1_score_List)

    print("The results on {}:".format(dataset))
    with open(save_path + 'results.txt', 'a') as f:
        f.write('{}:'.format(dataset) + '\n')
        f.write('Loss(std):{:.4f}({:.4f})'.format(Loss_mean, Loss_std) + '\n')
        f.write('Accuracy(std):{:.4f}({:.4f})'.format(Accuracy_mean, Accuracy_std) + '\n')
        # BUG FIX: the label said "Precision" but the value written is the F1
        # score (the console print below already said F1_score).
        f.write('F1_score(std):{:.4f}({:.4f})'.format(F1_score_mean, F1_score_std) + '\n')
    print('Loss(std):{:.4f}({:.4f})'.format(Loss_mean, Loss_std))
    print('Accuracy(std):{:.4f}({:.4f})'.format(Accuracy_mean, Accuracy_std))
    print('F1_score(std):{:.4f}({:.4f})'.format(F1_score_mean, F1_score_std))

def test_precess(model, pbar, LOSS):
    """Run one evaluation pass over the batches yielded by *pbar*.

    Args:
        model: network to evaluate (moved to eval mode here).
        pbar: iterable of ``(index, (images, labels))`` pairs (a tqdm wrapper).
        LOSS: criterion used to compute the reported loss.

    Returns:
        Tuple ``(Y, P, test_loss, Accuracy, F1_score)`` where Y/P are true and
        predicted class indices, ``test_loss`` is the mean batch loss and the
        F1 score is micro-averaged.
    """
    model.eval()
    batch_losses = []
    true_labels, pred_labels = [], []
    with torch.no_grad():
        for _, batch in pbar:
            # Move the batch onto the GPU before the forward pass.
            images, labels = batch
            images, labels = images.cuda(), labels.cuda()

            scores = model(images)
            batch_losses.append(LOSS(scores, labels).item())

            # Class prediction = argmax over the softmax probabilities.
            probs = F.softmax(scores, 1).to('cpu').data.numpy()
            true_labels.extend(labels.to('cpu').data.numpy())
            pred_labels.extend(np.argmax(probs, axis=1))

    test_loss = np.average(batch_losses)  # mean validation loss for the epoch
    Accuracy = accuracy_score(true_labels, pred_labels)
    F1_score = f1_score(true_labels, pred_labels, average='micro')
    return true_labels, pred_labels, test_loss, Accuracy, F1_score

def test_model(dataset_load, LOSS, net=None):
    """Evaluate a model on *dataset_load* and return a summary string plus metrics.

    Args:
        dataset_load: DataLoader over the split to evaluate.
        LOSS: criterion used for the reported loss.
        net: model to evaluate.  Defaults to the module-level ``model`` global
            (kept for backward compatibility with existing two-argument calls).

    Returns:
        ``(results, loss, accuracy, f1)`` where *results* is the formatted
        one-line summary that is also printed.
    """
    if net is None:
        # Fall back to the global set up in the __main__ training loop.
        net = model
    test_pbar = tqdm(
        enumerate(
            BackgroundGenerator(dataset_load)),
        total=len(dataset_load))
    T, P, loss_test, Accuracy_test, F1_score_test = \
        test_precess(net, test_pbar, LOSS)
    results = 'Loss:{:.5f};Accuracy:{:.5f};F1_score:{:.5f}.' \
        .format(loss_test, Accuracy_test, F1_score_test)
    print(results)
    return results, loss_test, Accuracy_test, F1_score_test

def get_kfold_data(i, datasets, k=5):
    """Return ``(trainset, validset)`` for fold ``i`` (0-based) of k-fold CV.

    The list is cut into ``k`` equal chunks of ``len(datasets) // k`` items;
    chunk ``i`` becomes the validation set and the rest the training set.
    When the length is not divisible by ``k``, the leftover items are folded
    into the LAST fold's validation set.
    """
    fold_size = len(datasets) // k
    val_start = i * fold_size

    if i == k - 1:
        # Last fold: validation absorbs the remainder of the list.
        return datasets[:val_start], datasets[val_start:]

    # Any other fold: validation is one full-size chunk, training is
    # everything before plus everything after it.
    val_end = val_start + fold_size
    validset = datasets[val_start:val_end]
    trainset = datasets[:val_start] + datasets[val_end:]
    return trainset, validset

def shuffle_dataset(dataset, seed):
    # Deterministically shuffle *dataset* IN PLACE and return the same object.
    # NOTE: this seeds NumPy's GLOBAL RNG as a side effect, which also pins any
    # later np.random calls in this process to the same stream.
    np.random.seed(seed)
    np.random.shuffle(dataset)
    return dataset

class hyperparameter():
    """Bag of training hyper-parameters used by the __main__ training loop."""
    def __init__(self):
        self.Learning_rate: float = 3e-4  # AdamW lr and CyclicLR base_lr (max_lr is 10x this)
        self.Epoch: int = 500             # maximum number of training epochs per fold
        self.Batch_size: int = 128
        self.validation_split: float = 0.2  # NOTE(review): not referenced in this file's visible code
        self.weight_decay: float = 5e-4   # applied to weight params only, biases get 0
        self.patience: int = 20           # early-stopping patience on the validation score

# Pin this process to GPU 7; must run before any CUDA context is created.
# (The redundant duplicate ``import os`` was removed — os is imported at the
# top of the file.)
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
if __name__ == "__main__":
    """select seed"""
    SEED = 1234
    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # torch.backends.cudnn.deterministic = True

    save_path = "./25/"
    """Output files."""
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    """init hyperparameters"""
    hp = hyperparameter()

    """Load preprocessed data."""
    # Keep only image files that are NOT listed in the gred_img_*.txt
    # exclusion files.
    load_train_path = "./Dataset/Train/"
    train_files = os.listdir(load_train_path)
    with open("./Dataset/gred_img_train.txt", "r") as f:
        gred_list_train = f.read().strip().split('\n')
    train_set = [x for x in train_files if x not in gred_list_train]

    load_test_path = "./Dataset/Test/"
    test_files = os.listdir(load_test_path)
    with open("./Dataset/gred_img_test.txt", "r") as f:
        gred_list_test = f.read().strip().split('\n')
    test_dataset = [x for x in test_files if x not in gred_list_test]
    print("load data")
    print("data shuffle")
    train_set = shuffle_dataset(train_set, SEED)
    K_Fold = 5
    Loss_List_train, Accuracy_List_train, F1_List_train = [], [], []
    Loss_List_valid, Accuracy_List_valid, F1_List_valid = [], [], []
    Loss_List_test, Accuracy_List_test, F1_List_test = [], [], []
    Time_list = []
    for i_fold in range(K_Fold):
        print('*' * 25, '第', i_fold + 1, '折', '*' * 25)
        train_dataset, valid_dataset = get_kfold_data(i_fold, train_set)
        train_size = len(train_dataset)
        train_dataset = CustomDataSet(train_dataset)
        valid_dataset = CustomDataSet(valid_dataset)
        # BUG FIX: the old code rebound ``test_dataset`` itself to a
        # CustomDataSet here, so from the second fold onward it wrapped an
        # already-wrapped dataset.  Bind the wrapper to a fresh name instead.
        test_data = CustomDataSet(test_dataset)
        train_collate_fn = collater("./Dataset/Train/")
        test_collate_fn = collater("./Dataset/Test/")
        train_dataset_load = DataLoader(train_dataset, batch_size=hp.Batch_size, shuffle=True, num_workers=2,
                                        collate_fn=train_collate_fn)
        valid_dataset_load = DataLoader(valid_dataset, batch_size=hp.Batch_size, shuffle=False, num_workers=2,
                                        collate_fn=train_collate_fn)
        test_dataset_load = DataLoader(test_data, batch_size=hp.Batch_size, shuffle=False, num_workers=2,
                                       collate_fn=test_collate_fn)

        """ create model"""
        model = VGG(make_layers(cfgs["A"], batch_norm=False), num_classes=25).cuda()
        """weight initialize"""
        weight_p, bias_p = [], []
        for p in model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        # Split parameters so weight decay is applied to weights only.
        for name, p in model.named_parameters():
            if 'bias' in name:
                bias_p += [p]
            else:
                weight_p += [p]
        optimizer = optim.AdamW(
            [{'params': weight_p, 'weight_decay': hp.weight_decay}, {'params': bias_p, 'weight_decay': 0}], lr=hp.Learning_rate)
        scheduler = optim.lr_scheduler.CyclicLR(optimizer, base_lr=hp.Learning_rate, max_lr=hp.Learning_rate*10, cycle_momentum=False,
                                                step_size_up=train_size // hp.Batch_size)
        Loss = nn.CrossEntropyLoss()
        # print(model)
        """Track the experiment with tensorboardX."""
        tb_path = "./25/{}/".format(i_fold)
        if not os.path.exists(tb_path):
            os.makedirs(tb_path)
        note = ''
        writer = SummaryWriter(log_dir=tb_path, comment=note)

        """Start training."""
        print('Training...')
        patience = 0
        best_score = 0
        epoch_len = len(str(hp.Epoch))
        # BUG FIX: time.clock() was removed in Python 3.8; use a monotonic
        # high-resolution timer instead.
        start = time.perf_counter()
        for epoch in range(hp.Epoch):
            train_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(train_dataset_load)),
                total=len(train_dataset_load))
            """train"""
            train_losses_in_epoch = []
            model.train()
            for train_i, train_data in train_pbar:
                '''data preparation '''
                train_img, train_labels = train_data
                train_img = train_img.cuda()
                train_labels = train_labels.cuda()
                # Zero gradients, then forward, backward, optimize.
                optimizer.zero_grad()
                predicted_interaction = model(train_img)
                train_loss = Loss(predicted_interaction, train_labels)
                train_losses_in_epoch.append(train_loss.item())
                train_loss.backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10)
                optimizer.step()
                scheduler.step()  # CyclicLR is stepped once per batch
            train_loss_a_epoch = np.average(train_losses_in_epoch)  # mean training loss of the epoch
            writer.add_scalar('Train Loss/{}'.format(i_fold), train_loss_a_epoch, epoch)

            """valid"""
            valid_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(valid_dataset_load)),
                total=len(valid_dataset_load))
            _, _, valid_loss_a_epoch, valid_Accuracy, valid_F1_score = test_precess(model, valid_pbar, Loss)
            # Model selection score = accuracy + micro-F1 on the validation split.
            valid_score = valid_Accuracy + valid_F1_score
            writer.add_scalar('Valid Loss/{}'.format(i_fold), valid_loss_a_epoch, epoch)
            writer.add_scalar('Valid score/{}'.format(i_fold), valid_score, epoch)

            test_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(test_dataset_load)),
                total=len(test_dataset_load))
            _, _, test_loss, test_Accuracy, test_F1_score = test_precess(model, test_pbar, Loss)
            writer.add_scalar('test Loss/{}'.format(i_fold), test_loss, epoch)
            writer.add_scalar('test accuracy/{}'.format(i_fold), test_Accuracy, epoch)
            writer.add_scalar('test f1 score/{}'.format(i_fold), test_F1_score, epoch)

            # Early-stopping bookkeeping: checkpoint on improvement, otherwise
            # let patience accumulate.
            if valid_score > best_score:
                best_score = valid_score
                patience = 0
                torch.save(model.state_dict(), save_path + 'valid_best_checkpoint_{}.pth'.format(i_fold))
            else:
                patience += 1
            print_msg = (f'[{epoch+1:>{epoch_len}}/{hp.Epoch:>{epoch_len}}] ' +
                         f'patience: {patience} ' +
                         f'train_loss: {train_loss_a_epoch:.5f} ' +
                         f'valid_loss: {valid_loss_a_epoch:.5f} ' +
                         f'test_loss: {test_loss:.5f} ' +
                         f'valid_accuracy: {valid_Accuracy:.5f} ' +
                         f'valid_f1 score: {valid_F1_score:.5f} ' +
                         f'test_accuracy: {test_Accuracy:.5f} ' +
                         f'test_f1 score: {test_F1_score:.5f} ')
            print(print_msg)

            if patience == hp.patience:
                break
        end = time.perf_counter()
        # BUG FIX: record the (positive) elapsed seconds; the old code appended
        # start - end, which is always negative.
        Time_list.append(end - start)
        """Test the best model"""
        """load trained model"""
        model.load_state_dict(torch.load(save_path + "valid_best_checkpoint_{}.pth".format(i_fold)))

        trainset_test_results, Loss_train, Accuracy_train, F1_score_train = \
            test_model(train_dataset_load, Loss)
        Loss_List_train.append(Loss_train)
        Accuracy_List_train.append(Accuracy_train)
        F1_List_train.append(F1_score_train)
        with open(save_path + 'results.txt', 'a') as f:
            f.write("The result of train set  on {} fold:".format(i_fold)+trainset_test_results + '\n')

        validset_test_results, Loss_valid, Accuracy_valid, F1_score_valid = \
            test_model(valid_dataset_load, Loss)
        Loss_List_valid.append(Loss_valid)
        Accuracy_List_valid.append(Accuracy_valid)
        F1_List_valid.append(F1_score_valid)
        with open(save_path + 'results.txt', 'a') as f:
            f.write("The result of valid set on {} fold:".format(i_fold) + validset_test_results + '\n')

        testset_test_results, Loss_test, Accuracy_test, F1_score_test = \
            test_model(test_dataset_load, Loss)
        Loss_List_test.append(Loss_test)
        Accuracy_List_test.append(Accuracy_test)
        F1_List_test.append(F1_score_test)
        with open(save_path + 'results.txt', 'a') as f:
            f.write("The result of test set on {} fold:".format(i_fold) + testset_test_results + '\n')
        writer.close()
    show_result(save_path, "Trainset", Loss_List_train,
                Accuracy_List_train, F1_List_train)
    show_result(save_path,"Validset", Loss_List_valid,
                Accuracy_List_valid, F1_List_valid)
    show_result(save_path,"Testset", Loss_List_test,
                Accuracy_List_test, F1_List_test)
    print(Time_list)



