# -*- coding: utf-8 -*-
"""
@Time:Created on 2021/7/
@author: Qichang Zhao
"""
import random
import os
import time
from models import AlexNet,VGG,cfgs,make_layers
from dataset import CustomDataSet, collater
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
from tqdm import tqdm
from tensorboardX import SummaryWriter
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import classification_report,accuracy_score
import json

def save_report(report, lable, path=None):
    """Serialize a classification-report dict to ``<path><lable>.json``.

    Args:
        report: dict as produced by sklearn ``classification_report(..., output_dict=True)``.
        lable: tag used as the output file name (e.g. "Train"); spelling kept
            for backward compatibility with existing callers.
        path: output directory (including trailing separator). Defaults to the
            module-level ``tb_path`` set in the ``__main__`` block.
    """
    if path is None:
        path = tb_path  # global configured per task/model in the training loop
    report_json = json.dumps(report, sort_keys=False, indent=4, separators=(',', ': '))
    # Context manager guarantees the handle is closed (original leaked it).
    with open(path + '{}.json'.format(lable), 'w') as f:
        f.write(report_json)

def test_precess(model, pbar, LOSS):
    """Run one evaluation pass of ``model`` over the batches yielded by ``pbar``.

    Args:
        model: classifier returning per-class logits of shape (batch, task).
        pbar: iterable of ``(index, (ids, images, labels))`` — typically a tqdm
            over an enumerated DataLoader.
        LOSS: criterion taking (logits, labels).

    Returns:
        (IDs, Y, P, test_loss, report, accuracy) — sample ids, true labels,
        predicted labels, mean loss, sklearn report dict, and accuracy.

    NOTE: reads the module-level global ``task`` (number of classes) set in
    the ``__main__`` loop.
    """
    model.eval()
    test_losses = []
    IDs, Y, P = [], [], []
    with torch.no_grad():
        for i, data in pbar:
            '''data preparation '''
            ids, images, labels = data
            IDs.extend(ids)
            images = images.cuda()
            labels = labels.cuda()

            predicted_scores = model(images)
            loss = LOSS(predicted_scores, labels)
            # .cpu().numpy() is safe here (inside no_grad); the deprecated
            # ``.data`` access from the original is dropped.
            correct_labels = labels.cpu().numpy()
            predicted_scores = F.softmax(predicted_scores, dim=1).cpu().numpy()
            predicted_labels = np.argmax(predicted_scores, axis=1)

            Y.extend(correct_labels)
            P.extend(predicted_labels)
            test_losses.append(loss.item())
    # BUG FIX: target_names must be strings (ints raise in modern sklearn);
    # pass labels= as well so the report rows stay aligned with class ids even
    # when some classes are absent from this split. zero_division=0 keeps the
    # default value for undefined metrics while silencing the warning.
    report = classification_report(
        Y, P,
        labels=list(range(task)),
        target_names=[str(i) for i in range(task)],
        output_dict=True,
        zero_division=0,
    )
    accuracy = accuracy_score(Y, P)
    test_loss = np.average(test_losses)  # mean loss over the epoch
    return IDs, Y, P, test_loss, report, accuracy

def test_model(dataset_load, LOSS):
    """Evaluate the module-level ``model`` on ``dataset_load`` and log results.

    Appends per-sample predictions to ``<tb_path>predictions.txt`` and prints a
    one-line loss/accuracy summary. Relies on the module-level globals
    ``model`` and ``tb_path`` set in the ``__main__`` block.

    Args:
        dataset_load: DataLoader yielding (ids, images, labels) batches.
        LOSS: criterion passed through to ``test_precess``.

    Returns:
        (results_string, loss, report_dict, accuracy)
    """
    test_pbar = tqdm(
        enumerate(
            BackgroundGenerator(dataset_load)),
        total=len(dataset_load))
    IDs, T, P, loss_test, report_test, accuracy_test = \
        test_precess(model, test_pbar, LOSS)
    with open(tb_path + "predictions.txt", 'a') as f:
        # BUG FIX: header previously misspelled "lable".
        f.write("id label prediction\n")
        for sample_id, truth, pred in zip(IDs, T, P):
            f.write("{} {} {}\n".format(sample_id, truth, pred))
    results = 'Loss:{:.5f};Accuracy:{:.5f}.' \
        .format(loss_test, accuracy_test)
    print(results)
    return results, loss_test, report_test, accuracy_test

def shuffle_dataset(dataset, seed):
    """Deterministically shuffle *dataset* in place and return it.

    Uses a local ``numpy.random.Generator`` so NumPy's *global* RNG state is
    not clobbered — the original called ``np.random.seed``, silently reseeding
    every later consumer of the global RNG.

    Args:
        dataset: mutable sequence (e.g. list of file names); shuffled in place.
        seed: integer seed; the same seed always yields the same order.

    Returns:
        The same ``dataset`` object, shuffled.
    """
    rng = np.random.default_rng(seed)
    rng.shuffle(dataset)
    return dataset

class hyperparameter():
    """Bag of training hyperparameters consumed by the main script."""

    def __init__(self):
        # Kept as a single table so the configuration reads at a glance.
        settings = {
            'Learning_rate': 3e-4,      # Adam step size
            'Epoch': 500,               # maximum number of training epochs
            'Batch_size': 128,
            'validation_split': 0.2,    # fraction of the train set held out
            'weight_decay': 5e-4,       # only used by the commented AdamW setup
            'patience': 20,             # early-stopping patience (epochs)
        }
        for attr, value in settings.items():
            setattr(self, attr, value)

# ``os`` is already imported at the top of the file — the duplicate import is
# removed. Pin this process to GPU 3 before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
if __name__ == "__main__":
    """select seed"""
    # Seed every RNG source (Python, PyTorch CPU, all GPUs) for repeatability;
    # cuDNN determinism is deliberately left disabled (commented below).
    SEED = 1234
    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    # torch.backends.cudnn.deterministic = True
    # Train and evaluate one model per classification granularity
    # (2-, 7- and 25-class variants of the same dataset).
    for task in [2,7,25]:
    # if task == 2:
    #     weight_CE = torch.FloatTensor([0.5, 0.5]).cuda()
    # elif task == 7:
    #     weight_CE = torch.FloatTensor([0.3, 0.7]).cuda()
    # elif task == 25:
    #     weight_CE = torch.FloatTensor([0.3, 0.7]).cuda()
        root_path = "./{}/".format(task)
        """Output files."""
        if not os.path.exists(root_path):
            os.makedirs(root_path)

        """init hyperparameters"""
        hp = hyperparameter()

        """Load preprocessed data."""
        # Build the list of training file names, excluding any name listed in
        # gred_img_train.txt (presumably known-bad/grey images — TODO confirm).
        load_train_path = "./Dataset/Train/"
        train_files = os.listdir(load_train_path)
        with open("./Dataset/gred_img_train.txt", "r") as f:
            gred_list_train = f.read().strip().split('\n')
        train_set = [x for x in train_files if x not in gred_list_train]

        # Same filtering for the held-out test split.
        load_test_path = "./Dataset/Test/"
        test_files = os.listdir(load_test_path)
        with open("./Dataset/gred_img_test.txt", "r") as f:
            gred_list_test = f.read().strip().split('\n')
        test_dataset = [x for x in test_files if x not in gred_list_test]
        print("load data")
        print("data shuffle")
        train_set = shuffle_dataset(train_set, SEED)
        # 80/20 train/validation split. NOTE(review): the hard-coded 0.2
        # duplicates hp.validation_split — keep them in sync.
        TVdataset_len = len(train_set)
        valid_size = int(0.2 * TVdataset_len)
        train_size = TVdataset_len - valid_size
        train_dataset, valid_dataset = torch.utils.data.random_split(train_set, [train_size, valid_size])
        train_dataset = CustomDataSet(train_dataset)
        valid_dataset = CustomDataSet(valid_dataset)
        test_dataset = CustomDataSet(test_dataset)
        # collater maps file names to (ids, images, labels) batches; validation
        # reuses the Train/ collate_fn because its files live under Train/.
        train_collate_fn = collater("./Dataset/Train/",task)
        test_collate_fn = collater("./Dataset/Test/",task)
        train_dataset_load = DataLoader(train_dataset, batch_size=hp.Batch_size, shuffle=True, num_workers=2,
                                        collate_fn=train_collate_fn)
        valid_dataset_load = DataLoader(valid_dataset, batch_size=hp.Batch_size, shuffle=False, num_workers=2,
                                        collate_fn=train_collate_fn)
        test_dataset_load = DataLoader(test_dataset, batch_size=hp.Batch_size, shuffle=False, num_workers=2,
                                       collate_fn=test_collate_fn)

        """ create model"""
        # A fresh model per task; the VGG variant is kept as a ready alternative.
        # model = VGG(make_layers(cfgs["A"], batch_norm=False), num_classes=task).cuda()
        model = AlexNet(num_classes=task).cuda()
        """weight initialize"""
        # Xavier-init every weight tensor (dim > 1 skips biases/1-D params).
        weight_p, bias_p = [], []
        for p in model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        # Split parameters into weights/biases; only used by the commented
        # AdamW configuration below (bias terms exempt from weight decay).
        for name, p in model.named_parameters():
            if 'bias' in name:
                bias_p += [p]
            else:
                weight_p += [p]
        optimizer = torch.optim.Adam(model.parameters(), lr=hp.Learning_rate)
        # optimizer = optim.AdamW(
        #     [{'params': weight_p, 'weight_decay': hp.weight_decay}, {'params': bias_p, 'weight_decay': 0}], lr=hp.Learning_rate)
        # scheduler = optim.lr_scheduler.CyclicLR(optimizer, base_lr=hp.Learning_rate, max_lr=hp.Learning_rate*10, cycle_momentum=False,
        #                                         step_size_up=train_size // hp.Batch_size)
        Loss = nn.CrossEntropyLoss()
        # print(model)
        """ 使用tensorboardX来跟踪实验"""
        # Track the run with tensorboardX; one log dir per (task, model).
        tb_path = "./{}/{}/".format(task,model.model_name)
        if not os.path.exists(tb_path):
            os.makedirs(tb_path)
        note = ''
        writer = SummaryWriter(log_dir=tb_path, comment=note)

        """Start training."""
        print('Training...')
        # Early stopping: stop after hp.patience epochs without a new best
        # validation accuracy; best weights are checkpointed along the way.
        patience = 0
        best_score = 0
        epoch_len = len(str(hp.Epoch))  # zero-pad width for the progress line
        train_start = time.time()
        for epoch in range(hp.Epoch):
            trian_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(train_dataset_load)),
                total=len(train_dataset_load))
            """train"""
            train_losses_in_epoch = []
            model.train()
            for trian_i, train_data in trian_pbar:
                '''data preparation '''
                _, trian_img, trian_labels = train_data
                trian_img = trian_img.cuda()
                trian_labels = trian_labels.cuda()
                '''前向传播与反向传播'''
                '''梯度置0'''
                optimizer.zero_grad()
                # Forward pass, backward pass, optimizer step.
                predicted_interaction = model(trian_img)
                train_loss = Loss(predicted_interaction, trian_labels)
                train_losses_in_epoch.append(train_loss.item())
                train_loss.backward()
                # torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10)
                optimizer.step()
                # scheduler.step()
            train_loss_a_epoch = np.average(train_losses_in_epoch)  # mean training loss of this epoch
            writer.add_scalar('Train Loss', train_loss_a_epoch, epoch)

            """valid"""
            # Full validation pass; accuracy drives checkpointing/early stop.
            valid_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(valid_dataset_load)),
                total=len(valid_dataset_load))
            _, _, _, valid_loss_a_epoch, valid_report, valid_accuracy = test_precess(model, valid_pbar, Loss)
            writer.add_scalar('Valid Loss', valid_loss_a_epoch, epoch)
            writer.add_scalar('Valid score', valid_accuracy, epoch)

            # Test-set pass every epoch — for monitoring only; it does not
            # influence model selection (that uses validation accuracy).
            test_pbar = tqdm(
                enumerate(
                    BackgroundGenerator(test_dataset_load)),
                total=len(test_dataset_load))
            _, _, _, test_loss, test_report, test_accuracy = test_precess(model, test_pbar, Loss)
            writer.add_scalar('test Loss', test_loss, epoch)
            writer.add_scalar('test accuracy', test_accuracy, epoch)

            # Checkpoint on validation improvement; otherwise count toward
            # the early-stopping patience budget.
            if valid_accuracy > best_score:
                best_score = valid_accuracy
                patience = 0
                torch.save(model.state_dict(), tb_path + 'valid_best_checkpoint.pth')
            else:
                patience+=1
            print_msg = (f'[{epoch+1:>{epoch_len}}/{hp.Epoch:>{epoch_len}}] ' +
                         f'patience: {patience} ' +
                         f'train_loss: {train_loss_a_epoch:.5f} ' +
                         f'valid_loss: {valid_loss_a_epoch:.5f} ' +
                         f'test_loss: {test_loss:.5f} ' +
                         f'valid_accuracy: {valid_accuracy:.5f} ' +
                         f'test_accuracy: {test_accuracy:.5f} ')
            print(print_msg)

            if patience == hp.patience:
                break
        train_end = time.time()
        trian_time = train_end-train_start
        print("train time:{} s".format(trian_time))
        """Test the best model"""
        """load trained model"""
        # Restore the best-on-validation weights before the final evaluations.
        model.load_state_dict(torch.load(tb_path + "valid_best_checkpoint.pth"))

        # Final evaluation on the train split (reports + summary line).
        trainset_test_results, Loss_train, report_train, accuracy_train = \
            test_model(train_dataset_load, Loss)
        save_report(report_train,"Train")
        with open(tb_path + 'results.txt', 'a') as f:
            f.write("The result of train set :" + trainset_test_results + '\n')

        # Final evaluation on the validation split.
        validset_test_results, Loss_valid, report_valid, accuracy_valid = \
            test_model(valid_dataset_load, Loss)
        save_report(report_valid, "Valid")
        with open(tb_path + 'results.txt', 'a') as f:
            f.write("The result of valid set:" + validset_test_results + '\n')

        # Final evaluation on the test split, timed separately.
        test_start = time.time()
        testset_test_results, Loss_test, report_test,accuracy_test = \
            test_model(test_dataset_load, Loss)
        test_end = time.time()
        test_time = test_end-test_start
        save_report(report_test, "Test")
        print("test time:{} s".format(test_time))
        with open(tb_path + 'results.txt', 'a') as f:
            f.write("The result of test set:" + testset_test_results + '\n')
            f.write("Train time:{}s;Test time:{}s\n".format(trian_time,test_time))
        writer.close()



