#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2020/12/10 13:06
# @Author  : lxy
import itertools
import os

import numpy as np
import torch
import torch.utils.data as Data
import logging
import time


import Architecture
from Utils import Loader, EX, Final_Loss, shuffer, Acc, Loader_test, Truple_Dataset, Regularization, test_shuffle, \
    score, confusion_matrix, sharpen

# Select the first GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")




# Mini-batch size for the training DataLoader.
Batch_size = 64
# NOTE(review): not referenced in the visible code — confirm it is used elsewhere.
Next_Inset = 3
# NOTE(review): not referenced in the visible code — confirm it is used elsewhere.
weight_decay = 0.0001
def SemiCNN_SSL_aug_train(epoch_, dataset, category, word_num, T_0, project_path, single, logger_, k, result_time):
    """k-fold semi-supervised training of a SemiCNN teacher/student pair.

    A pretrained teacher is loaded from a checkpoint and trained jointly with
    a freshly initialized student on labeled batches plus augmented unlabeled
    batches; every 3rd epoch the teacher's weights are blended toward the
    student's.  Per-batch metrics are logged, and the per-fold best scores
    are averaged into a shared result file.

    Args:
        epoch_: number of training epochs per fold.
        dataset: dataset name, used to build the log directory path.
        category: number of output classes.
        word_num: accepted for interface compatibility; unused here.
        T_0: accepted for interface compatibility; unused here.
        project_path: project root; data, checkpoint and log paths hang off it.
        single: tag embedded in the log file name.
        logger_: logging.Logger that receives per-batch metric lines.
        k: number of cross-validation folds.
        result_time: timestamp string; [:10] is the date part, [11:] the time part.
    """
    lr = 0.001
    # Load the training set (numpy -> tensor) and wrap it for k-fold splitting.
    origintensor, _, labeltensor = Loader("train", project_path)
    set = Truple_Dataset(origintensor, labeltensor, _)
    # DataLoader handles batching and shuffling; pin_memory would speed up
    # host->GPU transfers but is disabled here.
    train_loader = Data.DataLoader(
        dataset=set,
        batch_size=Batch_size,
        shuffle=True,
        drop_last=True,
        pin_memory=False
    )
    loss_fun = Final_Loss()
    # Fixed: `reduce=True` is deprecated in modern PyTorch; reduction='mean'
    # is the exact equivalent.  NOTE(review): this MSE loss is never used
    # below — test_loss is computed with loss_fun instead; confirm intent.
    loss_fun_test = torch.nn.MSELoss(reduction='mean')
    # Build the per-run log file name and directory.
    model_name = "SemiCNN"
    model_type = "_SSL_aug_"
    model_single = single
    model_lr_schedule = "_cosine"
    str_ = model_name + model_type + str(model_single) + model_lr_schedule + "_" + result_time[11:]
    time_ = project_path + '/logs/' + dataset + "/" + result_time[:10] + "/"
    print(time_ + str_)
    # Simplified: exist_ok replaces the explicit existence check.
    os.makedirs(time_, exist_ok=True)
    # Mirror log records to a per-run file and to the console.
    fh = logging.FileHandler(time_ + str_ + ".log")
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formater = logging.Formatter('%(message)s')
    fh.setFormatter(formater)
    ch.setFormatter(formater)
    logger_.addHandler(fh)
    logger_.addHandler(ch)
    best_acc_score, best_recall_score, best_precision_score, best_specificity_score, best_f1_score = 0, 0, 0, 0, 0
    avg_acc_score, avg_recall_score, avg_precision_score, avg_specificity_score, avg_f1_score = 0, 0, 0, 0, 0
    # NOTE(review): best_tmp is only refreshed when a fold beats the global
    # best across ALL folds, so later folds can contribute a stale value to
    # the averages — confirm this is intended.
    best_tmp = [0, 0, 0, 0, 0]
    for i in range(k):
        print("*" * 25, "第", i, "次交叉验证", "*" * 25)
        test_tensor, test_label, __ = set.test(i)
        del __
        len_ = set.train(i)
        epoch = 0
        # Teacher starts from a pretrained checkpoint; student from scratch.
        teacher = Architecture.Teacher(category=category)
        checkout = torch.load(project_path + "/vec2tensor/aug_Basic_state.npy")
        teacher.load_state_dict(checkout["net"])
        teacher.to(device)
        teacher.train()
        student = Architecture.Teacher(category=category)
        student.to(device)
        student.train()
        # One optimizer over both networks.  Fixed: reuse the `lr` local
        # instead of re-hard-coding 0.001.
        optimizer = torch.optim.Adam(itertools.chain(teacher.parameters(), student.parameters()), lr=lr)
        U1_, U2_, U3_, U4_, U5_ = shuffer(project_path, beta=0.9)
        del U3_, U4_

        while epoch < epoch_:
            torch.cuda.empty_cache()
            for step, (batch_x, batch_y, _) in enumerate(train_loader):
                teacher.train()
                student.train()
                teacher.zero_grad()
                student.zero_grad()
                # Supervised branch (teacher) plus two student views and one
                # teacher view of the unlabeled/augmented data.
                out = sharpen(teacher(batch_x.float().to(device)).view(-1, category), category)
                U1, U2, U5 = test_shuffle(U1_, U2_, U5_)
                U1_out = sharpen(student(U1.to(device)).view(-1, category), category)
                U2_out = sharpen(student(U2.to(device)).view(-1, category), category)
                U3_out = sharpen(teacher(U5.to(device)).view(-1, category), category)
                del U1, U2, U5
                train_loss = loss_fun(out.to(device), batch_y.view(-1, batch_y.shape[2]).to(device), U1_out.to(device),
                                      U2_out.to(device),
                                      U3_out.to(device), 0.9)
                train_loss.backward(retain_graph=True)
                optimizer.step()
                del _
                # Evaluate the student on the (re-shuffled) held-out fold.
                student.eval()
                teacher.eval()
                test_tensor, test_label = test_shuffle(test_tensor, test_label)
                test_out = sharpen(student(test_tensor.float().to(device)).view(-1, category), category)
                pred_test = torch.max(test_out, 1)[1].data.cpu().numpy()
                # NOTE(review): loss_fun is called with 2 args here but 6
                # during training; confirm Final_Loss supports both arities
                # (loss_fun_test may have been intended here).
                test_loss = loss_fun(test_out.to(device), test_label.to(device))
                del test_out, U1_out, U2_out, U3_out
                acc_score, recall_score, precision_score, specificity_score, f1_score \
                    = score(confusion_matrix(test_label.view(-1).detach().numpy(), pred_test, category))
                # Record the best metrics seen so far.
                if best_acc_score < acc_score:
                    best_acc_score, best_recall_score, best_precision_score, best_specificity_score, best_f1_score = \
                        acc_score, recall_score, precision_score, specificity_score, f1_score
                    best_tmp[0], best_tmp[1], best_tmp[2], best_tmp[3], best_tmp[
                        4] = acc_score, recall_score, precision_score, specificity_score, f1_score

                logger_.info(
                    "epoch:%3s batch:%3s | train_loss: %s | test_loss: %.3f | acc_score: \033[1;30;41m %.3f \033[0m |" \
                    " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
                        epoch, step, str(train_loss.item())[:10].rjust(10), test_loss, acc_score, recall_score,
                        str(precision_score)[:5].rjust(5), str(specificity_score)[:5].rjust(5),
                        str(f1_score)[:5].rjust(5))
                )
            # Every 3rd epoch, blend teacher weights toward the student's.
            # Fixed: the original tested `(epoch + 1) / 3 == 0`, which is never
            # true for epoch >= 0, so the blend never ran; `%` is the intended
            # modulo test.
            if (epoch + 1) % 3 == 0:
                for param1, param2 in zip(teacher.named_parameters(), student.named_parameters()):
                    if 'weight' in param1[0]:
                        # Fixed: blend .data with .data (the original mixed a
                        # raw Parameter into the in-place data update).
                        param1[1].data = 0.3 * param2[1].data + 0.7 * param1[1].data

            epoch += 1
            torch.cuda.empty_cache()
        # Accumulate this fold's best metrics for the cross-fold average.
        avg_acc_score += best_tmp[0]
        avg_recall_score += best_tmp[1]
        avg_precision_score += best_tmp[2]
        avg_specificity_score += best_tmp[3]
        avg_f1_score += best_tmp[4]
    # Fixed: average over the actual fold count k (the original hard-coded 5,
    # which is wrong whenever k != 5).
    avg = "| acc_score: %.3f |" \
          " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
              avg_acc_score / k, avg_recall_score / k,
              str(avg_precision_score / k)[:5].rjust(5), str(avg_specificity_score / k)[:5].rjust(5),
              str(avg_f1_score / k)[:5].rjust(5))
    best = "| acc_score: %.3f |" \
           " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
               best_acc_score, best_recall_score, str(best_precision_score)[:5].rjust(5),
               str(best_specificity_score)[:5].rjust(5), str(best_f1_score)[:5].rjust(5))
    print(avg)
    print(best)
    # Append both summaries to the shared result file.  The `with` block
    # closes the file; the original's trailing f.close() was redundant.
    with open(project_path + "/logs/result" + result_time + ".txt", "a") as f:
        f.write("*" * 10 + time_ + str_ + ".result" + "*" * 10 + "\n")
        f.write("*" * 10 + "avg result" + "*" * 10 + "\n")
        f.write(avg + "\n")
        f.write("*" * 10 + "best result" + "*" * 10 + "\n")
        f.write(best + "\n")
        f.write("-" * 131 + "\n")