#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2020/12/10 13:06
# @Author  : lxy
import os

import torch
import torch.utils.data as Data
import logging
import time
import Architecture
from Utils import Loader, Acc, Truple_Dataset, Loader_test, score, confusion_matrix, Final_Loss

# Select the first GPU when available, otherwise fall back to CPU.
# (Fixed: the original line read `device = device = ...`, a duplicated
# assignment with no effect beyond the single binding.)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Mini-batch size shared by the DataLoader(s) built in this module.
Batch_size = 64
def TextCNN_SL_aug_train(epoch_, dataset, category, word_num, T_0, project_path, single, logger_, k, result_time):
    """Train the TextCNN "_SL_aug" model with k-fold cross-validation.

    For each of the ``k`` folds: build a fresh TextCNN, train it for
    ``epoch_`` epochs with Adam, and after every training batch evaluate on
    the held-out fold, logging accuracy/recall/precision/specificity/F1.
    The globally best checkpoint (by accuracy) is saved to
    ``<project_path>/vec2tensor/Basic_state.npy`` and a summary of the
    average and best scores is appended to
    ``<project_path>/logs/result<result_time>.txt``.

    Args:
        epoch_: number of training epochs per fold.
        dataset: dataset name; used only to build the log directory path.
        category: number of output classes.
        word_num: embedding vocabulary size passed to ``Architecture.TextCNN``.
        T_0: cosine-annealing restart period; currently unused (the scheduler
            is disabled) but kept for interface compatibility.
        project_path: project root under which logs and checkpoints are written.
        single: tag embedded in the log-file name.
        logger_: ``logging.Logger`` receiving per-batch metric lines.
        k: number of cross-validation folds.
        result_time: timestamp string whose first 10 chars name the log
            directory and whose chars from index 11 name the log file
            (presumably "YYYY-MM-DD HH:MM:SS" — TODO confirm with callers).

    Side effects: creates directories, writes a .log file, saves a checkpoint,
    and appends a results summary; also attaches (and on exit detaches) two
    handlers on ``logger_``.
    """
    lr = 0.001
    origintensor, auto_embedding, labeltensor = Loader("train", project_path)
    # Renamed from `set` to avoid shadowing the builtin.
    full_set = Truple_Dataset(origintensor, auto_embedding, labeltensor)
    train_loader = Data.DataLoader(
        dataset=full_set,
        batch_size=Batch_size,
        shuffle=True,
        drop_last=True,
        pin_memory=False
    )

    model_name = "TextCNN"
    model_type = "_SL_aug"
    model_single = single
    model_lr_schedule = "_cosine"
    str_ = model_name + model_type + str(model_single) + model_lr_schedule + "_" + result_time[11:]
    time_ = project_path + '/logs/' + dataset + "/" + result_time[:10] + "/"
    print(time_ + str_)
    # exist_ok replaces the racy exists()/makedirs() pair of the original.
    os.makedirs(time_, exist_ok=True)
    fh = logging.FileHandler(time_ + str_ + ".log")
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formater = logging.Formatter('%(message)s')
    fh.setFormatter(formater)
    ch.setFormatter(formater)
    logger_.addHandler(fh)
    logger_.addHandler(ch)
    best_acc_score, best_recall_score, best_precision_score, best_specificity_score, best_f1_score = 0, 0, 0, 0, 0
    avg_acc_score, avg_recall_score, avg_precision_score, avg_specificity_score, avg_f1_score = 0, 0, 0, 0, 0
    loss_fun = Final_Loss()
    # NOTE(review): best_tmp and best_*_score are never reset between folds,
    # so they track the best result across ALL folds seen so far, and the
    # "average" below sums that running best per fold — confirm intended.
    best_tmp = [0, 0, 0, 0, 0]
    for i in range(k):
        print("*"*25, "第", i, "次交叉验证", "*"*25)
        test_tensor, numnp, test_label = full_set.test(i)
        # Side-effecting call: switches the dataset to the i-th training
        # split (return value was an unused local in the original).
        full_set.train(i)
        epoch = 0
        # Fresh model and optimizer per fold.
        cnn = Architecture.TextCNN(category=category, num_embedding=word_num)
        cnn.to(device)
        cnn.train()
        optimizer = torch.optim.Adam(cnn.parameters(), lr=lr, weight_decay=1e-6)

        while epoch < epoch_:
            for step, (batch_x, auto_embedding, batch_y) in enumerate(train_loader):
                cnn.zero_grad()
                out = cnn(batch_x.to(device), auto_embedding.long().to(device))
                out = out.view(-1, out.shape[2])
                batch_y = batch_y.view(-1)
                train_loss = loss_fun(out.to(device), batch_y.long().to(device))
                # Evaluate on the held-out fold after every batch; no_grad
                # avoids building an autograd graph that is never used.
                cnn.eval()
                with torch.no_grad():
                    test_out = cnn(test_tensor.to(device), numnp.long().to(device))
                    test_out = test_out.view(-1, test_out.shape[2])
                    pred_test = torch.max(test_out, 1)[1].data.cpu().numpy()
                    test_label = test_label.view(-1)
                    acc_score, recall_score, precision_score, specificity_score, f1_score \
                        = score(confusion_matrix(test_label.view(-1).detach().numpy(), pred_test, category))
                    test_loss = loss_fun(test_out.to(device), test_label.long().to(device))
                if best_acc_score < acc_score:
                    best_params = {'net': cnn.state_dict(),
                                   'optimizer': optimizer.state_dict(),
                                   'epoch': epoch,
                                   'loss': test_loss, 'step': step}
                    best_acc_score, best_recall_score, best_precision_score, best_specificity_score, best_f1_score = \
                        acc_score, recall_score, precision_score, specificity_score, f1_score
                    best_tmp[0], best_tmp[1], best_tmp[2], best_tmp[3], best_tmp[
                        4] = acc_score, recall_score, precision_score, specificity_score, f1_score
                    torch.save(best_params, project_path + "/vec2tensor/Basic_state.npy")
                logger_.info(
                    "epoch:%3s batch:%3s | train_loss: %s | test_loss: %.3f | acc_score:\033[1;30;41m %.3f \033[0m |" \
                    " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
                        epoch, step, str(train_loss.item())[:10].rjust(10), test_loss, acc_score, recall_score,
                        str(precision_score)[:5].rjust(5), str(specificity_score)[:5].rjust(5),
                        str(f1_score)[:5].rjust(5))
                )
                cnn.train()
                train_loss.backward()
                optimizer.step()
            epoch += 1
            torch.cuda.empty_cache()
        # Accumulate the per-fold (running-best) scores for averaging.
        avg_acc_score += best_tmp[0]
        avg_recall_score += best_tmp[1]
        avg_precision_score += best_tmp[2]
        avg_specificity_score += best_tmp[3]
        avg_f1_score += best_tmp[4]
    # Fixed: divide by the actual fold count k, not a hard-coded 5.
    avg = "| acc_score: %.3f |" \
          " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
              avg_acc_score / k, avg_recall_score / k,
              str(avg_precision_score / k)[:5].rjust(5), str(avg_specificity_score / k)[:5].rjust(5),
              str(avg_f1_score / k)[:5].rjust(5))
    best = "| acc_score: %.3f |" \
           " recall_score: %.3f | precision_score: %s | specificity_score: %s | f1_score: %s | " % (
               best_acc_score, best_recall_score, str(best_precision_score)[:5].rjust(5),
               str(best_specificity_score)[:5].rjust(5), str(best_f1_score)[:5].rjust(5))
    print(avg)
    print(best)
    with open(project_path + "/logs/result" + result_time + ".txt", "a") as f:
        f.write("*" * 10 + time_ + str_ + ".result" + "*" * 10 + "\n")
        f.write("*" * 10 + "avg result" + "*" * 10 + "\n")
        f.write(avg + "\n")
        f.write("*" * 10 + "best result" + "*" * 10 + "\n")
        f.write(best + "\n")
        f.write("-" * 131 + "\n")
    # The `with` block already closed the file (the original's trailing
    # f.close() was redundant). Detach this run's handlers so repeated
    # calls don't duplicate every log line.
    logger_.removeHandler(fh)
    logger_.removeHandler(ch)
    fh.close()