#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2020/12/10 13:06
# @Author  : lxy
import numpy as np
import torch
import torch.utils.data as Data
import logging
import time


import Architecture


# Run on the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Mini-batch size shared by the train and dev DataLoaders.
Batch_size = 32
# Interval counter for the next pseudo-labelling pass; bumped by 6 or 3 in the
# main loop depending on whether new samples were promoted to the train set.
Next_Inset = 3
if __name__ == "__main__":
    # Semi-supervised self-training loop:
    #   1. supervised phase  — train the CNN on the labelled train set;
    #   2. pseudo-label phase — run the model over the unlabelled dev set,
    #      promote high-confidence samples (with their predicted labels)
    #      into the train set, and shrink the dev set accordingly.
    # NOTE(review): Loader, load_test, EX, Acc and get_remove_index are not
    # defined or imported in this file — presumably they come from a missing
    # `from Architecture import *` (or a sibling module); confirm before running.
    lr = 0.0001
    best_dev_loss = 10      # worst acceptable dev loss; updated on improvement
    best_params = None      # best checkpoint seen so far
    origintensor, labeltensor, sentencetensor, labeldevtensor = Loader()
    train_set = Data.TensorDataset(origintensor, labeltensor)
    train_loader = Data.DataLoader(
        dataset=train_set,
        batch_size=Batch_size,
        shuffle=True,
        num_workers=4,
        drop_last=False,
        pin_memory=False
    )
    dev_set = Data.TensorDataset(sentencetensor, labeldevtensor)
    dev_loader = Data.DataLoader(
        dataset=dev_set,
        batch_size=Batch_size,
        shuffle=True,
        num_workers=4,
        drop_last=False,
        pin_memory=False
    )
    del sentencetensor, labeldevtensor
    cnn = Architecture.TextCNN2(3, 4)
    loss_fun = torch.nn.CrossEntropyLoss()
    # NOTE(review): the warm-start checkpoint is loaded from ./vec2tensor/ but
    # the best checkpoint below is saved to vec2tensor-3D/ — confirm the path
    # difference is intentional; otherwise each reload discards the new best.
    checkout = torch.load("./vec2tensor/trained_state.npy")
    cnn.load_state_dict(checkout["net"])
    cnn.to(device)
    cnn.train()
    # BUG FIX: the original did `time = time.strftime(...)`, shadowing the
    # `time` module with a string; use a distinct name for the run timestamp.
    run_stamp = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d/%H:%M:%S',
                        filename='./logs/train_' + run_stamp + ".log",
                        filemode='w')
    optimizer = torch.optim.Adam(cnn.parameters(), lr=lr, weight_decay=1e-5)
    # Cosine annealing with warm restarts: first restart after 2 epochs,
    # each subsequent cycle twice as long.
    lr_schedule = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=2, T_mult=2, eta_min=0, last_epoch=-1)
    print(torch.cuda.get_device_name(0))
    epoch = 0
    train_tensor = origintensor
    train_label = labeltensor
    ex = 0   # confidence statistic from EX(); consumed by get_remove_index
    while epoch < 2000:
        # ---- supervised phase -------------------------------------------
        for step, (batch_x, batch_y) in enumerate(train_loader):
            cnn.zero_grad()
            out = cnn(batch_x.to(device))
            out = out.view(-1, out.shape[2])
            pred_train = torch.max(out, 1)[1].data.cpu().numpy()
            batch_y = batch_y.view(-1)
            train_loss = loss_fun(out.to(device), batch_y.to(device))
            # Evaluate on the held-out test set after EVERY batch.
            # NOTE(review): reloading and scoring the full test set per batch
            # is very expensive; consider moving this to a per-epoch hook.
            test_tensor, test_label = load_test()
            cnn.eval()
            with torch.no_grad():  # inference only — no autograd graph needed
                test_out = cnn(test_tensor.to(device))
            test_out = test_out.view(-1, test_out.shape[2])
            ex = EX(test_out, test_label)
            pred_test = torch.max(test_out, 1)[1].data.cpu().numpy()
            test_label = test_label.view(-1)
            acc_train = Acc(pred_train, batch_y.detach().numpy())
            acc_test = Acc(pred_test, test_label.detach().numpy())
            test_loss = loss_fun(test_out.to(device), test_label.to(device))
            if best_dev_loss > test_loss:
                # BUG FIX: best_dev_loss was never updated, so best_params was
                # clobbered by ANY loss < 10 rather than tracking the minimum.
                best_dev_loss = test_loss
                best_params = {'net': cnn.state_dict(),
                               'optimizer': optimizer.state_dict(),
                               'epoch': epoch,
                               "lr": lr_schedule.state_dict(),
                               'loss': test_loss, 'step': step}
                # BUG FIX: save moved inside the improvement branch; the
                # original saved every step, writing `None` before the first
                # improvement and re-writing an unchanged checkpoint otherwise.
                torch.save(best_params, "vec2tensor-3D/trained_state.npy")
            print("epoch:%6s batch:%6s | train_loss: %.6f | test_loss: %.6f | acc_train: %.3f | acc_test: %.3f | %f"%(epoch,step,train_loss,test_loss,acc_train,acc_test,optimizer.param_groups[0]['lr']))
            logging.info("epoch:%6s batch:%6s | train_loss: %.6f | test_loss: %.6f | acc_train: %.3f | acc_test: %.3f "%(epoch,step,train_loss,test_loss,acc_train,acc_test))
            cnn.train()
            train_loss.backward()
            optimizer.step()
            lr_schedule.step()
        torch.cuda.empty_cache()

        # ---- pseudo-label (self-training) phase -------------------------
        if (epoch + 1) % 1 == 0:  # currently runs every epoch
            checkout = torch.load("./vec2tensor/trained_state.npy")
            cnn.load_state_dict(checkout["net"])
            cnn.to(device)
            cnn.eval()
            # Accumulators for the dev samples that stay unlabelled:
            # dev_tensor_sub collects the kept input tensors per batch,
            # dev_label_sub collects their ground-truth labels.
            dev_tensor_sub = torch.empty([0, 30, 100])
            dev_label_sub = torch.empty([0, 1, 1])
            openFlag = False
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
            for step, (batch_x, batch_y) in enumerate(dev_loader):
                with torch.no_grad():  # inference only
                    out = cnn(batch_x.to(device))
                out = out.view(-1, out.shape[2])
                pred = torch.max(out, 1)[1].data.cpu().numpy()
                # Indices of high-confidence samples to move into train_set.
                remove_index = get_remove_index(out, batch_y, ex, len(train_set))
                # BUG FIX: `remove_index != []` is ambiguous (and never True)
                # when remove_index is a tensor; compare lengths instead.
                openFlag = len(remove_index) > 0
                torch.cuda.empty_cache()
                remove_index_list = list(map(int, remove_index))
                del remove_index

                # Boolean masks selecting the rows NOT promoted to train_set.
                mask1 = torch.ones([batch_x.shape[0], 30, 100], dtype=bool)
                mask2 = torch.ones([batch_x.shape[0], 1, 1], dtype=bool)
                for i in remove_index_list:
                    mask1[i] = torch.zeros([30, 100], dtype=bool)
                    mask2[i] = torch.zeros([1, 1], dtype=bool)
                # Keep the low-confidence rows in the dev accumulators.
                kept_x = torch.masked_select(batch_x, mask1).view([-1, 30, 100]).cpu().detach()
                kept_y = torch.masked_select(batch_y, mask2).view([-1, 1, 1]).cpu().detach()
                dev_tensor_sub = torch.cat((kept_x, dev_tensor_sub))
                dev_label_sub = torch.cat((kept_y, dev_label_sub))
                del mask1, mask2, kept_x, kept_y
                torch.cuda.empty_cache()

                # Promote the selected rows to the train set, labelled with
                # the model's own predictions (pseudo-labels).
                remove_index = torch.tensor(remove_index_list, dtype=torch.int64)
                del remove_index_list
                remove_tensor = torch.index_select(batch_x.cpu().detach(), 0, remove_index)
                train_tensor = torch.cat((train_tensor, remove_tensor), 0)
                dummy_tag = torch.index_select(
                    torch.from_numpy(pred).type(torch.long).view(-1, 1, 1), 0, remove_index)
                train_label = torch.cat((train_label, dummy_tag))
                del remove_index, remove_tensor, dummy_tag, out
                torch.cuda.empty_cache()

            # Rebuild both datasets and loaders from the updated tensors.
            torch.cuda.empty_cache()
            del dev_set, train_set
            dev_set = Data.TensorDataset(dev_tensor_sub.cpu().detach(), dev_label_sub.cpu().detach())
            train_set = Data.TensorDataset(train_tensor, train_label)
            del dev_loader, train_loader
            dev_loader = Data.DataLoader(
                dataset=dev_set,
                batch_size=Batch_size,
                shuffle=True,
                num_workers=4,
                drop_last=False,
                pin_memory=False
            )
            train_loader = Data.DataLoader(
                dataset=train_set,
                batch_size=Batch_size,
                shuffle=True,
                num_workers=4,
                drop_last=False,
                pin_memory=False
            )
            # Schedule the next pseudo-labelling pass further out when the
            # last batch promoted samples (6 epochs) than when it did not (3).
            if openFlag:
                Next_Inset += 6
            else:
                Next_Inset += 3
            print(len(train_set.tensors[0]), len(train_set.tensors[1]))
            del dev_tensor_sub, dev_label_sub
            torch.cuda.empty_cache()
            # BUG FIX: removed the redundant f.close() after the with-block;
            # the context manager already closes the file.
            with open("1.txt", "a+") as f:
                f.write("dev_set size: "+str(len(dev_set))+"     train_set size: "+str(len(train_set))+"\n")
            print("剩余未标签数据量 :", len(dev_set))
        torch.cuda.empty_cache()
        cnn.train()
        epoch += 1



