#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2021/3/13 15:58
# @Author  : lxy
import csv
import sys, os
import random

from torch.autograd import Variable

sys.path.append(os.pardir)
import platform
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import Dataset, DataLoader
Batch_size =32
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Final_loss is the cross-entropy loss on the labelled set plus the consistency loss on the unlabelled set; see 置信度.doc for the loss definition.

def sharpen(OutTensor, num, power=5):
    """
    Sharpen a batch of per-class score distributions.

    Raises each entry to ``power`` and renormalizes every row to sum to 1,
    pushing the distribution toward its argmax.

    :param OutTensor: (batch, num_classes) tensor of non-negative scores
        -- assumed 2-D; TODO confirm callers never pass other ranks
    :param num: number of classes; kept for backward compatibility (the
        normalization now sums over dim 1 directly instead of multiplying
        by a ones vector of length ``num``)
    :param power: sharpening exponent; default 5 matches the original
        hard-coded value
    :return: row-normalized ``OutTensor ** power``, on the input's device
        (the previous intermediate ``.to(device)`` transfers coupled this
        helper to the module-level global and forced a device move)
    """
    powered = OutTensor.pow(power)
    # Row-wise normalization; equivalent to the original mm with a ones vector.
    return powered / powered.sum(dim=1, keepdim=True)

class Final_Loss(nn.Module):
    """Semi-supervised loss: cross-entropy on the labelled batch plus, when
    unlabelled mixup inputs are provided, a consistency cross-entropy term
    weighted by 100 (see 置信度.doc for the derivation)."""

    def __init__(self):
        super().__init__()

    def forward(self, Lx, Ly, U1=None, U2=None, U3=None, beta=None):
        """
        :param Lx: logits for the labelled batch
        :param Ly: integer labels; flattened with ``view(-1)``
        :param U1: unlabelled prediction tensor, mixed with U2
        :param U2: unlabelled prediction tensor, mixed with U1
        :param U3: tensor whose row-wise argmax supplies the pseudo targets
        :param beta: mixup coefficient for the U1/U2 combination
        :return: scalar loss tensor
        """
        first_loss = F.cross_entropy(input=Lx.float(), target=Ly.view(-1).long())
        # Fix: `U1 != None` performs an elementwise tensor comparison;
        # identity against None must use `is not None`.
        if U1 is not None:
            one_hot_mix = beta * U1 + (1 - beta) * U2
            second_loss = F.cross_entropy(one_hot_mix, torch.max(U3, 1)[1].data)
            return (first_loss + 100 * second_loss).float()
        return first_loss

# 自定义三元数据集 继承自Dataset 需要重写以下三个方法
# Custom triple dataset, subclassing Dataset; implements the three required
# methods and adds built-in 5-fold train/test splitting.
class Truple_Dataset(Dataset):
    """Dataset over aligned (tensor, label/aux, embedding) triples.

    ``o1/o2/o3`` keep the full original data; ``U1/U2/U3`` hold the current
    training fold (initially the whole set). ``train(i)`` selects everything
    except fold ``i % 5`` in place; ``test(i)`` returns fold ``i % 5``.
    """

    def __init__(self, U1, U2, U3):
        super().__init__()
        self.o1 = U1
        self.o2 = U2
        self.o3 = U3
        self.U1 = U1
        self.U2 = U2
        self.U3 = U3

    def __getitem__(self, item):
        return (self.U1[item], self.U2[item], self.U3[item])

    def __len__(self):
        return self.U1.shape[0]

    def _fold_bounds(self, data, i):
        # Half-open [lo, hi) index range of fold i % 5 within `data`.
        fold = i % 5
        return int(fold * len(data) / 5), int((fold + 1) * len(data) / 5)

    def train(self, i):
        """Set U1/U2/U3 to everything outside fold ``i % 5``; return its size.

        Fix: the original had an unreachable ``del`` statement after the
        ``return``; it has been removed.
        """
        lo, hi = self._fold_bounds(self.o1, i)
        self.U1 = torch.cat((self.o1[:lo], self.o1[hi:]), 0)
        lo, hi = self._fold_bounds(self.o2, i)
        self.U2 = torch.cat((self.o2[:lo], self.o2[hi:]), 0)
        lo, hi = self._fold_bounds(self.o3, i)
        self.U3 = torch.cat((self.o3[:lo], self.o3[hi:]), 0)
        return self.U1.shape[0]

    def test(self, i):
        """Return the held-out triple slices for fold ``i % 5``."""
        lo, hi = self._fold_bounds(self.o1, i)
        U1_ = self.o1[lo:hi]
        lo, hi = self._fold_bounds(self.o2, i)
        U2_ = self.o2[lo:hi]
        lo, hi = self._fold_bounds(self.o3, i)
        U3_ = self.o3[lo:hi]
        return U1_, U2_, U3_





class Five_Dataset(Dataset):
    """Dataset yielding aligned 5-tuples, one element from each of the five
    parallel sequences passed at construction time."""

    def __init__(self, U1, U2, U3, U4, U5):
        super().__init__()
        self.U1, self.U2, self.U3, self.U4, self.U5 = U1, U2, U3, U4, U5

    def __getitem__(self, item):
        # Index all five sequences in lockstep.
        return tuple(seq[item] for seq in (self.U1, self.U2, self.U3, self.U4, self.U5))

    def __len__(self):
        return self.U1.shape[0]

# 打乱无标签数据集
def shuffer(project_path,beta=0.9,aug = False):
    tensor = np.load(project_path+"/Processed/dev/tensor.npy",allow_pickle=True)
    embedding = np.load(project_path+"/Processed/dev/num.npy", allow_pickle=True).astype(int)
    tensor = torch.from_numpy(tensor).float()
    tensor = torch.cat((tensor[0],tensor[1],tensor[2],tensor[3]),0)
    # print(tensor.shape)
    embedding = torch.from_numpy(embedding)
    embedding = torch.cat((embedding[0],embedding[1],embedding[2],embedding[3]),0)
    # print(label.shape)
    tensor,  embedding = test_shuffle(tensor, embedding)
    tensor1 = tensor[::2]
    tensor2 = tensor[1::2]
    aug = beta*tensor1+(1-beta)*tensor2
    embedding1 = embedding[::2]
    embedding2 = embedding[1::2]
    return tensor1,tensor2,embedding1,embedding2,aug


def Loader_test(model, project_path):
    """Load preprocessed tensors, word-index embeddings and labels for
    ``model`` from ``Processed/<model>/`` and return them shuffled in unison
    as (tensor, embedding, label)."""
    raw_x = np.load(project_path+"/Processed/"+model+"/tensor.npy", allow_pickle=True).astype(float)
    raw_e = np.load(project_path+"/Processed/"+model+"/num.npy", allow_pickle=True).astype(int)
    raw_y = np.load(project_path+"/Processed/"+model+"/label.npy", allow_pickle=True).astype(int)
    xs = torch.from_numpy(raw_x)
    es = torch.from_numpy(raw_e)
    # Flatten the four per-category groups into one batch each.
    flat_embed = torch.cat((es[0], es[1], es[2], es[3]), 0).long()
    flat_tensor = torch.cat((xs[0], xs[1], xs[2], xs[3]), 0)
    ys = torch.from_numpy(raw_y)
    flat_label = torch.cat((ys[0], ys[1], ys[2], ys[3]), 0).long()
    del xs, es, ys
    # Labels become shape (N, 1, 1) to match downstream expectations.
    flat_label = torch.unsqueeze(flat_label, 1)
    flat_label = torch.unsqueeze(flat_label, 1)
    shuf_tensor, shuf_label, shuf_embed = test_shuffle(flat_tensor, flat_label, flat_embed)
    return shuf_tensor, shuf_embed, shuf_label

def get_remove_index(out, batch_y, Ex, train_set_len, project_path):
    """Select indices of high-confidence predictions in a batch.

    A sample is selected when the class minimizing
    ``|out[i] * (i + 1) - Ex[i]|`` matches the argmax prediction and that
    distance is within a threshold that tightens as the labelled training
    set grows. At most 5 samples per class are selected per call; each
    selection is appended to ``<project_path>/置信度.txt``.

    :param out: (batch, 4) prediction tensor
    :param batch_y: per-sample reference labels (tensor), logged for audit
    :param Ex: length-4 list of per-class expected confidences (see ``EX``)
    :param train_set_len: current labelled-set size, controls the threshold
    :param project_path: directory receiving the confidence log file
    :return: list of selected batch indices
    """
    # Threshold: fixed 1e-8 for large training sets, otherwise decays with size.
    if train_set_len > 4000:
        flag = 1e-8
    else:
        flag = 0.1 * np.power(0.1, (train_set_len - 1500) // 1000 + 2)
    counter = [0, 0, 0, 0]
    # Fix: np.int was removed in NumPy >= 1.24; the builtin int is the replacement.
    pred = torch.max(out, 1)[1].data.cpu().numpy().astype(int)
    out_np = out.cpu().detach().numpy()
    remove_index = []
    # Fixes vs original: no builtin shadowing (`min`, `str`), no redundant
    # f.close() after the `with` block, and no unused Dx buffer sized by the
    # module-level Batch_size (which broke for batches larger than it).
    for cnt, item in enumerate(out_np):
        best = 4
        best_dist = 4
        for i in range(4):
            dist = abs(item[i] * (i + 1) - Ex[i])
            if dist < best_dist:
                best = i
                best_dist = dist
        if best == pred[cnt] and best_dist <= flag:
            if counter[best] < 5:
                counter[best] += 1
                with open(project_path+"/置信度.txt", "a+") as f:
                    f.write("%15f | %3d | %3d | \n" % (best_dist, best, batch_y[cnt].cpu().detach().numpy()))
                remove_index.append(cnt)
    return remove_index

def EX(test_out, batch_y):
    """Estimate a per-class confidence reference E[x] from a test batch.

    For each class, only the FIRST correctly predicted sample contributes:
    its confidence is ``test_out[sample][class] * (class + 1)``. Classes with
    no correct prediction keep 0.

    :param test_out: (batch, 4) prediction tensor
    :param batch_y: iterable of integer true labels, aligned with test_out
    :return: length-4 list of per-class confidence estimates
    """
    pred_test = torch.max(test_out, 1)[1].data.cpu().numpy()
    true_counter = [0, 0, 0, 0]
    # Fix: original named this `sum`, shadowing the builtin; 1000 is a
    # sentinel that is overwritten on the first correct hit.
    totals = [1000, 1000, 1000, 1000]
    seen = [False, False, False, False]
    for cnt, (pred, real) in enumerate(zip(pred_test, batch_y)):
        if pred == real and seen[real] == False:
            seen[real] = True
            confidence = (test_out[cnt][real] * (real + 1)).cpu().detach().numpy()
            true_counter[real] += 1
            # Original zeroed totals[real] twice then added; net effect is
            # a plain assignment.
            totals[real] = confidence
    ex = [0, 0, 0, 0]
    for i in range(4):
        if true_counter[i] != 0:
            # seen[] guarantees true_counter[i] is at most 1.
            ex[i] = totals[i] / true_counter[i]
    return ex

# 计算精确度
def Acc(prediciton,label):
    acc = float((prediciton == label).astype(int).sum())/float(label.shape[0])
    return acc

def Loader(model,project_path):
    """Load preprocessed tensors/embeddings/labels for ``model``, append a
    0.9/0.1 mixup-augmented copy of each, and return everything shuffled in
    unison as (tensor, embedding, label).

    :param model: subdirectory name under ``Processed/``
    :param project_path: project root path
    :return: (origintensor, auto_embedding, labeltensor), labels shaped (N, 1, 1)
    """
    train_x = np.load(project_path+"/Processed/"+model+"/tensor.npy",allow_pickle=True).astype(float)
    auto_embedding = np.load(project_path+"/Processed/"+model+"/num.npy",allow_pickle=True).astype(int)
    # print(auto_embedding.shape)
    train_y = np.load(project_path+"/Processed/"+model+"/label.npy",allow_pickle=True).astype(int)
    tensor = torch.from_numpy(train_x)
    # print(tensor.shape)
    embedding = torch.from_numpy(auto_embedding)
    # print(auto_embedding.shape)
    # Flatten the four category groups into one batch.
    auto_embedding = torch.cat((embedding[0],embedding[1],embedding[2],embedding[3]),0).long()
    # NOTE(review): mixing integer word-index embeddings with float weights
    # yields a float tensor, and the cat below promotes auto_embedding to
    # float — downstream embedding lookups would need long indices. Looks
    # suspicious; confirm intended behavior.
    aug_tensor = 0.9 * auto_embedding[::2] + 0.1 * auto_embedding[1::2]
    auto_embedding = torch.cat((auto_embedding, aug_tensor), dim=0)
    origintensor = torch.cat((tensor[0],tensor[1],tensor[2],tensor[3]),0)
    # Mixup augmentation of the input tensors (beta = 0.9 hard-coded here).
    aug_tensor = 0.9 * origintensor[::2]+0.1* origintensor[1::2]
    origintensor = torch.cat((origintensor,aug_tensor),dim=0)
    label = torch.from_numpy(train_y)
    labeltensor = torch.cat((label[0],label[1],label[2],label[3]),0).long()
    # Augmented samples take the label of the beta-dominant (0.9) component.
    aug_tensor = labeltensor[::2]
    labeltensor = torch.cat((labeltensor,aug_tensor),dim=0)
    del tensor,embedding,label,aug_tensor
    # Labels become shape (N, 1, 1) to match downstream expectations.
    labeltensor = torch.unsqueeze(labeltensor, 1)
    labeltensor = torch.unsqueeze(labeltensor, 1)
    origintensor,labeltensor,auto_embedding = test_shuffle(origintensor,labeltensor,auto_embedding)
    return origintensor,auto_embedding,labeltensor



class Word_Dict_Mapping():
    """Word -> integer-id mapping.

    Built either from passages via ``Create_Dict`` (text, dictionary, word
    ordinal) or loaded from a previously saved csv via ``loader_from_csv``.
    ``self.dict`` maps word -> 1-based id after ``Create_Dict``; after
    ``loader_from_csv`` it maps word -> list of ids (setdefault/append) --
    NOTE(review): this asymmetry looks unintended, confirm against callers.
    """

    def __init__(self, *args):
        # No args: empty mapping, to be filled by loader_from_csv.
        # One arg: the passages (list of 4 category lists) for Create_Dict.
        if len(args) == 0:
            self.dict = {}
            self.len = 0
        else:
            self.passages = args[0]
            self.dict = set()
            self.len = 0

    def Create_Dict(self):
        """Build the word -> 1-based id mapping from the 4 passage categories."""
        for i in range(4):
            for item in self.passages[i]:
                self.dict = self.dict | set(item)
        self.dict = list(self.dict)
        self.len = len(self.dict)
        # Ids start at 1 (0 presumably reserved for padding — TODO confirm).
        orders = [j + 1 for j in range(len(self.dict))]
        self.dict = dict(zip(self.dict, orders))

    def loader_from_csv(self, csv_name, project_path):
        """Load a saved word dictionary from ``<project_path>/word_dict/`` (or
        a fixed Windows path) into ``self.dict`` and set ``self.len``."""
        dir_path = project_path + "/word_dict/"
        if os.path.exists(dir_path):
            print(dir_path + " is existed")
        else:
            os.makedirs(dir_path)
            print("mkdir " + dir_path)
        if (platform.system() == 'Windows'):
            path = "D:\\word_dict\\" + csv_name + ".csv"
        else:
            path = dir_path + csv_name + ".csv"
        # Fix: `with` guarantees the handle is closed even if a row is malformed
        # (the original leaked the handle on exception).
        cnt = 0
        with open(path, "r", encoding="utf8", newline="") as f:
            csv_reader = csv.reader(f)
            for row in csv_reader:
                cnt += 1
                self.dict.setdefault(row[0], []).append(int(row[1]))
        self.len = cnt

def load_passages(name, project_path):
    """Load a labelled csv dataset, build its word dictionary, save the
    dictionary as csv, and return both.

    Fix: the original returned None, but ``Create_Vector`` unpacks two values
    from this call — the missing ``return sentences, wd`` is added.
    (TODO per original note: split the return-value and the persistence
    concerns into two functions.)

    :param name: dataset csv base name (no extension)
    :param project_path: project root (ignored for input on Windows, which
        reads from the hard-coded ``D:\\DataSet\\`` path)
    :return: (sentences, wd) — per-category lists of token rows, and the
        populated Word_Dict_Mapping
    """
    category = 4
    if (platform.system() == 'Windows'):
        csv_name = "D:\\DataSet\\"+name+".csv"
    else:
        csv_name = project_path+"/DataSet/"+name+".csv"
    sentences = [[] for i in range(category)]
    # `with` closes the handle even on malformed rows (original leaked on exception).
    with open(csv_name, 'r', encoding='utf-8') as f:
        csv_reader = csv.reader(f)
        for row in csv_reader:
            # Column 0 is the category label; the rest are tokens.
            sentences[int(row[0])].append(row[1:])
    wd = Word_Dict_Mapping(sentences)
    wd.Create_Dict()
    print(wd.len)
    print("constructed word dictionary")
    dir_path = project_path + "/word_dict/"
    if os.path.exists(dir_path):
        print(dir_path + " is existed")
    else:
        os.makedirs(dir_path)
        print("mkdir " + dir_path)
    if (platform.system() == 'Windows'):
        out_name = "D:\\word_dict\\"+name+".csv"
    else:
        out_name = dir_path+name+".csv"
    with open(out_name, "w", encoding="utf8", newline="") as f:
        csv_writer = csv.writer(f)
        for key, value in wd.dict.items():
            print(key, value)
            csv_writer.writerow([key, str(value)])
    print("word dictionary saved")
    return sentences, wd

def Create_Vector(project_path, max_len=50):
    """Convert the THUCNews passages into per-category lists of word-id
    sequences, truncated to ``max_len`` tokens.

    :param project_path: project root passed through to ``load_passages``
    :param max_len: maximum tokens kept per passage; default 50 matches the
        original hard-coded truncation
    :return: (key_values, vocab_size) — key_values[i][j] is the id sequence
        of passage j in category i
    """
    passages, word_dict = load_passages("THUCNews", project_path)
    key_values = [[[] for j in range(len(passages[i]))] for i in range(4)]
    for i in range(4):
        for idx, sentence in enumerate(passages[i]):
            # Slice replaces the original manual counter-and-break truncation.
            for w in sentence[:max_len]:
                key_values[i][idx].append(word_dict.dict[w])
    return key_values, word_dict.len

def test_shuffle(tensor,label,embeding = None):
    index = [i for i in range(label.shape[0])]
    random.shuffle(index)
    label = label[index]
    tensor = tensor[index]
    if embeding != None:
        embeding = embeding[index]
        return tensor,label,embeding
    else:
        return tensor,label

class Regularization(torch.nn.Module):
    """L-p norm penalty over a model's weight parameters.

    Collects every parameter whose name contains ``'weight'`` (biases are
    deliberately excluded) and returns ``weight_decay * sum(norm(w, p))``.
    """

    def __init__(self, model, weight_decay, p=2):
        '''
        :param model: model whose weights are regularized
        :param weight_decay: regularization coefficient, must be > 0
        :param p: norm exponent; 2 -> L2 (default), 1 -> L1
        :raises ValueError: if weight_decay <= 0. (Fix: the original printed
            a message and called exit(0), killing the process with a SUCCESS
            exit code on an invalid argument.)
        '''
        super(Regularization, self).__init__()
        if weight_decay <= 0:
            raise ValueError("param weight_decay can not <=0")
        self.model = model
        self.weight_decay = weight_decay
        self.p = p
        self.weight_list = self.get_weight(model)

    def to(self, device):
        '''
        Move the module to ``device`` and remember it.
        :param device: cuda or cpu device
        :return: self
        '''
        self.device = device
        super().to(device)
        return self

    def forward(self, model):
        # Refresh the weight list so the penalty sees current parameters.
        self.weight_list = self.get_weight(model)
        reg_loss = self.regularization_loss(self.weight_list, self.weight_decay, p=self.p)
        return reg_loss

    def get_weight(self, model):
        '''
        Collect (name, param) pairs for parameters whose name contains
        'weight' (name-substring match — biases and other params are skipped).
        :param model: model to inspect
        :return: list of (name, parameter) tuples
        '''
        weight_list = []
        for name, param in model.named_parameters():
            if 'weight' in name:
                weight_list.append((name, param))
        return weight_list

    def regularization_loss(self, weight_list, weight_decay, p=2):
        '''
        Sum the p-norms of the collected weights, scaled by weight_decay.
        :param weight_list: (name, parameter) tuples from get_weight
        :param weight_decay: scale factor applied to the summed norms
        :param p: norm exponent
        :return: scalar regularization loss tensor
        '''
        reg_loss = 0
        for name, w in weight_list:
            reg_loss = reg_loss + torch.norm(w, p=p)
        return weight_decay * reg_loss


def confusion_matrix(real, pred, num):
    '''
    Compute the confusion matrix.

    matrix[i][j] counts samples PREDICTED as class i whose TRUE class is j,
    so the diagonal holds correct predictions.

    Fixes: the original initialized with `[] * num` (which is just `[]`) and
    rescanned all pairs once per class (O(num * n)); a single O(n) pass over
    the (real, pred) pairs produces the identical matrix.

    :param real: iterable of true class indices
    :param pred: iterable of predicted class indices, aligned with `real`
    :param num: number of classes
    :return: num x num np.mat confusion matrix
    '''
    counts = [[0] * num for _ in range(num)]
    for r, p in zip(real, pred):
        counts[p][r] += 1
    return np.mat(counts).reshape(num, num)

def specificity(matrix):
    '''
    Per-class specificity-style score from a confusion matrix.

    (The original docstring said "accuracy"; the name and the formula —
    (all_f - all_t) / all_f per class, where all_f excludes column i and
    all_t is row i without its diagonal element — suggest a specificity-like
    TN/(TN+FP) measure. NOTE(review): confirm the intended metric; it depends
    on the matrix orientation produced by confusion_matrix.)

    :param matrix: confusion matrix (np.mat)
    :return: 1 x num np.mat row vector of per-class scores
    '''
    matrixT = matrix.T
    recall_score = []
    for i in range(matrix.shape[0]):
        # Drop row i of matrixT, i.e. column i of matrix.
        if i == 0:
            col = matrixT[1:]
        elif i == matrix.shape[0]-1:
            col = matrixT[:-1]
        else :
            col = np.vstack((matrixT[0:i],matrixT[i+1:]))
        n = np.mat([[1]*(matrix.shape[0]-1)]).T
        m = np.mat([[1]*(matrix.shape[0])])
        all_f = m.dot(col.T).dot(n)  # total count outside column i
        m[0,i] = 0  # m is reused below with entry i zeroed — order matters
        select = m.T
        all_t = matrix[i].dot(select)  # row i sum excluding the diagonal entry
        recall_i = (all_f-all_t)/all_f
        recall_score.append(recall_i.item())
    return np.mat(recall_score)

def score(matrix):
    '''
    Compute macro-averaged metrics from a confusion matrix.

    Fix: ``dtype=np.float`` — the ``np.float`` alias was removed in
    NumPy 1.24 (deprecated since 1.20) and raised AttributeError; the builtin
    ``float`` is the documented replacement and is numerically identical.

    :param matrix: np.mat confusion matrix (orientation per confusion_matrix:
        rows predicted, columns actual — TODO confirm, it flips the meaning
        of the "recall"/"precision" names below)
    :return: (accuracy, recall, precision, specificity, f1), each a float
        macro-averaged over the classes
    '''
    trace = matrix.trace()
    diag = matrix.diagonal()
    n = np.mat([1]*matrix.shape[0],dtype=float)
    row = n*matrix           # column sums as a 1 x num row vector
    col = matrix*n.T         # row sums as a num x 1 column vector
    acc_score = trace/(n*matrix*n.T)
    recall_score = (diag/row)*n.T/matrix.shape[0]
    precision_score = (diag/col.T)*n.T/matrix.shape[0]
    specificity_score = specificity(matrix)*n.T/matrix.shape[0]
    f1_score = 2*((diag/col.T).T*(diag/row)).diagonal()/((diag/col.T)+(diag/row))*n.T/matrix.shape[0]
    return acc_score.item(),recall_score.item(),precision_score.item(),specificity_score.item(),f1_score.item()

