import paddle
import paddle.nn as nn
import paddlenlp
from paddlenlp.embeddings import TokenEmbedding
import pandas as pd
from juzi_tokenizer import *
import sys
from collections import defaultdict
import torch
from model import BoWModel
from data import Tokenizer
import numpy as np
class SingleFaultClassifar:
    """Wraps a single fault record (description + handling method) and
    provides sentence embedding and cosine-distance helpers.
    """

    def __init__(self, description, handle_method, tokenizer, model) -> None:
        self.description = description
        self.handle_method = handle_method
        self.tokenizer = tokenizer
        self.model = model

    def embed(self, string):
        """Encode ``string`` into a flat sentence vector using the model."""
        # Null cells from pandas are replaced by a single-space placeholder.
        if pd.isnull(string):
            string = " "
        # Convert the sentence to token ids, then feed the ids to the model
        # to obtain the sentence vector.
        token_ids = paddle.to_tensor([self.tokenizer.text_to_ids(string)])
        return self.model(token_ids).flatten().numpy()

    def cosine_distance(self, a, b):
        """Return 1 - cosine similarity for equally-shaped 1-D or 2-D arrays.

        Raises RuntimeError on shape mismatch or unsupported dimensionality.
        """
        if a.shape != b.shape:
            raise RuntimeError("array {} shape not match {}".format(a.shape, b.shape))
        if a.ndim == 1:
            norm_a = np.linalg.norm(a)
            norm_b = np.linalg.norm(b)
        elif a.ndim == 2:
            # Row-wise norms, kept as columns so broadcasting divides each
            # similarity entry by the matching pair of norms.
            norm_a = np.linalg.norm(a, axis=1, keepdims=True)
            norm_b = np.linalg.norm(b, axis=1, keepdims=True)
        else:
            raise RuntimeError("array dimensions {} not right".format(a.ndim))
        similarity = np.dot(a, b.T) / (norm_a * norm_b)
        return 1. - similarity

    def distance(self, embed_a, embed_b):
        """Distance between two embeddings (cosine distance)."""
        return self.cosine_distance(embed_a, embed_b)
class StandaedFaultClassifar:
    """Classifies fault records against pre-computed "standard" sentence
    embeddings loaded from ``pt_path``.
    """

    def __init__(self, pt_path="AI-classfication\standard_embeddings.pt") -> None:
        self.pt_path = pt_path
        # Mapping label -> stored standard embedding(s).
        # assumes the .pt file stores {label: [embedding, ...]} — TODO confirm
        self.dict = torch.load(pt_path)
        self.tokenizer = Tokenizer()
        self.token_embedding = TokenEmbedding(embedding_name="w2v.wiki.target.word-word.dim300")
        # BUG FIX: the original referenced the bare name `token_embedding`,
        # which is undefined in this scope (the attribute is
        # `self.token_embedding`), raising NameError at construction time.
        self.tokenizer.set_vocab(vocab=self.token_embedding.vocab)
        self.model = BoWModel(embedder=self.token_embedding)

    def distance_dict(self, description, handle_method):
        """Return (per-label weighted distance dict, selected label).

        The weighted distance is 0.7 * distance(description) +
        0.3 * distance(handle_method) against each label's first standard
        embedding.
        """
        fault = SingleFaultClassifar(description, handle_method, self.tokenizer, self.model)
        # Embed both text fields once, reuse for every label.
        embed_desc = fault.embed(description)
        embed_handle = fault.embed(handle_method)
        dis_dict = defaultdict(torch.Tensor)
        max_dis = 0
        maxium_label = ""
        for label in self.dict:
            print(label)
            dis_desc = fault.distance(self.dict[label][0], embed_desc)
            dis_handle = fault.distance(self.dict[label][0], embed_handle)
            # Description dominates the weighted score.
            dis_dict[label] = 0.7 * dis_desc + 0.3 * dis_handle
            # NOTE(review): this tracks the label with the LARGEST cosine
            # distance, i.e. the LEAST similar standard. If the intent is
            # "closest standard sentence", this should minimize instead —
            # confirm before changing, as downstream output depends on it.
            if dis_dict[label] > max_dis:
                max_dis = dis_dict[label]
                maxium_label = label
        return dis_dict, maxium_label

    def run(self, fault_data, save_path="AI-classfication\classifar_under_standard.xlsx", eps=0.5):
        """Annotate each row of ``fault_data`` with the predicted label and
        its distance score, then write the result to ``save_path``.

        Parameters
        ----------
        fault_data : DataFrame with "故障描述" and "处理措施" columns.
        save_path : output Excel path.
        eps : accepted for interface compatibility but currently unused.
        """
        for i in range(len(fault_data)):
            fault_desc = fault_data["故障描述"][i]
            fault_handle = fault_data["处理措施"][i]

            dis_dict, label = self.distance_dict(fault_desc, fault_handle)

            fault_data.loc[i, "模型预测"] = label
            fault_data.loc[i, "置信度"] = dis_dict[label]
        fault_data.to_excel(save_path, index=False)

if __name__ == '__main__':
    # Load the spreadsheet of standard fault sentences/records to classify.
    sub_data = pd.read_excel("AI-classfication\subtext.xlsx")

    classifier = StandaedFaultClassifar()
    classifier.run(sub_data)

            


