import paddle
import paddle.nn as nn
import paddlenlp
from paddlenlp.embeddings import TokenEmbedding
import pandas as pd
from juzi_tokenizer import *
import sys
from collections import defaultdict
import torch
# Load the pretrained token embedding used for sentence encoding.
# TokenEmbedding() parameters:
#   embedding_name       pretrained model to load; defaults to
#                        w2v.baidu_encyclopedia.target.word-word.dim300.
#   unknown_token        representation for unknown tokens, default [UNK].
#   unknown_token_vector vector for unknown tokens; by default a zero-mean
#                        normal-distributed vector with the embedding's dim.
#   extended_vocab_path  path to an extra vocab file (one word per line);
#                        supplying one forces trainable=True.
#   trainable            whether the Embedding layer is trainable (default True).
# The pretrained weights are downloaded automatically on first use.
# token_embedding = TokenEmbedding(embedding_name="w2v.baidu_encyclopedia.target.word-word.dim300")
token_embedding = TokenEmbedding(embedding_name="w2v.wiki.target.word-word.dim300")

# Take the first 1000 tokens of the vocabulary...
labels = token_embedding.vocab.to_tokens(list(range(1000)))
# ...and look up their embedding vectors.
test_token_embedding = token_embedding.search(labels)

# VisualDL LogWriter (only used by the commented-out logging snippet below).
from visualdl import LogWriter

# with LogWriter(logdir='./token_hidi') as writer:
#     writer.add_embeddings(tag='test', mat=[i for i in test_token_embedding], metadata=labels)
class BoWModel(nn.Layer):
    """Bag-of-words sentence encoder.

    Embeds a batch of token-id sequences with the supplied embedder and
    pools each sequence with PaddleNLP's BoWEncoder (a sum over tokens),
    yielding one fixed-size vector per sentence.
    """

    def __init__(self, embedder):
        super().__init__()
        self.embedder = embedder
        # The BoW encoder's output width equals the embedding width.
        self.encoder = paddlenlp.seq2vec.BoWEncoder(self.embedder.embedding_dim)
        self.cos_sim_func = nn.CosineSimilarity(axis=-1)

    def forward(self, text):
        # (batch_size, num_tokens) ids -> (batch_size, num_tokens, embedding_dim)
        embedded = self.embedder(text)
        # Pool over the token axis -> (batch_size, embedding_dim)
        return self.encoder(embedded)

    def get_cos_sim(self, text_a, text_b):
        """Cosine similarity between the pooled encodings of two batches."""
        return self.cos_sim_func(self.forward(text_a), self.forward(text_b))
from data import Tokenizer

# Build the tokenizer around the pretrained vocabulary and wrap the
# embedding in the bag-of-words encoder defined above.
tokenizer = Tokenizer()
tokenizer.set_vocab(vocab=token_embedding.vocab)
model = BoWModel(embedder=token_embedding)

# Raw strings: the original "...\subtext.xlsx" contained the invalid escape
# sequence "\s", which raises SyntaxWarning/DeprecationWarning on modern
# CPython. The byte content of the paths is unchanged.
DATA_PATH = r"AI-classfication\subtext.xlsx"
SAVE_PATH = r"AI-classfication\sub_embeddings.pt"

# Read the reference sentences: fault class, fault description, repair action.
sub_data = pd.read_excel(DATA_PATH)
sub_labels = sub_data["故障分类"]
sub_descriptions = sub_data["故障描述"]
sub_repairs = sub_data["处理措施"]
# Maps fault class -> list of flattened sentence vectors (numpy arrays);
# each label holds all of its description vectors, then its repair vectors.
sub_embeddings = defaultdict(list)


def _encode_column(texts, progress_message):
    """Encode every cell of *texts* into a sentence vector, grouped by fault class.

    Missing cells (NaN) are encoded as a single space so every row still
    contributes one vector. Appends model(ids).flatten().numpy() to
    sub_embeddings[label], preserving row order.
    """
    for i in range(len(sub_data)):
        print(progress_message)
        text = texts.loc[i]
        label = sub_labels.loc[i]
        if pd.isnull(text):
            text = " "
        # sentence -> token ids -> (1, num_tokens) tensor -> pooled vector
        ids = paddle.to_tensor([tokenizer.text_to_ids(text)])
        sub_embeddings[label].append(model(ids).flatten().numpy())


# Two separate passes (descriptions first, then repairs) keep the same
# per-label ordering the original duplicated loops produced.
_encode_column(sub_descriptions, "正在编码故障描述句向量")
_encode_column(sub_repairs, "正在编码处理措施句向量")

# Persist the grouped vectors; load them back later with torch.load.
torch.save(sub_embeddings, SAVE_PATH)
# print(sub_embeddings)
# from visualdl import LogWriter

# with LogWriter(logdir='./sentence_hidi') as writer:
#     writer.add_embeddings(tag='test', mat=embedding_list, metadata=label_list)

if __name__ == '__main__':
    pass