import paddle
import paddle.nn as nn
import paddlenlp
from paddlenlp.embeddings import TokenEmbedding
# Load a pretrained TokenEmbedding.
# TokenEmbedding() parameters:
# embedding_name: name of the pretrained model to load. Defaults to the
#     w2v.baidu_encyclopedia.target.word-word.dim300 word vectors.
# unknown_token: representation of unknown tokens, defaults to [UNK].
# unknown_token_vector: vector for unknown tokens; by default a normal-distributed
#     vector with zero mean and the same dimension as the embedding.
# extended_vocab_path: path to an extended vocabulary file, one word per line.
#     If an extended vocabulary is supplied, trainable=True.
# trainable: whether the Embedding layer can be trained. True means parameters
#     are updatable, False means frozen. Defaults to True.
# Instantiate the TokenEmbedding; the pretrained embedding is downloaded
# automatically on first use and then loaded from cache.
# token_embedding = TokenEmbedding(embedding_name="w2v.baidu_encyclopedia.target.word-word.dim300")
token_embedding = TokenEmbedding(embedding_name="w2v.wiki.target.word-word.dim300")
# Take the first 1000 words of the vocabulary.
labels = token_embedding.vocab.to_tokens(list(range(0, 1000)))
# Look up the embedding vectors for those 1000 words.
test_token_embedding = token_embedding.search(labels)

# Import VisualDL's LogWriter for logging embeddings.
from visualdl import LogWriter

# with LogWriter(logdir='./token_hidi') as writer:
#     writer.add_embeddings(tag='test', mat=[i for i in test_token_embedding], metadata=labels)
class BoWModel(nn.Layer):
    """Bag-of-words sentence encoder.

    Embeds token ids, sums the token embeddings into one vector per
    sentence, and offers a helper that scores two sentences with
    cosine similarity.
    """

    def __init__(self, embedder):
        super().__init__()
        self.embedder = embedder
        self.encoder = paddlenlp.seq2vec.BoWEncoder(self.embedder.embedding_dim)
        self.cos_sim_func = nn.CosineSimilarity(axis=-1)

    def get_cos_sim(self, text_a, text_b):
        # Encode each sentence independently, then compare the vectors.
        vec_a = self.forward(text_a)
        vec_b = self.forward(text_b)
        return self.cos_sim_func(vec_a, vec_b)

    def forward(self, text):
        # Token ids -> embeddings.
        # Shape: (batch_size, num_tokens, embedding_dim)
        tokens = self.embedder(text)
        # Sum over the token axis.
        # Shape: (batch_size, embedding_dim)
        return self.encoder(tokens)

def test(data_path=r"D:\python_lab\paddle_nlp_之词或者句子相似度计算.zip_pad\paddle_nlp_之词或者句子相似度计算\text_pair.txt"):
    """Score sentence pairs with cosine similarity and log sentence vectors.

    Reads tab-separated ``text_a<TAB>text_b`` pairs from ``data_path``,
    prints the BoW cosine similarity for every pair, then writes each
    sentence's embedding to VisualDL for high-dimensional visualization.

    Args:
        data_path: path to the pair file, one tab-separated pair per line.
            Defaults to the original hard-coded location.
    """
    from collections import defaultdict
    from data import Tokenizer

    tokenizer = Tokenizer()
    tokenizer.set_vocab(vocab=token_embedding.vocab)
    model = BoWModel(embedder=token_embedding)

    def to_ids(text):
        # Convert a sentence into a batch-of-one tensor of token ids.
        return paddle.to_tensor([tokenizer.text_to_ids(text)])

    # Group every text_b under its text_a, preserving file order.
    text_pairs = defaultdict(list)
    with open(data_path, "r", encoding="utf8") as f:
        for line in f:
            parts = line.strip().split("\t")
            print(parts)
            if len(parts) != 2:
                # Skip malformed lines instead of crashing on unpacking.
                continue
            text_a, text_b = parts
            text_pairs[text_a].append(text_b)

    # Print the cosine similarity of every (text_a, text_b) pair.
    for text_a, text_b_list in text_pairs.items():
        text_a_ids = to_ids(text_a)
        for text_b in text_b_list:
            text_b_ids = to_ids(text_b)
            print("text_a: {}".format(text_a))
            print("text_b: {}".format(text_b))
            print("cosine_sim: {}".format(model.get_cos_sim(text_a_ids, text_b_ids).numpy()[0]))
            print()

    # Collect each sentence and its vector for VisualDL's embedding view.
    label_list = []
    embedding_list = []
    for text_a, text_b_list in text_pairs.items():
        # Run the model on the sentence ids to get its sentence vector.
        embedding_list.append(model(to_ids(text_a)).flatten().numpy())
        label_list.append(text_a)
        for text_b in text_b_list:
            embedding_list.append(model(to_ids(text_b)).flatten().numpy())
            label_list.append(text_b)

    # LogWriter is imported once at module level; no need to re-import here.
    with LogWriter(logdir='./sentence_hidi') as writer:
        writer.add_embeddings(tag='test', mat=embedding_list, metadata=label_list)