import torch
import torch.nn as nn
from omod import ChineseDependencyParser2, ChinesePosParser
import pickle as pkl
from gensim.models import FastText

def depp(w):
    """Predict a dependency-relation label for each token of a sentence.

    Parameters
    ----------
    w : list[str]
        Pre-segmented Chinese tokens, e.g. ``"你 想 吃 鱼 吗".split()``.

    Returns
    -------
    list[str]
        One dependency-relation label per token; '空' for predicted indices
        missing from the label vocabulary.
    """
    # Vocabulary tables saved at training time:
    # word -> idx, POS tag -> idx, dependency relation -> idx.
    with open('data/generate_pkl/depvocab.pkl', 'rb') as f:
        word_to_idx, pos_to_idx, deprel_to_idx = pkl.load(f)

    vocab_size = len(word_to_idx) + 1    # +1 slot reserved for unknown words
    num_labels = len(deprel_to_idx) + 1  # +1 slot reserved for unknown labels

    # BUG FIX: the original fallback was the *string* 'UNK', which made
    # torch.tensor(..., dtype=torch.long) raise for any out-of-vocabulary
    # token.  Use the reserved extra index (the "+1" slot above) instead.
    # NOTE(review): confirm this matches the unknown-word index used when
    # the models were trained.
    unk_idx = len(word_to_idx)
    words = [[word_to_idx.get(tok, unk_idx) for tok in w]]

    # ---- POS tagging ------------------------------------------------------
    pos_model = ChinesePosParser(vocab_size, len(pos_to_idx) + 1)
    pos_model.load_state_dict(torch.load('model/pos_parser.pkl', weights_only=False))
    pos_model.eval()
    with torch.no_grad():  # inference only — no gradient tracking needed
        pos_scores = pos_model(torch.tensor(words, dtype=torch.long))

    # NOTE(review): argmax over dim=1 assumes pos_scores is (seq_len, n_tags)
    # for the single sentence — confirm against ChinesePosParser's output.
    predicted_pos = torch.argmax(pos_scores, dim=1).tolist()

    idx_to_pos = {v: k for k, v in pos_to_idx.items()}
    str_pos = [idx_to_pos.get(idx, '?') for idx in predicted_pos]
    print("词性", str_pos)

    # ---- Dependency parsing -----------------------------------------------
    # Dictionary indices and word-vector indices differ, so tokens are
    # re-mapped through the FastText vocabulary.
    # NOTE(review): load_fasttext_format was removed in gensim 4.x
    # (load_facebook_model replaces it) — pin gensim<4 or migrate.
    wmodel = FastText.load_fasttext_format("data/word_vectors.bin")

    # BUG FIX: direct indexing raised KeyError for tokens absent from the
    # embedding vocabulary; fall back to row 0.
    # NOTE(review): confirm row 0 is an acceptable OOV embedding.
    words2 = [[wmodel.wv.key_to_index.get(tok, 0) for tok in w]]

    # Frozen pretrained embedding matrix handed to the parser.
    vovect = torch.tensor(wmodel.wv.vectors, dtype=torch.float, requires_grad=False)

    model = ChineseDependencyParser2(vovect, len(pos_to_idx) + 1, num_labels)
    model.load_state_dict(torch.load('model/dependency_parser.pkl', weights_only=False))
    model.eval()

    with torch.no_grad():
        arc_head_scores, rel_scores = model(
            torch.LongTensor(words2),
            torch.tensor(predicted_pos, dtype=torch.long),
        )

    # rel_scores assumed (batch=1, seq_len, num_labels): argmax over labels,
    # then drop the batch dimension.
    predicted_deprels = torch.argmax(rel_scores, dim=2).tolist()[0]

    # Map predicted indices back to human-readable relation labels.
    idx_to_deprel = {v: k for k, v in deprel_to_idx.items()}
    return [idx_to_deprel.get(idx, '空') for idx in predicted_deprels]

if __name__ == "__main__":
    # Smoke test: parse one sample sentence and show its relation labels.
    sample_tokens = "你 想 吃 鱼 吗".split()
    print(depp(sample_tokens))