import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle as pkl
from tqdm import tqdm
import re
import time

from omod import BiLSTM_Model, target_dict

def cut(test_data):
    """Segment Chinese sentences into words with a pretrained BiLSTM tagger.

    Each sentence is mapped to vocabulary ids, padded/truncated to MAX_LEN,
    run through the BMES-style sequence tagger, and the predicted tags are
    folded back into word lists. Sentences containing any out-of-vocabulary
    character are skipped entirely (matching the original filtering intent).

    Args:
        test_data: list of str — sentences to segment.

    Returns:
        list of list of str: one word list per in-vocabulary sentence, in
        input order. Sentences with OOV characters are dropped.
    """
    MAX_LEN = 32  # fixed sequence length the model was trained with — TODO confirm

    # Load the character -> id vocabulary built at training time.
    with open('data/generate_pkl/vocab.pkl', 'rb') as f:
        word2id = pkl.load(f)
    vocab_size = len(word2id)

    # +1 keeps id 0 free for padding / unknown characters.
    model = BiLSTM_Model(vocab_size + 1, 128, 64)
    # NOTE(review): weights_only=False unpickles arbitrary objects; only load
    # trusted checkpoint files.
    model.load_state_dict(torch.load('model/BiLSTM_model.pkl', weights_only=False))
    model.eval()

    # Map characters to ids; unknown characters become 0.
    test_data_id = [[word2id.get(ch, 0) for ch in line] for line in test_data]

    # Keep only sentences with no OOV character. BUGFIX: the original removed
    # OOV sentences from test_data_id but still iterated the unfiltered
    # test_data against the filtered model output, misaligning tags and
    # characters whenever any sentence contained an OOV character.
    kept = [i for i, ids in enumerate(test_data_id) if 0 not in ids]
    test_data = [test_data[i] for i in kept]
    test_data_id = [test_data_id[i] for i in kept]

    # Nothing survived filtering — avoid calling the model on an empty batch.
    if not test_data_id:
        return []

    # Pad — and BUGFIX: truncate — every row to MAX_LEN so the batch is
    # rectangular; the original never truncated, so sentences longer than
    # MAX_LEN produced a ragged list and torch.tensor raised.
    test_data_id = [(ids + [0] * MAX_LEN)[:MAX_LEN] for ids in test_data_id]

    # Inference only: no autograd graph needed.
    with torch.no_grad():
        output = model(torch.tensor(test_data_id))

    # (batch, seq, n_tags) -> best tag id per position.
    tag_ids = np.argmax(output.detach().numpy(), axis=2)

    # Invert target_dict once instead of a linear scan per character.
    id2tag = {v: k for k, v in target_dict.items()}

    # Fold BMES-style tags back into words:
    #   S = single-char word, B/M = word start/middle, E = word end,
    #   N = non-word position (skipped, char dropped as in the original).
    result = []
    for line, line_tags in zip(test_data, tag_ids):
        words = []
        buf = []
        # zip stops at len(line), ignoring tag positions that cover padding.
        for ch, tid in zip(line, line_tags):
            tag = id2tag[tid]
            if tag == 'N':
                continue
            elif tag == 'S':
                buf.append(ch)
                words.append(''.join(buf))
                buf = []
            elif tag == 'B' or tag == 'M':
                buf.append(ch)
            elif tag == 'E':
                buf.append(ch)
                words.append(''.join(buf))
                buf = []
        result.append(words)
    return result

if __name__ == "__main__":
    # Quick smoke run: segment two demo sentences and show the result.
    demo_sentences = ["你好世界", "将磁颐国的空调打开"]
    segmented = cut(demo_sentences)
    print(segmented)
