import torch
from torch.utils.data import DataLoader
from main import Model, TextDataset, dev_eval  # training script that defines the model, dataset, and eval helper
import pickle as pkl
from tqdm import tqdm
import numpy as np
# Load the trained model weights for inference.
model = Model()
model.load_state_dict(torch.load('./saved_dict/lstm.ckpt'))  # path of the saved checkpoint
model.eval()  # switch to eval mode (disables dropout, etc.)
embedding_pretrained = \
    torch.tensor(
    np.load(
        './data/embedding_Tencent.npz')
    ["embeddings"].astype('float32'))
                                            # pre-trained word embeddings
embed = embedding_pretrained.size(1)        # embedding dimension
# NOTE(review): several of the settings below (dropout, num_epochs, learning_rate,
# hidden_size, num_layers, MAX_VOCAB_SIZE) mirror the training config and are
# not used in this inference script.
dropout = 0.5                               # dropout probability
num_classes = 2                             # number of classes
num_epochs = 30                             # number of training epochs
batch_size = 128                            # mini-batch size
pad_size = 50                               # fixed sentence length (pad short / truncate long)
learning_rate = 1e-3                        # learning rate
hidden_size = 128                           # LSTM hidden size
num_layers = 2                              # number of LSTM layers
MAX_VOCAB_SIZE = 10000                      # vocabulary size limit
UNK, PAD = '<UNK>', '<PAD>'                 # unknown-word and padding symbols
tokenizer = lambda x: [y for y in x]  # character-level tokenization
# Load the data.
# The data loading here must match the preprocessing used at training time.
# Adjust the code below to your actual setup; for example, you may need to
# redefine the data loading and preprocessing steps.

# Example data loading and preprocessing code
def load_and_preprocess_data():
    """Build a DataLoader over the held-out test set.

    Uses the module-level config (``pad_size``, ``tokenizer``, ``batch_size``)
    so that preprocessing matches what was used at training time.

    Returns:
        torch.utils.data.DataLoader yielding batches of the test data.
    """
    # Load the vocabulary (token -> id mapping). Use a context manager so the
    # file handle is closed promptly; the original left it open until GC.
    with open('./data/vocab.pkl', 'rb') as vocab_file:
        vocab = pkl.load(vocab_file)

    # Load and numericalize the raw test samples.
    test_data = load_test_data('./data/test_data.txt', pad_size, tokenizer, vocab)

    # shuffle=False keeps the evaluation order deterministic.
    return DataLoader(TextDataset(test_data), batch_size, shuffle=False)

# Example loader for the labeled test-data file
def load_test_data(path, pad_size, tokenizer, vocab):
    """Read labeled test samples and convert them to fixed-length id sequences.

    Each non-empty line of the file has the form ``<label>\t####\t<text>``.
    The text is tokenized, padded/truncated to ``pad_size`` tokens, and each
    token is mapped to its vocabulary id (unknown tokens map to ``UNK``).

    Args:
        path: path to the test file (UTF-8 text).
        pad_size: fixed sequence length; 0/None disables padding/truncation.
        tokenizer: callable splitting a string into tokens.
        vocab: dict mapping token -> integer id, containing UNK and PAD.

    Returns:
        List of ``(ids, label)`` tuples, where ``ids`` is a list of ints.
    """
    contents = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue  # skip blank lines
            label, content = lin.split('\t####\t')
            token = tokenizer(content)
            if pad_size:
                if len(token) < pad_size:
                    # BUG FIX: pad with the PAD token *string*, not its vocab id.
                    # The original extended with vocab.get(PAD) (an int), which the
                    # lookup below could not find in vocab, so every padding slot
                    # was silently mapped to UNK instead of PAD.
                    token.extend([PAD] * (pad_size - len(token)))
                else:
                    token = token[:pad_size]
            # Map each token to its id, falling back to the UNK id.
            words_line = [vocab.get(word, vocab.get(UNK)) for word in token]
            contents.append((words_line, int(label)))
    return contents

# Build the test DataLoader.
test_dataloader = load_and_preprocess_data()

# Run inference on the test set.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
# NOTE(review): only the model is moved to `device` here — presumably dev_eval
# moves each batch onto the model's device itself; verify against main.dev_eval.
acc, loss = dev_eval(model, test_dataloader, torch.nn.CrossEntropyLoss(), Result_test=True)
print(f'Test Accuracy: {acc * 100:.2f}%, Test Loss: {loss:.4f}')
