import torch
from MyData import MyDataset
from torch.utils.data import DataLoader
from net import Model
from transformers import BertTokenizer,AdamW

# Select the device used for inference (GPU when available, otherwise CPU)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the BERT tokenizer from a local model cache (bert-base-chinese)
token = BertTokenizer.from_pretrained(r"D:\AI\HuggingFace\my-model-cache\bert-base-chinese")
def collate_fn(batch):
    """Collate a batch of (text, label) pairs into padded tensors on DEVICE.

    Args:
        batch: iterable of (sentence, label) tuples from the dataset.

    Returns:
        Tuple of (input_ids, attention_mask, token_type_ids, labels),
        all moved to DEVICE. Sequences are truncated/padded to 500 tokens.
    """
    sentes = [i[0] for i in batch]
    labels = [i[1] for i in batch]
    # Encode the whole batch at once; every sequence is padded/truncated to a
    # fixed length of 500 so the batch stacks into rectangular tensors.
    # (return_length was dropped: the 'length' field was never read.)
    data = token.batch_encode_plus(batch_text_or_text_pairs=sentes,
                                   truncation=True,
                                   padding='max_length',
                                   max_length=500,
                                   return_tensors='pt'
                                   )
    # input_ids: token ids after encoding
    # attention_mask: 1 for real tokens, 0 for padding positions
    input_ids = data['input_ids'].to(DEVICE)
    attention_mask = data['attention_mask'].to(DEVICE)
    token_type_ids = data['token_type_ids'].to(DEVICE)
    labels = torch.LongTensor(labels).to(DEVICE)
    return input_ids, attention_mask, token_type_ids, labels

# Build the evaluation dataset
test_dataset = MyDataset("test")

# Evaluation loader. shuffle=False: order is irrelevant for accuracy and a
# deterministic order aids debugging. drop_last=False: drop_last=True would
# silently discard up to batch_size-1 test samples (and crash with a zero
# division if the dataset holds fewer than 32 items), biasing the reported
# accuracy — every test sample must be counted.
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=32,
                         shuffle=False,
                         drop_last=False,
                         collate_fn=collate_fn
                         )

if __name__ == "__main__":
    acc = 0
    total =0
    print(DEVICE)
    # 加载模型
    model = Model().to(DEVICE)
    model.load_state_dict(torch.load("params/99bert.pt")) # 加载模型参数
    model.eval() # 测试模式
    for i, (input_ids, attention_mask, token_type_ids, labels) in enumerate(test_loader): # 测试批次
        # 前向传播
        out = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) # 模型输出

        # 计算损失
        out = out.argmax(dim=1) # 预测类别
        acc += (out == labels).sum().item() # 计算准确率
        total += len(labels)
    print(acc/total)
            

            