#模型使用接口（主观评估）
import torch
from my_data import MyDataset
from torch.utils.data import DataLoader
from net import Model
from transformers import BertTokenizer,AdamW

# Select compute device: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# Number of training epochs (not used by this inference script; kept for parity
# with the training script).
EPOCH = 30000

# Load the tokenizer and the model.
token = BertTokenizer.from_pretrained(r"D://")
model = Model().to(device)
# Class labels, indexed by the model's argmax output: 0 = negative, 1 = positive.
names = ["负向评价","正向评价"]

# Load the dataset.
train_data = MyDataset(r"D://")
# BUGFIX: `collate_fn` is defined later in this module, so passing it directly
# here raised NameError at import time. Bind it lazily through a lambda — the
# name is only resolved when the DataLoader is actually iterated, by which
# point the function exists.
train_loader = DataLoader(train_data, batch_size=1, shuffle=True,
                          collate_fn=lambda batch: collate_fn(batch))

# 将传入的数据进行编码
def collate_fn(data):
    """Tokenize the given text and return model-ready tensors.

    Wraps the input in a single-element batch, encodes it with the
    module-level BERT tokenizer (truncating/padding to 512 tokens), and
    returns the (input_ids, attention_mask, token_type_ids) tensors.
    """
    batch = [data]
    encoded = token.batch_encode_plus(
        batch_text_or_text_pairs=batch,
        truncation=True,
        max_length=512,
        # Pad every sequence with zeros up to max_length.
        padding="max_length",
        # Tensor format: "tf" (TensorFlow), "pt" (PyTorch), "np" (NumPy);
        # defaults to plain lists.
        return_tensors="pt",
        # Also report the sequence lengths.
        return_length=True,
    )

    return (
        encoded["input_ids"],
        encoded["attention_mask"],
        encoded["token_type_ids"],
    )

def test():
    """Interactive sentiment evaluation loop.

    Loads the trained weights, then repeatedly reads a line of text from
    stdin, classifies it with the model, and prints the predicted label.
    Entering "q" ends the loop.
    """
    # BUGFIX: map_location lets a checkpoint saved on GPU load on a
    # CPU-only machine; the original bare torch.load crashed there.
    model.load_state_dict(
        torch.load("params/16_bert_model.pth", map_location=device)
    )
    # Switch to inference mode (disables dropout, etc.).
    model.eval()

    while True:
        text = input("请输入：")
        if text == "q":
            print("测试结束")
            break
        # Encode the raw text and move all inputs to the compute device.
        input_ids, attention_mask, token_type_ids = collate_fn(text)
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        token_type_ids = token_type_ids.to(device)

        # Forward pass without gradient tracking (inference only).
        with torch.no_grad():
            output = model(input_ids, attention_mask, token_type_ids)
            # argmax over the class dimension; .item() converts the
            # one-element tensor to a plain int for list indexing.
            pred = output.argmax(dim=1).item()
            print("模型判定：", names[pred], "\n")

if __name__ == "__main__":
    # Run the interactive evaluation loop when executed as a script.
    test()