import os
import torch
from transformers import BertTokenizer

import dpcnn

def predict_text(model, tokenizer, text, device, max_len=512):
    """Classify one piece of text and return a human-readable label.

    Args:
        model: trained classifier mapping a token-id tensor to class logits.
        tokenizer: HuggingFace-style tokenizer callable.
        text: raw input string to classify.
        device: torch device the model lives on.
        max_len: pad/truncate length for the encoded sequence.

    Returns:
        "人工文本" for class 0, "AI生成文本" for class 1.
    """
    model.eval()

    # Encode the text into a fixed-length tensor of token ids.
    encoded = tokenizer(
        text,
        max_length=max_len,
        padding='max_length',
        truncation=True,
        return_tensors='pt',
    )
    token_ids = encoded['input_ids'].to(device)

    # Forward pass without gradient tracking, then take the argmax class.
    with torch.no_grad():
        logits = model(token_ids)
        predicted_class = torch.argmax(logits, dim=1).item()

    return {0: "人工文本", 1: "AI生成文本"}[predicted_class]

def load_model(model, model_path, device):
    """Load saved weights into ``model`` if a checkpoint exists at ``model_path``.

    The model is moved to ``device`` after loading. If no checkpoint file is
    found, the model is left untouched (it keeps its current, untrained
    weights) and a notice is printed.

    Args:
        model: the module to populate via ``load_state_dict``.
        model_path: filesystem path of the saved ``state_dict``.
        device: target device, used for both ``map_location`` and ``model.to``.
    """
    if os.path.exists(model_path):
        # SECURITY: torch.load uses pickle under the hood; weights_only=True
        # (torch >= 1.13) restricts deserialization to tensors/state dicts so
        # an untrusted checkpoint file cannot execute arbitrary code.
        state_dict = torch.load(model_path, map_location=device, weights_only=True)
        model.load_state_dict(state_dict)
        model.to(device)
        print(f"模型加载成功，{model_path}")
    else:
        print("未找到模型，使用未训练模型")

# Checkpoint produced by the training script.
model_save_path = "saved_models/dpcnn_model.pth"

# Model hyperparameters — must match the values used at training time,
# otherwise load_state_dict will fail on shape mismatch.
embed_dim = 300
num_classes = 2
num_filters = 250

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main():
    """Build the tokenizer/model pair, restore weights, and classify a sample."""
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    # The embedding table size must match the tokenizer's vocabulary.
    vocab_size = tokenizer.vocab_size

    # Create the DPCNN model and restore trained weights (if available).
    model = dpcnn.DPCNN(vocab_size, embed_dim, num_classes, num_filters)
    load_model(model, model_save_path, device)

    sample_text = "这是一个AI写的人工文本。"
    result = predict_text(model, tokenizer, sample_text, device)
    print(f"预测结果：{result}")


# Guard the entry point so importing this module does not trigger the
# tokenizer download, model construction, and inference as side effects.
if __name__ == "__main__":
    main()
