from transformers import BertTokenizer, BertForSequenceClassification
import torch
import chardet

# Load the sequence-classification model and its matching tokenizer from a
# local directory (no network download at runtime — presumably a previously
# downloaded/fine-tuned checkpoint; confirm against how the directory was made).
model_path = "C:/Users/86182/Desktop/sqsx_lqbz/model_download"
model = BertForSequenceClassification.from_pretrained(model_path)
tokenizer = BertTokenizer.from_pretrained(model_path)


# 读取主观题文档
# Read the subjective-question document
def read_document(file_path):
    """Read a text file, trying several common encodings in turn.

    Args:
        file_path: Path of the text file to read.

    Returns:
        The file's full contents as a string.

    Raises:
        OSError: If the file cannot be opened at all (missing, unreadable).
        ValueError: If none of the candidate encodings can decode the file.
    """
    # Try utf-16 BEFORE latin1: latin1 maps every possible byte, so it never
    # raises and would otherwise "succeed" on utf-16 files, returning mojibake
    # and making the utf-16 entry unreachable.
    encodings = ['utf-8', 'gbk', 'utf-16', 'latin1']
    for encoding in encodings:
        try:
            with open(file_path, 'r', encoding=encoding) as file:
                return file.read()
        except UnicodeDecodeError:
            # Decoding failed; try the next encoding. OSError (e.g. file not
            # found) is deliberately NOT caught here — retrying it per-encoding
            # would mask the real error as a bogus "unknown encoding" failure.
            continue
    raise ValueError(f"无法使用任何已知编码格式读取文件: {file_path}")


# Preprocess text (e.g. remove surrounding whitespace)
def preprocess_text(text):
    """Normalize raw document text by trimming leading/trailing whitespace."""
    cleaned = text.strip()
    return cleaned


# Run the classifier on a piece of text
def predict_text(text):
    """Classify *text* and return the predicted class index as an int.

    Uses the module-level ``tokenizer`` and ``model``; inference runs under
    ``torch.no_grad()`` so no gradients are tracked.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        output = model(**encoded)
    # The highest-scoring logit along the last (class) dimension wins.
    predicted_class = torch.argmax(output.logits, dim=-1)
    return predicted_class.item()


# Script entry point
def main():
    """Read the subjective-question document, preprocess it, and print the
    predicted class index."""
    # Path of the document to classify.
    document_path = "C:/Users/86182/Documents/T5大模型/主观题训练题库 1w/主观/zhuguan.txt"

    # Load and normalize the document text.
    raw_text = read_document(document_path)
    cleaned_text = preprocess_text(raw_text)

    # Classify and report the result.
    label = predict_text(cleaned_text)
    print(f"预测类别: {label}")


if __name__ == "__main__":
    main()
