from transformers import AutoTokenizer, AutoModel
import json
import os
import re

# Locate the ChatGLM3-6B checkpoint on disk, load tokenizer + model with the
# repo's custom code enabled, spread layers across available devices, and
# switch the model to inference mode.
model_path = os.path.join('/', 'usr', 'src', 'models', 'chatglm3-6b')
print('model_path is ' + model_path, flush=True)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(
    model_path,
    trust_remote_code=True,
    device_map='auto',
).eval()

# Grading instruction (Chinese), prepended to every request: the model plays a
# teacher grading exam answers worth 10 points each and must reply in the
# format 【判分理由】<reasoning> / 【最终得分】<score>, where the final score
# may not exceed the question's maximum. Runtime string — do not translate.
user_prompt = '你是一名高老师，正在批改试卷。每道题满分10分，请根据下面的题目、标准答案、学生答案，对学生答案进行判分和理由。请注意学生答案可能为空。输出格式为：【判分理由】......\n【最终得分】...\n其中,最终得分直接给出这道题的最终分数。最终得分不得超过每道题的满分分数。'

# Load the benchmark questions; the JSON file stores the list of question
# records under the top-level 'example' key.
gaokao_file = './bench.json'  # single-arg os.path.join was a no-op
with open(gaokao_file, 'r', encoding='utf-8') as f:
    gaokao_data = json.load(f)  # json.load reads the stream directly
gaokao_questions = gaokao_data['example']

# Matches a question header like "12．（ 10分）" (number, full-width period,
# full-width parens around the point value) and captures the question body.
# Raw string avoids the invalid '\．' escape of the original; compiled once.
_QUES_PATTERN = re.compile(r'[0-9]*．（ [0-9]*分）([.\S\n ]*)')


def get_ques_content(text):
    """Strip the leading number/score prefix from a question string.

    Returns the captured question body, or the input unchanged when the
    prefix pattern is absent (the original lambda raised AttributeError
    on non-matching input).
    """
    match = _QUES_PATTERN.search(text)
    return match.group(1) if match else text

divider = '-' * 10  # visual separator for the console dump

# Grade the first three benchmark questions with the model and print each
# prompt/response pair next to the human reference score for manual review.
for ques_item in gaokao_questions[:3]:
    ques = get_ques_content(ques_item['question'])
    standard = ques_item['standard_answer']
    answer = ques_item['model_output']
    # Reference correction score (first annotator) — printed for comparison.
    # NOTE: the original also read ques_item['score'] into an unused local;
    # that dead assignment has been removed.
    model_score = ques_item['model_correction_score'][0]

    chat_input = (
        f'{user_prompt}\n【题目】\n{ques}\n【标准答案】\n{standard}'
        f'\n【学生答案】\n{answer}\n请严格对照标准答案和学生答案给分。'
    )
    # Fresh history per question so earlier gradings don't leak into later ones.
    response, _ = model.chat(tokenizer, chat_input, history=[])
    print(f'{divider}{model_score}{divider}', flush=True)
    print(chat_input, flush=True)
    print(f'{divider}response:{divider}', flush=True)
    print(response, flush=True)
    print('\n' * 5, flush=True)


