from flask import request, Blueprint
from sqlalchemy.sql import func
import math
import random
import json
import os
import datetime
import re

from sqlalchemy.sql.expression import func

from ..util import success_res, fail_res, float2percent
from ..model import Question, AutoQuestion, AutoScore
from .. import db
from .chatglm import get_chatglm_score_by_gaokao, check_question_expert_status, llm_create_objective_question, \
    subjective_question_response_re_formater

from ..llm import model_name, llm_model

# Blueprint collecting all question-management and grading endpoints.
question_bp = Blueprint('question', __name__)


@question_bp.route('/get_objective_model_names', methods=['POST'])
def get_objective_model_names():
    """Return the distinct, non-empty model names found in AutoQuestion."""
    rows = AutoQuestion.query.with_entities(AutoQuestion.model_name).distinct().all()
    names = [row[0] for row in rows if row[0]]
    return success_res(data=names)


@question_bp.route('/get_objective_collection_names', methods=['POST'])
def get_objective_collection_names():
    """Return the distinct, non-empty data-collection names found in AutoQuestion."""
    rows = AutoQuestion.query.with_entities(AutoQuestion.datacollection_name).distinct().all()
    names = [row[0] for row in rows if row[0]]
    return success_res(data=names)


@question_bp.route('/get_question_table', methods=['POST'])
def get_question_table():
    """Paginated listing of auto-generated objective questions (tag ids 1/2/3).

    Optional JSON filters: status, expert_status, ques_tagid, model_name,
    collection_name. Sorted by id, newest first.
    """
    body = request.json
    page_size = body.get('page_size', 10)
    page_idx = body.get('page_idx', 1)
    model_name = body.get('model_name', '')
    collection_name = body.get('collection_name', '')
    status = body.get('status', -1)
    expert_status = body.get('expert_status', -1)
    ques_tagid = body.get('ques_tagid', 0)

    query = AutoQuestion.query.filter(AutoQuestion.ques_tagid.in_([1, 2, 3]))
    if status > -1:
        query = query.filter_by(status=status)
    if expert_status > -1:
        query = query.filter_by(expert_status=expert_status)
    if ques_tagid > 0:
        query = query.filter_by(ques_tagid=ques_tagid)
    if model_name:
        query = query.filter_by(model_name=model_name)
    if collection_name:
        query = query.filter_by(datacollection_name=collection_name)

    page = query.order_by(AutoQuestion.id.desc()).paginate(page=page_idx, per_page=page_size)

    questions = []
    for ques in page.items:
        # Non-single-choice answers (tag id != 1) are stored as JSON strings.
        answer = ques.output_answer if ques.ques_tagid == 1 else json.loads(ques.output_answer)
        questions.append({
            'ques_id': ques.id,
            'ques_tagid': ques.ques_tagid,
            'ques_title': ques.output_title,
            'ques_options': ques.output_options,
            'ques_answer': answer,
            'status': ques.get_status(),
            'content': ques.content,
        })
    return success_res(data={'questions': questions, 'total_size': page.total})


@question_bp.route('/create_question', methods=['POST'])
def create_question():
    """Create a new question.

    Tag id 4 creates a subjective `Question`; tag ids 1/3 create an objective
    `AutoQuestion` whose answer is first validated through
    check_question_expert_status(). Any other tag id is rejected, consistent
    with save_question().
    """
    ques_tagid = request.json.get('ques_tagid', 0)
    content = request.json.get('content', '')
    ques_title = request.json.get('ques_title', '')
    ques_answer = request.json.get('ques_answer', '')
    ques_options = request.json.get('ques_options', {})
    if ques_tagid == 4:
        question = Question(ques_title=ques_title, ques_tagid=ques_tagid, content=content, ques_answer=ques_answer)
        db.session.add(question)
    elif ques_tagid in [1, 3]:
        status, expert_status, accur_count, total_count = check_question_expert_status(ques_tagid, content, ques_title,
                                                                                       ques_answer,
                                                                                       json.dumps(ques_options))
        auto_question = AutoQuestion(output_title=ques_title, ques_tagid=ques_tagid, content=content,
                                     output_answer=ques_answer, expert_status=expert_status,
                                     output_options=ques_options,
                                     status=1, output_accur_count=accur_count, output_total_count=total_count)
        db.session.add(auto_question)
    else:
        # Bug fix: an unknown type previously committed nothing yet reported success.
        return fail_res(msg="题目类型不存在")
    try:
        db.session.commit()
    except Exception:  # was a bare except; keep the best-effort rollback but don't swallow system exits
        db.session.rollback()
    return success_res()


@question_bp.route('/save_question', methods=['POST'])
def save_question():
    """Update an existing question in place.

    Editing an objective question (tag ids 1/3) also marks it reviewed:
    status=1, expert_status=1 and a fully-correct blank count.
    """
    body = request.json
    ques_id = body.get('ques_id', 0)
    ques_tagid = body.get('ques_tagid', 0)
    content = body.get('content', '')
    ques_title = body.get('ques_title', '')
    ques_answer = body.get('ques_answer', '')
    ques_options = body.get('ques_options', {})

    if ques_tagid == 4:
        question = Question.query.filter_by(id=ques_id).first()
        if not question:
            return fail_res(msg="题目不存在")
        question.content = content
        question.ques_title = ques_title
        question.ques_answer = ques_answer
        db.session.commit()
    elif ques_tagid in [1, 3]:
        # A fill-in-the-blank (tag 3) answer is a JSON list, one entry per blank.
        blank_count = len(json.loads(ques_answer)) if ques_tagid == 3 else 1
        AutoQuestion.query.filter_by(id=ques_id).update(
            {'content': content, 'output_title': ques_title, 'output_answer': ques_answer,
             'output_options': ques_options,
             'status': 1, 'expert_status': 1, 'output_accur_count': blank_count,
             'output_total_count': blank_count, })
        db.session.commit()
    else:
        return fail_res(msg="题目类型不存在")
    return success_res()


@question_bp.route('/delete_question', methods=['POST'])
def delete_question():
    """Delete a question by id; the target table depends on the question type."""
    ques_id = request.json.get('ques_id', 0)
    ques_tagid = request.json.get('ques_tagid', 0)
    if ques_tagid == 4:
        Question.query.filter_by(id=ques_id).delete()
    elif ques_tagid in [1, 3]:
        AutoQuestion.query.filter_by(id=ques_id).delete()
    else:
        # Bug fix: an unknown type previously fell through and committed an
        # empty transaction while reporting success; reject it instead,
        # consistent with save_question.
        return fail_res(msg="题目类型不存在")
    db.session.commit()
    return success_res()


@question_bp.route('/get_discuss_table', methods=['POST'])
def get_discuss_table():
    """Paginated listing of subjective/discussion questions (tag id 4),
    optionally filtered by status, newest first."""
    body = request.json
    page_size = body.get('page_size', 10)
    page_idx = body.get('page_idx', 1)
    status = body.get('status', -1)

    query = Question.query.filter_by(ques_tagid=4)
    if status > -1:
        query = query.filter_by(status=status)
    page = query.order_by(Question.id.desc()).paginate(page=page_idx, per_page=page_size)

    questions = [{
        'ques_id': item.id,
        'ques_tagid': item.ques_tagid,
        'ques_title': item.ques_title,
        'ques_answer': item.ques_answer,
        'status': item.get_status(),
        'content': item.content,
    } for item in page.items]
    return success_res(data={'questions': questions, 'total_size': page.total})


@question_bp.route('/get_score', methods=['POST'])
def get_score():
    """Grade a submitted answer for a stored question.

    Tag 1 (single-choice): exact match scores 100, otherwise 0.
    Tag 3 (fill-in-the-blank): percentage of blanks matching the reference
    answers (the stored answer is '；'-separated).
    Tag 4 (subjective): delegated to the gaokao-rubric LLM grader.
    """
    ques_id = request.json.get('ques_id', 0)
    ques_tagid = request.json.get('ques_tagid', 0)
    answer = request.json.get('answer', '')
    if ques_tagid in [1, 2, 3]:
        question = AutoQuestion.query.filter_by(id=ques_id).first()
        if question:
            score = 0  # bug fix: `score` was unbound for a wrong single-choice answer
            if question.ques_tagid == 1:
                if answer == question.output_answer:
                    score = 100
            elif question.ques_tagid == 3:
                # Bug fixes: `.spilt` typo raised AttributeError, and the
                # denominator read question.ques_answer, which AutoQuestion
                # rows store as output_answer (TODO confirm against model);
                # grade against the reference list itself. zip() also avoids
                # an IndexError when the submitted list is longer.
                output_answer = question.output_answer.split('；')
                matched = sum(1 for given, ref in zip(answer, output_answer) if given == ref)
                score = int(matched / len(output_answer) * 100)
            return success_res(data=score)
    elif ques_tagid == 4:
        question = Question.query.filter_by(id=ques_id).first()
        if question:
            status, score, reason, response = get_chatglm_score_by_gaokao(question.ques_title, question.ques_answer,
                                                                          answer)
            return success_res(data={"reason": reason, "score": score}) if status == 1 else fail_res(
                msg=f'判分失败，output：{response}')
    return fail_res(msg='题目不存在')


@question_bp.route('/calculate_subjective_score', methods=['POST'])
def calculate_subjective_score():
    """Grade an ad-hoc subjective answer via the gaokao-rubric LLM grader
    (no stored question involved)."""
    ques_title = request.json.get('ques_title', '')
    ques_answer = request.json.get('ques_answer', '')
    answer = request.json.get('answer', '')
    status, score, reason, response = get_chatglm_score_by_gaokao(ques_title, ques_answer, answer)
    if status == 1:
        return success_res(data={"reason": reason, "score": score})
    return fail_res(msg=f'判分失败，output：{response}')


@question_bp.route('/submit_subjective_answer', methods=['POST'])
def submit_subjective_answer():
    """Persist a graded subjective answer (LLM score plus expert score)
    as an AutoScore record attached to an existing tag-4 question."""
    ques_id = request.json.get('ques_id', 0)
    ques_tagid = request.json.get('ques_tagid', 0)
    answer = request.json.get('answer', '')
    score = request.json.get('score', 0)
    expert_score = request.json.get('expert_score', 0)
    if ques_tagid == 4:
        # The question must exist before a score record is attached to it.
        question = Question.query.filter_by(id=ques_id).first()
        if question:
            as_record = AutoScore(question_id=ques_id, answer=answer, llm_score=score,
                                  expert_score=expert_score, status=1)
            db.session.add(as_record)
            try:
                db.session.commit()
            except Exception:  # was a bare except; keep the best-effort rollback
                db.session.rollback()
        else:
            return fail_res(msg='题目不存在')
    return success_res()


@question_bp.route('/get_question_item', methods=['POST'])
def get_question_item():
    """Fetch one question for display/answering, keyed by id and type."""
    ques_id = request.json.get('ques_id', 0)
    ques_tagid = request.json.get('ques_tagid', 0)
    if ques_tagid in [1, 3]:
        question = AutoQuestion.query.filter_by(id=ques_id).first()
        if question:
            blank_count = 1
            output_answer = question.output_answer
            if ques_tagid == 3:
                # Fill-in-the-blank answers are stored as a JSON list.
                blank_count = question.output_total_count
                output_answer = json.loads(output_answer)
            options = {}
            if ques_tagid == 1:
                options = question.output_options
            ques = {'ques_title': question.output_title,
                    'ques_answer': output_answer,
                    'options': options,
                    'blank_count': blank_count,
                    'content': question.content, }
            return success_res(data=ques)
        else:
            return fail_res(msg='题目不存在')
    elif ques_tagid == 4:
        question = Question.query.filter_by(id=ques_id).first()
        if question:
            ques = {'ques_title': question.ques_title,
                    'ques_answer': question.ques_answer}
            return success_res(data=ques)
        else:
            return fail_res(msg='题目不存在')
    # Bug fix: unknown question types previously returned None (HTTP 500).
    return fail_res(msg='题目类型不存在')


@question_bp.route('/get_objective_question_items', methods=['POST'])
def get_objective_question_items():
    """Fetch a batch of objective questions by their ids."""
    ques_ids = request.json.get('ques_ids', [])
    questions = AutoQuestion.query.filter(AutoQuestion.id.in_(ques_ids)).all()

    def _to_dict(question):
        # Single-choice (tag 1) carries options; fill-in-the-blank (tag 3)
        # stores its answers as a JSON list with one entry per blank.
        is_blank = question.ques_tagid == 3
        answer = json.loads(question.output_answer) if is_blank else question.output_answer
        return {'ques_title': question.output_title,
                'ques_answer': answer,
                'options': question.output_options if question.ques_tagid == 1 else {},
                'ques_tagid': question.ques_tagid,
                'id': question.id,
                'blank_count': question.output_total_count if is_blank else 1,
                'content': question.content, }

    return success_res(data=[_to_dict(question) for question in questions])


@question_bp.route('/calculate_objective_accuracy', methods=['POST', 'GET'])
def calculate_objective_accuracy():
    """Build a plain-text report of question-generation quality per model
    (and, nested, per data collection).

    出题率 (generation rate) = usable questions (status=1) / all generated.
    正确率 (accuracy)        = expert-approved (expert_status=1) usable / usable.

    Fixes over the previous version:
    - the accuracy ratio is now guarded by its own denominator
      (single_normal_count), avoiding a ZeroDivisionError when a model
      generated single-choice questions but none were usable;
    - large discarded f-string expressions (evaluated, then thrown away) and
      the queries feeding only them were removed;
    - the model-level 填空题 line is labelled 出题率, matching the formula it
      prints and the per-collection report.
    """
    model_names = AutoQuestion.query.with_entities(AutoQuestion.model_name).distinct().all()
    model_names = [i[0] for i in model_names if i[0]]
    print(model_names, flush=True)

    res = ""
    for model_name in model_names:
        collect_names = AutoQuestion.query.filter_by(model_name=model_name).with_entities(
            AutoQuestion.datacollection_name).distinct().all()
        collect_names = [i[0] for i in collect_names if i[0]]
        print(collect_names, flush=True)

        res += f'''
            模型：{model_name}
        '''

        single_normal_count = AutoQuestion.query.filter_by(status=1, ques_tagid=1, model_name=model_name).count()
        blank_normal_count = AutoQuestion.query.filter_by(status=1, ques_tagid=3, model_name=model_name).count()

        single_count = AutoQuestion.query.filter_by(ques_tagid=1, model_name=model_name).count()
        blank_count = AutoQuestion.query.filter_by(ques_tagid=3, model_name=model_name).count()

        single_acc_count = AutoQuestion.query.filter_by(status=1, ques_tagid=1, expert_status=1,
                                                        model_name=model_name).count()

        res += f'''
            单选题
                出题率：{float2percent(single_normal_count / single_count) if single_count else 0}
                正确率：{float2percent(single_acc_count / single_normal_count) if single_normal_count else 0}
            填空题
                出题率：{float2percent(blank_normal_count / blank_count) if blank_count else 0}
        '''

        for collect_name in collect_names:
            single_normal_count = AutoQuestion.query.filter_by(status=1, ques_tagid=1, model_name=model_name,
                                                               datacollection_name=collect_name).count()
            blank_normal_count = AutoQuestion.query.filter_by(status=1, ques_tagid=3, model_name=model_name,
                                                              datacollection_name=collect_name).count()

            single_count = AutoQuestion.query.filter_by(ques_tagid=1, model_name=model_name,
                                                        datacollection_name=collect_name).count()
            blank_count = AutoQuestion.query.filter_by(ques_tagid=3, model_name=model_name,
                                                       datacollection_name=collect_name).count()

            single_acc_count = AutoQuestion.query.filter_by(status=1, ques_tagid=1, expert_status=1,
                                                            model_name=model_name,
                                                            datacollection_name=collect_name).count()

            res += f'''
                数据集：{collect_name}
                单选题
                    出题率：{float2percent(single_normal_count / single_count) if single_count else 0}
                    正确率：{float2percent(single_acc_count / single_normal_count) if single_normal_count else 0}
                填空题
                    出题率：{float2percent(blank_normal_count / blank_count) if blank_count else 0}
            '''

    return success_res(data=res)


@question_bp.route('/calculate_subjective_accuracy', methods=['POST', 'GET'])
def calculate_subjective_accuracy():
    """Compare LLM scores against expert scores (10-point scale), per model.

    Reports mean difference, variance and standard deviation of
    (llm_score - expert_score) over all status=1 AutoScore records.
    """
    model_names = AutoScore.query.filter_by(status=1).with_entities(AutoScore.model_name).distinct().all()
    model_names = [name for (name,) in model_names if name]
    print(model_names, flush=True)

    res = "以10分为满分计算\n"
    for model_name in model_names:
        records = AutoScore.query.filter_by(model_name=model_name, status=1).all()

        # model_names only contains models with at least one status=1 record,
        # so the divisions below are safe.
        diffs = [record.llm_score - record.expert_score for record in records]
        raw_variance = sum(d * d for d in diffs) / len(diffs)

        mean_diff = round(sum(diffs) / len(diffs), 2)
        variance = round(raw_variance, 2)
        std_dev = round(math.sqrt(raw_variance), 2)

        res += f"""
            模型：{model_name}
            均值差：{mean_diff}
            方差：{variance}
            标准差：{std_dev}
        """
    return success_res(data=res)


@question_bp.route('/rand_objective_question', methods=['POST', 'GET'])
def rand_objective_question():
    """Return `count` expert-approved objective questions starting at a
    random offset into the approved set."""
    count = request.json.get('count', 1)
    ques_filter = AutoQuestion.query.filter_by(status=1, expert_status=1)
    total_count = ques_filter.count()
    # Bug fix: the old upper bound (total_count - count - 1) skipped the last
    # possible window and raised ValueError whenever total_count <= count + 1.
    rand_num = random.randint(0, max(0, total_count - count))
    questions = ques_filter.offset(rand_num).limit(count)
    res = [{'ques_title': question.output_title,
            'ques_answer': question.output_answer if question.ques_tagid == 1 else json.loads(question.output_answer),
            'options': question.output_options,
            'blank_count': question.output_total_count} for question in questions]
    return success_res(data=res)


@question_bp.route('/create_subjective_questions_by_file', methods=['POST', 'GET'])
def create_objective_questions_by_file():
    """Batch-generate objective questions from an uploaded text file.

    The file is saved under ./static with a timestamp name, split on blank
    lines, and every paragraph is sent to the LLM once per requested question
    type (comma-separated `ques_tagids` form field). Returns all generated
    question data.
    """
    file = request.files.get('file', None)
    if file is None:
        # Robustness: previously an absent file part raised AttributeError.
        return fail_res(msg='参数有误')
    file_name = file.filename
    suffix = os.path.splitext(file_name)[-1]  # file extension, e.g. '.txt'
    timestamp = datetime.datetime.now().timestamp()  # timestamp-based name avoids collisions
    upload_path = os.path.join(os.getcwd(), 'static')  # NOTE: the directory must already exist
    file_path = os.path.join(upload_path, str(timestamp) + suffix)
    file.save(file_path)

    ques_tagids = request.form.get('ques_tagids', '')

    with open(file_path, 'r', encoding='utf-8') as f:
        file_content = f.read()

    ques_data = []
    # Paragraphs are separated by blank lines; newlines inside a paragraph are
    # escaped so each content stays a single line in the LLM prompt.
    for content in [i.strip().replace('\n', '\\n') for i in file_content.split('\n\n') if i.strip()]:
        for ques_tagid in ques_tagids.split(','):
            flag, res = llm_create_objective_question(content, int(ques_tagid), batch=True,
                                                      datacollection_name=file_name.split('.')[0])
            ques_data.extend(res)
            if not flag:
                print(content, res, flush=True)
    return success_res(data=ques_data)


@question_bp.route('/calculate_file_score', methods=['POST', 'GET'])
def calculate_subjective_file_score():
    """Grade a file of subjective answers with the LLM and compare to experts.

    Expected file layout: 问题 / 标准答案 / 满分 sections followed by repeated
    答案/得分 pairs. Every answer is scored by the LLM in leave-one-out
    fashion (all other answers serve as few-shot examples), expert scores are
    normalised to a 10-point scale, and the report contains the mean
    difference, variance and standard deviation of (LLM - expert).
    """
    file = request.files.get('file', None)
    if file is None:
        # Robustness: previously an absent file part raised AttributeError.
        return fail_res(msg='参数有误')
    file_name = file.filename
    suffix = os.path.splitext(file_name)[-1]  # file extension, e.g. '.txt'
    timestamp = datetime.datetime.now().timestamp()  # timestamp-based name avoids collisions
    upload_path = os.path.join(os.getcwd(), 'static')  # NOTE: the directory must already exist
    file_path = os.path.join(upload_path, str(timestamp) + suffix)
    file.save(file_path)

    # Prompt templates (runtime strings, kept verbatim).
    subjective_formmatter = """
            你是一个专业的判分老师，基于问题和标准答案、对答案进行打分，其中标准答案的得分是满分10分，
            对答案与标准答案进行比较，理解评分标准，基于评分标准对答案评分，得分不超过满分。返回格式如下：{{"分数": score}}

            问题：
            {ques_title}
            标准答案：
            {ques_standard_answer}

            {answer_scores}

            答案：
            {need_score_answer}
            仅输出得分
        """

    subjective_answer_score_formmatter = """
            答案：
            {ques_answer}
            得分：
            {{"分数": {ques_score}}}
        """

    with open(file_path, 'r', encoding='utf-8') as f:
        file_content = f.read()

    content_re = re.search('问题：([\n\S\s.]*)标准答案：([\n\S\s.]+?)满分：([\n\d\.]*)(答案：[\n\S\s.]*得分：[\n\d\.]*)+', file_content)
    if content_re is None:
        # Robustness: previously an unmatched file raised AttributeError.
        return fail_res(msg='文件格式有误')

    ques_title = content_re.group(1).strip()
    ques_standard_answer = content_re.group(2).strip()
    ques_full_score = content_re.group(3).strip()
    answer_score_list = content_re.group(4).strip()

    answer_score_list_re = re.findall('(答案：[\n\S\s.]+?得分：[\n\d\.]*)+?', answer_score_list)

    answer_score_tuples = []
    for answer_score in answer_score_list_re:
        answer_score_re = re.search('答案：([\n\S\s.]*)得分：([\n\d\.]*)', answer_score)
        score_str = answer_score_re.group(2).strip()
        # Normalise the expert score to the 10-point scale used by the LLM.
        score = round(float(score_str) / float(ques_full_score) * 10, 2)
        answer_score_tuples.append((answer_score_re.group(1).strip(), score))

    llm_expert_score_tuples = []
    for idx in range(len(answer_score_tuples)):
        as_tuples = answer_score_tuples.copy()
        need_answer_score = as_tuples.pop(idx)  # leave-one-out: this one gets scored

        answer_scores_str = ''
        for answer, score in as_tuples:
            answer_scores_str += subjective_answer_score_formmatter.format(ques_answer=answer,
                                                                           ques_score=score)

        if answer_scores_str:
            answer_scores_str = """
            例如
            {answer_scores}
            """.format(answer_scores=answer_scores_str)

        prompt = subjective_formmatter.format(ques_title=ques_title, ques_standard_answer=ques_standard_answer,
                                              answer_scores=answer_scores_str,
                                              need_score_answer=need_answer_score[0])

        response = llm_model.chat(prompt)

        output_score_re = re.findall('\{"分数": ([\d\.]*)\}', response)
        # Bug fix: guard the findall result itself — output_score_re[-1]
        # raised IndexError when the response contained no score at all.
        if output_score_re and output_score_re[-1]:
            output_score = round(float(output_score_re[-1]), 2)
            llm_expert_score_tuples.append((output_score, float(need_answer_score[1])))
        else:
            print(f"得分解析异常：response is {response}", flush=True)

    print(llm_expert_score_tuples, flush=True)

    if not llm_expert_score_tuples:
        # Robustness: previously divided by zero when nothing parsed.
        return fail_res(msg='评分异常')

    diff_score_list = [llm_score - expert_score for llm_score, expert_score in llm_expert_score_tuples]
    ave_diff = round(sum(diff_score_list) / len(llm_expert_score_tuples), 2)
    var_diff = round(sum(d ** 2 for d in diff_score_list) / len(llm_expert_score_tuples), 2)

    # Bug fix: the standard deviation is sqrt(variance), not `var_diff // 2`.
    return success_res(data=f"""模型：{model_name}
                                模型打分与专家打分对比
                                均值差: {ave_diff},
                                方差: {var_diff},
                                标准差: {round(math.sqrt(var_diff), 2)}
                                """)


@question_bp.route('/calculate_subjective_score_by_json', methods=['POST', 'GET'])
def calculate_subjective_score_by_json():
    """Grade one subjective answer via the LLM, with optional few-shot examples.

    JSON body: title, standard_answer, answer, and answer_scores — a list of
    {"answer": ..., "score": ...} example pairs inserted into the prompt.
    Returns the parsed reason and score, or a failure message when the LLM
    response cannot be parsed.
    """
    title = request.json.get('title', '')
    standard_answer = request.json.get('standard_answer', '')
    answer_scores = request.json.get('answer_scores', [])
    answer = request.json.get('answer', '')

    if not (title and standard_answer and answer):
        return fail_res(msg='参数有误')

    # Prompt templates (runtime strings, kept verbatim).
    subjective_formmatter = """
            你是一个专业的          判分原则：1、待判答案与标准答案内容、主体越接近，得分越高；2、待判答案的关键内容覆盖标准答案关键信息越多得分越高；3、待判答案的内容与问题紧密相关得8-10分，较为紧密得5-7分，一般紧密得2-4分，毫不相关得1分，不作答得0分。
            输出格式是标准的json格式，判分老师，基于问题、标准答案、对待判答案给出评分理由和得分，其中标准答案的得分是满分10分，判分得分不得超过10分。
  包括评分理由和得分，评分理由是字符串形式，得分是数值形式。输出内容需要严格按照如下格式：{{"评分理由": "理由"， "得分": score}}，不要有其它无关符号等文本以避免解析失败。

            问题：
            {ques_title}
            标准答案：
            {ques_standard_answer}
            {answer_scores}
            待判答案：
            {need_score_answer}
            输出待判答案的评分理由和得分
        """

    subjective_answer_score_formmatter = """
            示例{ques_answer_idx}：
            答案：
            {ques_answer}
            得分：
            {{"分数": {ques_score}}}
        """

    answer_scores_str = ''
    for ans_idx, answer_score in enumerate(answer_scores):
        answer_scores_str += subjective_answer_score_formmatter.format(ques_answer_idx=ans_idx + 1,
                                                                       ques_answer=answer_score['answer'],
                                                                       ques_score=answer_score['score'])
    if answer_scores_str:
        answer_scores_str = """
            示例答案及得分{answer_scores}
            """.format(answer_scores=answer_scores_str)

    prompt = subjective_formmatter.format(ques_title=title, ques_standard_answer=standard_answer,
                                          answer_scores=answer_scores_str,
                                          need_score_answer=answer)
    print(prompt, flush=True)
    response = llm_model.chat(prompt)

    try:
        output_score_re = re.match(subjective_question_response_re_formater, response)
        if output_score_re is None:
            # Bug fix: a non-matching response previously hit unbound locals
            # and relied on the resulting NameError being swallowed by a bare
            # except; fail explicitly instead.
            print(f"得分解析异常：response is {response}", flush=True)
            return fail_res(msg='评分异常')
        output_reason = output_score_re.group(1)
        output_score = round(float(output_score_re.group(2)), 2)
        return success_res(data={"reason": output_reason, "score": output_score})
    except (ValueError, IndexError):  # malformed number / missing group in the match
        print(f"得分解析异常：response is {response}", flush=True)
        return fail_res(msg='评分异常')
