import json
import logging
import os
import re
import time
from typing import Any, Dict, List, Optional, Tuple

from swift.llm import InferRequest, PtEngine, RequestConfig
os.environ['CUDA_VISIBLE_DEVICES'] = '2,3'
# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("eval_score.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

def find_patient_file(patient_file: str) -> str:
    """
    根据患者文件名在dataset目录中查找对应的文件路径
    """
    dataset_dir = '/mnt/ssd/jsj/patient/dataset'
    # 查找所有目录
    for root, dirs, files in os.walk(dataset_dir):
        if patient_file in files:
            return os.path.join(root, patient_file)
    return None

def load_patient_info(patient_file: str) -> Dict[str, Any]:
    """
    加载患者信息
    """
    file_path = find_patient_file(patient_file)
    if not file_path:
        logger.error(f"未找到患者文件: {patient_file}")
        return {}
    
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except Exception as e:
        logger.error(f"加载患者文件失败: {str(e)}")
        return {}

# 人格定义文件路径
PERSONALITY_DEFINITION_FILE = "/mnt/ssd/jsj/patient/script/人格定义.json"

# 加载人格定义
personality_definition = {}

def load_personality_definition():
    """Load the personality-definition JSON into the module-level global.

    On any failure (missing file, malformed JSON, anything else) the error is
    logged and ``personality_definition`` falls back to an empty
    ``{"personality_dimensions": {}}`` skeleton so downstream lookups stay safe.
    """
    global personality_definition
    fallback = {"personality_dimensions": {}}
    try:
        with open(PERSONALITY_DEFINITION_FILE, 'r', encoding='utf-8') as fp:
            personality_definition = json.loads(fp.read())
    except FileNotFoundError:
        logger.error(f"人格定义文件未找到: {PERSONALITY_DEFINITION_FILE}")
        personality_definition = fallback
    except json.JSONDecodeError as e:
        logger.error(f"人格定义文件格式错误: {PERSONALITY_DEFINITION_FILE}, 错误: {e}")
        personality_definition = fallback
    except Exception as e:
        logger.error(f"加载人格定义文件失败: {PERSONALITY_DEFINITION_FILE}, 错误: {e}")
        personality_definition = fallback

# Load the personality definitions once at import time.
load_personality_definition()

def get_personality_description(target_personality: Dict[str, str]) -> str:
    """Build a detailed, human-readable description of a patient's personality.

    For every dimension in ``target_personality`` except 认知状态 (cognitive
    state, which is skipped), the matching entry in the module-level
    ``personality_definition`` is located by its ``name`` field and its
    定义/表现/约束/表现形式示例 fields are formatted into indented bullet
    lines. Dimensions with no definition fall back to a bare "name: value"
    line.

    Args:
        target_personality: Mapping of dimension name -> current value.

    Returns:
        Multi-line description string starting with a fixed header line;
        a default placeholder description when the input is empty.
    """
    if not target_personality:
        logger.warning("目标人格为空，返回默认描述")
        return "患者人格特征详细定义：暂无特定人格信息"

    descriptions = ["患者人格特征详细定义："]

    for dim_name, current_value in target_personality.items():
        # Cognitive state is handled elsewhere; skip it here.
        if dim_name == "认知状态":
            continue

        # Look up the dimension definition by its "name" field; the dict keys
        # of personality_dimensions are never used, so iterate values only.
        dim_info = None
        for info in personality_definition.get("personality_dimensions", {}).values():
            if info.get("name") == dim_name:
                dim_info = info
                break

        if dim_info is None:
            # No definition found: still emit the basic "name: value" line.
            descriptions.append(f"{dim_name}: {current_value}")
            continue

        dim_key_name = dim_info.get("key", dim_name)
        value_info = dim_info.get("values", {}).get(current_value, {})

        desc_parts = [f"{dim_name}（{dim_key_name}）: {current_value}"]

        if "定义" in value_info:
            desc_parts.append(f"  - 定义：{value_info['定义']}")
        if "表现" in value_info:
            desc_parts.append(f"  - 表现：{value_info['表现']}")
        if "约束" in value_info:
            constraint = value_info['约束']
            if isinstance(constraint, dict):
                # Constraints may nest one level of dicts/lists; flatten to text.
                constraint_text = []
                for k, v in constraint.items():
                    if isinstance(v, dict):
                        constraint_text.append(f"{k}: {', '.join([f'{subk}: {subv}' for subk, subv in v.items()])}")
                    elif isinstance(v, list):
                        constraint_text.append(f"{k}: {', '.join(v)}")
                    else:
                        constraint_text.append(f"{k}: {v}")
                desc_parts.append(f"  - 约束：{'; '.join(constraint_text)}")
            else:
                desc_parts.append(f"  - 约束：{constraint}")

        if "表现形式示例" in value_info:
            desc_parts.append(f"  - 表现形式示例：{'; '.join(value_info['表现形式示例'])}")

        descriptions.append('\n'.join(desc_parts))

    return '\n'.join(descriptions)

def create_scoring_prompts(patient_info: Dict[str, Any], generated_response: str, doctor_question: str) -> List[Tuple[str, str]]:
    """Build (dimension name, scoring prompt) pairs for the four evaluation
    dimensions: Information Consistency, Persona Consistency, Hallucination,
    and Human-Likeness.

    Each prompt embeds the relevant patient context (basic info, medical
    record, and/or expanded personality description) together with the doctor
    question and the generated response, and instructs the judge model to
    output a bare 1-5 integer score.

    Args:
        patient_info: Patient record; the "基础信息", "人格" and "门诊病历"
            keys are used when present.
        generated_response: The simulated patient's answer being judged.
        doctor_question: The doctor question the response answers.

    Returns:
        List of four (dimension name, prompt text) tuples.
    """
    # Patient basic-information section.
    patient_info_text = "患者基本信息：\n"
    if patient_info.get("基础信息"):
        for key, value in patient_info["基础信息"].items():
            patient_info_text += f"- {key}：{value}\n"
    
    # Personality section, expanded via the personality-definition file.
    personality_text = ""
    if patient_info.get("人格"):
        personality_text = get_personality_description(patient_info["人格"])
    
    # Outpatient medical-record section.
    medical_record_text = "门诊病历：\n"
    if patient_info.get("门诊病历"):
        for key, value in patient_info["门诊病历"].items():
            medical_record_text += f"- {key}：{value}\n"
    
    # Scoring prompts for the four dimensions.
    dimensions = [
        (
            "Information Consistency", 
            f"任务：评估生成回答的信息一致性\n"\
            f"评估说明：信息一致性包含两种情况——1.生成回答涉及提供的患者信息时，需与患者详细信息一致；2.生成回答未涉及提供的患者信息时，内容需与患者详细信息逻辑合理（不冲突）\n"\
            f"以下是患者的详细信息：\n{patient_info_text}\n{medical_record_text}\n"\
            f"医生提问：{doctor_question}\n"\
            f"生成回答：{generated_response}\n"\
            f"请结合上述评估说明打分，分值为1-5分的整数。\n"\
            f"5分：涉及患者信息时完全一致，未涉及时与患者信息逻辑高度合理\n"\
            f"4分：涉及患者信息时基本一致（仅微小细节偏差），未涉及时与患者信息逻辑合理\n"\
            f"3分：涉及患者信息时存在部分遗漏但无冲突，或未涉及时与患者信息逻辑基本合理\n"\
            f"2分：涉及患者信息时存在明显冲突/重要遗漏，或未涉及时与患者信息逻辑存在一定矛盾\n"\
            f"1分：涉及患者信息时严重冲突/完全遗漏，或未涉及时与患者信息逻辑严重矛盾\n"\
            f"输出格式：请直接输出分数，禁止添加任何其他文字。"
        ),
        (
            "Persona Consistency", 
            f"任务：评估生成回答的人格一致性\n"\
            f"以下是患者的人格特征：\n{personality_text}\n"\
            f"医生提问：{doctor_question}\n"\
            f"生成回答：{generated_response}\n"\
            f"请根据生成回答是否符合患者的人格特征（五维人格表达程度），评估其人格一致性，分值为1-5分的整数。\n"\
            f"5分：完全符合患者的人格特征\n"\
            f"4分：大部分符合患者的人格特征\n"\
            f"3分：基本符合患者的人格特征\n"\
            f"2分：部分不符合患者的人格特征\n"\
            f"1分：严重不符合或完全不符合患者的人格特征\n"\
            f"输出格式：请直接输出分数，禁止添加任何其他文字。"
        ),
        (
            "Hallucination", 
            f"任务：评估生成回答的幻觉程度\n"\
            f"以下是患者的详细信息：\n{patient_info_text}\n{medical_record_text}\n"\
            f"医生提问：{doctor_question}\n"\
            f"生成回答：{generated_response}\n"\
            f"请评估生成回答是否包含与患者不符的知识，分值为1-5分的整数。\n"\
            f"5分：完全没有包含与患者不符的知识\n"\
            f"4分：几乎没有包含与患者不符的知识\n"\
            f"3分：包含少量与患者不符的知识\n"\
            f"2分：包含较多与患者不符的知识\n"\
            f"1分：包含大量与患者不符的知识\n"\
            f"输出格式：请直接输出分数，禁止添加任何其他文字。"
        ),
        (
            "Human-Likeness", 
            f"任务：评估生成回答的自然程度\n"\
            f"医生提问：{doctor_question}\n"\
            f"生成回答：{generated_response}\n"\
            f"请评估生成回答是否自然、符合人类表达习惯，分值为1-5分的整数。\n"\
            f"5分：非常自然，完全符合人类表达习惯\n"\
            f"4分：很自然，基本符合人类表达习惯\n"\
            f"3分：一般自然，有轻微不自然但可接受\n"\
            f"2分：不够自然，有明显的机器生成痕迹\n"\
            f"1分：非常不自然，完全不符合人类表达习惯\n"\
            f"输出格式：请直接输出分数，禁止添加任何其他文字。"
        )
    ]
    
    return dimensions

def extract_score(response: str) -> int:
    """Extract an integer score in [1, 5] from a judge model's raw response.

    Tries, in order:
    1. a standalone digit 1-5 at a word boundary (e.g. "4", "评分: 4.");
    2. a digit followed by 分 — needed because ``\\b`` never fires between a
       digit and a CJK character (both are ``\\w``), so "4分" fails pattern 1;
    3. keyword inference from the grading-rubric wording, checking the most
       specific phrases first so that e.g. '非常不' (score 1) is not shadowed
       by its prefix '非常' (score 5), and '完全不' (1) is not shadowed by
       '完全' (5);
    falling back to a neutral 3 when nothing matches.
    """
    match = re.search(r'\b([1-5])\b', response)
    if match:
        return int(match.group(1))
    match = re.search(r'([1-5])\s*分', response)
    if match:
        return int(match.group(1))
    # Keyword fallback, ordered most-specific-first.
    keyword_scores = [
        ('五分', 5), ('四分', 4), ('三分', 3), ('二分', 2), ('一分', 1),
        ('非常不', 1), ('完全不', 1), ('严重', 1),
        ('部分不', 2), ('不够', 2),
        ('大部分', 4),
        ('完全', 5), ('非常', 5),
        ('很', 4),
        ('基本', 3), ('一般', 3),
        ('部分', 2),
    ]
    for phrase, score in keyword_scores:
        if phrase in response:
            return score
    logger.warning(f"无法提取分数，响应内容：{response}")
    return 3  # neutral default when nothing can be extracted

def main():
    """Run the four-dimension scoring pipeline over an experiment-results file.

    Loads the judge model, reads the experiment results JSON, scores every
    record on Information Consistency / Persona Consistency / Hallucination /
    Human-Likeness in batches via batched inference, and checkpoints running
    totals and averages to the output file after each batch so an interrupted
    run can be resumed.
    """
    # (Environment variables such as CUDA_VISIBLE_DEVICES are configured at
    # module import time, not here.)

    
    # Load the judge (evaluator) model.
    model_path = '/mnt/ssd/jsj/models/models/ZhipuAI/GLM-4-32B-0414'
    # Judge model name, used to build the output filename.
    evaluator_model = os.path.basename(model_path)
    engine = PtEngine(model_path, max_batch_size=8)  # modest batch size to avoid resource exhaustion
    request_config = RequestConfig(max_tokens=64, temperature=0)
    
    # Load the experiment results to be scored.
    result_file = '/mnt/ssd/jsj/patient/results/CoT/CoT-Qwen2.5-7B-Instruct_window_full.json'
    try:
        with open(result_file, 'r', encoding='utf-8') as f:
            data = json.load(f)
        # Extract the "results" payload; three input shapes are accepted.
        if isinstance(data, list) and data and isinstance(data[0], dict) and 'results' in data[0]:
            # Nested structure: [{results: [...]}, ...]
            results = []
            for item in data:
                if 'results' in item and isinstance(item['results'], list):
                    results.extend(item['results'])
        elif isinstance(data, dict) and 'results' in data:
            results = data['results']
        elif isinstance(data, list):
            results = data
        else:
            logger.error(f"无效的实验结果数据格式")
            return
        
        # Sanity check: the extracted results must be a list.
        if not isinstance(results, list):
            logger.error(f"提取的results不是列表格式")
            return
        
        logger.info(f"成功加载 {len(results)} 条记录")
    except Exception as e:
        logger.error(f"加载实验结果失败: {str(e)}")
        return
    
    # Number of records scored per inference round.
    batch_size = 8  # records per batch
    total_records = len(results)
    
    # Extract the evaluated model's name from the result filename.
    # NOTE(review): this pattern expects "experiment_results_<model>_window...",
    # but the configured result_file basename starts with "CoT-", so the match
    # fails and evaluated_model becomes 'unknown_model' — confirm the intended
    # filename scheme.
    match = re.search(r'experiment_results_(.+?)_window', os.path.basename(result_file))
    if match:
        evaluated_model = match.group(1)
    else:
        evaluated_model = 'unknown_model'
        logger.warning(f"无法从文件名提取被评测模型名称")
    
    # Output filename format: <evaluator model>-evaCoT-<evaluated model>-score.json
    output_file = f'/mnt/ssd/jsj/patient/results/eval-results/{evaluator_model}-evaCoT-{evaluated_model}-score.json'
    
    # Resume support: look for previously checkpointed scores.
    resume_from = 0
    all_scores = {
        "Information Consistency": [],
        "Persona Consistency": [],
        "Hallucination": [],
        "Human-Likeness": []
    }
    
    if os.path.exists(output_file):
        try:
            with open(output_file, 'r', encoding='utf-8') as f:
                saved_results = json.load(f)
            
            # Number of records already processed in a previous run.
            # NOTE(review): current_total actually counts scores per dimension,
            # not input records — if any records were skipped, resuming from
            # this index may re-score or skip records; confirm acceptable.
            if 'current_total' in saved_results and saved_results['current_total'] > 0:
                resume_from = saved_results['current_total']
                all_scores = saved_results.get('total_scores', all_scores)
                logger.info(f"找到已保存的结果，将从记录 {resume_from+1} 开始继续处理")
        except Exception as e:
            logger.error(f"加载已保存的评分结果失败: {str(e)}")
            resume_from = 0
    
    # Record the start time.
    start_time = time.time()
    
    # Process in batches, starting from resume_from.
    for batch_start in range(resume_from, total_records, batch_size):
        batch_end = min(batch_start + batch_size, total_records)
        batch = results[batch_start:batch_end]
        
        print(f"处理批次 {(batch_start-resume_from)//batch_size + 1}，处理记录 {batch_start+1}-{batch_end}/{total_records}")
        print(f"已完成 {batch_start} 条记录，剩余 {total_records - batch_start} 条记录")
        
        # Collect all scoring requests for the current batch.
        batch_infer_requests = []
        request_mapping = []  # maps each request back to (record index, dimension)
        
        for record_idx, record in enumerate(batch):
            patient_file = record.get("patient_file")
            generated_response = record.get("generated_response", "")
            doctor_question = record.get("doctor_question", "")
            
            if not patient_file or not generated_response:
                logger.warning(f"记录 {batch_start+record_idx+1} 缺少必要信息，跳过")
                continue
            
            # Load the patient's record; skip the entry if it cannot be loaded.
            patient_info = load_patient_info(patient_file)
            if not patient_info:
                logger.warning(f"无法加载患者 {patient_file} 的信息，跳过")
                continue
            
            logger.info(f"成功加载患者 {patient_file} 的信息")
            
            # Build the four dimension scoring prompts for this record.
            dimensions = create_scoring_prompts(patient_info, generated_response, doctor_question)
            
            # One inference request per dimension.
            for dim_name, prompt in dimensions:
                messages = [{"role": "user", "content": prompt}]
                infer_request = InferRequest(messages=messages)
                batch_infer_requests.append(infer_request)
                request_mapping.append((record_idx, dim_name))
        
        # Run batched inference for the collected requests.
        try:
            if batch_infer_requests:
                logger.info(f"执行批量推理，请求数量: {len(batch_infer_requests)}")
                resp_list = engine.infer(batch_infer_requests, request_config)
                
                # Parse each response into a score and accumulate it.
                for i, resp in enumerate(resp_list):
                    record_idx, dim_name = request_mapping[i]
                    original_record_idx = batch_start + record_idx
                    
                    try:
                        raw_response = resp.choices[0].message.content
                        score = extract_score(raw_response)
                        
                        # Accumulate the score under its dimension.
                        all_scores[dim_name].append(score)
                        
                        logger.info(f"记录 {original_record_idx+1} - {dim_name}: {score} 分")
                    except Exception as e:
                        logger.error(f"处理记录 {original_record_idx+1} - {dim_name} 的评分结果时发生错误: {str(e)}")
        except Exception as e:
            logger.error(f"执行批量推理时发生错误: {str(e)}")
            continue
        
        # After each batch, compute and persist running totals and averages.
        current_total = 0
        current_avg_scores = {}
        for dim_name, scores in all_scores.items():
            if scores:
                current_total = max(current_total, len(scores))
                current_avg_scores[dim_name] = sum(scores) / len(scores)
            else:
                current_avg_scores[dim_name] = 0
        
        # Checkpoint the current scores (enables resuming on restart).
        try:
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump({
                    "current_total": current_total,
                    "current_average_scores": current_avg_scores,
                    "total_scores": all_scores
                }, f, ensure_ascii=False, indent=2)
            logger.info(f"批次 {batch_start//batch_size + 1} 处理完成，已更新评分结果到 {output_file}")
        except Exception as e:
            logger.error(f"保存评分结果失败: {str(e)}")
    
    # Compute the final per-dimension averages.
    avg_scores = {}
    for dim_name, scores in all_scores.items():
        if scores:
            avg_scores[dim_name] = sum(scores) / len(scores)
            logger.info(f"{dim_name} 平均分: {avg_scores[dim_name]:.2f}")
        else:
            avg_scores[dim_name] = 0
            logger.info(f"{dim_name}: 没有有效评分")
    
    # Save the final score summary.
    try:
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump({
                "final_average_scores": avg_scores,
                "total_samples": sum(len(scores) for scores in all_scores.values()) // 4,  # each sample is scored on 4 dimensions
                "total_scores": all_scores,
                "current_total": sum(len(scores) for scores in all_scores.values()) // 4  # kept for resume-compatibility
            }, f, ensure_ascii=False, indent=2)
        logger.info(f"最终评分结果已保存到 {output_file}")
    except Exception as e:
        logger.error(f"保存评分结果失败: {str(e)}")
    
    # Log total elapsed time.
    total_time = time.time() - start_time
    logger.info(f"评估完成，总耗时: {total_time:.2f} 秒")
    logger.info(f"处理记录总数: {total_records}")
    
    # Print the final summary to stdout.
    print("\n===== 评估结果汇总 =====")
    for dim_name, avg_score in avg_scores.items():
        print(f"{dim_name}: {avg_score:.2f} 分")
    print("========================")

if __name__ == "__main__":
    main()