import os
import json
import time
import random
import shutil
import logging
import re
import tiktoken  # 用于token计数（若不可用，请安装或按需降级）
from typing import Dict, List, Any, Tuple
import requests
import traceback

# Configure logging: write to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("dataset_augmentation.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Import configuration; fall back to environment variables / defaults if config.py is unavailable.
try:
    import sys
    sys.path.append("d:\\APP\\Trae CN\\Project\\patient-sim")
    from config import API_KEY, API_BASE_URL, MODEL, TEMPERATURE, MAX_TOKENS, TIMEOUT, REQUEST_DELAY
except Exception:
    logger.warning("无法导入config.py，使用内置后备配置（建议把真实apikey放在config.py或环境变量）")
    # NOTE(review): keep real API keys out of source control; the default below is a non-functional placeholder.
    API_KEY = os.environ.get("OPENAI_API_KEY", "sk-DEFAULT-PLACEHOLDER")
    API_BASE_URL = os.environ.get("API_BASE_URL", "https://api.chatanywhere.tech/v1")
    MODEL = os.environ.get("MODEL", "gpt-4o-mini")
    TEMPERATURE = float(os.environ.get("TEMPERATURE", 0.3))
    MAX_TOKENS = int(os.environ.get("MAX_TOKENS", 4000))
    TIMEOUT = int(os.environ.get("TIMEOUT", 60))
    REQUEST_DELAY = float(os.environ.get("REQUEST_DELAY", 1))

# Configuration for long-dialogue handling.
MAX_PROMPT_TOKENS = 8000  # maximum prompt size in tokens (approximate)
DIALOGUE_CHUNK_SIZE = 10  # turns per chunk when splitting long dialogues
OVERLAP_SIZE = 2  # overlapping turns between adjacent chunks (preserves cross-chunk context)
LONG_DIALOGUE_THRESHOLD = 20  # dialogues longer than this many turns are processed in chunks

class DatasetAugmenter:
    """Migrates doctor-patient dialogues to target patient personas via an LLM API,
    with long-dialogue chunking, retention validation, and resumable progress."""

    def __init__(self):
        # File-path configuration (Windows-specific project layout).
        self.source_dir = "d:\\APP\\Trae CN\\Project\\patient-sim\\json病历_最终"
        self.output_dir = "d:\\APP\\Trae CN\\Project\\patient-sim\\script\\策略\\migrated_patient_data"
        self.failed_dir = os.path.join(self.output_dir, "failed_data")
        self.progress_file = os.path.join(self.output_dir, "migration_progress.json")
        self.personality_definition_file = "d:\\APP\\Trae CN\\Project\\patient-sim\\人格定义.json"
        self.migration_strategy_file = "d:\\APP\\Trae CN\\Project\\patient-sim\\script\\策略\\迁移策略.txt"
        
        # Make sure the output directories exist.
        os.makedirs(self.output_dir, exist_ok=True)
        os.makedirs(self.failed_dir, exist_ok=True)
        
        # Load persona definitions and migration strategies (both raise on failure).
        self.personality_definition = self.load_personality_definition()
        self.migration_strategies = self.parse_migration_strategy()
        
        # Bookkeeping for processed / failed files.
        self.processed_files = set()
        self.failed_files = {}
        self.total_files = 0
        self.completed_files = 0
        self.pending_files = 0
        self.strategy_progress = {}
        
        # Resume from a previous run when a progress file exists.
        self.load_progress()
        
        # Initialize the token encoder, degrading gracefully when unavailable.
        try:
            self.token_encoder = tiktoken.encoding_for_model(MODEL)
        except Exception:
            logger.warning(f"模型 {MODEL} 不支持或无法识别，使用默认编码 cl100k_base")
            try:
                self.token_encoder = tiktoken.get_encoding("cl100k_base")
            except Exception:
                # count_tokens falls back to whitespace word counting when this is None.
                self.token_encoder = None
                
        # Current validation threshold (lowered progressively on retries).
        self.current_validation_threshold = 1.0
    
    def count_tokens(self, text: str) -> int:
        """Count tokens in *text* using the configured encoder; fall back to a whitespace word count."""
        if not self.token_encoder:
            return len(text.split())
        try:
            return len(self.token_encoder.encode(text))
        except Exception:
            # Any encoder failure degrades to the approximate word count.
            return len(text.split())
    
    def is_long_dialogue(self, dialogue: List[Dict]) -> bool:
        """Return True when the dialogue exceeds the long-dialogue turn threshold."""
        turn_count = len(dialogue)
        return turn_count > LONG_DIALOGUE_THRESHOLD
    
    def split_dialogue_into_chunks(self, dialogue: List[Dict]) -> List[Tuple[int,int,List[Dict]]]:
        """
        Split a long dialogue into overlapping chunks.

        Returns a list of (start_idx, end_idx, chunk) tuples. Consecutive chunks
        share OVERLAP_SIZE turns so context carries across chunk boundaries and
        the chunks can be merged back later.
        """
        total = len(dialogue)
        pieces = []
        cursor = 0
        while cursor < total:
            stop = min(cursor + DIALOGUE_CHUNK_SIZE, total)
            pieces.append((cursor, stop, dialogue[cursor:stop]))
            if stop >= total:
                break
            # Next chunk starts OVERLAP_SIZE turns before the previous one ended.
            cursor = max(0, stop - OVERLAP_SIZE)
        return pieces
    
    def load_personality_definition(self) -> Dict:
        """Read and return the persona-definition JSON file; log and re-raise any failure."""
        try:
            with open(self.personality_definition_file, 'r', encoding='utf-8') as handle:
                definition = json.load(handle)
        except Exception as e:
            logger.error(f"加载人格定义文件失败: {e}")
            raise
        return definition
    
    def parse_migration_strategy(self) -> List[Dict]:
        """Parse the tab-separated migration-strategy file into a list of strategy dicts."""
        parsed = []
        try:
            with open(self.migration_strategy_file, 'r', encoding='utf-8') as handle:
                rows = handle.readlines()
            
            # The first row is a header; every data row is:
            # idx \t personality \t emotion \t recall \t understanding \t expression \t count
            for row in rows[1:]:
                row = row.strip()
                if not row:
                    continue
                fields = row.split('\t')
                if len(fields) < 7:
                    logger.debug(f"跳过无法解析的策略行: {row}")
                    continue
                try:
                    parsed.append({
                        'personality': {
                            '性格': fields[1].strip(),
                            '情绪状态': fields[2].strip(),
                            '病史回忆能力': fields[3].strip(),
                            '医学理解力': fields[4].strip(),
                            '语言表达能力': fields[5].strip(),
                            '认知状态': '正常'
                        },
                        'count': int(fields[6].strip()),
                        'stage': 1
                    })
                except Exception as e:
                    logger.warning(f"解析迁移策略行失败: [{row}]，错误: {e}")
                    continue
        except Exception as e:
            logger.error(f"解析迁移策略文件失败: {e}")
            raise
        return parsed
    
    def load_progress(self):
        """Restore processing progress from disk; start with empty progress when no file exists."""
        if not os.path.exists(self.progress_file):
            self.strategy_progress = {}
            return
        try:
            with open(self.progress_file, 'r', encoding='utf-8') as handle:
                snapshot = json.load(handle)
            self.processed_files = set(snapshot.get('processed_files', []))
            self.failed_files = snapshot.get('failed_files', {})
            self.total_files = snapshot.get('total_files', 0)
            self.completed_files = snapshot.get('completed_files', 0)
            self.pending_files = snapshot.get('pending_files', 0)
            self.strategy_progress = snapshot.get('strategy_progress', {})
            logger.info(f"已加载进度：处理了{self.completed_files}个文件，失败{len(self.failed_files)}个文件")
        except Exception as e:
            # A corrupt progress file is logged but not fatal; processing restarts.
            logger.error(f"加载进度文件失败: {e}")
    
    def save_progress(self):
        """Persist the current processing progress (plus a timestamp) to the progress file."""
        snapshot = {
            'processed_files': list(self.processed_files),
            'failed_files': self.failed_files,
            'total_files': self.total_files,
            'completed_files': self.completed_files,
            'pending_files': self.pending_files,
            'strategy_progress': self.strategy_progress,
            'timestamp': time.time()
        }
        try:
            payload = json.dumps(snapshot, ensure_ascii=False, indent=2)
            with open(self.progress_file, 'w', encoding='utf-8') as handle:
                handle.write(payload)
        except Exception as e:
            # Saving progress is best-effort; a failure must not abort processing.
            logger.error(f"保存进度文件失败: {e}")
    
    def collect_source_files(self) -> List[str]:
        """Walk the source directory tree and return the paths of all .json files."""
        found = []
        try:
            for root, _dirs, names in os.walk(self.source_dir):
                found.extend(
                    os.path.join(root, name)
                    for name in names
                    if name.endswith('.json')
                )
            # Refresh the counters used for progress reporting.
            self.total_files = len(found)
            self.pending_files = self.total_files - len(self.processed_files)
            logger.info(f"共发现{self.total_files}个源文件，待处理{self.pending_files}个文件")
        except Exception as e:
            logger.error(f"收集源文件失败: {e}")
            raise
        return found
    
    def call_openai_api(self, prompt: str) -> Dict:
        """POST *prompt* to the chat-completions endpoint and return the parsed JSON response.

        Retries up to 3 times with a linearly growing delay (REQUEST_DELAY * attempt);
        raises on final failure. Note: the ValueError raised for an in-body "error"
        field on the last attempt is caught by the generic except below, logged
        again, and re-raised — this is existing behavior.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {API_KEY}"
        }
        
        data = {
            "model": MODEL,
            "messages": [
                {"role": "system", "content": "你是一个医疗对话转换助手，需要根据提供的人格定义和原始对话，生成符合目标人格特征的新对话。"},
                {"role": "user", "content": prompt}
            ],
            "temperature": TEMPERATURE,
            "max_tokens": MAX_TOKENS
        }
        
        retries = 3
        for i in range(retries):
            try:
                response = requests.post(
                    f"{API_BASE_URL}/chat/completions",
                    headers=headers,
                    json=data,
                    timeout=TIMEOUT
                )
                response.raise_for_status()
                resp_json = response.json()
                
                # Some gateways return HTTP 200 with an "error" body; treat that as a failure too.
                if "error" in resp_json:
                    error_msg = resp_json["error"].get("message", "API返回未知错误")
                    logger.error(f"API返回错误: {error_msg}")
                    if i < retries - 1:
                        time.sleep(REQUEST_DELAY * (i + 1))
                        continue
                    raise ValueError(f"API返回错误: {error_msg}")
                
                return resp_json
            except Exception as e:
                logger.warning(f"API调用失败 (第{i+1}/{retries}次尝试): {e}")
                if i < retries - 1:
                    time.sleep(REQUEST_DELAY * (i + 1))
                else:
                    logger.error(f"API调用最终失败: {e}")
                    raise
    
    def extract_json(self, content: str) -> dict:
        """Parse JSON out of *content*, tolerating surrounding text and minor format issues.

        Raises ValueError when no {...} span can be found at all.
        """
        # Attempt 1: the content may already be pure JSON.
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            pass
        # Attempt 2: pull the outermost {...} span out of any surrounding text.
        found = re.search(r'\{[\s\S]*\}', content)
        if found is None:
            raise ValueError("无法从content中提取有效JSON")
        candidate = found.group(0)
        try:
            return json.loads(candidate)
        except json.JSONDecodeError:
            # Attempt 3: repair common formatting problems, then parse.
            return json.loads(self.fix_json_format(candidate))
    
    def fix_json_format(self, json_content: str) -> str:
        """Conservatively repair common JSON formatting problems (exotic quotes, single quotes, whitespace)."""
        # Normalize exotic quote characters to standard double quotes in one pass.
        quote_map = str.maketrans({ch: '"' for ch in '“”‘’⌈⌉⌊⌋〝〞'})
        json_content = json_content.translate(quote_map)
        
        # Narrow single-quote fixes: quoted key names and simple quoted values after a colon.
        json_content = re.sub(r"'([A-Za-z0-9_\- ]+)'\s*:", r'"\1":', json_content)
        json_content = re.sub(r':\s*\'([^\']+)\'', r': "\1"', json_content)
        
        # Collapse runs of whitespace into single spaces.
        json_content = re.sub(r'\s+', ' ', json_content)
        return json_content
    
    def prepare_prompt_with_next_turns(self, source_data: Dict, target_personality: Dict, include_triples: int = 50) -> str:
        """
        将原始对话组织成（医生当前 / 患者原文 / 医生下一轮）三元组，并生成提示词。
        include_triples: 最多纳入的三元组数量以控制长度
        """
        dialogue = source_data.get("门诊对话", [])
        triples = []
        for i, turn in enumerate(dialogue):
            doctor_current = (turn.get("医生") or "").strip()
            patient_orig = (turn.get("患者") or "").strip()
            next_doctor = ""
            if i + 1 < len(dialogue):
                next_turn = dialogue[i + 1]
                next_doctor = (next_turn.get("医生") or "").strip()
            triples.append({
                "index": i + 1,
                "doctor_current": doctor_current,
                "patient_orig": patient_orig,
                "doctor_next": next_doctor
            })
        
        # 构造 prompt（结构化，保留严格约束）
        prompt_parts = []
        prompt_parts.append("任务：将下列医患对话迁移为符合目标人格的版本。")
        prompt_parts.append("重要：只修改患者的发言，医生的发言尽量保持不变。迁移后必须保留原始对话的事实性关键内容（如症状、时间、药物、剂量、疾病史等）。")
        prompt_parts.append("输出格式：仅返回一个有效的 JSON 对象，包含键 target_personality 和 target_dialogue，且所有字符串必须使用双引号。")
        prompt_parts.append("\n目标人格定义（摘要）:")
        prompt_parts.append(json.dumps(self.personality_definition, ensure_ascii=False))
        prompt_parts.append("\n目标人格（请据此改写患者发言）:")
        prompt_parts.append(json.dumps(target_personality, ensure_ascii=False))
        
        # 添加医疗信息保留要求
        medical_preservation = """
关键医疗信息保留要求（必须遵守）：
1. 必须保留所有症状描述：如疼痛部位、程度、持续时间等
2. 必须保留所有医疗时间信息：如发病时间、用药时长等  
3. 必须保留所有药物信息：药名、剂量、用法
4. 必须保留所有检查信息：检查类型、结果、时间
5. 必须保留所有数字信息：年龄、剂量、时间数量等

请在人格迁移时确保上述关键医疗信息完全保留，只改变表达方式和语气。
"""
        prompt_parts.append(medical_preservation)
        
        prompt_parts.append("""
迁移要求（严格约束）：
- 保持语义核心：请务必保留原患者发言中的事实性信息（症状、数量/剂量、时间点、药物名称、关键病史）。
- 对话逻辑：保持医生提问和患者回答的逻辑一致，参考下一轮医生提问以确保回答能支持后续提问。
- 人格体现：在少部分轮次自然体现目标人格特征；不要在每轮都刻意展示。
- 表达不要太过书面，尽量口语化表达。
- 输出必须为纯 JSON，且不要输出其他任何解释性文字。
""")
        prompt_parts.append("下面是按轮次列出的三元组（医生当前 | 患者原文 | 医生下一轮），请参考这些信息进行改写：")
        for t in triples[:include_triples]:
            prompt_parts.append(f"轮次 {t['index']}: 医生(当前): {t['doctor_current']} ||| 患者(原文): {t['patient_orig']} ||| 医生(下一轮): {t['doctor_next']}")
        
        prompt_parts.append("\n请仅输出最终 JSON，不要添加任何额外文字。")
        prompt = "\n".join(prompt_parts)
        return prompt
    
    # Backward-compatible wrapper kept for the older prompt-generation interface.
    def generate_prompt(self, source_data: Dict, target_personality: Dict) -> str:
        return self.prepare_prompt_with_next_turns(source_data, target_personality)
    
    def extract_key_fragments(self, text: str) -> List[str]:
        """
        Extract medically meaningful fragments (doses, symptoms, exams, drugs,
        times, body parts) from *text* for retention checking.

        NOTE(review): single-character regex hits (e.g. body parts 胃/肠) are
        dropped by the final length filter — only fragments of length >= 2
        survive. Returned order is unspecified (set-backed).
        """
        if not text:
            return []
        
        lowered = str(text).lower()
        found = set()
        
        # 1. Numeric medical measurements: dose / duration / frequency with units.
        found.update(re.findall(r'(\d+\.?\d*\s*(?:mg|毫克|g|克|ml|毫升|次|天|周|个月|月|年|小时))', lowered))
        
        # 2. Symptom vocabulary hits.
        symptom_keywords = ['咳嗽','发热','发烧','头痛','恶心','呕吐','胸痛','腹痛','腹泻','头晕',
                           '乏力','失眠','咽痛','流涕','发冷','发汗','口干','口臭','拉肚子','贫血',
                           '胀气','消化不良','便秘','便血','黑便','胃痛','胃胀','反酸','烧心']
        found.update(word for word in symptom_keywords if word in lowered)
        
        # 3. Medical procedures and examinations.
        found.update(re.findall(r'(胃镜|肠镜|彩超|检查|化验|体检|复查|挂号|诊断|治疗|手术)', lowered))
        
        # 4. Common drug names.
        medicine_keywords = ['奥美拉唑','头孢','消炎药','胃药','中药','西药','抗生素','止痛药','降压药']
        found.update(med for med in medicine_keywords if med in lowered)
        
        # 5. Time expressions.
        found.update(re.findall(r'(昨天|今天|明天|前天|上周|上个月|早上|晚上|下午|凌晨|\d+点|\d+:\d+)', lowered))
        
        # 6. Body parts.
        found.update(re.findall(r'(胃|肠|腹部|头部|胸部|喉咙|口腔|心脏|肺部|肝脏)', lowered))
        
        # Drop filler interjections and fragments shorter than two characters.
        fillers = {'嗯', '啊', '哦', '呃', '嘛', '吧', '呢', '呀', '啦'}
        return [frag for frag in found if frag not in fillers and len(frag) >= 2]
    
    def extract_medical_key_fragments(self, dialogue: List[Dict]) -> List[str]:
        """Collect the union of medical key fragments over every patient utterance in *dialogue*."""
        collected = set()
        for entry in dialogue:
            utterance = (entry.get("患者") or "").strip()
            if not utterance:
                continue
            collected.update(self.extract_key_fragments(utterance))
        return list(collected)
    
    def calculate_medical_retention(self, original_keys: List[str], migrated_keys: List[str]) -> float:
        """Return the fraction of original key fragments that appear (as substrings) in the migrated keys."""
        if not original_keys:
            # Nothing to preserve counts as fully preserved.
            return 1.0
        retained = sum(
            1 for key in original_keys
            if any(key in candidate for candidate in migrated_keys)
        )
        return retained / len(original_keys)
    
    def calculate_turn_retention(self, original_dialogue: List[Dict], migrated_dialogue: List[Dict]) -> int:
        """Count aligned turns whose key fragments (or lack thereof) survive migration."""
        retained_turns = 0
        for idx, source_turn in enumerate(original_dialogue):
            if idx >= len(migrated_dialogue):
                continue
            source_text = (source_turn.get("患者") or "").strip()
            target_text = (migrated_dialogue[idx].get("患者") or "").strip()
            if not source_text or not target_text:
                continue
            fragments = self.extract_key_fragments(source_text)
            if not fragments:
                # A turn with no key fragments is trivially preserved.
                retained_turns += 1
                continue
            # One surviving fragment is enough to count the turn as preserved.
            survived = any(
                (frag in target_text) or re.search(re.escape(frag), target_text, flags=re.I)
                for frag in fragments
            )
            if survived:
                retained_turns += 1
        
        return retained_turns
    
    def validate_dialogue(self, original_dialogue: List[Dict], migrated_dialogue: List[Dict], target_personality: Dict = None) -> Tuple[bool, Dict]:
        """
        Validate a migrated dialogue against the original, focusing on medical
        key-information retention; the required retention adapts to the target
        persona and the current (retry-adjusted) validation threshold.
        """
        try:
            source_keys = self.extract_medical_key_fragments(original_dialogue)
            result_keys = self.extract_medical_key_fragments(migrated_dialogue)
            medical_retention = self.calculate_medical_retention(source_keys, result_keys)
            
            turns_with_retention = self.calculate_turn_retention(original_dialogue, migrated_dialogue)
            turn_retention_rate = turns_with_retention / len(original_dialogue) if original_dialogue else 1.0
            
            # Persona-dependent strictness (scaled by the current retry threshold).
            personality_type = target_personality.get('性格', '') if target_personality else ''
            recall_ability = target_personality.get('病史回忆能力', '高') if target_personality else '高'
            if recall_ability == '低':
                base_requirement = 0.6
            elif personality_type in ['偏执', '怀疑']:
                base_requirement = 0.7
            else:
                base_requirement = 0.8
            min_medical_retention = base_requirement * self.current_validation_threshold
            
            # Medical retention is the primary criterion; per-turn retention is a
            # weak secondary check with a fixed 0.3 floor.
            is_valid = medical_retention >= min_medical_retention and turn_retention_rate >= 0.3
            
            detail = {
                "medical_retention_rate": medical_retention,
                "turn_retention_rate": turn_retention_rate,
                "min_required_medical_retention": min_medical_retention,
                "original_medical_keys_count": len(source_keys),
                "migrated_medical_keys_count": len(result_keys),
                "turns_with_retention": turns_with_retention,
                "total_turns": len(original_dialogue)
            }
            
            if is_valid:
                logger.info(f"验证通过: 医疗信息保留率={medical_retention:.2f}, 轮次保留率={turn_retention_rate:.2f}")
            else:
                logger.warning(f"验证失败: 医疗信息保留率={medical_retention:.2f}, 要求={min_medical_retention:.2f}")
                logger.warning(f"轮次保留率: {turn_retention_rate:.2f}, 要求: 0.3")
            
            return is_valid, detail
        except Exception as e:
            logger.exception(f"validate_dialogue 出错: {e}")
            # Fail closed: an internal error counts as an invalid migration.
            return False, {
                "medical_retention_rate": 0.0,
                "turn_retention_rate": 0.0,
                "min_required_medical_retention": 0.0,
                "original_medical_keys_count": 0,
                "migrated_medical_keys_count": 0,
                "turns_with_retention": 0,
                "total_turns": len(original_dialogue),
                "error": str(e)
            }
    
    def apply_memory_constraints(self, question: str, base_answer: str, persona: Dict) -> str:
        """
        Adjust a patient answer according to the persona's recall ability so that
        low-recall personas occasionally forget or hedge without doing so on
        every turn.

        Parameters:
        - question: the doctor's question for this turn
        - base_answer: the migrated patient answer to post-process
        - persona: target persona dict; only "病史回忆能力" is read

        Returns the (possibly modified) answer. High-recall personas always get
        base_answer back unchanged.

        Fix: the original had a redundant function-local `import random` that
        shadowed the module-level import; it has been removed.
        """
        if persona.get("病史回忆能力") != "低":
            return base_answer  # high recall ability -> keep the answer as-is
        
        # Never let the patient fully forget the chief complaint / current symptoms.
        chief_complaint_keywords = ["哪不舒服", "症状", "哪里痛", "怎么了", "哪里不舒服", "感觉如何"]
        if any(keyword in question for keyword in chief_complaint_keywords):
            # Chief-complaint questions: at most a mild hedge (15% chance).
            if random.random() < 0.15:
                return base_answer + random.choice([
                    "，不过具体我记得不是很准。",
                    "，应该是这样吧。",
                    "，大概是这样。"
                ])
            return base_answer
        
        rand = random.random()
        if rand < 0.08:  # 8%: complete forgetting
            return random.choice([
                "这个我真想不起来了。",
                "医生，这个我记不清楚了。",
            ])
        elif rand < 0.20:  # 12%: vague / uncertain recall
            return random.choice([
                "大概一周左右吧，也可能更久。",
                "好像吃了几天药，具体我不太确定。",
                "我记得不是很清楚，可能是上个月。"
            ])
        else:  # 80%: normal answer, occasionally with a light hedge
            if random.random() < 0.15:
                return base_answer + random.choice([
                    "，不过具体我记得不是很准。",
                    "，应该是这样吧。",
                    "，大概是这样。"
                ])
            return base_answer

    def process_file_with_retry(self, file_path: str, target_personality: Dict, max_retries: int = 2) -> bool:
        """
        Process a file, retrying with a progressively relaxed validation threshold.

        Bug fix: the original restored self.current_validation_threshold only on
        the all-attempts-failed path. A success after a lowered-threshold retry
        (early `return True`), or an exception escaping the loop, leaked the
        reduced threshold into every subsequent file. The restore now runs in a
        finally block on every exit path.
        """
        original_threshold = self.current_validation_threshold
        try:
            for attempt in range(max_retries):
                try:
                    logger.info(f"第{attempt+1}次尝试处理文件: {os.path.basename(file_path)}")
                    if self.process_file(file_path, target_personality):
                        return True
                    if attempt < max_retries - 1:
                        logger.info(f"第{attempt+1}次尝试失败，进行第{attempt+2}次尝试...")
                        # Relax the validation requirement for the next attempt.
                        self.current_validation_threshold *= 0.8
                except Exception as e:
                    logger.error(f"第{attempt+1}次尝试异常: {e}")
                    if attempt < max_retries - 1:
                        self.current_validation_threshold *= 0.8
            return False
        finally:
            # Always restore the caller's threshold, regardless of outcome.
            self.current_validation_threshold = original_threshold

    def process_file(self, file_path: str, target_personality: Dict) -> bool:
        """Main entry for one source file: build the output name, skip if already done,
        then dispatch to the normal or long-dialogue migration path."""
        file_name = os.path.basename(file_path)
        base_name = os.path.splitext(file_name)[0]
        
        # The output name encodes the non-empty persona trait values.
        trait_values = [
            target_personality.get('性格', ''),
            target_personality.get('情绪状态', ''),
            target_personality.get('病史回忆能力', ''),
            target_personality.get('医学理解力', ''),
            target_personality.get('语言表达能力', ''),
            target_personality.get('认知状态', '正常')
        ]
        suffix = '_'.join(v for v in trait_values if v)
        output_file_name = f"{base_name}_{suffix}.json"
        output_path = os.path.join(self.output_dir, output_file_name)
        
        if output_path in self.processed_files:
            logger.info(f"跳过已处理文件: {file_name}")
            return True
        
        try:
            with open(file_path, 'r', encoding='utf-8') as handle:
                source_data = json.load(handle)
        except Exception as e:
            logger.error(f"读取文件失败: {file_path}, {e}")
            return False
        
        try:
            dialogue = source_data.get("门诊对话", [])
            if self.is_long_dialogue(dialogue):
                logger.info(f"文件 {file_name} 包含长对话，使用分块处理")
                return self._process_long_dialogue(source_data, target_personality, file_path, file_name, output_file_name, output_path)
            return self._process_normal_dialogue(source_data, target_personality, file_path, file_name, output_file_name, output_path)
        except Exception as e:
            logger.error(f"处理文件失败: {file_name}, {e}")
            self._handle_processing_failure(file_path, file_name, e)
            return False
    
    def _process_normal_dialogue(self, source_data: Dict, target_personality: Dict, file_path: str, file_name: str, output_file_name: str, output_path: str) -> bool:
        """Migrate a normal-length dialogue in a single API call.

        Returns True on success; on any failure the file is routed to
        _handle_processing_failure (with the raw API content when available)
        and False is returned.
        """
        try:
            prompt = self.generate_prompt(source_data, target_personality)
            prompt_tokens = self.count_tokens(prompt)
            if prompt_tokens > MAX_PROMPT_TOKENS:
                logger.warning(f"提示词token数量超出限制: {prompt_tokens}/{MAX_PROMPT_TOKENS}，进行优化")
                # NOTE(review): _optimize_prompt is defined elsewhere in this file — confirm its contract.
                prompt = self._optimize_prompt(prompt, target_personality)
            
            logger.info(f"调用API处理文件: {file_name}")
            response = self.call_openai_api(prompt)
            
            if 'choices' in response and len(response['choices']) > 0:
                content = response['choices'][0]['message']['content']
                try:
                    result = self.extract_json(content)
                    if result is None:
                        raise ValueError("无法解析API返回的JSON内容")
                    if 'target_personality' not in result or 'target_dialogue' not in result:
                        raise ValueError("API返回的JSON不包含必要字段")
                    
                    original_dialogue = source_data.get("门诊对话", [])
                    migrated_dialogue = result['target_dialogue']
                    
                    # Apply memory constraints to each migrated patient answer,
                    # pairing it with the original doctor question for that turn.
                    for i, turn in enumerate(migrated_dialogue):
                        if i < len(original_dialogue):
                            doctor_question = original_dialogue[i].get("医生", "")
                        else:
                            doctor_question = ""
                        patient_answer = turn.get("患者", "")
                        if patient_answer:
                            # May hedge or blank out the answer for low-recall personas.
                            constrained_answer = self.apply_memory_constraints(doctor_question, patient_answer, target_personality)
                            turn["患者"] = constrained_answer
                    
                    valid, detail = self.validate_dialogue(original_dialogue, migrated_dialogue, target_personality)
                    if not valid:
                        # Raise with the validation detail so the failure handler can store it for offline analysis.
                        raise ValueError(f"迁移结果关键内容缺失: {detail}")
                    
                    # Persist the migrated dialogue together with the target persona.
                    new_data = source_data.copy()
                    new_data['人格'] = result['target_personality']
                    new_data['门诊对话'] = result['target_dialogue']
                    new_data['is_long_dialogue'] = False
                    
                    with open(output_path, 'w', encoding='utf-8') as f:
                        json.dump(new_data, f, ensure_ascii=False, indent=2)
                    
                    self.processed_files.add(output_path)
                    self.completed_files += 1
                    self.save_progress()
                    logger.info(f"成功生成文件: {output_file_name}")
                    return True
                except Exception as e:
                    self._handle_processing_failure(file_path, file_name, e, content if 'content' in locals() else None)
                    return False
            else:
                raise ValueError("API返回的响应格式不正确")
        except Exception as e:
            logger.error(f"处理正常对话时出错: {str(e)}")
            self._handle_processing_failure(file_path, file_name, e)
            return False
    
    def _process_long_dialogue(self, source_data: Dict, target_personality: Dict, file_path: str, file_name: str, output_file_name: str, output_path: str) -> bool:
        """Migrate a long dialogue chunk-by-chunk, passing neighbouring context
        into each chunk prompt to keep the result coherent, then merge, apply
        memory constraints, validate, and persist. Returns True on success."""
        try:
            dialogue = source_data.get("门诊对话", [])
            dialogue_chunks = self.split_dialogue_into_chunks(dialogue)  # list of (start, end, chunk)
            logger.info(f"将对话分割为 {len(dialogue_chunks)} 个块")
            
            migrated_chunks = []
            for i, (start_idx, end_idx, chunk) in enumerate(dialogue_chunks):
                logger.info(f"处理块 {i+1}/{len(dialogue_chunks)} (turns {start_idx}..{end_idx-1})")
                # Per-chunk payload: the source data with only this chunk's turns.
                chunk_data = source_data.copy()
                chunk_data["门诊对话"] = chunk
                # Surrounding turns are passed as read-only reference context (never rewritten).
                prev_context = dialogue[max(0, start_idx - OVERLAP_SIZE):start_idx] if start_idx > 0 else []
                next_context = dialogue[end_idx:min(len(dialogue), end_idx + OVERLAP_SIZE)] if end_idx < len(dialogue) else []
                
                prompt = self._generate_chunk_prompt(chunk_data, target_personality, i, len(dialogue_chunks), prev_context, next_context)
                
                response = self.call_openai_api(prompt)
                if 'choices' in response and len(response['choices']) > 0:
                    content = response['choices'][0]['message']['content']
                    try:
                        result = self.extract_json(content)
                        if 'target_dialogue' not in result:
                            raise ValueError(f"块 {i+1} 处理失败，缺少 target_dialogue 字段")
                        migrated_chunks.append(result['target_dialogue'])
                    except Exception as e:
                        # Any chunk failure aborts the whole file.
                        self._handle_processing_failure(file_path, file_name, e, content)
                        return False
                else:
                    raise ValueError(f"API返回的响应格式不正确，块 {i+1}")
                time.sleep(REQUEST_DELAY)
            
            # Merge the chunks back together, de-duplicating the overlapping turns.
            merged_dialogue = self._merge_dialogue_chunks(migrated_chunks)
            
            # Apply memory constraints to each merged patient answer, paired with
            # the original doctor question for that turn.
            for i, turn in enumerate(merged_dialogue):
                if i < len(dialogue):
                    doctor_question = dialogue[i].get("医生", "")
                else:
                    doctor_question = ""
                patient_answer = turn.get("患者", "")
                if patient_answer:
                    # May hedge or blank out the answer for low-recall personas.
                    constrained_answer = self.apply_memory_constraints(doctor_question, patient_answer, target_personality)
                    turn["患者"] = constrained_answer
            
            valid, detail = self.validate_dialogue(dialogue, merged_dialogue, target_personality)
            if not valid:
                raise ValueError(f"合并后对话关键内容缺失: {detail}")
            
            new_data = source_data.copy()
            new_data['人格'] = target_personality
            new_data['门诊对话'] = merged_dialogue
            new_data['is_long_dialogue'] = True
            new_data['chunk_count'] = len(dialogue_chunks)
            
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(new_data, f, ensure_ascii=False, indent=2)
            
            self.processed_files.add(output_path)
            self.completed_files += 1
            self.save_progress()
            logger.info(f"长对话文件 {file_name} 处理成功")
            return True
        except Exception as e:
            logger.error(f"处理长对话时出错: {str(e)}")
            self._handle_processing_failure(file_path, file_name, e)
            return False
    
    def _generate_chunk_prompt(self, data: Dict, target_personality: Dict, chunk_index: int, total_chunks: int, prev_context: List[Dict], next_context: List[Dict]) -> str:
        """Build the prompt for one dialogue chunk.

        The surrounding context (last turns of the previous chunk, first
        turns of the next chunk) is appended as read-only reference material:
        the model is explicitly told not to rewrite it, only to use it for
        coherence across chunk boundaries.
        """
        segments = [self.generate_prompt(data, target_personality), "\n\n特别注意："]
        if total_chunks > 1:
            segments.append(f"\n这是长对话的第 {chunk_index + 1}/{total_chunks} 个部分。")
            if chunk_index == 0:
                segments.append(" 这是对话的开始部分，请保持对话的连贯性，为后续部分奠定基础。")
            elif chunk_index == total_chunks - 1:
                segments.append(" 这是对话的最后部分，请确保对话有合理的结尾。")
            else:
                segments.append(" 这是对话的中间部分，请确保上下文连贯。")
        # Both context windows get the same read-only rendering; only the
        # header text differs.
        for header, context in (
            ("\n\n只读参考（前文上下文）: 以下为上一块的最后若干轮，仅供参考，请不要改写这些内容：", prev_context),
            ("\n\n只读参考（后文上下文）: 以下为下一块的前若干轮，仅供参考，请不要改写这些内容：", next_context),
        ):
            if context:
                segments.append(header)
                segments.extend(
                    f"\n医生: {turn.get('医生','')} ||| 患者: {turn.get('患者','')}"
                    for turn in context
                )
        segments.append("\n\n对于本块内每一轮患者回答，请参考本块内的下一轮医生提问（或跨块的下一轮医生提问）以保持逻辑一致性。")
        segments.append("\n请仅输出本块的 JSON（包含 target_personality 与 target_dialogue），不要改写只读参考内容。")
        return "".join(segments)
    
    def _merge_dialogue_chunks(self, chunks: List[List[Dict]]) -> List[Dict]:
        """合并对话块，处理重叠部分（假设每个块的前 OVERLAP_SIZE 项与前一个块的后 OVERLAP_SIZE 项重复）"""
        if not chunks:
            return []
        merged = []
        for i, chunk in enumerate(chunks):
            if i == 0:
                merged.extend(chunk)
            else:
                # 跳过当前块的前 overlap 部分（这部分已由上一块包含）
                if OVERLAP_SIZE > 0 and len(chunk) > OVERLAP_SIZE:
                    merged.extend(chunk[OVERLAP_SIZE:])
                else:
                    merged.extend(chunk)
        return merged
    
    def _optimize_prompt(self, prompt: str, target_personality: Dict) -> str:
        """优化提示词，减少token数量（保留三元组要点）"""
        s = ' '.join(prompt.strip().split())
        max_chars = MAX_PROMPT_TOKENS * 3
        if len(s) > max_chars:
            head = s[:int(max_chars*0.6)]
            tail = s[-int(max_chars*0.4):]
            s = head + "\n ... （略） ... \n" + tail
        return s
    
    def _handle_processing_failure(self, file_path: str, file_name: str, exception: Exception, content: str = None):
        """Persist diagnostics for a failed file and record it in progress.

        Writes a JSON metadata file (error message, traceback, timestamps)
        into the failed-data directory, optionally dumps the raw API
        response alongside it, then registers the failure in memory and
        saves progress. Write errors are logged, never raised.
        """
        error_msg = str(exception)
        logger.warning(f"处理失败: {error_msg}，源文件: {file_name}")

        stamp = int(time.time())
        meta = {
            'source_file': file_path,
            'error_message': error_msg,
            'timestamp': time.time(),
            'traceback': traceback.format_exc(),
        }
        meta_path = os.path.join(self.failed_dir, f"failed_{file_name}_{stamp}.json")
        try:
            with open(meta_path, 'w', encoding='utf-8') as fh:
                json.dump(meta, fh, ensure_ascii=False, indent=2)
        except Exception as write_err:
            logger.error(f"写入失败元信息失败: {write_err}")

        # Keep the raw model output for post-mortem inspection when present.
        if content:
            response_path = os.path.join(self.failed_dir, f"api_response_{file_name}_{stamp}.txt")
            try:
                with open(response_path, 'w', encoding='utf-8') as fh:
                    fh.write(content)
                logger.info(f"完整的API返回内容已保存到: {response_path}")
            except Exception as write_err:
                logger.error(f"保存API返回内容失败: {write_err}")

        # Register in memory and persist so a later run can retry it.
        self.failed_files[meta_path] = meta
        self.save_progress()
    
    def _personality_tag(self, target_personality: Dict) -> str:
        """Filename tag for a strategy: its personality values joined by '_'."""
        return '_'.join([v for v in target_personality.values()])

    def _strategy_output_path(self, source_path: str, target_personality: Dict) -> str:
        """Output path produced by migrating *source_path* under *target_personality*."""
        stem = os.path.splitext(os.path.basename(source_path))[0]
        return os.path.join(self.output_dir, f"{stem}_{self._personality_tag(target_personality)}.json")

    def run(self):
        """Run the dataset augmentation process (main flow).

        For each migration strategy: reconcile recorded progress with the
        files actually on disk, pick unused source files at random, migrate
        each one with retries, and persist progress after every success.
        Progress is always saved on exit, even on unexpected errors.

        Refactor note: the output-filename construction was previously
        duplicated inline three times; it now lives in _strategy_output_path
        / _personality_tag.
        """
        logger.info("开始数据集扩充过程")
        try:
            source_files = self.collect_source_files()
            total_required = sum(strategy['count'] for strategy in self.migration_strategies)
            logger.info(f"总共需要生成{total_required}个新文件（策略合计）")

            # Mutable pool so a source consumed by one strategy is not
            # reused by the next.
            available_files_global = list(source_files)

            for strategy in self.migration_strategies:
                target_personality = strategy['personality']
                count = strategy['count']
                strategy_id = json.dumps(target_personality, ensure_ascii=False, sort_keys=True)
                progress_count = self.strategy_progress.get(strategy_id, 0)
                target_suffix = f"_{self._personality_tag(target_personality)}.json"

                # Trust whichever is larger: the recorded counter or the
                # number of matching output files actually present.
                actual_completed = sum(1 for file_path in self.processed_files if file_path.endswith(target_suffix))
                generated_count = max(progress_count, actual_completed)

                logger.info(f"开始处理策略: {target_personality}, 需要生成{count}个文件，已完成{generated_count}个")

                # Candidates: pool files whose output for THIS strategy does
                # not exist yet.
                candidate_files = [
                    src for src in available_files_global
                    if self._strategy_output_path(src, target_personality) not in self.processed_files
                ]

                need = count - generated_count
                if need <= 0:
                    logger.info(f"策略 {strategy_id} 已满足，不需新增")
                    continue

                # If the shrinking pool cannot cover the need, fall back to
                # the full source list (still excluding already-produced
                # outputs for this strategy).
                if len(candidate_files) < need:
                    candidate_files = [
                        s for s in source_files
                        if self._strategy_output_path(s, target_personality) not in self.processed_files
                    ]

                if not candidate_files:
                    logger.warning(f"没有可用的候选文件来生成策略 {strategy_id}")
                    continue
                # Sample without replacement so one run never reprocesses a
                # source twice for the same strategy.
                selected_files = random.sample(candidate_files, min(need, len(candidate_files)))

                for file_path in selected_files:
                    if generated_count >= count:
                        break
                    file_name = os.path.basename(file_path)
                    output_path = self._strategy_output_path(file_path, target_personality)
                    output_file_name = os.path.basename(output_path)

                    # Produced by an earlier/concurrent run: skip without
                    # counting it toward this run's quota.
                    if output_path in self.processed_files:
                        logger.info(f"文件已由先前进程生成，跳过: {output_file_name}")
                        continue

                    success = self.process_file_with_retry(file_path, target_personality)
                    if success:
                        generated_count += 1
                        # Retire the source from the global pool; it may
                        # already be absent when the fallback list was used.
                        try:
                            available_files_global.remove(file_path)
                        except ValueError:
                            pass
                        self.strategy_progress[strategy_id] = generated_count
                        self.save_progress()
                        remaining = count - generated_count
                        logger.info(f"策略进度: {generated_count}/{count}，还需处理{remaining}个文件")
                    else:
                        logger.warning(f"处理失败，继续下一个文件: {file_name}")

                    time.sleep(REQUEST_DELAY)

                logger.info(f"策略处理结束: {target_personality}, 已生成 {generated_count} / {count} 个（含已存在的）")

            logger.info("数据集扩充过程完成")
            logger.info(f"成功生成{self.completed_files}个文件，失败{len(self.failed_files)}个文件")
        except Exception as e:
            logger.error(f"数据集扩充过程失败: {e}")
            logger.debug(traceback.format_exc())
        finally:
            self.save_progress()

def process_failed_files():
    """Retry every previously failed file.

    Loads saved progress, then reprocesses each failure record's source
    file using the first migration strategy. Successfully retried entries
    are removed from the failure map and progress is saved after each one.
    """
    augmenter = DatasetAugmenter()
    augmenter.load_progress()

    if not augmenter.failed_files:
        logger.info("没有失败的文件需要处理")
        return

    logger.info(f"开始处理{len(augmenter.failed_files)}个失败的文件")
    if not augmenter.migration_strategies:
        logger.error("没有找到迁移策略，无法重新处理失败文件")
        return

    # NOTE(review): failure records do not store the original strategy, so
    # all retries use the first configured one — same as before.
    target_personality = augmenter.migration_strategies[0]['personality']
    logger.info(f"使用策略: {target_personality} 重新处理失败文件")

    retried_ok = 0
    # Snapshot the items: successful retries mutate failed_files mid-loop.
    for record_path, record in list(augmenter.failed_files.items()):
        source_file = record.get('source_file')
        if not source_file or not os.path.exists(source_file):
            logger.warning(f"无法定位失败条目的源文件: {record}")
            continue
        logger.info(f"重新处理文件: {os.path.basename(source_file)}")
        if augmenter.process_file_with_retry(source_file, target_personality):
            retried_ok += 1
            augmenter.failed_files.pop(record_path, None)
            augmenter.save_progress()
            logger.info(f"成功重新处理文件: {os.path.basename(source_file)}")
        time.sleep(REQUEST_DELAY)

    logger.info(f"失败文件处理完成，成功重新处理{retried_ok}个文件")

if __name__ == "__main__":
    import sys

    # "--process-failed" as the first argument switches to retry-only mode;
    # anything else runs the full augmentation pipeline.
    if "--process-failed" in sys.argv[1:2]:
        process_failed_files()
    else:
        DatasetAugmenter().run()