"""
训练数据管理模块
Training Data Manager Module

负责处理医疗问答训练数据的预处理、格式化和管理
Handles preprocessing, formatting and management of medical QA training data
"""

import json
import logging
import os
import random
from collections import defaultdict
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple

class TrainingDataManager:
    """训练数据管理器"""
    
    def __init__(self, data_dir: str = None):
        """
        初始化训练数据管理器
        Initialize training data manager
        
        Args:
            data_dir: 数据目录路径 / Data directory path
        """
        if data_dir is None:
            self.data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
        else:
            self.data_dir = data_dir
            
        self.medical_data = {}
        self.knowledge_graph = {}
        self.training_pairs = []
        self.feedback_data = []
        
        self._load_base_data()
        
    def _load_base_data(self):
        """加载基础医疗数据和知识图谱"""
        try:
            # 加载医疗数据
            medical_data_path = os.path.join(self.data_dir, 'medical_data.json')
            if os.path.exists(medical_data_path):
                with open(medical_data_path, 'r', encoding='utf-8') as f:
                    self.medical_data = json.load(f)
                    
            # 加载知识图谱
            kg_path = os.path.join(self.data_dir, 'knowledge_graph.json')
            if os.path.exists(kg_path):
                with open(kg_path, 'r', encoding='utf-8') as f:
                    self.knowledge_graph = json.load(f)
                    
            logging.info("基础医疗数据加载成功")
            
        except Exception as e:
            logging.error(f"加载基础数据失败: {str(e)}")
            raise
            
    def generate_qa_pairs_from_knowledge_base(self) -> List[Dict[str, str]]:
        """
        Generate question-answer pairs from the knowledge base.

        Builds QA pairs from three sources: disease records (description,
        symptom and drug questions), department records (symptom-based
        department recommendations), and knowledge-graph relationships.

        Returns:
            List of QA pairs; each dict has 'question', 'answer',
            'context' and 'category' keys.
        """
        qa_pairs = []
        
        # QA pairs derived from disease records.
        # NOTE(review): assumes every disease record has 'name' and
        # 'description' keys -- a record missing either raises KeyError.
        for disease in self.medical_data.get('diseases', []):
            disease_name = disease['name']
            description = disease['description']
            symptoms = disease.get('symptoms', [])
            drugs = disease.get('drugs', [])
            
            # Disease-description QA (two question phrasings per disease).
            qa_pairs.append({
                'question': f"什么是{disease_name}？",
                'answer': description,
                'context': f"疾病：{disease_name}",
                'category': 'disease_description'
            })
            
            qa_pairs.append({
                'question': f"{disease_name}是什么病？",
                'answer': description,
                'context': f"疾病：{disease_name}",
                'category': 'disease_description'
            })
            
            # Symptom QA (two phrasings, only when symptoms are listed).
            if symptoms:
                symptoms_str = '、'.join(symptoms)
                qa_pairs.append({
                    'question': f"{disease_name}有什么症状？",
                    'answer': f"{disease_name}的常见症状包括：{symptoms_str}。",
                    'context': f"疾病：{disease_name}，症状查询",
                    'category': 'symptoms'
                })
                
                qa_pairs.append({
                    'question': f"{disease_name}的症状是什么？",
                    'answer': f"患{disease_name}时可能出现以下症状：{symptoms_str}。",
                    'context': f"疾病：{disease_name}，症状查询",
                    'category': 'symptoms'
                })
            
            # Treatment / drug-usage QA.
            # NOTE(review): assumes each drug record has 'name' and 'usage'.
            if drugs:
                for drug in drugs:
                    drug_name = drug['name']
                    usage = drug['usage']
                    indication = drug.get('indication', '')
                    
                    qa_pairs.append({
                        'question': f"{disease_name}用什么药治疗？",
                        'answer': f"治疗{disease_name}可以使用{drug_name}，用法：{usage}。{indication}",
                        'context': f"疾病：{disease_name}，药物：{drug_name}",
                        'category': 'treatment'
                    })
                    
                    qa_pairs.append({
                        'question': f"{drug_name}怎么用？",
                        'answer': f"{drug_name}的用法是：{usage}。{indication}",
                        'context': f"药物：{drug_name}",
                        'category': 'drug_usage'
                    })
        
        # QA pairs derived from department records.
        for department in self.medical_data.get('departments', []):
            dept_name = department['name']
            symptoms = department.get('symptoms', [])
            
            if symptoms:
                symptoms_str = '、'.join(symptoms)
                qa_pairs.append({
                    'question': f"什么症状应该去{dept_name}？",
                    'answer': f"如果您有以下症状：{symptoms_str}，建议您前往{dept_name}就诊。",
                    'context': f"科室：{dept_name}",
                    'category': 'department_recommendation'
                })
                
                # One recommendation question per individual symptom.
                for symptom in symptoms:
                    qa_pairs.append({
                        'question': f"{symptom}应该挂什么科？",
                        'answer': f"如果您有{symptom}的症状，建议您可以考虑前往{dept_name}就诊。",
                        'context': f"症状：{symptom}，科室：{dept_name}",
                        'category': 'department_recommendation'
                    })
        
        # QA pairs derived from knowledge-graph relationships.
        nodes = self.knowledge_graph.get('nodes', [])
        relationships = self.knowledge_graph.get('relationships', [])
        
        # Index nodes by id so relationship endpoints can be resolved.
        node_map = {node['id']: node for node in nodes}
        
        for rel in relationships:
            source_node = node_map.get(rel['source'])
            target_node = node_map.get(rel['target'])
            rel_type = rel['type']
            description = rel.get('description', '')
            
            # Relationships with unresolved endpoints are skipped.
            if source_node and target_node:
                if rel_type == 'has_symptom':
                    qa_pairs.append({
                        'question': f"{source_node['name']}会有{target_node['name']}的症状吗？",
                        'answer': f"是的，{description}",
                        'context': f"疾病：{source_node['name']}，症状：{target_node['name']}",
                        'category': 'symptom_relation'
                    })
                    
                elif rel_type == 'treated_by':
                    qa_pairs.append({
                        'question': f"{source_node['name']}可以用{target_node['name']}治疗吗？",
                        'answer': f"是的，{description}",
                        'context': f"疾病：{source_node['name']}，药物：{target_node['name']}",
                        'category': 'treatment_relation'
                    })
                    
                elif rel_type == 'treated_in':
                    qa_pairs.append({
                        'question': f"{source_node['name']}应该去哪个科室？",
                        'answer': f"{description}",
                        'context': f"疾病：{source_node['name']}，科室：{target_node['name']}",
                        'category': 'department_relation'
                    })
        
        logging.info(f"从知识库生成了 {len(qa_pairs)} 个问答对")
        return qa_pairs
        
    def add_feedback_data(self, question: str, predicted_answer: str, 
                         correct_answer: str, score: float, feedback: str = ""):
        """
        添加用户反馈数据
        Add user feedback data
        
        Args:
            question: 用户问题
            predicted_answer: 模型预测答案
            correct_answer: 正确答案
            score: 用户评分 (0-1)
            feedback: 用户反馈文本
        """
        feedback_item = {
            'question': question,
            'predicted_answer': predicted_answer,
            'correct_answer': correct_answer,
            'score': score,
            'feedback': feedback,
            'timestamp': datetime.now().isoformat()
        }
        
        self.feedback_data.append(feedback_item)
        
        # 自动保存反馈数据
        self._save_feedback_data()
        
    def _save_feedback_data(self):
        """保存反馈数据到文件"""
        feedback_path = os.path.join(self.data_dir, 'feedback_data.json')
        try:
            with open(feedback_path, 'w', encoding='utf-8') as f:
                json.dump(self.feedback_data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logging.error(f"保存反馈数据失败: {str(e)}")
            
    def load_feedback_data(self):
        """加载已有的反馈数据"""
        feedback_path = os.path.join(self.data_dir, 'feedback_data.json')
        if os.path.exists(feedback_path):
            try:
                with open(feedback_path, 'r', encoding='utf-8') as f:
                    self.feedback_data = json.load(f)
                logging.info(f"加载了 {len(self.feedback_data)} 条反馈数据")
            except Exception as e:
                logging.error(f"加载反馈数据失败: {str(e)}")
                self.feedback_data = []
                
    def prepare_training_data(self, include_feedback: bool = True, 
                            train_ratio: float = 0.8) -> Dict[str, List[Dict[str, Any]]]:
        """
        Enhanced training-data preparation pipeline.

        Collects samples from the knowledge graph, the medical QA
        generator and (optionally) user feedback, augments them, applies
        quality control, and returns a stratified train/val/test split.

        Args:
            include_feedback: Whether to mix in user feedback samples.
            train_ratio: Intended training-set ratio.
                NOTE(review): currently unused -- _enhanced_split_dataset
                hardcodes its own split ratios; confirm intent and either
                wire this parameter through or drop it.

        Returns:
            Dict with 'train', 'validation' and 'test' sample lists.
        """
        training_data = []
        
        # 1. Samples generated from knowledge-graph relationships
        kg_data = self._prepare_knowledge_graph_data()
        training_data.extend(kg_data)
        
        # 2. Samples generated from the medical QA knowledge base
        medical_data = self._prepare_medical_data()
        training_data.extend(medical_data)
        
        # 3. High-quality user feedback samples
        if include_feedback:
            feedback_data = self._prepare_feedback_data()
            training_data.extend(feedback_data)
        
        # 4. Data augmentation (synonyms, paraphrases, context wrapping)
        augmented_data = self._augment_training_data(training_data)
        training_data.extend(augmented_data)
        
        # 5. Quality filtering, deduplication and rebalancing
        training_data = self._enhanced_quality_control(training_data)
        
        # 6. Stratified dataset split
        return self._enhanced_split_dataset(training_data)

    def _prepare_knowledge_graph_data(self) -> List[Dict[str, Any]]:
        """Build training samples from knowledge-graph relationships.

        Relationships whose endpoints cannot be resolved are skipped.
        """
        by_id = {n['id']: n for n in self.knowledge_graph.get('nodes', [])}

        samples = []
        for rel in self.knowledge_graph.get('relationships', []):
            src = by_id.get(rel['source'])
            dst = by_id.get(rel['target'])
            if src is None or dst is None:
                continue
            samples.append({
                'question': f"{src['name']}与{dst['name']}的关系？",
                'answer': rel.get('description', ''),
                'source': 'knowledge_graph',
                'quality_score': 0.9,
                'type': rel.get('type', 'relation'),
            })

        return samples

    def _prepare_medical_data(self) -> List[Dict[str, Any]]:
        """Tag knowledge-base QA pairs as medical training samples."""
        pairs = self.generate_qa_pairs_from_knowledge_base()
        for pair in pairs:
            pair['source'] = 'medical_data'
            pair['quality_score'] = 0.9
            pair['type'] = pair.get('category', 'general')
        return pairs

    def _prepare_feedback_data(self) -> List[Dict[str, Any]]:
        """准备用户反馈数据"""
        feedback_data = []
        self.load_feedback_data()
        
        for feedback in self.feedback_data:
            if feedback.get('score', 0) >= 0.7:  # 只使用高质量反馈
                feedback_data.append({
                    'question': feedback['question'],
                    'answer': feedback['correct_answer'],
                    'source': 'user_feedback',
                    'quality_score': feedback.get('score', 1.0),
                    'timestamp': feedback.get('timestamp', datetime.now().isoformat()),
                    'type': 'feedback'
                })
        
        return feedback_data

    def _augment_training_data(self, training_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Produce augmented variants of every sample.

        Applies, in order: synonym replacement, question paraphrasing,
        and conversational context wrapping.
        """
        augmenters = (
            self._synonym_replacement,
            self._paraphrase_questions,
            self._contextual_enhancement,
        )

        augmented = []
        for sample in training_data:
            for augment in augmenters:
                augmented.extend(augment(sample))
        return augmented

    def _synonym_replacement(self, item: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Create variants of *item* by swapping domain words for synonyms.

        Each synonym yields one variant with a slightly reduced quality
        score (x0.9).
        """
        synonyms = {
            '症状': ['表现', '征象', '体征'],
            '治疗': ['医治', '疗法', '治疗方案'],
            '药物': ['药品', '用药', '药剂'],
            '科室': ['部门', '诊室', '专科'],
        }

        base_question = item['question']
        base_score = item.get('quality_score', 0.8)

        variants = []
        for word, alternatives in synonyms.items():
            if word not in base_question:
                continue
            for alt in alternatives:
                variant = dict(item)
                variant['question'] = base_question.replace(word, alt)
                variant['augmentation_method'] = 'synonym_replacement'
                variant['quality_score'] = base_score * 0.9
                variants.append(variant)

        return variants

    def _paraphrase_questions(self, item: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Create variants of *item* by rephrasing the question pattern.

        The first entry of each alternatives list is the original wording
        and is skipped; each remaining phrasing yields one variant with a
        reduced quality score (x0.85).
        """
        question_patterns = {
            '什么是': ['什么是', '介绍下', '解释一下', '请说明'],
            '症状有哪些': ['症状有哪些', '有什么症状', '会出现什么表现', '有哪些临床表现'],
            '怎么治疗': ['怎么治疗', '如何治疗', '治疗方法是什么', '该怎么医治'],
        }

        base_question = item['question']
        base_score = item.get('quality_score', 0.8)

        variants = []
        for pattern, alternatives in question_patterns.items():
            if pattern not in base_question:
                continue
            for phrasing in alternatives[1:]:
                variant = dict(item)
                variant['question'] = base_question.replace(pattern, phrasing)
                variant['augmentation_method'] = 'paraphrase'
                variant['quality_score'] = base_score * 0.85
                variants.append(variant)

        return variants

    def _contextual_enhancement(self, item: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Wrap the question in conversational framing templates.

        Every template yields one variant with a reduced quality
        score (x0.8).
        """
        templates = (
            "请问医生，{question}",
            "我想了解一下，{question}",
            "我家人{question}",
            "我最近{question}",
        )

        base_score = item.get('quality_score', 0.8)

        variants = []
        for template in templates:
            variant = dict(item)
            variant['question'] = template.format(question=item['question'])
            variant['augmentation_method'] = 'contextual_enhancement'
            variant['quality_score'] = base_score * 0.8
            variants.append(variant)

        return variants

    def _calculate_quality_score(self, item: Dict[str, Any]) -> float:
        """Heuristically score a QA sample's quality in (0, 1].

        Penalties multiply: short/long text, missing content, and absence
        of any medical vocabulary each scale the score down.
        """
        question = item.get('question', '')
        answer = item.get('answer', '')
        score = 1.0

        # Penalize entries that are too short to be informative.
        if len(question) < 5 or len(answer) < 10:
            score *= 0.5

        # Penalize overly long entries.
        if len(question) > 256 or len(answer) > 512:
            score *= 0.7

        # Heavy penalty when either field is empty/missing.
        if not question or not answer:
            score *= 0.1

        # Samples without any medical vocabulary are likely off-topic.
        keywords = ('病', '症', '药', '治疗', '症状', '医院', '医生')
        if not any(kw in question + answer for kw in keywords):
            score *= 0.3

        return score

    def _enhanced_quality_control(self, training_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Filter low-quality samples, deduplicate, and rebalance."""
        # Drop anything below the minimum quality threshold.
        filtered = [s for s in training_data if s.get('quality_score', 0) >= 0.5]

        # Remove exact (question, answer) duplicates, keeping first occurrence.
        seen = set()
        deduped = []
        for sample in filtered:
            key = (sample['question'], sample['answer'])
            if key in seen:
                continue
            seen.add(key)
            deduped.append(sample)

        # Even out the per-type distribution.
        return self._balance_dataset(deduped)

    def _balance_dataset(self, training_data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Cap over-represented sample types at twice the smallest type's size.

        Types with more than ``2 * min_size`` samples are randomly
        down-sampled to that cap; smaller types are kept in full.

        Args:
            training_data: Samples, each optionally carrying a 'type' key
                (missing types are treated as 'general').

        Returns:
            The rebalanced sample list; output is grouped by type in
            first-seen order.
        """
        if not training_data:
            return []

        # Group samples by type in a single pass (replaces the separate
        # counting pass of the previous implementation).
        groups: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
        for item in training_data:
            groups[item.get('type', 'general')].append(item)

        # The smallest group bounds how many samples each type may keep.
        min_size = min(len(samples) for samples in groups.values())
        cap = min_size * 2

        balanced = []
        for samples in groups.values():
            if len(samples) > cap:
                # random.sample is nondeterministic by design here.
                balanced.extend(random.sample(samples, cap))
            else:
                balanced.extend(samples)

        return balanced

    def _enhanced_split_dataset(self, training_data: List[Dict[str, Any]],
                                train_ratio: float = 0.85,
                                val_ratio: float = 0.10) -> Dict[str, List[Dict[str, Any]]]:
        """Split samples into train/validation/test sets, stratified by type.

        Samples are ordered by quality score (best first) so the training
        split receives the highest-quality data. Types with fewer than 3
        samples go entirely into the training set. The input list is NOT
        modified (the previous implementation sorted it in place).

        Args:
            training_data: Samples to split.
            train_ratio: Per-type fraction for the training set
                (previously hardcoded as 0.85).
            val_ratio: Per-type fraction for the validation set
                (previously hardcoded as 0.10); the remainder becomes
                the test set.

        Returns:
            Dict with 'train', 'validation' and 'test' sample lists,
            each shuffled.
        """
        if not training_data:
            return {'train': [], 'validation': [], 'test': []}

        # Sort a copy (descending quality) so the caller's list is untouched.
        ordered = sorted(training_data,
                         key=lambda s: s.get('quality_score', 0), reverse=True)

        # Stratify: group by sample type so each split keeps the type mix.
        groups: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
        for sample in ordered:
            groups[sample.get('type', 'general')].append(sample)

        train_data: List[Dict[str, Any]] = []
        val_data: List[Dict[str, Any]] = []
        test_data: List[Dict[str, Any]] = []

        for samples in groups.values():
            if len(samples) < 3:
                # Too few samples to split meaningfully; all go to training.
                train_data.extend(samples)
                continue
            n = len(samples)
            n_train = max(1, int(n * train_ratio))
            n_val = max(1, int(n * val_ratio))
            train_data.extend(samples[:n_train])
            val_data.extend(samples[n_train:n_train + n_val])
            test_data.extend(samples[n_train + n_val:])

        random.shuffle(train_data)
        random.shuffle(val_data)
        random.shuffle(test_data)

        logging.info(f"增强数据划分完成：训练集 {len(train_data)} 条，验证集 {len(val_data)} 条，测试集 {len(test_data)} 条")

        return {
            'train': train_data,
            'validation': val_data,
            'test': test_data
        }
        
    def save_training_data(self, train_data: List[Dict], val_data: List[Dict], 
                          output_dir: str = None):
        """
        保存训练数据到文件
        Save training data to files
        
        Args:
            train_data: 训练数据
            val_data: 验证数据
            output_dir: 输出目录
        """
        if output_dir is None:
            output_dir = self.data_dir
            
        os.makedirs(output_dir, exist_ok=True)
        
        # 保存训练数据
        train_path = os.path.join(output_dir, 'train_data.json')
        with open(train_path, 'w', encoding='utf-8') as f:
            json.dump(train_data, f, ensure_ascii=False, indent=2)
            
        # 保存验证数据
        val_path = os.path.join(output_dir, 'val_data.json')
        with open(val_path, 'w', encoding='utf-8') as f:
            json.dump(val_data, f, ensure_ascii=False, indent=2)
            
        logging.info(f"训练数据已保存到 {output_dir}")
        
    def get_data_statistics(self) -> Dict[str, Any]:
        """
        Summarize the currently loaded data.

        Returns:
            Dict with total QA pairs, per-category distribution, and
            counts of diseases, departments, feedback items and
            knowledge-graph nodes/relationships.
        """
        qa_pairs = self.generate_qa_pairs_from_knowledge_base()

        # Tally generated pairs by category.
        distribution: Dict[str, int] = {}
        for pair in qa_pairs:
            label = pair.get('category', 'unknown')
            distribution[label] = distribution.get(label, 0) + 1

        return {
            'total_qa_pairs': len(qa_pairs),
            'category_distribution': distribution,
            'total_diseases': len(self.medical_data.get('diseases', [])),
            'total_departments': len(self.medical_data.get('departments', [])),
            'total_feedback': len(self.feedback_data),
            'knowledge_graph_nodes': len(self.knowledge_graph.get('nodes', [])),
            'knowledge_graph_relationships': len(self.knowledge_graph.get('relationships', []))
        }