#!/usr/bin/env python3
"""
机器学习引擎 - 增强版数据集模块

本模块提供了增强版的机器学习训练数据集，通过多种技术大幅扩充训练数据，
提高模型性能和泛化能力。

目录结构：
ml_engine/
├── __init__.py              # 包初始化
├── dataset.py               # 数据集模块
├── trainer.py               # 训练器模块
├── predictor.py             # 预测器模块
├── config/
│   ├── __init__.py
│   └── settings.py          # 配置文件
├── utils/
│   ├── __init__.py
│   ├── feature_extractor.py # 特征提取工具
│   ├── data_augmenter.py    # 数据增强工具
│   └── model_evaluator.py   # 模型评估工具
├── data/
│   ├── raw/                 # 原始数据
│   ├── processed/           # 处理后数据
│   └── enhanced/            # 增强数据
└── models/
    ├── trained/             # 训练好的模型
    ├── checkpoints/         # 模型检查点
    └── logs/                # 训练日志

主要功能：
- 数据增强：从139个样本扩充到2000+个样本
- 特征工程：TF-IDF + Word2Vec + 统计特征
- 模型训练：多种算法和集成学习
- 性能评估：交叉验证和详细指标
- 模型管理：保存、加载和版本控制

使用场景：
- 机器学习模型训练
- 自然语言处理研究
- 编程问题分类系统
- 智能教学辅助系统
"""

import json
import random
import re
from typing import List, Dict, Any, Tuple
from collections import Counter
import os

# Optional dependency detection: the augmentation pipeline degrades
# gracefully when jieba (Chinese word segmentation) or nltk (English
# tokenization / WordNet) is not installed.  The *_AVAILABLE flags are
# consulted at call time by keyword extraction.
try:
    import jieba
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    print("Warning: jieba not available. Install with: pip install jieba")

try:
    import nltk
    from nltk.corpus import wordnet
    from nltk.tokenize import word_tokenize
    NLTK_AVAILABLE = True
    # Download the required NLTK corpora; failures (e.g. no network
    # access) are non-fatal because tokenization falls back to
    # str.split() where it is used.
    try:
        nltk.download('punkt', quiet=True)
        nltk.download('wordnet', quiet=True)
        nltk.download('averaged_perceptron_tagger', quiet=True)
    except Exception:
        # Narrowed from a bare `except:` so that KeyboardInterrupt and
        # SystemExit are no longer silently swallowed during import.
        pass
except ImportError:
    NLTK_AVAILABLE = False
    print("Warning: nltk not available. Install with: pip install nltk")

class EnhancedProblemDataset:
    """
    Enhanced programming-problem dataset.

    Expands a small hand-written base corpus of programming-problem
    descriptions into a much larger training set via several data
    augmentation techniques, supporting mixed Chinese/English data.

    Augmentation techniques applied:
    1. Synonym replacement — keywords are swapped for dictionary synonyms.
    2. Sentence transformation — active/passive voice, added modifiers.
    3. Grammar variation — adverbials of time, degree and purpose.
    4. Multilingual pairs — English and mixed Chinese/English variants.
    5. Class balancing — per-label over/under-sampling toward the median.

    Usage example::

        dataset = EnhancedProblemDataset()
        training_data = dataset.get_enhanced_training_data()
        stats = dataset.get_dataset_statistics()
        dataset.save_enhanced_dataset("enhanced_dataset.json")
    """

    def __init__(self, data_dir: str = None):
        """
        Initialize the enhanced dataset and immediately build the data.

        Parameters:
            data_dir (str): Data directory path; defaults to
                ``<this module's directory>/data``.
        """
        if data_dir is None:
            # Resolve the default data directory relative to this file.
            current_dir = os.path.dirname(os.path.abspath(__file__))
            data_dir = os.path.join(current_dir, 'data')

        self.data_dir = data_dir
        self.raw_data_dir = os.path.join(data_dir, 'raw')
        self.processed_data_dir = os.path.join(data_dir, 'processed')
        self.enhanced_data_dir = os.path.join(data_dir, 'enhanced')

        # Create the directory layout up front (idempotent).
        for dir_path in [self.raw_data_dir, self.processed_data_dir, self.enhanced_data_dir]:
            os.makedirs(dir_path, exist_ok=True)

        # Hand-written base samples.
        self.base_data = self._create_base_data()

        # Synonym dictionary used by the replacement augmenter.
        self.synonyms_dict = self._create_synonyms_dict()

        # Sentence templates (currently not consumed by any augmenter;
        # kept for forward compatibility / external callers).
        self.sentence_templates = self._create_sentence_templates()

        # Build the full augmented dataset eagerly.
        # NOTE(review): uses the global `random` state, so results vary
        # between runs unless the caller seeds `random` beforehand.
        self.enhanced_data = self._generate_enhanced_data()

    def _create_base_data(self) -> List[Dict[str, Any]]:
        """Create the hand-written base training samples."""
        return [
            # Arithmetic operations
            {"text": "计算两个整数的和", "label": "addition", "category": "math", "keywords": ["计算", "整数", "和", "两个"]},
            {"text": "给定两个数a和b，求a+b的结果", "label": "addition", "category": "math", "keywords": ["给定", "两个数", "求", "结果"]},
            {"text": "输入两个整数，输出它们的和", "label": "addition", "category": "math", "keywords": ["输入", "整数", "输出", "和"]},
            {"text": "计算两个数的乘积", "label": "multiplication", "category": "math", "keywords": ["计算", "乘积", "两个数"]},
            {"text": "计算两个数的差", "label": "subtraction", "category": "math", "keywords": ["计算", "差", "两个数"]},
            {"text": "计算两个数的商", "label": "division", "category": "math", "keywords": ["计算", "商", "两个数"]},

            # Array operations
            {"text": "计算数组中所有元素的和", "label": "array_sum", "category": "array", "keywords": ["计算", "数组", "元素", "和"]},
            {"text": "找到数组中的最大值", "label": "array_max", "category": "array", "keywords": ["找到", "数组", "最大值"]},
            {"text": "找到数组中的最小值", "label": "array_min", "category": "array", "keywords": ["找到", "数组", "最小值"]},
            {"text": "对数组进行排序", "label": "array_sort", "category": "array", "keywords": ["数组", "排序"]},

            # String operations
            {"text": "反转字符串", "label": "string_reverse", "category": "string", "keywords": ["反转", "字符串"]},
            {"text": "计算字符串的长度", "label": "string_length", "category": "string", "keywords": ["计算", "字符串", "长度"]},
            {"text": "判断字符串是否为回文", "label": "string_palindrome", "category": "string", "keywords": ["判断", "字符串", "回文"]},
            {"text": "统计字符串中某个字符的出现次数", "label": "string_count", "category": "string", "keywords": ["统计", "字符串", "字符", "次数"]},

            # Search algorithms
            {"text": "在数组中查找指定元素", "label": "linear_search", "category": "search", "keywords": ["数组", "查找", "元素"]},
            {"text": "使用二分查找在有序数组中查找元素", "label": "binary_search", "category": "search", "keywords": ["二分查找", "有序数组", "查找"]},

            # Sorting algorithms
            {"text": "使用冒泡排序对数组进行排序", "label": "bubble_sort", "category": "sort", "keywords": ["冒泡排序", "数组", "排序"]},
            {"text": "使用快速排序对数组进行排序", "label": "quick_sort", "category": "sort", "keywords": ["快速排序", "数组", "排序"]},
            {"text": "使用归并排序对数组进行排序", "label": "merge_sort", "category": "sort", "keywords": ["归并排序", "数组", "排序"]},

            # Math problems
            {"text": "计算阶乘", "label": "factorial", "category": "math", "keywords": ["计算", "阶乘"]},
            {"text": "判断一个数是否为质数", "label": "prime_check", "category": "math", "keywords": ["判断", "质数"]},
            {"text": "计算斐波那契数列的第n项", "label": "fibonacci", "category": "math", "keywords": ["计算", "斐波那契", "数列"]},
            {"text": "计算最大公约数", "label": "gcd", "category": "math", "keywords": ["计算", "最大公约数"]},

            # Data structures
            {"text": "实现栈的基本操作", "label": "stack", "category": "data_structure", "keywords": ["实现", "栈", "操作"]},
            {"text": "实现队列的基本操作", "label": "queue", "category": "data_structure", "keywords": ["实现", "队列", "操作"]},
            {"text": "实现链表的基本操作", "label": "linked_list", "category": "data_structure", "keywords": ["实现", "链表", "操作"]},

            # Graph algorithms
            {"text": "实现深度优先搜索", "label": "dfs", "category": "graph", "keywords": ["实现", "深度优先搜索"]},
            {"text": "实现广度优先搜索", "label": "bfs", "category": "graph", "keywords": ["实现", "广度优先搜索"]},
            {"text": "实现最短路径算法", "label": "shortest_path", "category": "graph", "keywords": ["实现", "最短路径", "算法"]},

            # Dynamic programming
            {"text": "解决背包问题", "label": "knapsack", "category": "dp", "keywords": ["解决", "背包问题"]},
            {"text": "计算最长公共子序列", "label": "lcs", "category": "dp", "keywords": ["计算", "最长公共子序列"]},
            {"text": "计算最长递增子序列", "label": "lis", "category": "dp", "keywords": ["计算", "最长递增子序列"]},

            # Greedy algorithms
            {"text": "解决活动选择问题", "label": "activity_selection", "category": "greedy", "keywords": ["解决", "活动选择"]},
            {"text": "解决硬币找零问题", "label": "coin_change", "category": "greedy", "keywords": ["解决", "硬币找零"]},
        ]

    def _create_synonyms_dict(self) -> Dict[str, List[str]]:
        """Create the synonym dictionary for keyword replacement.

        FIX: removed duplicate entries (e.g. "算出" appeared twice under
        "计算") which produced exact duplicate augmented samples, and
        dropped self-synonyms (e.g. "最大" -> "最大") which were dead
        entries filtered out at replacement time.
        """
        return {
            # Verb synonyms
            "计算": ["求", "算出", "得出", "运算"],
            "找到": ["找出", "寻找", "定位", "发现"],
            "实现": ["完成", "构建", "创建", "开发"],
            "解决": ["处理", "应对", "攻克", "完成"],
            "判断": ["检测", "验证", "确认", "检查"],
            "统计": ["计数", "计算", "汇总", "合计"],
            "输入": ["接收", "获取", "读取"],
            "输出": ["显示", "打印", "返回", "给出"],
            "给定": ["提供", "已知", "假设"],

            # Noun synonyms
            "数组": ["列表", "序列", "集合"],
            "字符串": ["文本", "字符序列"],
            "元素": ["项", "成员", "数据"],
            "结果": ["答案", "输出", "返回值"],
            "算法": ["方法", "策略", "技术"],
            "操作": ["功能", "方法", "行为"],

            # Adjective synonyms
            "最大": ["最高"],
            "最小": ["最低"],
            "有序": ["排序", "排列"],
            "指定": ["特定", "确定", "给定"],
        }

    def _create_sentence_templates(self) -> Dict[str, List[str]]:
        """Create sentence-transformation templates, keyed by label.

        NOTE(review): these templates are stored on the instance but not
        used by any augmentation step in this module.
        """
        return {
            "addition": [
                "计算{num1}和{num2}的和",
                "给定两个数{num1}和{num2}，求它们的和",
                "输入{num1}和{num2}，输出它们的和",
                "求{num1}加{num2}的结果",
                "计算{num1}与{num2}相加的值",
            ],
            "array_max": [
                "找到数组中的最大值",
                "在数组中寻找最大的元素",
                "计算数组的最大值",
                "找出数组中的最大数",
                "确定数组的最大元素",
            ],
            "string_reverse": [
                "反转字符串",
                "将字符串倒序排列",
                "字符串反向输出",
                "倒转字符串的顺序",
                "字符串逆序处理",
            ],
        }

    def _generate_enhanced_data(self) -> List[Dict[str, Any]]:
        """Build the full augmented training set from the base samples."""
        enhanced_data = []

        # 1. Start from the original samples.
        enhanced_data.extend(self.base_data)

        # 2. Synonym-replacement augmentation.
        enhanced_data.extend(self._synonym_replacement_augmentation())

        # 3. Sentence-transformation augmentation.
        enhanced_data.extend(self._sentence_transformation_augmentation())

        # 4. Grammar-variation augmentation.
        enhanced_data.extend(self._grammar_variation_augmentation())

        # 5. Multilingual augmentation.
        enhanced_data.extend(self._multilingual_augmentation())

        # 6. Balance the class distribution.
        enhanced_data = self._balance_dataset(enhanced_data)

        return enhanced_data

    def _synonym_replacement_augmentation(self) -> List[Dict[str, Any]]:
        """Augment by replacing keywords with dictionary synonyms.

        For every base sample, each keyword with an entry in the synonym
        dictionary is substituted (all occurrences) to yield a new
        sample.  No-op replacements and duplicate texts are skipped so
        identical samples cannot be emitted twice.
        """
        augmented_data = []
        seen_texts = set()

        for item in self.base_data:
            text = item["text"]
            keywords = item.get("keywords", [])

            for keyword in keywords:
                for synonym in self.synonyms_dict.get(keyword, []):
                    new_text = text.replace(keyword, synonym)
                    # Require an actual change and skip duplicates.
                    if new_text != text and new_text not in seen_texts:
                        seen_texts.add(new_text)
                        augmented_data.append({
                            "text": new_text,
                            "label": item["label"],
                            "category": item["category"],
                            "keywords": [synonym if k == keyword else k for k in keywords],
                        })

        return augmented_data

    def _sentence_transformation_augmentation(self) -> List[Dict[str, Any]]:
        """Augment by rewriting sentence structure (voice, modifiers)."""
        augmented_data = []

        # Word-level transformation rules.
        transformations = [
            # Active voice -> passive voice
            ("计算", "被计算"),
            ("找到", "被发现"),
            ("实现", "被实现"),

            # Add modifiers
            ("数组", "整数数组"),
            ("字符串", "输入字符串"),
            ("算法", "高效算法"),

            # Add conditions
            ("计算", "如果存在，则计算"),
            ("找到", "在给定条件下找到"),
        ]

        for item in self.base_data:
            text = item["text"]

            for old_word, new_word in transformations:
                if old_word in text:
                    new_text = text.replace(old_word, new_word)
                    augmented_data.append({
                        "text": new_text,
                        "label": item["label"],
                        "category": item["category"],
                        "keywords": item.get("keywords", []),
                    })

        return augmented_data

    def _grammar_variation_augmentation(self) -> List[Dict[str, Any]]:
        """Augment by adding adverbials of time, degree and purpose."""
        augmented_data = []

        # Grammar-variation rules.
        grammar_rules = [
            # Temporal adverbials
            ("计算", "首先计算"),
            ("找到", "然后找到"),
            ("实现", "接下来实现"),

            # Degree adverbs
            ("计算", "精确计算"),
            ("找到", "快速找到"),
            ("实现", "正确实现"),

            # Purpose adverbials
            ("计算", "为了得到结果而计算"),
            ("找到", "为了解决问题而找到"),
        ]

        for item in self.base_data:
            text = item["text"]

            for old_word, new_word in grammar_rules:
                if old_word in text:
                    new_text = text.replace(old_word, new_word)
                    augmented_data.append({
                        "text": new_text,
                        "label": item["label"],
                        "category": item["category"],
                        "keywords": item.get("keywords", []),
                    })

        return augmented_data

    def _multilingual_augmentation(self) -> List[Dict[str, Any]]:
        """Augment with English and mixed Chinese/English variants."""
        augmented_data = []

        # Chinese/English paired data.
        multilingual_pairs = [
            ("计算两个整数的和", "Calculate the sum of two integers", "addition"),
            ("找到数组中的最大值", "Find the maximum value in array", "array_max"),
            ("反转字符串", "Reverse the string", "string_reverse"),
            ("计算阶乘", "Calculate factorial", "factorial"),
            ("判断质数", "Check if number is prime", "prime_check"),
            ("实现栈操作", "Implement stack operations", "stack"),
            ("深度优先搜索", "Depth-first search", "dfs"),
            ("解决背包问题", "Solve knapsack problem", "knapsack"),
        ]

        for chinese_text, english_text, label in multilingual_pairs:
            # English-only sample.
            augmented_data.append({
                "text": english_text,
                "label": label,
                "category": self._get_category_by_label(label),
                "keywords": self._extract_keywords(english_text),
            })

            # Mixed Chinese/English sample.
            mixed_text = f"{chinese_text} ({english_text})"
            augmented_data.append({
                "text": mixed_text,
                "label": label,
                "category": self._get_category_by_label(label),
                "keywords": self._extract_keywords(mixed_text),
            })

        return augmented_data

    def _balance_dataset(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Balance the per-label sample counts toward the median count.

        Labels below the median are over-sampled (with replacement);
        labels above it are randomly under-sampled.

        FIX: returns [] for empty input instead of raising IndexError
        when computing the median of an empty count list.
        """
        if not data:
            return []

        # Samples per label.
        label_counts = Counter(item["label"] for item in data)

        # Target count = median of the per-label counts.
        target_count = sorted(label_counts.values())[len(label_counts) // 2]

        balanced_data = []

        for label, count in label_counts.items():
            label_data = [item for item in data if item["label"] == label]

            if count < target_count:
                # Too few samples: duplicate random ones until target.
                while len(label_data) < target_count:
                    label_data.append(random.choice(label_data))
            elif count > target_count:
                # Too many samples: random subsample down to target.
                label_data = random.sample(label_data, target_count)

            balanced_data.extend(label_data)

        return balanced_data

    def _get_category_by_label(self, label: str) -> str:
        """Map a label to its category; unknown labels map to "other"."""
        category_mapping = {
            "addition": "math", "multiplication": "math", "subtraction": "math", "division": "math",
            "factorial": "math", "prime_check": "math", "fibonacci": "math", "gcd": "math",
            "array_sum": "array", "array_max": "array", "array_min": "array", "array_sort": "array",
            "string_reverse": "string", "string_length": "string", "string_palindrome": "string", "string_count": "string",
            "linear_search": "search", "binary_search": "search",
            "bubble_sort": "sort", "quick_sort": "sort", "merge_sort": "sort",
            "stack": "data_structure", "queue": "data_structure", "linked_list": "data_structure",
            "dfs": "graph", "bfs": "graph", "shortest_path": "graph",
            "knapsack": "dp", "lcs": "dp", "lis": "dp",
            "activity_selection": "greedy", "coin_change": "greedy",
        }
        return category_mapping.get(label, "other")

    def _extract_keywords(self, text: str) -> List[str]:
        """Extract up to five keywords from *text*.

        Chinese text (detected via the CJK Unified Ideographs range) is
        segmented with jieba when available; English text is tokenized
        with nltk when available.  Both fall back to whitespace split.
        """
        if any('\u4e00' <= char <= '\u9fff' for char in text):
            # Chinese text.
            if JIEBA_AVAILABLE:
                words = jieba.lcut(text)
            else:
                words = text.split()
            keywords = [word for word in words if len(word) > 1]
        else:
            # English text.
            if NLTK_AVAILABLE:
                words = word_tokenize(text.lower())
            else:
                words = text.split()
            keywords = [word for word in words if len(word) > 2 and word.isalpha()]

        return keywords[:5]  # Return at most five keywords.

    def get_enhanced_training_data(self) -> List[Dict[str, Any]]:
        """Return the augmented training samples built at init time."""
        return self.enhanced_data

    def get_dataset_statistics(self) -> Dict[str, Any]:
        """Return summary statistics for the enhanced dataset.

        Includes total/unique counts, label and category distributions,
        and per-label / per-category averages.

        FIX: safe on an empty dataset (averages are 0.0 instead of
        raising ZeroDivisionError).
        """
        total_samples = len(self.enhanced_data)
        labels = [item["label"] for item in self.enhanced_data]
        categories = [item["category"] for item in self.enhanced_data]

        label_counts = Counter(labels)
        category_counts = Counter(categories)

        return {
            "total_samples": total_samples,
            "unique_labels": len(label_counts),
            "unique_categories": len(category_counts),
            "label_distribution": dict(label_counts),
            "category_distribution": dict(category_counts),
            # Guard against division by zero on an empty dataset.
            "average_samples_per_label": total_samples / len(label_counts) if label_counts else 0.0,
            "average_samples_per_category": total_samples / len(category_counts) if category_counts else 0.0,
        }

    def save_enhanced_dataset(self, filename: str = "enhanced_dataset.json") -> None:
        """Serialize the enhanced dataset as UTF-8 JSON under the enhanced data dir."""
        filepath = os.path.join(self.enhanced_data_dir, filename)
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(self.enhanced_data, f, ensure_ascii=False, indent=2)
        print(f"增强数据集已保存到: {filepath}")

    def load_enhanced_dataset(self, filename: str = "enhanced_dataset.json") -> None:
        """Load a previously saved dataset, replacing the in-memory one."""
        filepath = os.path.join(self.enhanced_data_dir, filename)
        with open(filepath, 'r', encoding='utf-8') as f:
            self.enhanced_data = json.load(f)
        print(f"增强数据集已从 {filepath} 加载")

if __name__ == "__main__":
    # Demo: build the enhanced dataset, report its statistics, then
    # persist it to the enhanced-data directory.
    demo_dataset = EnhancedProblemDataset()
    demo_stats = demo_dataset.get_dataset_statistics()

    print("增强数据集统计信息:")
    for line in (
        f"  总样本数: {demo_stats['total_samples']}",
        f"  唯一标签数: {demo_stats['unique_labels']}",
        f"  唯一类别数: {demo_stats['unique_categories']}",
        f"  平均每标签样本数: {demo_stats['average_samples_per_label']:.1f}",
        f"  平均每类别样本数: {demo_stats['average_samples_per_category']:.1f}",
    ):
        print(line)

    print("\n标签分布:")
    for lbl, n in demo_stats['label_distribution'].items():
        print(f"  {lbl}: {n} 个样本")

    print("\n类别分布:")
    for cat, n in demo_stats['category_distribution'].items():
        print(f"  {cat}: {n} 个样本")

    demo_dataset.save_enhanced_dataset()
