#!/usr/bin/env python3
"""
机器学习引擎 - 增强版训练器模块

本模块提供了增强版的机器学习模型训练功能，通过改进的特征工程、
模型选择和训练策略，大幅提升模型性能。

主要功能：
- 特征工程：TF-IDF + Word2Vec + 统计特征
- 模型选择：支持更多算法和集成学习
- 训练策略：交叉验证 + 网格搜索 + 早停
- 性能优化：特征选择 + 降维 + 正则化
- 模型融合：投票 + 堆叠 + 加权平均

技术特点：
- 多特征融合：文本特征 + 统计特征 + 语义特征
- 集成学习：随机森林 + XGBoost + 神经网络
- 超参数优化：网格搜索 + 贝叶斯优化
- 模型解释：特征重要性 + SHAP值
- 性能监控：训练曲线 + 验证指标
"""

import os
import pickle
import joblib
import numpy as np
import pandas as pd
from typing import Dict, List, Any, Tuple, Optional
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, StratifiedKFold
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.feature_selection import SelectKBest, f_classif
import warnings
warnings.filterwarnings('ignore')

# 尝试导入高级库
try:
    import xgboost as xgb
    XGBOOST_AVAILABLE = True
except ImportError:
    XGBOOST_AVAILABLE = False
    print("Warning: XGBoost not available. Install with: pip install xgboost")

try:
    from gensim.models import Word2Vec
    from gensim.models.doc2vec import Doc2Vec, TaggedDocument
    GENSIM_AVAILABLE = True
except ImportError:
    GENSIM_AVAILABLE = False
    print("Warning: Gensim not available. Install with: pip install gensim")

try:
    import jieba
    JIEBA_AVAILABLE = True
except ImportError:
    JIEBA_AVAILABLE = False
    print("Warning: jieba not available. Install with: pip install jieba")

from .dataset import EnhancedProblemDataset

class EnhancedMLProblemClassifier:
    """Enhanced machine-learning problem classifier.

    Fuses several feature families — TF-IDF n-grams, hand-crafted text
    statistics and, when gensim is installed, Word2Vec/Doc2Vec embeddings —
    then trains one of several classical estimators or a soft-voting
    ensemble over them.

    Supported ``model_type`` values:
        - ``naive_bayes``:         MultinomialNB
        - ``logistic_regression``: LogisticRegression
        - ``random_forest``:       RandomForestClassifier
        - ``svm``:                 SVC (RBF kernel, probability enabled)
        - ``neural_network``:      MLPClassifier
        - ``xgboost``:             XGBClassifier (only if xgboost is installed)
        - ``ensemble``:            soft VotingClassifier over the above

    Example:
        classifier = EnhancedMLProblemClassifier("ensemble")
        results = classifier.train(texts, labels)
        prediction, confidence = classifier.predict("计算三个数的和")
        classifier.save_model("enhanced_model.joblib")
    """

    # Model types that contain MultinomialNB (directly, or as a member of
    # the voting ensemble).  MultinomialNB rejects negative feature values,
    # so these types are scaled to [0, 1] and never fed signed PCA output.
    _NONNEGATIVE_MODEL_TYPES = ("naive_bayes", "ensemble")

    def __init__(self, model_type: str = "ensemble", models_dir: Optional[str] = None):
        """
        Initialize the enhanced classifier.

        Args:
            model_type: One of the supported model type names (see class doc).
            models_dir: Directory for model artifacts; defaults to
                ``<this package>/models``.

        Raises:
            ValueError: If ``model_type`` is not supported.
        """
        self.model_type = model_type
        self.model = None              # underlying sklearn/xgboost estimator
        self.vectorizer = None         # fitted TfidfVectorizer (after train)
        self.scaler = None             # fitted StandardScaler/MinMaxScaler
        self.label_encoder = None      # fitted LabelEncoder
        self.feature_selector = None   # fitted SelectKBest, if used
        self.pca = None                # fitted PCA, if used
        self.word2vec_model = None     # fitted gensim Word2Vec, if available
        self.doc2vec_model = None      # fitted gensim Doc2Vec, if available
        self.training_results = {}

        # Resolve the artifact directory layout.
        if models_dir is None:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            models_dir = os.path.join(current_dir, 'models')

        self.models_dir = models_dir
        self.trained_models_dir = os.path.join(models_dir, 'trained')
        self.checkpoints_dir = os.path.join(models_dir, 'checkpoints')
        self.logs_dir = os.path.join(models_dir, 'logs')

        for dir_path in [self.trained_models_dir, self.checkpoints_dir, self.logs_dir]:
            os.makedirs(dir_path, exist_ok=True)

        self._create_model()

    def _create_model(self):
        """Instantiate the estimator selected by ``self.model_type``.

        Raises:
            ValueError: For unknown types, or for "xgboost" when the
                xgboost package is not installed.
        """
        if self.model_type == "naive_bayes":
            self.model = MultinomialNB(alpha=0.1)
        elif self.model_type == "logistic_regression":
            self.model = LogisticRegression(
                C=1.0,
                max_iter=1000,
                random_state=42,
                class_weight='balanced'
            )
        elif self.model_type == "random_forest":
            self.model = RandomForestClassifier(
                n_estimators=100,
                max_depth=10,
                min_samples_split=5,
                min_samples_leaf=2,
                random_state=42,
                class_weight='balanced'
            )
        elif self.model_type == "svm":
            self.model = SVC(
                C=1.0,
                kernel='rbf',
                probability=True,  # needed for soft voting / confidences
                random_state=42,
                class_weight='balanced'
            )
        elif self.model_type == "neural_network":
            self.model = MLPClassifier(
                hidden_layer_sizes=(100, 50),
                max_iter=1000,
                random_state=42,
                early_stopping=True,
                validation_fraction=0.1
            )
        elif self.model_type == "xgboost" and XGBOOST_AVAILABLE:
            self.model = xgb.XGBClassifier(
                n_estimators=100,
                max_depth=6,
                learning_rate=0.1,
                random_state=42,
                eval_metric='mlogloss'
            )
        elif self.model_type == "ensemble":
            self.model = self._create_ensemble_model()
        else:
            # Also reached for "xgboost" when the package is missing.
            raise ValueError(f"不支持的模型类型: {self.model_type}")

    def _create_ensemble_model(self):
        """Build a soft-voting ensemble of the base estimators
        (plus XGBoost when it is installed)."""
        models = [
            ('nb', MultinomialNB(alpha=0.1)),
            ('lr', LogisticRegression(C=1.0, max_iter=1000, random_state=42)),
            ('rf', RandomForestClassifier(n_estimators=100, random_state=42)),
            ('svm', SVC(C=1.0, kernel='rbf', probability=True, random_state=42)),
        ]

        if XGBOOST_AVAILABLE:
            models.append(('xgb', xgb.XGBClassifier(n_estimators=100, random_state=42)))

        # Soft voting averages predicted class probabilities.
        return VotingClassifier(estimators=models, voting='soft')

    @staticmethod
    def _tokenize(text: str) -> List[str]:
        """Tokenize *text*: jieba segmentation when the text contains CJK
        characters and jieba is installed, whitespace splitting otherwise."""
        if JIEBA_AVAILABLE and any('\u4e00' <= char <= '\u9fff' for char in text):
            return jieba.lcut(text)
        return text.split()

    def _extract_features(self, texts: List[str]) -> np.ndarray:
        """Build the fused feature matrix: TF-IDF + statistics, plus
        Word2Vec/Doc2Vec columns when those models are fitted."""
        features = [
            self._extract_tfidf_features(texts),
            self._extract_statistical_features(texts),
        ]

        if GENSIM_AVAILABLE and self.word2vec_model is not None:
            features.append(self._extract_word2vec_features(texts))
        if GENSIM_AVAILABLE and self.doc2vec_model is not None:
            features.append(self._extract_doc2vec_features(texts))

        # Concatenate all feature families column-wise.
        return np.hstack(features) if len(features) > 1 else features[0]

    def _extract_tfidf_features(self, texts: List[str]) -> np.ndarray:
        """Return dense TF-IDF features; fits the vectorizer on first use.

        NOTE(review): the vectorizer (like Word2Vec/Doc2Vec) is fitted on
        the full corpus before the train/test split in ``train``, which
        leaks test-set vocabulary statistics into training — acceptable for
        a demo, worth fixing for an honest evaluation.
        """
        if self.vectorizer is None:
            self.vectorizer = TfidfVectorizer(
                max_features=5000,
                ngram_range=(1, 3),
                stop_words=None,
                min_df=1,
                max_df=0.95
            )
            return self.vectorizer.fit_transform(texts).toarray()
        return self.vectorizer.transform(texts).toarray()

    def _extract_statistical_features(self, texts: List[str]) -> np.ndarray:
        """Return a ``(n_texts, 15)`` matrix of hand-crafted statistics:
        length/word counts, punctuation counts, character-class counts and
        a CJK character count."""
        rows = []
        for text in texts:
            words = text.split()
            rows.append([
                len(text),                                    # total characters
                len(words),                                   # word count
                len(set(words)),                              # unique words
                text.count(' '),                              # spaces
                text.count('\n'),                             # newlines
                text.count('.'),                              # periods
                text.count(','),                              # commas
                text.count('?'),                              # question marks
                text.count('!'),                              # exclamation marks
                sum(1 for c in text if c.isalpha()),          # alphabetic chars
                sum(1 for c in text if c.isdigit()),          # digits
                sum(1 for c in text if c.isspace()),          # whitespace chars
                sum(1 for c in text if c.isupper()),          # uppercase
                sum(1 for c in text if c.islower()),          # lowercase
                sum(1 for c in text if '\u4e00' <= c <= '\u9fff'),  # CJK chars
            ])
        return np.array(rows)

    def _extract_word2vec_features(self, texts: List[str]) -> np.ndarray:
        """Return per-text mean Word2Vec vectors; a zero vector stands in
        for texts with no in-vocabulary token.  Requires a fitted
        ``word2vec_model``."""
        vectors = []
        for text in texts:
            tokens = self._tokenize(text)
            in_vocab = [self.word2vec_model.wv[w] for w in tokens
                        if w in self.word2vec_model.wv]
            if in_vocab:
                vectors.append(np.mean(in_vocab, axis=0))
            else:
                vectors.append(np.zeros(self.word2vec_model.vector_size))
        return np.array(vectors)

    def _extract_doc2vec_features(self, texts: List[str]) -> np.ndarray:
        """Return Doc2Vec inferred vectors for each text.

        NOTE(review): ``infer_vector`` uses stochastic inference, so
        repeated calls on the same text may differ slightly — confirm this
        is acceptable for prediction stability.
        """
        return np.array([
            self.doc2vec_model.infer_vector(self._tokenize(text))
            for text in texts
        ])

    def _train_word2vec(self, texts: List[str]):
        """Fit the Word2Vec model on the tokenized corpus (no-op when
        gensim is unavailable)."""
        if not GENSIM_AVAILABLE:
            return

        sentences = [self._tokenize(text) for text in texts]
        self.word2vec_model = Word2Vec(
            sentences,
            vector_size=100,
            window=5,
            min_count=1,
            workers=4,
            epochs=10
        )

    def _train_doc2vec(self, texts: List[str]):
        """Fit the Doc2Vec model on the tokenized corpus (no-op when
        gensim is unavailable)."""
        if not GENSIM_AVAILABLE:
            return

        documents = [
            TaggedDocument(self._tokenize(text), [i])
            for i, text in enumerate(texts)
        ]
        self.doc2vec_model = Doc2Vec(
            documents,
            vector_size=100,
            window=5,
            min_count=1,
            workers=4,
            epochs=10
        )

    def train(self, texts: List[str], labels: List[str]) -> Dict[str, Any]:
        """
        Train the classifier end to end.

        Pipeline: label encoding -> optional Word2Vec/Doc2Vec fitting ->
        feature extraction -> stratified split -> scaling -> optional
        feature selection -> optional PCA -> model fit -> evaluation.

        Args:
            texts: Training texts.
            labels: String label per text (same length as ``texts``).

        Returns:
            Dict with accuracy, weighted F1, 5-fold CV mean/std,
            classification report, confusion matrix and label mapping;
            also stored in ``self.training_results``.
        """
        print(f"开始训练增强分类器 ({self.model_type})...")
        print(f"训练样本数: {len(texts)}")

        # 1. Encode string labels to integers.
        self.label_encoder = LabelEncoder()
        encoded_labels = self.label_encoder.fit_transform(labels)

        # 2. Fit the embedding models (corpus-wide, optional).
        if GENSIM_AVAILABLE:
            print("训练Word2Vec模型...")
            self._train_word2vec(texts)
            print("训练Doc2Vec模型...")
            self._train_doc2vec(texts)

        # 3. Extract the fused feature matrix.
        print("提取特征...")
        X = self._extract_features(texts)
        print(f"特征维度: {X.shape}")

        # 4. Stratified hold-out split.
        X_train, X_test, y_train, y_test = train_test_split(
            X, encoded_labels, test_size=0.2, random_state=42, stratify=encoded_labels
        )

        # 5. Feature scaling.
        # BUG FIX: StandardScaler output is partly negative, which makes
        # MultinomialNB ("naive_bayes" and the NB member of "ensemble")
        # raise at fit time; those model types now use a [0, 1] MinMaxScaler.
        if self.model_type in self._NONNEGATIVE_MODEL_TYPES:
            self.scaler = MinMaxScaler()
        else:
            self.scaler = StandardScaler()
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)

        # 6. Univariate feature selection for very wide matrices.
        if X_train_scaled.shape[1] > 1000:
            print("进行特征选择...")
            self.feature_selector = SelectKBest(f_classif, k=1000)
            X_train_selected = self.feature_selector.fit_transform(X_train_scaled, y_train)
            X_test_selected = self.feature_selector.transform(X_test_scaled)
        else:
            X_train_selected = X_train_scaled
            X_test_selected = X_test_scaled

        # 7. PCA for still-wide matrices.  Skipped for NB-based models
        # because PCA projections are signed (see _NONNEGATIVE_MODEL_TYPES).
        if (X_train_selected.shape[1] > 500
                and self.model_type not in self._NONNEGATIVE_MODEL_TYPES):
            print("进行降维...")
            # BUG FIX: PCA requires n_components <= min(n_samples, n_features);
            # a fixed 500 crashed on small training splits.
            n_components = min(500, *X_train_selected.shape)
            self.pca = PCA(n_components=n_components, random_state=42)
            X_train_final = self.pca.fit_transform(X_train_selected)
            X_test_final = self.pca.transform(X_test_selected)
        else:
            X_train_final = X_train_selected
            X_test_final = X_test_selected

        # 8. Fit the estimator.
        print("训练模型...")
        self.model.fit(X_train_final, y_train)

        # 9. Hold-out evaluation.
        print("评估模型...")
        y_pred = self.model.predict(X_test_final)
        accuracy = accuracy_score(y_test, y_pred)
        f1 = f1_score(y_test, y_pred, average='weighted')

        # 5-fold stratified cross-validation on the training portion.
        cv_scores = cross_val_score(
            self.model, X_train_final, y_train,
            cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=42),
            scoring='accuracy'
        )

        self.training_results = {
            'model_type': self.model_type,
            'train_samples': len(X_train),
            'test_samples': len(X_test),
            'feature_dimensions': X.shape[1],
            'final_feature_dimensions': X_train_final.shape[1],
            'accuracy': accuracy,
            'f1_score': f1,
            'cv_mean': cv_scores.mean(),
            'cv_std': cv_scores.std(),
            'classification_report': classification_report(y_test, y_pred, output_dict=True),
            'confusion_matrix': confusion_matrix(y_test, y_pred).tolist(),
            'label_mapping': dict(zip(self.label_encoder.classes_, range(len(self.label_encoder.classes_))))
        }

        print("训练完成!")
        print(f"测试准确率: {accuracy:.4f}")
        print(f"F1分数: {f1:.4f}")
        print(f"交叉验证准确率: {cv_scores.mean():.4f} ± {cv_scores.std():.4f}")

        return self.training_results

    def _apply_preprocessing(self, X: np.ndarray) -> np.ndarray:
        """Apply the fitted scaler / feature selector / PCA (those that
        exist) in the same order used during training."""
        X = self.scaler.transform(X)
        if self.feature_selector is not None:
            X = self.feature_selector.transform(X)
        if self.pca is not None:
            X = self.pca.transform(X)
        return X

    def predict(self, text: str) -> Tuple[str, float]:
        """
        Predict the label of a single text.

        Args:
            text: Text to classify.

        Returns:
            (predicted label, confidence).  Confidence is the maximum
            class probability, or 0.5 for models without ``predict_proba``.

        Raises:
            ValueError: If the model has not been trained or loaded.
        """
        # Single-text prediction is just a batch of one.
        return self.predict_batch([text])[0]

    def predict_batch(self, texts: List[str]) -> List[Tuple[str, float]]:
        """
        Predict labels for a batch of texts.

        Args:
            texts: Texts to classify.

        Returns:
            One (predicted label, confidence) pair per input text.

        Raises:
            ValueError: If the model has not been trained or loaded.
        """
        if self.model is None:
            raise ValueError("模型尚未训练")

        X_final = self._apply_preprocessing(self._extract_features(texts))

        predictions = self.model.predict(X_final)
        prediction_labels = self.label_encoder.inverse_transform(predictions)

        if hasattr(self.model, 'predict_proba'):
            # Max class probability as the confidence, cast to plain float.
            probabilities = self.model.predict_proba(X_final)
            confidences = [float(p) for p in np.max(probabilities, axis=1)]
        else:
            confidences = [0.5] * len(texts)  # no probability output available

        return list(zip(prediction_labels, confidences))

    def save_model(self, filename: str = None):
        """Serialize the whole pipeline (estimator plus every fitted
        transformer) into ``trained_models_dir`` via joblib."""
        if filename is None:
            filename = f"enhanced_{self.model_type}_model.joblib"

        filepath = os.path.join(self.trained_models_dir, filename)

        model_data = {
            'model': self.model,
            'vectorizer': self.vectorizer,
            'scaler': self.scaler,
            'label_encoder': self.label_encoder,
            'feature_selector': self.feature_selector,
            'pca': self.pca,
            'word2vec_model': self.word2vec_model,
            'doc2vec_model': self.doc2vec_model,
            'training_results': self.training_results,
            'model_type': self.model_type
        }

        joblib.dump(model_data, filepath)
        print(f"模型已保存到: {filepath}")

    def load_model(self, filename: str):
        """Restore a pipeline previously written by ``save_model``.

        NOTE(review): joblib.load unpickles arbitrary objects — only load
        model files from trusted sources.
        """
        filepath = os.path.join(self.trained_models_dir, filename)
        model_data = joblib.load(filepath)

        self.model = model_data['model']
        self.vectorizer = model_data['vectorizer']
        self.scaler = model_data['scaler']
        self.label_encoder = model_data['label_encoder']
        self.feature_selector = model_data['feature_selector']
        self.pca = model_data['pca']
        self.word2vec_model = model_data['word2vec_model']
        self.doc2vec_model = model_data['doc2vec_model']
        self.training_results = model_data['training_results']
        self.model_type = model_data['model_type']

        print(f"模型已从 {filepath} 加载")

    def get_feature_importance(self) -> Dict[str, float]:
        """Return ``{feature_name: importance}`` sorted descending.

        Only TF-IDF and statistical features carry names; embedding
        dimensions are unnamed and therefore omitted.  Returns an empty
        dict for models exposing neither ``feature_importances_`` nor
        ``coef_``, and when PCA was applied (PCA components are mixtures
        of the original features, so no per-name mapping exists).

        Raises:
            ValueError: If the model has not been trained or loaded.
        """
        if self.model is None:
            raise ValueError("模型尚未训练")

        if hasattr(self.model, 'feature_importances_'):
            importances = self.model.feature_importances_   # tree ensembles
        elif hasattr(self.model, 'coef_'):
            importances = np.abs(self.model.coef_[0])       # linear models
        else:
            return {}

        # BUG FIX: after PCA the model's inputs are component scores, not
        # original features — the old code mis-mapped names onto them.
        if self.pca is not None:
            return {}

        feature_names = []
        if self.vectorizer is not None:
            feature_names.extend(self.vectorizer.get_feature_names_out())

        # Names for the 15 statistical features, in extraction order.
        feature_names.extend([
            'text_length', 'word_count', 'unique_word_count', 'space_count',
            'newline_count', 'period_count', 'comma_count', 'question_count',
            'exclamation_count', 'alpha_count', 'digit_count', 'space_char_count',
            'upper_count', 'lower_count', 'chinese_char_count'
        ])

        # BUG FIX: when SelectKBest dropped columns, importances refer only
        # to the kept columns; map names through the selection mask.
        if self.feature_selector is not None:
            mask = self.feature_selector.get_support()
            feature_names = [name for name, keep in zip(feature_names, mask) if keep]

        importance_dict = {
            feature_names[i]: importance
            for i, importance in enumerate(importances)
            if i < len(feature_names)
        }

        # Sort by importance, highest first.
        return dict(sorted(importance_dict.items(), key=lambda kv: kv[1], reverse=True))

class EnhancedMLModelTrainer:
    """
    Enhanced machine-learning model trainer.

    Trains every supported classifier variant on the enhanced dataset,
    records per-model results, and keeps a handle to the best performer
    (highest test accuracy) for saving/loading.
    """

    def __init__(self, data_dir: str = None, models_dir: str = None):
        """
        Initialize the trainer.

        Args:
            data_dir: Passed through to ``EnhancedProblemDataset``.
            models_dir: Artifact directory; defaults to ``<package>/models``.
        """
        self.dataset = EnhancedProblemDataset(data_dir)
        self.models = {}        # model_type -> trained classifier
        self.results = {}       # model_type -> training result dict
        self.best_model = None  # classifier with the highest accuracy so far
        self.best_score = 0

        if models_dir is None:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            models_dir = os.path.join(current_dir, 'models')

        self.models_dir = models_dir

    def train_all_models(self) -> Dict[str, Any]:
        """Train every supported model type and return the per-model
        results dict.  A failure in one model is logged and skipped, so
        the remaining models still train."""
        print("开始训练所有模型...")

        # Pull the training corpus from the dataset.
        training_data = self.dataset.get_enhanced_training_data()
        texts = [item['text'] for item in training_data]
        labels = [item['label'] for item in training_data]

        print(f"训练数据: {len(texts)} 个样本")

        # Model types to train; xgboost only when the package is installed.
        model_types = ['naive_bayes', 'logistic_regression', 'random_forest', 'svm', 'neural_network']
        if XGBOOST_AVAILABLE:
            model_types.append('xgboost')
        model_types.append('ensemble')

        for model_type in model_types:
            print(f"\n训练 {model_type} 模型...")
            try:
                classifier = EnhancedMLProblemClassifier(model_type, self.models_dir)
                results = classifier.train(texts, labels)

                self.models[model_type] = classifier
                self.results[model_type] = results

                # Track the best model by hold-out accuracy.
                if results['accuracy'] > self.best_score:
                    self.best_score = results['accuracy']
                    self.best_model = classifier

                print(f"{model_type} 训练完成，准确率: {results['accuracy']:.4f}")

            except Exception as e:
                # Best-effort: report the failure and continue with the
                # remaining model types.
                print(f"{model_type} 训练失败: {e}")
                continue

        return self.results

    def save_best_model(self, filename: str = "best_model.joblib"):
        """Persist the best model plus a JSON summary of all results.

        Raises:
            ValueError: If no model has been trained yet.
        """
        if self.best_model is None:
            raise ValueError("没有可保存的模型")

        self.best_model.save_model(filename)

        # Write the per-model training metrics next to the model.
        import json
        results_file = filename.replace('.joblib', '_results.json')
        with open(os.path.join(self.models_dir, results_file), 'w', encoding='utf-8') as f:
            json.dump(self.results, f, ensure_ascii=False, indent=2)

        # BUG FIX: the original printed a literal "(unknown)" placeholder
        # instead of the saved model's filename.
        print(f"最佳模型已保存到: {filename}")
        print(f"训练结果已保存到: {results_file}")

    def load_best_model(self, filename: str):
        """Load a previously saved model as the current best model."""
        classifier = EnhancedMLProblemClassifier(models_dir=self.models_dir)
        classifier.load_model(filename)
        self.best_model = classifier
        # BUG FIX: "(unknown)" placeholder replaced with the filename.
        print(f"最佳模型已从 {filename} 加载")

if __name__ == "__main__":
    # Demo: train every supported model, summarize metrics, persist the
    # best performer, then sanity-check it on a few sample problems.
    trainer = EnhancedMLModelTrainer()
    all_results = trainer.train_all_models()

    banner = "=" * 50
    print("\n" + banner)
    print("训练结果汇总")
    print(banner)

    for name, metrics in all_results.items():
        print(f"\n{name}:")
        print(f"  准确率: {metrics['accuracy']:.4f}")
        print(f"  F1分数: {metrics['f1_score']:.4f}")
        print(f"  交叉验证: {metrics['cv_mean']:.4f} ± {metrics['cv_std']:.4f}")

    trainer.save_best_model("enhanced_best_model.joblib")

    if trainer.best_model:
        print("\n测试最佳模型:")
        sample_problems = [
            "计算两个整数的和",
            "找到数组中的最大值",
            "反转字符串",
            "计算阶乘",
            "实现栈的基本操作"
        ]

        batch = trainer.best_model.predict_batch(sample_problems)
        for problem, (label, score) in zip(sample_problems, batch):
            print(f"  '{problem}' -> {label} (置信度: {score:.3f})")
