"""
增强版模型定义模块
基于interface_design.md和workflow_design.md设计
支持11分类任务的完整训练和预测流程
"""
import yaml
import numpy as np
import pandas as pd
from abc import ABC, abstractmethod
from typing import Dict, Any, Tuple, Optional, List
import pickle
import json
import sys
import os
from sklearn.ensemble import RandomForestClassifier as SklearnRF
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from tqdm import tqdm
import logging

# 使用config文件夹的yaml配置模型参数
def load_models_config(path: str) -> Dict[str, Any]:
    """Load the model hyper-parameter configuration from a YAML file.

    Parameters:
        path: path to the YAML config file (e.g. 'config/models.yaml').

    Returns:
        The parsed configuration as a dict.

    Terminates the process with exit status 1 when the file is missing
    (CLI-style behaviour, kept from the original).
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
        print("✅ 模型配置加载成功")
    except FileNotFoundError:
        print("❌ 模型配置文件不存在，请确保config/models.yaml存在")
        # sys.exit instead of the site-injected exit() builtin, which is
        # not guaranteed to exist in every runtime environment.
        sys.exit(1)
    return config
#同理使用config文件夹的yaml配置路径参数
def load_paths_config(path: str) -> Dict[str, Any]:
    """Load the file-path configuration from a YAML file.

    Parameters:
        path: path to the YAML config file (e.g. 'config/paths.yaml').

    Returns:
        The parsed path configuration as a dict.

    Terminates the process with exit status 1 when the file is missing
    (CLI-style behaviour, kept from the original).
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            paths = yaml.safe_load(f)
        print("✅ 路径配置加载成功")
    except FileNotFoundError:
        print("❌ 路径配置文件不存在，请确保config/paths.yaml存在")
        # sys.exit instead of the site-injected exit() builtin, which is
        # not guaranteed to exist in every runtime environment.
        sys.exit(1)
    return paths


class BaseModel(ABC):
    """Enhanced model base class for the 11-class content-moderation task.

    On construction it loads the model and path configuration from the
    config/ YAML files, then exposes shared utilities for subclasses:
    metric evaluation, label mapping, JSONL loading, basic text
    preprocessing and TF-IDF vectorizer creation.
    """

    def __init__(self):
        # Configuration is read from the config/ folder as soon as the
        # model object is created.
        self.models_config = load_models_config('config/models.yaml')
        self.paths_config = load_paths_config('config/paths.yaml')
        self.vectorizer = None       # feature vectorizer (set during training)
        self.label_mapping = None    # label -> id mapping (encoder)
        self.reverse_mapping = None  # id -> label mapping (decoder)
        self.is_trained = False      # whether the model has been trained
        self.trained_times = 0       # number of completed training runs
        self.logger = self._setup_logger()  # per-instance logger

    def _setup_logger(self) -> logging.Logger:
        """Create an INFO-level logger named after the concrete subclass."""
        logger = logging.getLogger(f'{self.__class__.__name__}')
        logger.setLevel(logging.INFO)
        return logger

    @abstractmethod
    def train(self, X_train: np.ndarray, y_train: np.ndarray,
              X_val: Optional[np.ndarray] = None, y_val: Optional[np.ndarray] = None) -> Dict[str, Any]:
        """Train the model.

        Parameters:
            X_train: training-set features
            y_train: training-set labels
            X_val: optional validation-set features
            y_val: optional validation-set labels
        Returns:
            Training results as a dict.
        """
        pass

    @abstractmethod
    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict labels.

        Parameters:
            X: feature matrix (numpy array)
        Returns:
            Predicted labels (numpy array).
        """
        pass

    @abstractmethod
    def save(self, path: str) -> None:
        """Persist the model.

        Parameters:
            path: destination path (taken from the path config).
        """
        pass

    @abstractmethod
    def load(self, path: str) -> None:
        """Load a previously saved model.

        Parameters:
            path: source path (taken from the path config).
        """
        pass

    def evaluate(self, X_test: np.ndarray, y_test: np.ndarray) -> Dict[str, float]:
        """Evaluate the model and return accuracy plus macro/micro P/R/F1."""
        y_pred = self.predict(X_test)

        metrics = {
            # accuracy: overall fraction of correct predictions; most
            # meaningful when the classes are balanced.
            'accuracy': accuracy_score(y_test, y_pred),
            # macro-averaged metrics: unweighted arithmetic mean of the
            # per-class scores, so minority classes count as much as
            # majority ones — useful under class imbalance.
            'precision_macro': precision_score(y_test, y_pred, average='macro', zero_division=0),
            'recall_macro': recall_score(y_test, y_pred, average='macro', zero_division=0),
            'f1_macro': f1_score(y_test, y_pred, average='macro', zero_division=0),
            # micro-averaged metrics: computed from the global TP/FP/FN
            # counts (NOT a per-class average); for single-label
            # multiclass data they all coincide with accuracy.
            'precision_micro': precision_score(y_test, y_pred, average='micro', zero_division=0),
            'recall_micro': recall_score(y_test, y_pred, average='micro', zero_division=0),
            'f1_micro': f1_score(y_test, y_pred, average='micro', zero_division=0)
        }

        return metrics

    # Pipeline methods below are abstract; concrete subclasses implement them.

    @abstractmethod
    def train_operator(self, data_path: str, processor_type: str = 'ml') -> Dict[str, Any]:
        """Run the full training pipeline starting from a data path."""
        pass

    @abstractmethod
    def train_from_df_operator(self, df: pd.DataFrame, processor_type: str = 'ml') -> Dict[str, Any]:
        """Run the full training pipeline starting from a DataFrame."""
        pass

    @abstractmethod
    def predict_operator(self, texts: List[str], processor_type: str = 'ml') -> np.ndarray:
        """Run the prediction pipeline starting from raw texts."""
        pass

    @abstractmethod
    def predict_from_df_operator(self, df: pd.DataFrame, processor_type: str = 'ml') -> np.ndarray:
        """Run the prediction pipeline starting from a DataFrame."""
        pass

    @abstractmethod
    def get_processor(self, processor_type: str):
        """Return/run the data processor for ``processor_type``.

        EDA.ipynb already implements the data processing, so this is left
        to subclasses for now. The processed result is a DataFrame with no
        missing values and balanced violation classes, but text length is
        uneven (2 to 1500 characters); DL subclasses will need their own
        truncate/padding handling.
        """
        pass

    def get_label_mapping(self, df: Optional[pd.DataFrame] = None) -> Dict[str, int]:
        """Return the label mapping: "label_subject" string -> numeric id.

        Resolution order: the ``label_mapping`` section of the model
        config, then a mapping auto-generated from ``df``, then a
        hard-coded default based on the observed data distribution.
        """
        if 'label_mapping' in self.models_config:
            mapping = self.models_config['label_mapping']
            self.logger.info(f"从配置文件加载标签映射，共{len(mapping)}个类别")
            return mapping

        if df is not None:
            # Auto-generate the mapping from the data; sorting makes the
            # id assignment deterministic across runs.
            unique_combinations = df.apply(lambda row: f"{row['label']}_{row['subject']}", axis=1).unique()
            mapping = {combo: idx for idx, combo in enumerate(sorted(unique_combinations))}
            self.logger.info(f"从数据自动生成标签映射，共{len(mapping)}个类别")
        else:
            # Default mapping based on the actual data distribution.
            mapping = {
                "不违规_不违规": 0,
                "违规_偏见歧视": 1,
                "违规_淫秽色情": 2,
                "违规_财产隐私": 3,
                "违规_心理健康": 4,
                "违规_违法犯罪": 5,
                "违规_脏话侮辱": 6,
                "违规_身体伤害": 7,
                "违规_政治错误": 8,
                "违规_道德伦理": 9,
                "违规_变体词": 10
            }
            self.logger.info(f"使用默认标签映射，共{len(mapping)}个类别")

        return mapping

    def save_vectorizer(self, vectorizer, path: str) -> None:
        """Persist the feature vectorizer so prediction can reuse it.

        Parameters:
            vectorizer: fitted feature vectorizer
            path: destination path (taken from the path config)
        """
        # Only create the parent directory when the path actually has
        # one — os.makedirs('') raises FileNotFoundError for bare names.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        with open(path, 'wb') as f:
            pickle.dump(vectorizer, f)
        self.logger.info(f"向量化器已保存到: {path}")

    def _load_jsonl_data(self, file_path: str) -> pd.DataFrame:
        """Load a JSONL file (one JSON object per line) into a DataFrame."""
        data = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line:  # skip blank lines instead of crashing on them
                    data.append(json.loads(line))
        return pd.DataFrame(data)

    def _preprocess_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """Basic preprocessing: clean text and validate required columns.

        Raises:
            ValueError: when the 'label' or 'subject' column is missing.
        """
        # Work on a copy so the caller's DataFrame is left untouched.
        df = df.copy()

        # Normalize text: fill missing values and force string dtype.
        df['text'] = df['text'].fillna('').astype(str)

        if 'label' not in df.columns or 'subject' not in df.columns:
            raise ValueError("数据必须包含 'label' 和 'subject' 列")

        return df

    def _create_tfidf_vectorizer(self) -> TfidfVectorizer:
        """Build a TfidfVectorizer entirely from the YAML configuration.

        All parameters come from feature_extraction.tfidf in the model
        config (analyzer, ngram_range, max_features, min_df, max_df,
        stop_words) — nothing is hard-coded.
        """
        tfidf_config = self.models_config['feature_extraction']['tfidf']

        vectorizer = TfidfVectorizer(
            analyzer=tfidf_config['analyzer'],
            ngram_range=tuple(tfidf_config['ngram_range']),
            max_features=tfidf_config['max_features'],
            min_df=tfidf_config['min_df'],
            max_df=tfidf_config['max_df'],
            stop_words=tfidf_config['stop_words']
        )

        return vectorizer


class RfClassifier(BaseModel):
    """RandomForest classifier (BaseModel subclass) for the 11-class task.

    Implements the BaseModel interface:
    train, predict, save, load, evaluate, train_operator,
    predict_operator, train_from_df_operator, predict_from_df_operator,
    get_processor.
    """

    def __init__(self, config: Dict[str, Any]):
        # `config` is accepted for factory-signature compatibility; the
        # effective parameters come from the YAML configuration loaded by
        # BaseModel.__init__ into self.models_config.
        super().__init__()
        rf_config = self.models_config.get('random_forest', {})

        # max_depth: a configured value of 0 is treated as "no limit" (None).
        max_depth = rf_config.get('max_depth', None)
        if max_depth == 0:
            max_depth = None

        self.model = SklearnRF(
            n_estimators=rf_config.get('n_estimators', 100),
            max_depth=max_depth,
            random_state=self.models_config.get('random_state', None),  # global seed from the shared config
            min_samples_split=rf_config.get('min_samples_split', 2),
            min_samples_leaf=rf_config.get('min_samples_leaf', 1),
            n_jobs=rf_config.get('n_jobs', -1)
        )

        self.logger.info(f"RandomForest配置: n_estimators={self.model.n_estimators}, max_depth={self.model.max_depth}, random_state={self.model.random_state}")

    def get_processor(self, data_path: str, processor_type: str = "rf") -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Prepare train/test/dev TF-IDF features and labels from a JSONL file.

        Parameters:
            data_path: path to the JSONL data file
            processor_type: processor tag (kept for the BaseModel interface)
        Returns:
            (X_train_tfidf, X_test_tfidf, X_dev_tfidf, y_train, y_test, y_dev)
        """
        # 1. Load and preprocess the raw data.
        df = self._load_jsonl_data(data_path)
        df = self._preprocess_data(df)

        # 2. Build the label encoder/decoder.
        self.label_mapping = self.get_label_mapping(df)
        self.reverse_mapping = {v: k for k, v in self.label_mapping.items()}

        # 3. Encode labels as "label_subject" combinations.
        df['combined_label'] = df['label'] + '_' + df['subject']

        # Safety check: drop rows whose combined label is not in the mapping.
        unknown_labels = set(df['combined_label']) - set(self.label_mapping.keys())
        if unknown_labels:
            self.logger.warning(f"发现未知标签: {unknown_labels}")
            df = df[df['combined_label'].isin(self.label_mapping.keys())]
            self.logger.info(f"过滤后数据量: {len(df)}")

        df['target'] = df['combined_label'].map(self.label_mapping)  # encode labels

        # 4. Extract texts and numeric labels.
        texts = df['text'].tolist()
        labels = df['target'].values

        # 5. Stratified 8:1:1 split; ratios come from the shared config.
        split_ratios = self.models_config.get('data', {}).get('split_ratios', [0.8, 0.1, 0.1])
        test_ratio = split_ratios[1]
        dev_ratio = split_ratios[2]
        # First carve out the test set...
        X_temp, X_test, y_temp, y_test = train_test_split(
            texts, labels, test_size=test_ratio, random_state=self.models_config.get('random_state', None), stratify=labels
        )
        # ...then split dev from the remainder, re-normalizing the ratio.
        dev_ratio_adjusted = dev_ratio / (split_ratios[0] + dev_ratio)
        X_train, X_dev, y_train, y_dev = train_test_split(
            X_temp, y_temp, test_size=dev_ratio_adjusted, random_state=self.models_config.get('random_state', None), stratify=y_temp
        )

        # 6. TF-IDF features: fit on train only to avoid data leakage.
        self.vectorizer = self._create_tfidf_vectorizer()
        X_train_tfidf = self.vectorizer.fit_transform(X_train).toarray()
        X_test_tfidf = self.vectorizer.transform(X_test).toarray()
        X_dev_tfidf = self.vectorizer.transform(X_dev).toarray()

        self.logger.info(f"数据划分完成: Train={len(X_train)}, Test={len(X_test)}, Dev={len(X_dev)}")
        self.logger.info(f"特征维度: {X_train_tfidf.shape[1]}")
        self.logger.info(f"标签分布: {dict(zip(*np.unique(labels, return_counts=True)))}")

        return X_train_tfidf, X_test_tfidf, X_dev_tfidf, y_train, y_test, y_dev

    def train_operator(self, data_path: str, processor_type: str = 'ml', existing_model_path: str = None) -> Dict[str, Any]:
        """Full RandomForest training pipeline, optionally warm-started.

        When `existing_model_path` points to a saved model it is loaded
        first and then re-fit on the new data ("incremental" mode).
        """
        # 1. Incremental-training bootstrap: reuse a saved model if given.
        incremental = bool(existing_model_path) and os.path.exists(existing_model_path)
        if incremental:
            self.logger.info(f"🔄 发现已有模型，基于已有模型继续训练: {existing_model_path}")
            self.load(existing_model_path)
        else:
            self.logger.info("🆕 初始化新模型进行训练")

        # 2. Prepare the data splits.
        X_train, X_test, X_dev, y_train, y_test, y_dev = self.get_processor(data_path, processor_type)

        # 3. Fit exactly once. (The previous tqdm loop re-fit the model 10
        # times purely to animate a progress bar, multiplying training
        # cost by 10 without changing the final model.)
        self.logger.info("🎯 开始模型训练...")
        self.model.fit(X_train, y_train)
        self.is_trained = True

        # 4. Evaluate on all three splits.
        train_metrics = self.evaluate(X_train, y_train)
        test_metrics = self.evaluate(X_test, y_test)
        dev_metrics = self.evaluate(X_dev, y_dev)

        # 5. Assemble the results summary.
        training_type = 'incremental' if incremental else 'new'

        results = {
            'model_name': 'RandomForest',
            'training_type': training_type,
            'train_metrics': train_metrics,
            'test_metrics': test_metrics,
            'dev_metrics': dev_metrics,
            'label_mapping': self.label_mapping,
            'feature_dim': X_train.shape[1],
            'train_samples': len(X_train),
            'test_samples': len(X_test),
            'dev_samples': len(X_dev)
        }

        self.logger.info(f"✅ RandomForest训练流程完成 ({training_type})")
        self.logger.info(f"测试集F1分数: {test_metrics['f1_macro']:.4f}")

        return results

    def predict_operator(self, texts: List[str], processor_type: str = 'ml') -> np.ndarray:
        """Predict from raw texts using the fitted vectorizer and model.

        Raises:
            ValueError: if the model has not been trained or the
                vectorizer is missing.
        """
        if not self.is_trained or self.vectorizer is None:
            raise ValueError("模型尚未训练或向量化器不存在")

        # Vectorize with the training-time TF-IDF, then predict.
        X_tfidf = self.vectorizer.transform(texts).toarray()
        return self.model.predict(X_tfidf)

    def train(self, X_train: np.ndarray, y_train: np.ndarray,
              X_val: Optional[np.ndarray] = None, y_val: Optional[np.ndarray] = None) -> Dict[str, Any]:
        """Core fit + metric computation, used by the operator pipelines."""
        self.logger.info("训练RandomForest模型...")

        self.model.fit(X_train, y_train)  # the single core fit
        self.is_trained = True

        results = {
            'train_metrics': self.evaluate(X_train, y_train),
            'model_name': 'RandomForest'
        }

        if X_val is not None and y_val is not None:
            results['val_metrics'] = self.evaluate(X_val, y_val)

        return results

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Predict labels for already-vectorized features.

        Raises:
            ValueError: if the model has not been trained.
        """
        if not self.is_trained:
            raise ValueError("模型尚未训练")
        return self.model.predict(X)

    def save(self, path: str) -> None:
        """Pickle the model, vectorizer, label mappings and config to `path`."""
        # Only create the parent directory when the path actually has
        # one — os.makedirs('') raises FileNotFoundError for bare names.
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        model_data = {
            'model': self.model,
            'vectorizer': self.vectorizer,
            'label_mapping': self.label_mapping,
            'reverse_mapping': self.reverse_mapping,
            'model_config': self.models_config
        }

        with open(path, 'wb') as f:
            pickle.dump(model_data, f)

        self.logger.info(f"模型已保存到: {path}")

    def load(self, path: str) -> None:
        """Restore model, vectorizer and label mappings from a pickle file.

        NOTE(security): pickle.load can execute arbitrary code while
        deserializing — only load model files from trusted sources.
        """
        with open(path, 'rb') as f:
            model_data = pickle.load(f)

        self.model = model_data['model']
        self.vectorizer = model_data['vectorizer']
        self.label_mapping = model_data['label_mapping']
        self.reverse_mapping = model_data['reverse_mapping']
        self.is_trained = True

        self.logger.info(f"模型已从 {path} 加载")

    def train_from_df_operator(self, df: pd.DataFrame, processor_type: str = 'ml') -> Dict[str, Any]:
        """Full training pipeline from a DataFrame, reusing train_operator.

        The DataFrame is round-tripped through a temporary JSONL file so
        the existing path-based pipeline can be reused unchanged. (The
        duplicate `pass` stub that previously shadowed this method has
        been removed.)
        """
        temp_path = 'temp_training_data.jsonl'
        df.to_json(temp_path, orient='records', lines=True, force_ascii=False)

        try:
            return self.train_operator(temp_path, processor_type)
        finally:
            # Always clean up the temporary file, even on failure.
            if os.path.exists(temp_path):
                os.remove(temp_path)

    def predict_from_df_operator(self, df: pd.DataFrame, processor_type: str = 'ml') -> np.ndarray:
        """Prediction pipeline from a DataFrame, reusing predict_operator."""
        texts = df['text'].tolist()
        return self.predict_operator(texts, processor_type)


def create_model(model_type: str, config: Dict[str, Any], **kwargs) -> BaseModel:
    """Factory function: build a model instance for the given type name.

    Parameters:
        model_type: model identifier; currently only 'random_forest'.
        config: configuration dict forwarded to the model constructor.
    Raises:
        ValueError: for an unrecognized model type.
    """
    # Guard clause: reject unknown types up front.
    if model_type != 'random_forest':
        raise ValueError(f"Unknown model type: {model_type}")
    return RfClassifier(config)


if __name__ == "__main__":
    # Smoke test: build the RandomForest model from the YAML config files
    # and echo the effective configuration.
    config = load_models_config('config/models.yaml')

    rf_model = create_model('random_forest', config)
    loaded_config = rf_model.models_config
    print("✅ RandomForest模型创建成功")
    print(f"模型random_state: {rf_model.model.random_state}")
    print(f"模型n_estimators: {rf_model.model.n_estimators}")
    print(f"全局random_state配置: {loaded_config.get('random_state', 'N/A')}")
    print(f"标签映射数量: {len(loaded_config.get('label_mapping', {}))}")
    print(f"TF-IDF配置: {loaded_config.get('feature_extraction', {}).get('tfidf', {})}")
    print(f"训练配置: {loaded_config.get('training', {})}")
    print(f"模型配置: {loaded_config.get('random_forest', {})}")
