"""
Predict模块 - 预测编排器
基于interface_design.md设计，负责预测流程编排
支持11分类任务的RandomForest预测，包括标签解码
"""

import numpy as np
import pandas as pd
import json
import os
import pickle
from typing import Dict, Any, List, Optional, Union
import logging
from datetime import datetime

import models
from models import create_model


class PredictionOperator:
    """Prediction orchestrator — responsible only for orchestrating the prediction flow.

    Responsibilities: load trained models (with an in-memory cache), run
    single / batch / file-based predictions, decode numeric predictions back
    to label names via the model's reverse mapping, and persist results under
    the configured results directory.
    """
    
    def __init__(self, config: Dict[str, Any], paths: Dict[str, Any]):
        """
        Args:
            config: Model configuration mapping (e.g. parsed from config/models.yaml).
            paths: Directory-layout mapping with 'models', 'results' and 'logs'
                sections (e.g. parsed from config/paths.yaml); values are
                nested dicts, accessed via ``paths.get(section, {}).get(key)``.
        """
        self.config = config
        self.paths = paths
        self.logger = self._setup_logger()
        self.loaded_models = {}  # cache of already-loaded models, keyed by model_type
        
        # Ensure the results/logs directory tree exists up front.
        self._create_directories()
    
    def _setup_logger(self) -> logging.Logger:
        """Configure and return the 'PredictionOperator' logger.

        Attaches a UTF-8 file handler ('<logs>/prediction.log') plus a console
        handler, but only once — repeated instantiation reuses the existing
        handlers instead of duplicating log output.
        """
        logger = logging.getLogger('PredictionOperator')
        logger.setLevel(logging.INFO)
        
        # Avoid attaching duplicate handlers on repeated construction.
        if not logger.handlers:
            # File handler; create the log directory first.
            log_dir = self.paths.get('logs', {}).get('base_path', 'logs')
            os.makedirs(log_dir, exist_ok=True)
            
            file_handler = logging.FileHandler(
                os.path.join(log_dir, 'prediction.log'),
                encoding='utf-8'
            )
            file_handler.setLevel(logging.INFO)
            
            # Console handler mirrors the same records.
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            
            # Shared record format for both handlers.
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            file_handler.setFormatter(formatter)
            console_handler.setFormatter(formatter)
            
            # Attach handlers.
            logger.addHandler(file_handler)
            logger.addHandler(console_handler)
        
        return logger
    
    def _create_directories(self) -> None:
        """Create the directory structure required for results and logs."""
        dirs_to_create = [
            self.paths.get('results', {}).get('base_path', 'results'),
            self.paths.get('logs', {}).get('base_path', 'logs'),
            os.path.join(self.paths.get('results', {}).get('base_path', 'results'), 'predictions')
        ]
        
        for dir_path in dirs_to_create:
            os.makedirs(dir_path, exist_ok=True)
    
    def predict_single_model_operator(self, model_type: str, texts: List[str], 
                                    return_probabilities: bool = False) -> Dict[str, Any]:
        """Run prediction with a single model — orchestration only.

        Args:
            model_type: Model identifier; only 'random_forest' is supported.
            texts: Raw input texts to classify.
            return_probabilities: Also return per-class probabilities.

        Returns:
            Result dict (see _predict_random_forest_operator).

        Raises:
            ValueError: For any unsupported model type.
        """
        self.logger.info(f"🔮 开始使用{model_type}模型进行预测")
        
        if model_type == 'random_forest':
            return self._predict_random_forest_operator(texts, return_probabilities)
        else:
            raise ValueError(f"暂不支持的模型类型: {model_type}")
    
    def _predict_random_forest_operator(self, texts: List[str], 
                                      return_probabilities: bool = False) -> Dict[str, Any]:
        """Orchestrate one RandomForest prediction run.

        Returns:
            Dict with 'predictions' (numeric), 'decoded_predictions' (label
            names when a reverse mapping exists), 'prediction_time',
            'num_samples', 'timestamp', and optionally 'probabilities'.
        """
        # 1. Load (or fetch cached) model.
        model = self._load_model('random_forest')
        
        # 2. Run the prediction and time it.
        start_time = datetime.now()
        predictions = model.predict_operator(texts)
        prediction_time = (datetime.now() - start_time).total_seconds()
        
        # 3. Decode numeric predictions back to label names when a reverse
        #    mapping is available.
        # NOTE(review): predictions is assumed to be a numpy array
        # (``.tolist()`` below) — confirm against models.predict_operator.
        if hasattr(model, 'reverse_mapping') and model.reverse_mapping:
            decoded_predictions = [model.reverse_mapping[pred] for pred in predictions]
        else:
            decoded_predictions = predictions.tolist()
        
        # 4. Optionally compute per-class probabilities.
        probabilities = None
        if return_probabilities and hasattr(model.model, 'predict_proba'):
            # Transform the raw texts into TF-IDF features first.
            X_tfidf = model.vectorizer.transform(texts).toarray()
            probabilities = model.model.predict_proba(X_tfidf).tolist()
        
        # 5. Assemble the result payload.
        results = {
            'model_type': 'random_forest',
            'predictions': predictions.tolist(),
            'decoded_predictions': decoded_predictions,
            'prediction_time': prediction_time,
            'num_samples': len(texts),
            'timestamp': datetime.now().isoformat()
        }
        
        if probabilities is not None:
            results['probabilities'] = probabilities
        
        # 6. Log a summary of this run.
        self._log_prediction_result('RandomForest', len(texts), prediction_time)
        
        return results
    
    def predict_batch_texts_operator(self, model_type: str, texts: List[str], 
                                   batch_size: int = 100,
                                   return_probabilities: bool = False) -> Dict[str, Any]:
        """Batch text prediction — splits large inputs into fixed-size batches.

        Args:
            model_type: Model identifier passed through to the single-model path.
            texts: All input texts.
            batch_size: Number of texts per prediction batch.
            return_probabilities: Also collect per-class probabilities.

        Returns:
            Aggregated result dict across all batches, including total
            'prediction_time', 'batch_size' and 'num_batches'.
        """
        self.logger.info(f"🔮 开始批量预测，文本数量: {len(texts)}, 批次大小: {batch_size}")
        
        all_predictions = []
        all_decoded_predictions = []
        all_probabilities = [] if return_probabilities else None
        
        total_time = 0
        
        # Process the input in consecutive slices of batch_size.
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i+batch_size]
            self.logger.info(f"处理批次 {i//batch_size + 1}/{(len(texts)-1)//batch_size + 1}")
            
            # Predict the current batch.
            batch_results = self.predict_single_model_operator(
                model_type, batch_texts, return_probabilities
            )
            
            all_predictions.extend(batch_results['predictions'])
            all_decoded_predictions.extend(batch_results['decoded_predictions'])
            total_time += batch_results['prediction_time']
            
            if return_probabilities and 'probabilities' in batch_results:
                all_probabilities.extend(batch_results['probabilities'])
        
        # Assemble the aggregated result.
        results = {
            'model_type': model_type,
            'predictions': all_predictions,
            'decoded_predictions': all_decoded_predictions,
            'prediction_time': total_time,
            'num_samples': len(texts),
            'batch_size': batch_size,
            'num_batches': (len(texts) - 1) // batch_size + 1,
            'timestamp': datetime.now().isoformat()
        }
        
        if all_probabilities is not None:
            results['probabilities'] = all_probabilities
        
        self.logger.info(f"✅ 批量预测完成，总用时: {total_time:.2f}秒")
        
        return results
    
    def predict_from_file_operator(self, model_type: str, file_path: str, 
                                 text_column: str = 'text',
                                 return_probabilities: bool = False,
                                 save_results: bool = True) -> Dict[str, Any]:
        """Predict from a data file (.jsonl, .csv or .json).

        Loads the file into a DataFrame, predicts on ``text_column`` (NaNs
        become empty strings), attaches a copy of the original data with
        'predicted_id' / 'predicted_label' (and per-class probability)
        columns under 'result_dataframe', and optionally persists results.

        Raises:
            ValueError: Unsupported file extension or missing text column.
        """
        self.logger.info(f"🔮 从文件进行预测: {file_path}")
        
        # 1. Load the data according to the file extension.
        if file_path.endswith('.jsonl'):
            data = []
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    data.append(json.loads(line.strip()))
            df = pd.DataFrame(data)
        elif file_path.endswith('.csv'):
            df = pd.read_csv(file_path)
        elif file_path.endswith('.json'):
            df = pd.read_json(file_path)
        else:
            raise ValueError(f"不支持的文件格式: {file_path}")
        
        # 2. Extract the text column.
        if text_column not in df.columns:
            raise ValueError(f"文件中不存在列: {text_column}")
        
        texts = df[text_column].fillna('').astype(str).tolist()
        
        # 3. Run the (batched) prediction.
        results = self.predict_batch_texts_operator(
            model_type, texts, return_probabilities=return_probabilities
        )
        
        # 4. Record provenance of the source data.
        results['source_file'] = file_path
        results['text_column'] = text_column
        results['original_data_shape'] = df.shape
        
        # 5. Build a result DataFrame alongside the original columns.
        result_df = df.copy()
        result_df['predicted_id'] = results['predictions']
        result_df['predicted_label'] = results['decoded_predictions']
        
        if 'probabilities' in results:
            # One probability column per class index.
            prob_array = np.array(results['probabilities'])
            for i in range(prob_array.shape[1]):
                result_df[f'prob_class_{i}'] = prob_array[:, i]
        
        results['result_dataframe'] = result_df
        
        # 6. Persist results when requested.
        if save_results:
            self._save_prediction_results(results, model_type)
        
        return results
    
    def predict_ensemble_operator(self, texts: List[str], model_types: List[str]) -> Dict[str, Dict[str, Any]]:
        """Ensemble prediction — orchestration only.

        Runs each model independently; a failing model contributes an
        {'error': ...} entry instead of aborting the whole run. An
        'ensemble_info' entry summarizes model counts and the timestamp.
        """
        self.logger.info(f"🔮 开始集成预测，模型: {model_types}")
        
        ensemble_results = {}
        
        for model_type in model_types:
            try:
                results = self.predict_single_model_operator(model_type, texts)
                ensemble_results[model_type] = {
                    'predictions': results['predictions'],
                    'decoded_predictions': results['decoded_predictions'],
                    'prediction_time': results['prediction_time']
                }
                self.logger.info(f"✅ {model_type}预测完成")
            except Exception as e:
                self.logger.error(f"❌ {model_type}预测失败: {str(e)}")
                ensemble_results[model_type] = {'error': str(e)}
        
        # Append ensemble-level summary statistics.
        ensemble_results['ensemble_info'] = {
            'num_models': len(model_types),
            'successful_models': len([k for k, v in ensemble_results.items() 
                                    if k != 'ensemble_info' and 'error' not in v]),
            'timestamp': datetime.now().isoformat()
        }
        
        return ensemble_results
    
    def predict_best_model_operator(self, texts: List[str], model_types: List[str]) -> np.ndarray:
        """Predict using the best model.

        NOTE(review): the best model should be chosen from prior evaluation
        results; currently hard-coded to 'random_forest' and ``model_types``
        is ignored — confirm intended selection logic.
        """
        best_model = 'random_forest'  # could be read from config or evaluation results
        
        self.logger.info(f"🏆 使用最佳模型进行预测: {best_model}")
        
        results = self.predict_single_model_operator(best_model, texts)
        return np.array(results['predictions'])
    
    def predict_json_operator(self, json_path: str, model_type: str) -> Dict[str, Any]:
        """Predict from a JSON file; thin wrapper over predict_from_file_operator."""
        return self.predict_from_file_operator(model_type, json_path)
    
    def _load_model(self, model_type: str):
        """Load a model from disk, caching it in ``self.loaded_models``.

        Raises:
            FileNotFoundError: When the model file does not exist at the
                configured (or default 'models/<type>_model.pkl') path.
        """
        if model_type in self.loaded_models:
            return self.loaded_models[model_type]
        
        self.logger.info(f"🔄 加载{model_type}模型")
        
        # Resolve the model path from configuration, with a default fallback.
        model_path = self.paths.get('models', {}).get(model_type, f'models/{model_type}_model.pkl')
        
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")
        
        # Instantiate the model wrapper and load its persisted state.
        model = create_model(model_type, self.config)
        model.load(model_path)
        
        # Cache for subsequent calls.
        self.loaded_models[model_type] = model
        
        self.logger.info(f"✅ {model_type}模型加载成功")
        
        return model
    
    def _log_prediction_result(self, model_name: str, num_samples: int, prediction_time: float) -> None:
        """Log a summary of a prediction run.

        NOTE(review): raises ZeroDivisionError when num_samples == 0 —
        callers currently always pass len(texts); confirm empty input
        is impossible upstream.
        """
        self.logger.info(f"📊 {model_name}预测完成:")
        self.logger.info(f"   样本数量: {num_samples}")
        self.logger.info(f"   预测用时: {prediction_time:.4f}秒")
        self.logger.info(f"   平均每样本用时: {prediction_time/num_samples:.6f}秒")
    
    def _save_prediction_results(self, results: Dict[str, Any], model_type: str) -> None:
        """Persist prediction results as timestamped JSON (and CSV if a DataFrame exists)."""
        # Save the JSON result.
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        json_path = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            'predictions',
            f'{model_type}_predictions_{timestamp}.json'
        )
        
        # Prepare serializable data (drop the DataFrame — not JSON-serializable).
        save_data = results.copy()
        if 'result_dataframe' in save_data:
            del save_data['result_dataframe']
        
        with open(json_path, 'w', encoding='utf-8') as f:
            json.dump(save_data, f, ensure_ascii=False, indent=2)
        
        # Save the CSV result when a DataFrame is present.
        if 'result_dataframe' in results:
            csv_path = os.path.join(
                self.paths.get('results', {}).get('base_path', 'results'),
                'predictions',
                f'{model_type}_predictions_{timestamp}.csv'
            )
            results['result_dataframe'].to_csv(csv_path, index=False, encoding='utf-8')
            self.logger.info(f"预测结果CSV已保存到: {csv_path}")
        
        self.logger.info(f"预测结果JSON已保存到: {json_path}")
    
    def get_model_info_operator(self, model_type: str) -> Dict[str, Any]:
        """Return metadata about a (loaded) model: training state, labels, features."""
        model = self._load_model(model_type)
        
        info = {
            'model_type': model_type,
            'is_trained': model.is_trained,
            'model_class': model.__class__.__name__,
            'config': model.config.get('models', {}).get(model_type, {})
        }
        
        # Label-mapping information, when available.
        # NOTE(review): the reverse_mapping check below tests only existence,
        # not truthiness — a present-but-None reverse_mapping would raise.
        if hasattr(model, 'label_mapping') and model.label_mapping:
            info['label_mapping'] = model.label_mapping
            info['num_classes'] = len(model.label_mapping)
            info['class_names'] = list(model.reverse_mapping.values()) if hasattr(model, 'reverse_mapping') else []
        
        # Feature/vectorizer information, when available.
        if hasattr(model, 'vectorizer') and model.vectorizer:
            info['feature_dim'] = len(model.vectorizer.get_feature_names_out()) if hasattr(model.vectorizer, 'get_feature_names_out') else 'unknown'
            info['vectorizer_type'] = model.vectorizer.__class__.__name__
        
        return info


def create_prediction_operator(config_path: str = 'config/models.yaml', 
                             paths_path: str = 'config/paths.yaml') -> PredictionOperator:
    """Factory: build a PredictionOperator from YAML configuration files.

    Args:
        config_path: Path to the model-configuration YAML (mandatory).
        paths_path: Path to the directory-layout YAML; a built-in default
            layout is used when this file is missing.

    Returns:
        A configured PredictionOperator instance.

    Raises:
        FileNotFoundError: When the model configuration file is missing.
    """
    # PyYAML is only needed by this factory, so import it once, locally.
    import yaml

    # Model configuration is mandatory; guard against an empty YAML file,
    # where safe_load returns None instead of a dict.
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f) or {}
    except FileNotFoundError:
        raise FileNotFoundError(f"配置文件不存在: {config_path}，请确保config/models.yaml存在")
    
    # Path configuration is optional — fall back to the default layout.
    try:
        with open(paths_path, 'r', encoding='utf-8') as f:
            paths = yaml.safe_load(f) or {}
    except FileNotFoundError:
        paths = {
            'models': {'base_path': 'models', 'random_forest': 'models/random_forest_model.pkl'},
            'results': {'base_path': 'results'},
            'logs': {'base_path': 'logs'}
        }
    
    return PredictionOperator(config, paths)


if __name__ == "__main__":
    # 测试预测编排器 - 使用YAML配置文件
    try:
        predictor = create_prediction_operator()
        print("✅ 预测编排器创建成功（使用YAML配置）")
        
        # 测试模型信息获取
        try:
            info = predictor.get_model_info_operator('random_forest')
            print(f"✅ 模型信息获取成功: {info}")
        except Exception as e:
            print(f"❌ 模型信息获取失败: {str(e)}")
            print("提示: 请先训练模型")
            
    except FileNotFoundError as e:
        print(f"❌ {e}")
        print("请确保在项目根目录运行，且config/models.yaml文件存在")
