"""
数据准备主控模块

整合数据获取、特征工程和数据集划分模块，提供完整的数据准备流水线
"""

import os
import logging
import json
import numpy as np
from typing import Dict, List, Optional, Union, Tuple, Any
from datetime import datetime

from src.data.data_integrator import DataIntegrator
from src.features.feature_engineering import FeatureEngineer
from src.data.dataset_splitter import DatasetSplitter

# Configure logging.
# The log directory must exist BEFORE the FileHandler is constructed,
# otherwise importing this module on a fresh checkout raises
# FileNotFoundError (the directory was previously created only later,
# inside DataPreparationController.__init__).
os.makedirs("logs", exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("logs/data_preparation.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class DataPreparationController:
    """Master controller for data preparation.

    Orchestrates the full pipeline: data integration, feature
    engineering, and train/valid/test dataset splitting.
    """

    def __init__(self, config_path: str):
        """
        Initialize the controller and its pipeline components.

        Args:
            config_path: Path to the JSON configuration file.
        """
        # Ensure the log directory exists before any file-based logging.
        os.makedirs("logs", exist_ok=True)

        self.config = self._load_config(config_path)
        self.integrator = DataIntegrator(self.config)
        self.feature_engineer = FeatureEngineer(self.config)
        self.dataset_splitter = DatasetSplitter(self.config)

        logger.info("初始化数据准备主控器")

    @staticmethod
    def _default_config() -> Dict[str, Any]:
        """Return the built-in fallback configuration.

        Used when the configuration file cannot be read or parsed.
        """
        return {
            "price_api_key": "",
            "volume_api_key": "",
            "sentiment_model_path": "",
            "db_connection_string": "",
            "default_stock_codes": ["AAPL", "MSFT", "GOOG", "AMZN", "TSLA"],
            "default_start_date": "2020-01-01",
            "default_end_date": "2023-12-31",
            "train_ratio": 0.7,
            "valid_ratio": 0.15,
            "test_ratio": 0.15,
            "sequence_length": 10,
            "prepared_data_path": "./data/prepared",
            "feature_engineering": {
                "normalization_method": "standard",
                "n_selected_features": 20,
                "lag_periods": [1, 3, 5, 7],
                "prediction_horizon": 5,
                "price_change_threshold": 0.02
            }
        }

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """
        Load the JSON configuration file.

        Falls back to the built-in default configuration when the file
        cannot be opened or is not valid JSON.

        Args:
            config_path: Path to the configuration file.

        Returns:
            The configuration dictionary.
        """
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
        except (OSError, json.JSONDecodeError) as e:
            # Narrow catch: only I/O and parse failures trigger the
            # fallback; programming errors still propagate.
            logger.error(f"加载配置文件失败: {str(e)}")
            return self._default_config()
        logger.info(f"加载配置文件: {config_path}")
        return config

    def prepare_data_pipeline(self, stock_codes: Optional[List[str]] = None, start_date: Optional[str] = None, end_date: Optional[str] = None) -> Dict[str, Any]:
        """
        Run the complete data preparation pipeline.

        Steps: integrate raw data, run feature engineering (preprocess,
        normalize, select, lag, target creation), split into
        train/valid/test sets, and persist the result.

        Args:
            stock_codes: Stock codes to process; defaults to the
                configured ``default_stock_codes``.
            start_date: Start date (YYYY-MM-DD); defaults to the
                configured ``default_start_date``.
            end_date: End date (YYYY-MM-DD); defaults to the configured
                ``default_end_date``.

        Returns:
            Dictionary with saved-data path and shape information, as
            produced by ``DatasetSplitter.save_datasets``.
        """
        # Track wall-clock duration of the whole pipeline.
        start_time = datetime.now()
        logger.info("开始数据准备流程")

        # Fall back to configured defaults for any unspecified argument.
        if stock_codes is None:
            stock_codes = self.config.get('default_stock_codes', [])
        if start_date is None:
            start_date = self.config.get('default_start_date', '2020-01-01')
        if end_date is None:
            end_date = self.config.get('default_end_date', '2023-12-31')

        # Feature-engineering parameters.
        fe_config = self.config.get('feature_engineering', {})
        normalization_method = fe_config.get('normalization_method', 'standard')
        n_selected_features = fe_config.get('n_selected_features', 20)
        lag_periods = fe_config.get('lag_periods', [1, 3, 5, 7])
        prediction_horizon = fe_config.get('prediction_horizon', 5)
        price_change_threshold = fe_config.get('price_change_threshold', 0.02)

        # Step 1: integrate raw data from all sources.
        logger.info(f"步骤1: 整合数据 - {len(stock_codes)}支股票, 从{start_date}到{end_date}")
        integrated_data = self.integrator.integrate_all_data(stock_codes, start_date, end_date)

        # Step 2: feature engineering, applied as a fixed sequence of stages.
        logger.info("步骤2: 执行特征工程")
        logger.info("2.1: 特征预处理")
        processed_data = self.feature_engineer.preprocess_features(integrated_data)

        logger.info(f"2.2: 特征归一化 (方法: {normalization_method})")
        normalized_data = self.feature_engineer.normalize_features(processed_data, method=normalization_method)

        logger.info(f"2.3: 特征选择 (保留特征数: {n_selected_features})")
        selected_features_data = self.feature_engineer.select_features(normalized_data, n_features=n_selected_features)

        logger.info(f"2.4: 创建滞后特征 (滞后周期: {lag_periods})")
        lagged_data = self.feature_engineer.create_lagged_features(selected_features_data, lag_periods=lag_periods)

        logger.info(f"2.5: 创建目标变量 (预测周期: {prediction_horizon}天, 价格变化阈值: {price_change_threshold})")
        data_with_target = self.feature_engineer.create_target_variable(
            lagged_data, prediction_horizon=prediction_horizon, threshold=price_change_threshold
        )

        # Step 3: split into train / validation / test sets.
        logger.info("步骤3: 划分训练、验证和测试集")
        sequence_length = self.config.get('sequence_length', 10)
        datasets = self.dataset_splitter.prepare_training_data(
            data_with_target, sequence_length=sequence_length
        )

        # Step 4: persist the prepared datasets.
        logger.info("步骤4: 保存准备好的数据")
        output_path = self.config.get('prepared_data_path', './data/prepared')
        result = self.dataset_splitter.save_datasets(datasets, output_path=output_path)

        # Report total elapsed time.
        end_time = datetime.now()
        elapsed_time = end_time - start_time
        logger.info(f"数据准备流程完成，总用时: {elapsed_time}")

        # Log dataset sizes; assumes `datasets` is a 3-tuple of (X, y)
        # pairs as returned by prepare_training_data.
        (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = datasets
        logger.info(f"数据统计: 训练集 {X_train.shape[0]} 条, 验证集 {X_valid.shape[0]} 条, 测试集 {X_test.shape[0]} 条")

        return result

    def run(self) -> Dict[str, Any]:
        """
        Run the pipeline with the defaults from the configuration file.

        Returns:
            Dictionary with saved-data path and shape information.
        """
        logger.info("使用默认参数运行数据准备流水线")
        return self.prepare_data_pipeline()


if __name__ == "__main__":
    # Ensure required directories exist before anything touches them.
    os.makedirs("data/prepared", exist_ok=True)
    os.makedirs("config", exist_ok=True)

    # Create a default configuration file on first run so the pipeline
    # can be launched without any manual setup.
    default_config_path = "config/data_preparation_config.json"
    if not os.path.exists(default_config_path):
        default_config = {
            "price_api_key": "",
            "volume_api_key": "",
            "sentiment_model_path": "",
            "db_connection_string": "",
            "default_stock_codes": ["AAPL", "MSFT", "GOOG", "AMZN", "TSLA"],
            "default_start_date": "2020-01-01",
            "default_end_date": "2023-12-31",
            "train_ratio": 0.7,
            "valid_ratio": 0.15,
            "test_ratio": 0.15,
            "sequence_length": 10,
            "prepared_data_path": "./data/prepared",
            "feature_engineering": {
                "normalization_method": "standard",
                "n_selected_features": 20,
                "lag_periods": [1, 3, 5, 7],
                "prediction_horizon": 5,
                "price_change_threshold": 0.02
            }
        }
        # ensure_ascii=False keeps any non-ASCII values readable in the
        # human-editable config file instead of \uXXXX escapes.
        with open(default_config_path, 'w', encoding='utf-8') as f:
            json.dump(default_config, f, indent=2, ensure_ascii=False)
        print(f"已创建默认配置文件: {default_config_path}")

    # Instantiate the controller and run the full pipeline.
    controller = DataPreparationController(default_config_path)
    result = controller.run()

    print(f"数据已准备完成，保存在: {result['data_path']}")
    print(f"数据形状: {result['shapes']}")

    # Warn when the resulting training split is empty.
    if result['shapes']['train'][0] == 0:
        print("警告: 训练集为空，请检查数据源和参数配置")