"""
数据集划分模块

负责将处理好的数据划分为训练集、验证集和测试集，包括:
- 基于时间的数据集划分
- 序列数据的创建
- 数据格式转换
"""

import os
import logging
import pandas as pd
import numpy as np
from typing import Dict, List, Optional, Union, Tuple, Any

# 配置日志
logger = logging.getLogger(__name__)


class DatasetSplitter:
    """Dataset splitter: partitions per-stock time-series data into
    training, validation and test subsets, builds fixed-length sequences
    for sequence models (e.g. LSTM), and converts them to NumPy arrays."""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the dataset splitter.

        Args:
            config: Configuration dict; recognized keys are 'train_ratio',
                'valid_ratio' and 'test_ratio' (defaults 0.7 / 0.15 / 0.15).
                Ratios that do not sum to 1 are normalized to sum to 1.

        Raises:
            ValueError: If the configured ratios sum to a non-positive value
                (normalization would otherwise divide by zero or flip signs).
        """
        self.config = config
        self.train_ratio = config.get('train_ratio', 0.7)
        self.valid_ratio = config.get('valid_ratio', 0.15)
        self.test_ratio = config.get('test_ratio', 0.15)

        # Normalize so the three ratios sum to exactly 1.
        total = self.train_ratio + self.valid_ratio + self.test_ratio
        if total <= 0:
            # Fix: the original code divided by `total` unconditionally,
            # raising ZeroDivisionError for all-zero ratios and silently
            # producing negative ratios for a negative sum.
            raise ValueError(f"划分比例之和必须为正数: {total}")
        if abs(total - 1.0) > 1e-6:
            logger.warning(f"划分比例之和不为1 ({total})，将进行归一化")
            self.train_ratio /= total
            self.valid_ratio /= total
            self.test_ratio /= total

        logger.info(f"初始化数据集分割器，训练集比例: {self.train_ratio:.2f}, 验证集比例: {self.valid_ratio:.2f}, 测试集比例: {self.test_ratio:.2f}")

    def time_based_split(self, processed_data: Dict[str, Dict[str, Dict[str, Any]]]) -> Tuple[Dict[str, Dict[str, Dict[str, Any]]], Dict[str, Dict[str, Dict[str, Any]]], Dict[str, Dict[str, Dict[str, Any]]]]:
        """
        Chronological train/validation/test split, performed per stock.

        Args:
            processed_data: Mapping of stock code -> {date -> feature dict}.

        Returns:
            Tuple of (train, valid, test) dicts with the same nesting as the
            input. For each stock the earliest dates go to the training set,
            the middle ones to validation and the latest to test, so no
            future data leaks into earlier subsets.
        """
        logger.info("进行基于时间的数据集划分")

        train_data, valid_data, test_data = {}, {}, {}

        for code, data in processed_data.items():
            # Sort lexicographically by date key (assumes keys are
            # ISO-like date strings so lexical order == chronological order
            # — TODO confirm against the upstream data producer).
            sorted_dates = sorted(data.keys())
            n_samples = len(sorted_dates)

            if n_samples == 0:
                logger.warning(f"股票 {code} 没有数据，跳过划分")
                continue

            # Cut points derived from the configured ratios.
            train_end = int(n_samples * self.train_ratio)
            valid_end = train_end + int(n_samples * self.valid_ratio)

            # Guarantee at least one training sample and one validation
            # candidate; slicing tolerates indices past the end, in which
            # case the later subsets are simply empty.
            train_end = max(1, train_end)
            valid_end = max(train_end + 1, valid_end)

            # Partition the ordered dates into the three subsets.
            train_dates = sorted_dates[:train_end]
            valid_dates = sorted_dates[train_end:valid_end]
            test_dates = sorted_dates[valid_end:]

            # Materialize each subset as its own date -> features mapping.
            train_data[code] = {date: data[date] for date in train_dates}
            valid_data[code] = {date: data[date] for date in valid_dates}
            test_data[code] = {date: data[date] for date in test_dates}

            logger.debug(f"股票 {code}: 总样本数 {n_samples}, 训练集 {len(train_dates)}, 验证集 {len(valid_dates)}, 测试集 {len(test_dates)}")

        logger.info(f"数据集划分完成，处理了 {len(processed_data)} 只股票的数据")
        return train_data, valid_data, test_data

    def create_sequences(self, split_data: Dict[str, Dict[str, Dict[str, Any]]], sequence_length: int = 10, target_feature: str = 'target_class') -> Dict[str, List[Tuple[List[Dict[str, float]], Any]]]:
        """
        Build (feature-sequence, target) pairs for sequence models.

        Args:
            split_data: One split subset: stock code -> {date -> features}.
            sequence_length: Number of consecutive time steps per input
                sequence (default 10).
            target_feature: Feature key read from the step immediately
                following each sequence to serve as the label.

        Returns:
            Mapping of stock code -> list of (feature sequence, target)
            tuples. Pairs whose target is missing or NaN are dropped;
            stocks with too few dates or no valid pairs are skipped.
        """
        logger.info(f"开始创建序列数据，序列长度: {sequence_length}, 目标特征: {target_feature}")

        sequence_data = {}

        for code, data in split_data.items():
            sorted_dates = sorted(data.keys())

            # Need sequence_length inputs plus one extra step for the label.
            if len(sorted_dates) <= sequence_length:
                logger.warning(f"股票 {code} 数据点数 ({len(sorted_dates)}) 不足以创建长度为 {sequence_length} 的序列，跳过")
                continue

            sequences = []
            for i in range(len(sorted_dates) - sequence_length):
                # Window of sequence_length inputs plus the target date.
                seq_dates = sorted_dates[i:i + sequence_length + 1]

                # Feature dicts for the input time steps.
                x_seq = [data[date] for date in seq_dates[:-1]]

                # Label comes from the step right after the window.
                y = data[seq_dates[-1]].get(target_feature)

                # Keep only pairs with a usable (present, non-NaN) label.
                if y is not None and not pd.isna(y):
                    sequences.append((x_seq, y))

            if sequences:
                sequence_data[code] = sequences
                logger.debug(f"为股票 {code} 创建了 {len(sequences)} 个序列")
            else:
                logger.warning(f"股票 {code} 没有创建任何有效序列")

        logger.info(f"序列创建完成，为 {len(sequence_data)} 只股票创建了序列数据")
        return sequence_data

    def flatten_sequences(self, sequence_data: Dict[str, List[Tuple[List[Dict[str, float]], Any]]]) -> List[Tuple[List[Dict[str, float]], Any]]:
        """
        Flatten per-stock sequence lists into one combined list.

        Args:
            sequence_data: Mapping of stock code -> sequence list, as
                produced by :meth:`create_sequences`.

        Returns:
            Single list containing every (feature sequence, target) pair,
            in the dict's iteration order. Stock identity is not retained.
        """
        logger.info("开始扁平化序列数据")

        flattened_sequences: List[Tuple[List[Dict[str, float]], Any]] = []
        # Iterate values only — the stock code was unused in the original loop.
        for sequences in sequence_data.values():
            flattened_sequences.extend(sequences)

        logger.info(f"序列扁平化完成，总共有 {len(flattened_sequences)} 个序列")
        return flattened_sequences

    def convert_to_numpy(self, sequences: List[Tuple[List[Dict[str, float]], Any]], feature_list: Optional[List[str]] = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        Convert sequence pairs to dense NumPy arrays for model training.

        Args:
            sequences: List of (feature sequence, target) pairs.
            feature_list: Feature keys (and column order) to extract. If
                None, the sorted keys of the first time step of the first
                sequence are used.

        Returns:
            Tuple (X, y) where X has shape
            (n_sequences, sequence_length, n_features) and y has shape
            (n_sequences,). Features absent from a time step are filled
            with 0.0. Empty input yields two empty arrays.

        Note:
            Assumes every sequence has the same length as the first one —
            guaranteed when the input comes from :meth:`create_sequences`
            with a fixed sequence_length.
        """
        logger.info("开始将序列数据转换为NumPy数组")

        if not sequences:
            logger.warning("没有序列数据，返回空数组")
            return np.array([]), np.array([])

        # Derive the feature schema from the first time step if not given.
        if feature_list is None:
            feature_list = sorted(list(sequences[0][0][0].keys()))
            logger.info(f"未指定特征列表，将使用所有可用特征: {len(feature_list)}个")

        # All sequences are assumed to share the first sequence's length.
        sequence_length = len(sequences[0][0])

        # Pre-size the output arrays and fill them in place.
        X = np.zeros((len(sequences), sequence_length, len(feature_list)))
        y = np.zeros(len(sequences))

        for i, (seq, target) in enumerate(sequences):
            for j, time_step in enumerate(seq):
                for k, feature in enumerate(feature_list):
                    # Missing features default to 0.0.
                    X[i, j, k] = time_step.get(feature, 0.0)
            y[i] = target

        logger.info(f"转换完成，X形状: {X.shape}, y形状: {y.shape}")
        return X, y

    def prepare_training_data(self, processed_data: Dict[str, Dict[str, Dict[str, Any]]], sequence_length: int = 10, target_feature: str = 'target_class', feature_list: Optional[List[str]] = None) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
        """
        Run the full pipeline: split, build sequences, convert to arrays.

        Args:
            processed_data: Mapping of stock code -> {date -> feature dict}.
            sequence_length: Time steps per input sequence (default 10).
            target_feature: Feature key used as the prediction target.
            feature_list: Feature columns to extract; if None it is derived
                once from the training data so all three subsets share the
                same column order.

        Returns:
            ((X_train, y_train), (X_valid, y_valid), (X_test, y_test)).
        """
        logger.info(f"开始准备训练数据集，序列长度: {sequence_length}, 目标特征: {target_feature}")

        # 1. Chronological split per stock.
        train_data, valid_data, test_data = self.time_based_split(processed_data)

        # 2. Build sequences for each subset.
        train_sequences = self.create_sequences(train_data, sequence_length, target_feature)
        valid_sequences = self.create_sequences(valid_data, sequence_length, target_feature)
        test_sequences = self.create_sequences(test_data, sequence_length, target_feature)

        # 3. Merge per-stock sequences into flat lists.
        flat_train_sequences = self.flatten_sequences(train_sequences)
        flat_valid_sequences = self.flatten_sequences(valid_sequences)
        flat_test_sequences = self.flatten_sequences(test_sequences)

        # Derive the feature list from the training data (not per subset)
        # so the three arrays have identical, aligned feature columns.
        if feature_list is None and flat_train_sequences:
            feature_list = sorted(list(flat_train_sequences[0][0][0].keys()))
            logger.info(f"从训练数据中提取特征列表: {len(feature_list)}个特征")

        # 4. Convert each subset to NumPy arrays.
        X_train, y_train = self.convert_to_numpy(flat_train_sequences, feature_list)
        X_valid, y_valid = self.convert_to_numpy(flat_valid_sequences, feature_list)
        X_test, y_test = self.convert_to_numpy(flat_test_sequences, feature_list)

        logger.info(f"训练数据准备完成，训练集: {X_train.shape}, 验证集: {X_valid.shape}, 测试集: {X_test.shape}")
        return (X_train, y_train), (X_valid, y_valid), (X_test, y_test)

    def save_datasets(self, datasets: Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]], output_path: str = './data/prepared') -> Dict[str, Any]:
        """
        Persist the three dataset splits as .npy files.

        Args:
            datasets: ((X_train, y_train), (X_valid, y_valid),
                (X_test, y_test)) as returned by
                :meth:`prepare_training_data`.
            output_path: Output directory (created if missing),
                default './data/prepared'.

        Returns:
            Dict with the output path ('data_path') and the X shapes of the
            three subsets ('shapes').
        """
        logger.info(f"开始保存数据集到 {output_path}")

        (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = datasets

        os.makedirs(output_path, exist_ok=True)

        # os.path.join instead of manual "/" concatenation keeps the paths
        # correct across platforms.
        arrays = {
            'X_train': X_train,
            'y_train': y_train,
            'X_valid': X_valid,
            'y_valid': y_valid,
            'X_test': X_test,
            'y_test': y_test,
        }
        for name, array in arrays.items():
            np.save(os.path.join(output_path, f"{name}.npy"), array)

        logger.info("数据集保存完成")

        # Report where the data went and how big each split is.
        return {
            'data_path': output_path,
            'shapes': {
                'train': X_train.shape,
                'valid': X_valid.shape,
                'test': X_test.shape
            }
        }