"""
TimeKAN数据构建与维度变化可视化演示
这个脚本演示了TimeKAN中数据从原始输入到最终输出的完整维度变化过程
"""

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime, timedelta

class DataDimensionVisualizer:
    """Collects the tensor shape(s) observed at each pipeline step and prints
    them afterwards as a step-by-step dimension-flow report."""

    def __init__(self):
        # Ordered records, one dict per logged step.
        self.dimension_log = []

    def log_dimension(self, step_name, tensor, description=""):
        """Record the shape of *tensor* under *step_name*.

        *tensor* may be a single tensor (stored under key 'shape') or a list
        of tensors (stored under key 'shapes'); 'type' records which case.
        """
        entry = {'step': step_name, 'description': description}
        if isinstance(tensor, list):
            entry['type'] = 'list'
            entry['shapes'] = [t.shape for t in tensor]
        else:
            entry['type'] = 'tensor'
            entry['shape'] = tensor.shape
        self.dimension_log.append(entry)

    def print_dimension_flow(self):
        """Pretty-print every recorded step in logging order."""
        print("=" * 80)
        print("TimeKAN 数据维度变化流程")
        print("=" * 80)

        for step_no, entry in enumerate(self.dimension_log, start=1):
            print(f"\n步骤 {step_no}: {entry['step']}")
            print("-" * 50)

            if entry['type'] == 'list':
                print(f"多级数据形状: {entry['shapes']}")
            else:
                print(f"数据形状: {entry['shape']}")

            if entry['description']:
                print(f"说明: {entry['description']}")

def simulate_timekan_data_flow():
    """Simulate TimeKAN's forward data flow, logging tensor shapes at each stage.

    Returns:
        Tensor of shape (batch_size, pred_len, enc_in) — the simulated prediction.

    NOTE(review): every layer here is freshly constructed with random weights
    and the input is torch.randn with no seed, so the numeric values are
    meaningless — only the dimension transformations are demonstrated.
    """
    
    # Create the shape logger used throughout the walkthrough.
    visualizer = DataDimensionVisualizer()
    
    # Simulated hyper-parameters (typical long-term-forecasting settings).
    batch_size = 32
    seq_len = 96
    pred_len = 24
    enc_in = 7  # number of input variables (features)
    d_model = 512
    down_sampling_layers = 2
    down_sampling_window = 2
    
    print("TimeKAN 数据流程模拟")
    print(f"批次大小: {batch_size}")
    print(f"输入序列长度: {seq_len}")
    print(f"预测长度: {pred_len}")
    print(f"输入特征数: {enc_in}")
    print(f"模型维度: {d_model}")
    print(f"下采样层数: {down_sampling_layers}")
    print(f"下采样窗口: {down_sampling_window}")
    
    # Step 1: raw input data.
    x_enc = torch.randn(batch_size, seq_len, enc_in)
    x_mark_enc = torch.randn(batch_size, seq_len, 4)  # time-covariate features (4 channels)
    
    visualizer.log_dimension("原始输入数据", x_enc, 
                           f"输入序列: (batch_size={batch_size}, seq_len={seq_len}, features={enc_in})")
    visualizer.log_dimension("时间特征", x_mark_enc,
                           f"时间特征: (batch_size={batch_size}, seq_len={seq_len}, time_features=4)")
    
    # Step 2: multi-level down-sampling.
    def multi_level_downsampling(x):
        """Build a pyramid of progressively average-pooled copies of x.

        Returns a list of (B, T_i, C) tensors with T_0 = T and
        T_{i+1} = T_i // down_sampling_window.
        """
        x = x.permute(0, 2, 1)  # (B, T, C) -> (B, C, T) so pooling acts on the time axis
        down_pool = nn.AvgPool1d(down_sampling_window)
        
        x_list = []
        x_ori = x
        
        # Level 0 keeps the original resolution.
        x_list.append(x_ori.permute(0, 2, 1))  # (B, T, C)
        
        # Each further level shrinks the temporal length by the pooling window.
        for i in range(down_sampling_layers):
            x_sampling = down_pool(x_ori)
            x_list.append(x_sampling.permute(0, 2, 1))
            x_ori = x_sampling
            
        return x_list
    
    x_enc_list = multi_level_downsampling(x_enc)
    visualizer.log_dimension("多级下采样", x_enc_list,
                           f"Level 0: 原始, Level 1: 1/{down_sampling_window}x, Level 2: 1/{down_sampling_window**2}x")
    
    # Step 3: normalization and reshaping.
    def normalize_and_reshape(x_list):
        """Flatten each (B, T, N) level into N independent univariate series (B*N, T, 1)."""
        reshaped_list = []
        for x in x_list:
            B, T, N = x.size()
            # Normalization is simulated — the actual statistics computation is skipped.
            x_reshaped = x.permute(0, 2, 1).contiguous().reshape(B * N, T, 1)
            reshaped_list.append(x_reshaped)
        return reshaped_list
    
    x_normalized_list = normalize_and_reshape(x_enc_list)
    visualizer.log_dimension("归一化重塑", x_normalized_list,
                           "将多变量转换为多个单变量序列: (B*N, T, 1)")
    
    # Step 4: data embedding.
    def data_embedding(x_list, d_model):
        """Project each univariate series from 1 channel up to d_model channels."""
        embedding_layer = nn.Linear(1, d_model)
        embedded_list = []
        for x in x_list:
            embedded = embedding_layer(x)
            embedded_list.append(embedded)
        return embedded_list
    
    x_embedded_list = data_embedding(x_normalized_list, d_model)
    visualizer.log_dimension("数据嵌入", x_embedded_list,
                           f"嵌入到高维空间: (B*N, T, d_model={d_model})")
    
    # Step 5: frequency processing (placeholder).
    def frequency_processing(x_list):
        """Stand-in for frequency decomposition/mixing; shapes pass through unchanged."""
        # Identity here — the real model performs frequency decomposition and mixing.
        return x_list
    
    x_freq_list = frequency_processing(x_embedded_list)
    visualizer.log_dimension("频率处理", x_freq_list,
                           "频率分解和混合，维度保持不变")
    
    # Step 6: temporal prediction.
    def time_prediction(x, pred_len):
        """Map the time axis from length T to pred_len with a linear layer."""
        B_N, T, D = x.size()
        # Linear predictor applied along the (permuted) time dimension.
        predict_layer = nn.Linear(T, pred_len)
        x_pred = predict_layer(x.permute(0, 2, 1)).permute(0, 2, 1)
        return x_pred
    
    # Only the first level (original resolution) feeds the predictor here.
    x_pred = time_prediction(x_freq_list[0], pred_len)
    visualizer.log_dimension("时间预测", x_pred,
                           f"从seq_len={seq_len}预测到pred_len={pred_len}")
    
    # Step 7: feature projection.
    def feature_projection(x, enc_in):
        """Collapse d_model back to 1 channel and restore the (B, T, N) layout."""
        B_N, T, D = x.size()
        projection_layer = nn.Linear(D, 1)
        x_proj = projection_layer(x)  # (B*N, T, 1)
        
        # Undo the earlier B*N flattening: recover the batch and feature axes.
        B = B_N // enc_in
        x_reshaped = x_proj.reshape(B, enc_in, T).permute(0, 2, 1)  # (B, T, N)
        return x_reshaped
    
    x_output = feature_projection(x_pred, enc_in)
    visualizer.log_dimension("特征投影", x_output,
                           f"投影并重塑回原始特征维度: (batch_size, pred_len, features)")
    
    # Step 8: de-normalization (simulated).
    # The real model rescales back to the original data range; the shape is unchanged.
    visualizer.log_dimension("反归一化", x_output,
                           "恢复原始数据尺度，维度不变")
    
    # Print the full dimension-change walkthrough.
    visualizer.print_dimension_flow()
    
    return x_output

def create_sample_dataset():
    """Build a synthetic ETT-style hourly dataset and demonstrate sample construction.

    Returns:
        df: DataFrame of shape (T, 7) with an hourly DatetimeIndex
            covering 2016-07-01 through 2018-06-30.
        seq_x: ndarray input window of shape (seq_len, 7).
        seq_y: ndarray target window of shape (label_len + pred_len, 7).
    """
    
    print("\n" + "=" * 80)
    print("数据集构建过程演示")
    print("=" * 80)
    
    # Simulate an ETT-style hourly dataset. Use 'h' rather than 'H': the
    # uppercase alias is deprecated and emits a FutureWarning on pandas >= 2.2.
    dates = pd.date_range(start='2016-07-01', end='2018-06-30', freq='h')
    n_features = 7
    
    # Reproducible random base data.
    np.random.seed(42)
    data = np.random.randn(len(dates), n_features)
    
    # Add a linear trend plus weekly seasonality. Both components are identical
    # for every feature, so compute them once and broadcast across columns
    # instead of recomputing them inside a per-feature loop.
    trend = np.linspace(0, 2, len(dates))
    seasonal = np.sin(2 * np.pi * np.arange(len(dates)) / (24 * 7))  # weekly period
    data += (trend + seasonal * 0.5)[:, None]
    
    # Assemble the DataFrame using the standard ETT column names.
    feature_names = ['HUFL', 'HULL', 'MUFL', 'MULL', 'LUFL', 'LULL', 'OT']
    df = pd.DataFrame(data, columns=feature_names, index=dates)
    
    print(f"数据集形状: {df.shape}")
    print(f"时间范围: {df.index[0]} 到 {df.index[-1]}")
    print(f"特征列: {list(df.columns)}")
    
    # Chronological 70% / 10% / 20% train / val / test split.
    total_len = len(df)
    train_len = int(total_len * 0.7)
    val_len = int(total_len * 0.1)
    test_len = total_len - train_len - val_len
    
    print(f"\n数据分割:")
    print(f"训练集: {train_len} 样本 ({train_len/total_len:.1%})")
    print(f"验证集: {val_len} 样本 ({val_len/total_len:.1%})")
    print(f"测试集: {test_len} 样本 ({test_len/total_len:.1%})")
    
    # Sliding-window parameters (Informer/ETT convention).
    seq_len = 96
    label_len = 48
    pred_len = 24
    
    print(f"\n序列构建参数:")
    print(f"输入序列长度 (seq_len): {seq_len}")
    print(f"标签序列长度 (label_len): {label_len}")
    print(f"预测序列长度 (pred_len): {pred_len}")
    
    # Number of full (input, target) windows that fit inside the training split.
    available_samples = train_len - seq_len - pred_len + 1
    print(f"训练集可生成样本数: {available_samples}")
    
    # Demonstrate one sample's index arithmetic: the target window overlaps the
    # input by label_len steps and extends pred_len steps beyond it.
    index = 100  # arbitrary sample index for the demonstration
    s_begin = index
    s_end = s_begin + seq_len
    r_begin = s_end - label_len
    r_end = r_begin + label_len + pred_len
    
    print(f"\n样本 {index} 构建:")
    print(f"输入序列范围: [{s_begin}:{s_end}] (时间: {df.index[s_begin]} 到 {df.index[s_end-1]})")
    print(f"目标序列范围: [{r_begin}:{r_end}] (时间: {df.index[r_begin]} 到 {df.index[r_end-1]})")
    
    seq_x = data[s_begin:s_end]
    seq_y = data[r_begin:r_end]
    
    print(f"输入序列形状: {seq_x.shape}")
    print(f"目标序列形状: {seq_y.shape}")
    
    return df, seq_x, seq_y

def visualize_data_flow():
    """Run the full demo and render a 2x2 summary figure.

    Builds the sample dataset, runs the simulated TimeKAN pipeline, then saves
    the figure to 'timekan_data_flow_visualization.png' and shows it.

    NOTE(review): axis titles/labels are Chinese; rendering them correctly
    depends on the local matplotlib font configuration — confirm a CJK-capable
    font is installed where this runs.
    """
    
    # Build the synthetic dataset and one (input, target) sample pair.
    df, seq_x, seq_y = create_sample_dataset()
    
    # Run the simulated TimeKAN pipeline to obtain the final prediction tensor.
    output = simulate_timekan_data_flow()
    
    # Lay out a 2x2 grid of summary plots.
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    fig.suptitle('TimeKAN 数据流程可视化', fontsize=16)
    
    # Panel 1 (top-left): first 500 hours of the first feature column.
    axes[0, 0].plot(df.index[:500], df.iloc[:500, 0])
    axes[0, 0].set_title('原始时间序列数据')
    axes[0, 0].set_xlabel('时间')
    axes[0, 0].set_ylabel('数值')
    axes[0, 0].grid(True)
    
    # Panel 2 (top-right): one 96-step input window, first two features.
    axes[0, 1].plot(seq_x[:, 0], label='特征1', alpha=0.7)
    axes[0, 1].plot(seq_x[:, 1], label='特征2', alpha=0.7)
    axes[0, 1].set_title('输入序列示例 (seq_len=96)')
    axes[0, 1].set_xlabel('时间步')
    axes[0, 1].set_ylabel('数值')
    axes[0, 1].legend()
    axes[0, 1].grid(True)
    
    # Panel 3 (bottom-left): multi-level down-sampling illustrated on a sine wave.
    x = np.arange(96)
    y1 = np.sin(x * 0.1)  # original resolution
    y2 = np.sin(x[::2] * 0.1)  # 2x down-sampled
    y3 = np.sin(x[::4] * 0.1)  # 4x down-sampled
    
    axes[1, 0].plot(x, y1, 'o-', label='Level 0 (原始)', markersize=3)
    axes[1, 0].plot(x[::2], y2, 's-', label='Level 1 (2x下采样)', markersize=4)
    axes[1, 0].plot(x[::4], y3, '^-', label='Level 2 (4x下采样)', markersize=5)
    axes[1, 0].set_title('多级下采样示意')
    axes[1, 0].set_xlabel('时间步')
    axes[1, 0].set_ylabel('数值')
    axes[1, 0].legend()
    axes[1, 0].grid(True)
    
    # Panel 4 (bottom-right): textual flow chart of the dimension changes.
    steps = ['输入', '下采样', '重塑', '嵌入', '频率处理', '预测', '投影', '输出']
    dimensions = [
        f'({32}, {96}, {7})',
        f'多级列表',
        f'({32*7}, T, 1)',
        f'({32*7}, T, {512})',
        f'保持不变',
        f'({32*7}, {24}, {512})',
        f'({32}, {24}, {7})',
        f'({32}, {24}, {7})'
    ]
    
    axes[1, 1].barh(range(len(steps)), [1]*len(steps), alpha=0.3)
    for i, (step, dim) in enumerate(zip(steps, dimensions)):
        axes[1, 1].text(0.5, i, f'{step}\n{dim}', ha='center', va='center', fontsize=8)
    
    axes[1, 1].set_yticks(range(len(steps)))
    axes[1, 1].set_yticklabels(steps)
    axes[1, 1].set_title('维度变化流程')
    axes[1, 1].set_xlabel('处理步骤')
    
    plt.tight_layout()
    plt.savefig('timekan_data_flow_visualization.png', dpi=300, bbox_inches='tight')
    plt.show()
    
    print(f"\n最终输出形状: {output.shape}")
    print("可视化图表已保存为 'timekan_data_flow_visualization.png'")

if __name__ == "__main__":
    # Run the complete data-flow demonstration (dataset build + simulated
    # pipeline + figure rendering).
    visualize_data_flow()
    
    # Print a plain-text recap of the eight pipeline stages demonstrated above.
    print("\n" + "=" * 80)
    print("总结")
    print("=" * 80)
    print("TimeKAN的数据构建和维度变化过程包括:")
    print("1. 原始数据加载和预处理")
    print("2. 滑动窗口序列构建")
    print("3. 多级下采样处理")
    print("4. 归一化和维度重塑")
    print("5. 数据嵌入到高维空间")
    print("6. 频率分解和混合")
    print("7. 时间维度预测")
    print("8. 特征投影和反归一化")
    print("\n这种设计使得TimeKAN能够:")
    print("- 处理多尺度时间模式")
    print("- 高效进行长期预测")
    print("- 保持特征间的独立性")
    print("- 实现频率感知的预测")