"""
TimeKAN (enhanced variant) - improved time-series forecasting.
Main improvements:
1. Attention mechanism to enrich feature representations
2. Residual connections and layer normalization for training stability
3. Adaptive frequency decomposition
4. Multi-scale convolutions to capture patterns at several temporal ranges
5. Feature-importance learning mechanism
6. Time-aware positional encoding
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Autoformer_EncDec import series_decomp
from layers.Embed import DataEmbedding_wo_pos
from layers.StandardNorm import Normalize
from layers.ChebyKANLayer import ChebyKANLinear
import math
import numpy as np


class TimeAwarePositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with a learnable global time-scale.

    The classic Transformer sin/cos table is precomputed for ``max_len``
    positions and added to the input, multiplied by a single learnable
    scale factor (initialized to 1, so the initial behavior is the plain
    sinusoidal encoding).
    """

    def __init__(self, d_model, max_len=5000):
        """
        Args:
            d_model: embedding width (odd values are supported).
            max_len: largest sequence length the table covers.
        """
        super().__init__()
        self.d_model = d_model

        # Precompute the (max_len, d_model) sin/cos table once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)

        # Geometric frequency progression, one frequency per pair of columns.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                           (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        # Bug fix: slice div_term so odd d_model works — there are only
        # d_model // 2 cosine columns, one fewer than len(div_term) when
        # d_model is odd. For even d_model the slice is a no-op.
        pe[:, 1::2] = torch.cos(position * div_term[:d_model // 2])

        self.register_buffer('pe', pe.unsqueeze(0))

        # Learnable scalar scaling the whole encoding.
        self.time_scale = nn.Parameter(torch.ones(1))

    def forward(self, x):
        """Add the (scaled) positional encoding to x.

        Args:
            x: tensor of shape (batch_size, seq_len, d_model); seq_len must
               not exceed max_len.

        Returns:
            x plus the scaled encoding, same shape as x.
        """
        seq_len = x.size(1)
        pos_encoding = self.pe[:, :seq_len] * self.time_scale
        return x + pos_encoding


class MultiHeadSelfAttention(nn.Module):
    """Standard multi-head self-attention with a post-norm residual connection.

    Returns both the attended output and the (post-dropout) attention weights.
    """

    def __init__(self, d_model, num_heads=8, dropout=0.1):
        super().__init__()
        assert d_model % num_heads == 0

        self.d_model = d_model
        self.num_heads = num_heads
        self.d_k = d_model // num_heads

        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)

        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, x, mask=None):
        """Attend over the sequence dimension of x.

        Args:
            x: input of shape (batch, seq_len, d_model).
            mask: optional attention mask; positions where mask == 0 are
                suppressed with a large negative logit.

        Returns:
            (output, attention_weights): output is (batch, seq_len, d_model),
            attention_weights is (batch, num_heads, seq_len, seq_len).
        """
        batch, length, width = x.size()
        shortcut = x

        def split_heads(proj):
            # (B, L, D) -> (B, H, L, d_k)
            return proj.view(batch, length, self.num_heads, self.d_k).transpose(1, 2)

        queries = split_heads(self.w_q(x))
        keys = split_heads(self.w_k(x))
        values = split_heads(self.w_v(x))

        # Scaled dot-product attention logits.
        logits = torch.matmul(queries, keys.transpose(-2, -1)) / math.sqrt(self.d_k)
        if mask is not None:
            logits = logits.masked_fill(mask == 0, -1e9)

        attn = self.dropout(F.softmax(logits, dim=-1))

        # Merge heads back into the model dimension.
        merged = torch.matmul(attn, values).transpose(1, 2).contiguous()
        merged = merged.view(batch, length, width)

        # Output projection, residual add, then LayerNorm (post-norm).
        return self.layer_norm(self.w_o(merged) + shortcut), attn


class AdaptiveFrequencyDecomp(nn.Module):
    """Splits multi-resolution features into residual high-frequency parts.

    Walking from the coarsest level up, each finer level has the
    frequency-interpolated coarser level subtracted from it, leaving its
    high-frequency remainder; the coarsest level passes through unchanged.
    """

    def __init__(self, configs):
        super().__init__()
        self.configs = configs

        # Learnable per-level gates shaping the interpolation filter.
        self.freq_weights = nn.Parameter(torch.ones(configs.down_sampling_layers + 1))
        # NOTE(review): appears unused in forward; kept so existing
        # checkpoints / state_dicts stay loadable.
        self.adaptive_kernel = nn.Parameter(torch.ones(configs.moving_avg))

    def forward(self, level_list):
        """Decompose the levels into high-frequency residuals.

        Args:
            level_list: list of (B, T_i, C) tensors ordered fine -> coarse.

        Returns:
            List in the same order: high-frequency residuals for the finer
            levels, and the untouched coarsest level last.
        """
        coarse_to_fine = list(reversed(level_list))
        n_levels = len(coarse_to_fine)

        current_low = coarse_to_fine[0]
        current_high = coarse_to_fine[1]
        outputs = [current_low]

        for idx in range(n_levels - 1):
            gate = torch.sigmoid(self.freq_weights[idx])
            src_len = self.configs.seq_len // (self.configs.down_sampling_window ** (self.configs.down_sampling_layers - idx))
            dst_len = self.configs.seq_len // (self.configs.down_sampling_window ** (self.configs.down_sampling_layers - idx - 1))

            upsampled = self.adaptive_frequency_interpolation(
                current_low.transpose(1, 2), src_len, dst_len, gate
            ).transpose(1, 2)

            # High-frequency remainder after removing the coarser content.
            outputs.append(current_high - upsampled)

            current_low = current_high
            if idx + 2 < n_levels:
                current_high = coarse_to_fine[idx + 2]

        outputs.reverse()
        return outputs

    def adaptive_frequency_interpolation(self, x, seq_len, target_len, weight):
        """Frequency-domain upsampling of x (B, C, seq_len) to target_len.

        A sigmoid ramp scaled by ``weight`` soft-filters the spectrum before
        zero-padding it to the target resolution; the inverse rFFT result is
        rescaled by seq_len / target_len to preserve amplitude.
        """
        spectrum = torch.fft.rfft(x, dim=2)

        # Soft low/high-frequency filter: sigmoid of a 0..1 ramp times the gate.
        ramp = torch.linspace(0, 1, spectrum.size(-1)).to(spectrum.device)
        spectrum = spectrum * torch.sigmoid(weight * ramp).unsqueeze(0).unsqueeze(0)

        padded = torch.zeros(
            [spectrum.size(0), spectrum.size(1), target_len // 2 + 1],
            dtype=spectrum.dtype
        ).to(spectrum.device)
        padded[:, :, :seq_len // 2 + 1] = spectrum

        return torch.fft.irfft(padded, dim=2) * (seq_len / target_len)


class MultiScaleConv(nn.Module):
    """Parallel depthwise temporal convolutions at several kernel sizes.

    Each branch is a depthwise Conv1d (groups=d_model) with GELU; branch
    outputs are concatenated channel-wise, fused back to d_model by a 1x1
    convolution, then combined with the input via a residual connection
    followed by LayerNorm.
    """

    def __init__(self, d_model, kernel_sizes=(3, 5, 7), dropout=0.1):
        """
        Args:
            d_model: feature width (also the channel count of each branch).
            kernel_sizes: odd kernel sizes; k // 2 padding keeps seq_len.
                (Tuple default fixes the mutable-default-argument idiom of
                the original; callers passing lists still work.)
            dropout: dropout rate applied after fusion.
        """
        super().__init__()
        # Depthwise branches: one conv per kernel size, no channel mixing.
        self.convs = nn.ModuleList([
            nn.Conv1d(d_model, d_model, kernel_size=k, padding=k // 2, groups=d_model)
            for k in kernel_sizes
        ])

        # 1x1 conv mixing the concatenated branch outputs back to d_model.
        self.fusion = nn.Conv1d(d_model * len(kernel_sizes), d_model, 1)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, x):
        """Apply multi-scale convolutions with a residual connection.

        Args:
            x: tensor of shape (batch_size, seq_len, d_model).

        Returns:
            Tensor of the same shape.
        """
        residual = x
        x = x.transpose(1, 2)  # (batch_size, d_model, seq_len)

        # Run every branch and concatenate along the channel axis.
        conv_outputs = [F.gelu(conv(x)) for conv in self.convs]
        fused = torch.cat(conv_outputs, dim=1)

        output = self.dropout(self.fusion(fused))
        output = output.transpose(1, 2)  # (batch_size, seq_len, d_model)
        return self.layer_norm(output + residual)


class FeatureImportanceLearning(nn.Module):
    """Squeeze-and-excitation style reweighting over the d_model features."""

    def __init__(self, d_model, reduction_ratio=16):
        super().__init__()
        self.global_pool = nn.AdaptiveAvgPool1d(1)
        self.fc1 = nn.Linear(d_model, d_model // reduction_ratio)
        self.fc2 = nn.Linear(d_model // reduction_ratio, d_model)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Reweight x per feature channel.

        Args:
            x: tensor of shape (batch_size, seq_len, d_model).

        Returns:
            (weighted_x, importance): the reweighted input (same shape as x)
            and the per-sample (batch_size, d_model) weights in (0, 1).
        """
        # Squeeze: average over the time axis -> (batch_size, d_model).
        summary = self.global_pool(x.transpose(1, 2)).squeeze(-1)

        # Excite: bottleneck MLP followed by a sigmoid gate.
        weights = self.sigmoid(self.fc2(F.relu(self.fc1(summary))))

        # Broadcast the gate over the sequence dimension.
        return x * weights.unsqueeze(1), weights


class EnhancedChebyKANLayer(nn.Module):
    """Chebyshev KAN linear layer wrapped with gating, dropout and LayerNorm."""

    def __init__(self, in_features, out_features, order, dropout=0.1):
        super().__init__()
        self.fc1 = ChebyKANLinear(in_features, out_features, order)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(out_features)

        # Sigmoid-gated linear path modulating the KAN output channel-wise.
        self.gate = nn.Linear(in_features, out_features)

    def forward(self, x):
        """Apply the gated KAN transform.

        Args:
            x: tensor of shape (B, N, in_features).

        Returns:
            Tensor of shape (B, N, out_features); the residual is added only
            when in_features == out_features.
        """
        B, N, C = x.shape
        shortcut = x

        # ChebyKANLinear expects a 2-D input, so flatten batch and sequence.
        transformed = self.fc1(x.reshape(B * N, C)).reshape(B, N, -1)

        # Gate computed from the (unflattened) input.
        gated = transformed * torch.sigmoid(self.gate(shortcut))

        dropped = self.dropout(gated)
        if dropped.size(-1) == shortcut.size(-1):
            return self.layer_norm(dropped + shortcut)
        return self.layer_norm(dropped)


class EnhancedM_KAN(nn.Module):
    """Fuses four parallel feature branches: KAN, multi-scale conv,
    self-attention and channel-importance gating."""

    def __init__(self, d_model, seq_len, order, dropout=0.1):
        super().__init__()
        # NOTE(review): seq_len is accepted for interface parity but unused here.

        # Enhanced Chebyshev-KAN channel mixer.
        self.channel_mixer = EnhancedChebyKANLayer(d_model, d_model, order, dropout)

        # Multi-scale depthwise convolutions.
        self.multi_scale_conv = MultiScaleConv(d_model, dropout=dropout)

        # Multi-head self-attention over the sequence.
        self.attention = MultiHeadSelfAttention(d_model, num_heads=8, dropout=dropout)

        # Per-channel importance gating.
        self.feature_importance = FeatureImportanceLearning(d_model)

        # Linear fusion of the two paired branch sums, then LayerNorm.
        self.fusion = nn.Linear(d_model * 2, d_model)
        self.final_norm = nn.LayerNorm(d_model)

    def forward(self, x):
        """Run all branches on x (B, L, d_model) and fuse with a residual.

        Returns:
            Tensor of the same shape as x.
        """
        shortcut = x

        kan_branch = self.channel_mixer(x)
        conv_branch = self.multi_scale_conv(x)
        attn_branch, _ = self.attention(x)           # attention weights discarded
        gated_branch, _ = self.feature_importance(x)  # importance weights discarded

        # Pair the branches, concatenate, and project back to d_model.
        stacked = torch.cat([kan_branch + attn_branch, conv_branch + gated_branch], dim=-1)
        fused = self.fusion(stacked)

        return self.final_norm(fused + shortcut)


class EnhancedFrequencyMixing(nn.Module):
    """Bottom-up frequency mixing across resolution levels.

    The coarsest (lowest-frequency) level is processed first; walking toward
    the finest level, each coarser result is upsampled in the frequency
    domain and blended into the next level with a learned, sigmoid-bounded
    weight.
    """
    
    def __init__(self, configs):
        super().__init__()
        self.configs = configs
        
        # Time-aware positional encoding applied to every level before mixing.
        self.pos_encoding = TimeAwarePositionalEncoding(configs.d_model)
        
        # Enhanced M_KAN block for the coarsest level (shortest sequence).
        self.front_block = EnhancedM_KAN(
            configs.d_model,
            self.configs.seq_len // (self.configs.down_sampling_window ** self.configs.down_sampling_layers),
            order=configs.begin_order,
            dropout=configs.dropout
        )
        
        # One enhanced M_KAN block per finer level; the Chebyshev order grows
        # with the level index so finer levels use higher-order expansions.
        self.front_blocks = nn.ModuleList([
            EnhancedM_KAN(
                configs.d_model,
                self.configs.seq_len // (self.configs.down_sampling_window ** (self.configs.down_sampling_layers-i-1)),
                order=i+configs.begin_order+1,
                dropout=configs.dropout
            )
            for i in range(configs.down_sampling_layers)
        ])
        
        # Learnable per-level blending weights (passed through sigmoid in forward).
        self.level_weights = nn.Parameter(torch.ones(configs.down_sampling_layers + 1))
        
    def forward(self, level_list):
        """Mix the multi-level features from coarse to fine.

        Args:
            level_list: list of (B, T_i, d_model) tensors ordered fine -> coarse
                (index 0 is the full-resolution level).

        Returns:
            List of mixed tensors in the same order as ``level_list``.
        """
        level_list_reverse = level_list.copy()
        level_list_reverse.reverse()
        
        out_low = level_list_reverse[0]
        out_high = level_list_reverse[1]
        
        # Encode, then process the coarsest level on its own.
        out_low = self.pos_encoding(out_low)
        
        out_low = self.front_block(out_low)
        out_level_list = [out_low]
        
        for i in range(len(level_list_reverse) - 1):
            # Encode and process the current finer level.
            out_high = self.pos_encoding(out_high)
            
            out_high = self.front_blocks[i](out_high)
            
            # Upsample the coarser result to this level's length via
            # zero-padded rFFT interpolation, then blend it in with a
            # sigmoid-bounded learned weight.
            weight = torch.sigmoid(self.level_weights[i])
            out_high_res = self.frequency_interpolation(
                out_low.transpose(1, 2),
                self.configs.seq_len // (self.configs.down_sampling_window ** (self.configs.down_sampling_layers-i)),
                self.configs.seq_len // (self.configs.down_sampling_window ** (self.configs.down_sampling_layers-i-1))
            ).transpose(1, 2)
            
            # Weighted fusion; the mixed level becomes the "low" input of the
            # next iteration.
            out_high = out_high + weight * out_high_res
            out_low = out_high
            
            if i + 2 <= len(level_list_reverse) - 1:
                out_high = level_list_reverse[i + 2]
                
            out_level_list.append(out_low)
            
        out_level_list.reverse()
        return out_level_list
    
    def frequency_interpolation(self, x, seq_len, target_len):
        """Upsample x (B, C, seq_len) to target_len by zero-padding its rFFT
        spectrum; the output is rescaled by seq_len / target_len to keep the
        amplitude comparable.

        NOTE(review): irfft is called without n=target_len, so an odd
        target_len would yield target_len - 1 samples — confirm that all
        level lengths are even.
        """
        len_ratio = seq_len / target_len
        x_fft = torch.fft.rfft(x, dim=2)
        out_fft = torch.zeros([x_fft.size(0), x_fft.size(1), target_len//2+1], 
                             dtype=x_fft.dtype).to(x_fft.device)
        out_fft[:, :, :seq_len//2+1] = x_fft
        out = torch.fft.irfft(out_fft, dim=2)
        out = out * len_ratio
        return out


class EnhancedModel(nn.Module):
    """Enhanced TimeKAN model for long-term time-series forecasting.

    Per forward pass: multi-level average-pool downsampling -> per-level
    instance normalization -> channel-independent embedding -> e_layers of
    (AdaptiveFrequencyDecomp, EnhancedFrequencyMixing) -> prediction /
    projection heads -> channel-wise output refinement -> de-normalization.
    """
    
    def __init__(self, configs):
        super().__init__()
        self.configs = configs
        self.task_name = configs.task_name
        self.seq_len = configs.seq_len
        self.label_len = configs.label_len
        self.pred_len = configs.pred_len
        self.down_sampling_window = configs.down_sampling_window
        self.channel_independence = configs.channel_independence
        
        # One adaptive frequency-decomposition block per encoder layer.
        self.res_blocks = nn.ModuleList([
            AdaptiveFrequencyDecomp(configs) for _ in range(configs.e_layers)
        ])
        
        # One frequency-mixing block per encoder layer, applied after decomp.
        self.add_blocks = nn.ModuleList([
            EnhancedFrequencyMixing(configs) for _ in range(configs.e_layers)
        ])
        
        # Trend/seasonal decomposition (constructed but not used in forecast()).
        self.preprocess = series_decomp(configs.moving_avg)
        self.enc_in = configs.enc_in
        self.use_future_temporal_feature = configs.use_future_temporal_feature
        
        # Value embedding over a single channel: channel independence means
        # each variate is embedded separately with in_channels=1.
        self.enc_embedding = DataEmbedding_wo_pos(1, configs.d_model, configs.embed, configs.freq, configs.dropout)
        
        # Number of stacked (decomp, mixing) layers.
        self.layer = configs.e_layers
        
        # Per-level reversible instance normalization; use_norm == 0 disables it.
        self.normalize_layers = nn.ModuleList([
            Normalize(self.configs.enc_in, affine=True, non_norm=True if configs.use_norm == 0 else False)
            for i in range(configs.down_sampling_layers + 1)
        ])
        
        # Prediction head mapping seq_len -> pred_len along the time axis.
        self.predict_layer = nn.Sequential(
            nn.Linear(configs.seq_len, configs.seq_len // 2),
            nn.GELU(),
            nn.Dropout(configs.dropout),
            nn.Linear(configs.seq_len // 2, configs.pred_len)
        )
        
        # Projection head collapsing d_model -> 1 value per time step.
        self.projection_layer = nn.Sequential(
            nn.Linear(configs.d_model, configs.d_model // 2),
            nn.GELU(),
            nn.Dropout(configs.dropout),
            nn.Linear(configs.d_model // 2, 1)
        )
        
        # Final channel-wise refinement of the forecast.
        self.output_adjustment = nn.Sequential(
            nn.Linear(configs.c_out, configs.c_out * 2),
            nn.GELU(),
            nn.Dropout(configs.dropout),
            nn.Linear(configs.c_out * 2, configs.c_out)
        )
    
    def forecast(self, x_enc):
        """Produce a forecast from the raw encoder input.

        Args:
            x_enc: tensor of shape (B, seq_len, enc_in).

        Returns:
            Tensor of shape (B, pred_len, c_out).

        NOTE(review): the reshape below assumes configs.c_out == enc_in
        (channel independence) — confirm for multivariate configs.
        """
        # Build the multi-resolution pyramid of inputs (finest first).
        x_enc = self.__multi_level_process_inputs(x_enc)
        x_list = []
        
        for i, x in enumerate(x_enc):
            B, T, N = x.size()
            x = self.normalize_layers[i](x, 'norm')
            # Fold channels into the batch dim: (B, T, N) -> (B*N, T, 1).
            x = x.permute(0, 2, 1).contiguous().reshape(B * N, T, 1)
            x_list.append(x)
        
        # Embed each level independently (no temporal marks passed).
        enc_out_list = []
        for i, x in enumerate(x_list):
            enc_out = self.enc_embedding(x, None)
            enc_out_list.append(enc_out)
        
        # Alternate frequency decomposition and mixing for every layer.
        for i in range(self.layer):
            enc_out_list = self.res_blocks[i](enc_out_list)
            enc_out_list = self.add_blocks[i](enc_out_list)
        
        # Predict from the finest level only. B and N still hold the values
        # from the loop above (they are identical across levels).
        dec_out = enc_out_list[0]
        dec_out = self.predict_layer(dec_out.permute(0, 2, 1)).permute(0, 2, 1)
        dec_out = self.projection_layer(dec_out).reshape(B, self.configs.c_out, self.pred_len).permute(0, 2, 1).contiguous()
        
        # Channel-wise output refinement.
        dec_out = self.output_adjustment(dec_out)
        
        # Undo the instance normalization of the full-resolution level.
        dec_out = self.normalize_layers[0](dec_out, 'denorm')
        
        return dec_out
    
    def __multi_level_process_inputs(self, x_enc):
        """Return [x_enc] plus down_sampling_layers progressively avg-pooled
        copies, each shaped (B, T_i, N). AvgPool1d is stateless, so building
        it per call is harmless (though it could be cached in __init__)."""
        down_pool = nn.AvgPool1d(self.configs.down_sampling_window)
        x_enc = x_enc.permute(0, 2, 1)
        x_enc_ori = x_enc
        x_enc_sampling_list = []
        x_enc_sampling_list.append(x_enc.permute(0, 2, 1))
        
        for i in range(self.configs.down_sampling_layers):
            x_enc_sampling = down_pool(x_enc_ori)
            x_enc_sampling_list.append(x_enc_sampling.permute(0, 2, 1))
            x_enc_ori = x_enc_sampling
            
        return x_enc_sampling_list
    
    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        """Dispatch on task_name; only 'long_term_forecast' is implemented.
        x_mark_enc / x_dec / x_mark_dec / mask are accepted for interface
        compatibility but unused here."""
        if self.task_name == 'long_term_forecast':
            dec_out = self.forecast(x_enc)
            return dec_out
        else:
            raise ValueError('Other tasks not implemented yet')


# Backward-compatibility alias so existing code importing `Model` keeps working.
Model = EnhancedModel