import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Encoder, EncoderLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding_inverted
import numpy as np


class CNNFeatureExtractor(nn.Module):
    """Multi-scale 1D-CNN feature extractor.

    Runs several parallel Conv1d branches with different kernel sizes over the
    raw multivariate series, mean-pools each branch over the time axis, and
    projects the concatenated pooled features to ``d_model``.

    Input:  [Batch, Seq_len, Channels]
    Output: [Batch, d_model] (one global feature vector per sample)
    """

    def __init__(self, enc_in, d_model, kernel_sizes=(3, 5, 7), num_filters=32, dropout=0.1):
        """
        Args:
            enc_in: number of input channels (variates).
            d_model: output embedding dimension.
            kernel_sizes: kernel size for each parallel branch. Odd sizes keep
                the sequence length unchanged with the padding used below.
            num_filters: output channels of each conv branch.
            dropout: dropout probability applied inside each branch.
        """
        super(CNNFeatureExtractor, self).__init__()
        # Copy to a tuple: an immutable default avoids the shared
        # mutable-default-argument pitfall.
        kernel_sizes = tuple(kernel_sizes)
        self.convs = nn.ModuleList()

        # One branch per kernel size to capture local patterns at
        # multiple temporal scales.
        for kernel_size in kernel_sizes:
            padding = (kernel_size - 1) // 2  # "same" padding for odd kernels
            self.convs.append(
                nn.Sequential(
                    nn.Conv1d(enc_in, num_filters, kernel_size=kernel_size, padding=padding),
                    nn.BatchNorm1d(num_filters),
                    nn.ReLU(),
                    nn.Dropout(dropout)
                )
            )

        # Project the concatenated per-branch features to d_model.
        self.projection = nn.Linear(len(kernel_sizes) * num_filters, d_model)

    def forward(self, x):
        """Extract a global multi-scale feature. x: [B, L, C] -> [B, d_model]."""
        # Conv1d expects channels-first: [B, C, L].
        x_conv = x.permute(0, 2, 1)

        # Mean-pool each branch over time directly on the conv layout
        # (dim=2 is the sequence axis), avoiding an intermediate permute.
        pooled = [conv(x_conv).mean(dim=2) for conv in self.convs]  # each [B, num_filters]

        # Concatenate branch features: [B, num_filters * n_branches].
        combined = torch.cat(pooled, dim=1)

        # Project to the model dimension: [B, d_model].
        return self.projection(combined)


class FeatureFusionModule(nn.Module):
    """
    特征融合模块，合并Transformer和CNN特征
    """

    def __init__(self, d_model, dropout=0.1):
        super(FeatureFusionModule, self).__init__()
        self.fusion_gate = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.Sigmoid()
        )
        self.fusion_proj = nn.Linear(d_model * 2, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, transformer_feat, cnn_feat):
        # transformer_feat: [B, L, d_model]
        # cnn_feat: [B, d_model]

        # 扩展CNN特征以匹配序列长度
        cnn_feat_expanded = cnn_feat.unsqueeze(1).expand(-1, transformer_feat.size(1), -1)  # [B, L, d_model]

        # 计算融合门控权重
        combined = torch.cat([transformer_feat, cnn_feat_expanded], dim=-1)  # [B, L, 2*d_model]
        gate = self.fusion_gate(combined)  # [B, L, d_model]

        # 加权融合
        weighted_transformer = transformer_feat * gate
        weighted_cnn = cnn_feat_expanded * (1 - gate)

        # 合并特征
        fused = self.dropout(self.fusion_proj(torch.cat([weighted_transformer, weighted_cnn], dim=-1)))

        # 残差连接和层归一化
        output = self.layer_norm(transformer_feat + fused)
        return output


class Model(nn.Module):
    """
    iTransformer-CNN hybrid architecture.

    An inverted-transformer encoder (one token per variate) is paired with a
    multi-scale 1D-CNN branch over the raw series; a gated fusion module merges
    the two streams before the task-specific output head.
    """

    def __init__(self, configs):
        super(Model, self).__init__()
        self.task_name = configs.task_name
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len

        # iTransformer embedding: each variate's whole series becomes one token.
        self.enc_embedding = DataEmbedding_inverted(configs.seq_len, configs.d_model, configs.embed, configs.freq,
                                                    configs.dropout)

        # Transformer encoder stack over variate tokens.
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(False, configs.factor, attention_dropout=configs.dropout,
                                      output_attention=False), configs.d_model, configs.n_heads),
                    configs.d_model,
                    configs.d_ff,
                    dropout=configs.dropout,
                    activation=configs.activation
                ) for _ in range(configs.e_layers)
            ],
            norm_layer=torch.nn.LayerNorm(configs.d_model)
        )

        # CNN branch over the raw input channels.
        self.cnn_extractor = CNNFeatureExtractor(
            enc_in=configs.enc_in,
            d_model=configs.d_model,
            kernel_sizes=[3, 5, 7],
            num_filters=32,
            dropout=configs.dropout
        )

        # Gated fusion of the transformer and CNN streams.
        self.fusion_module = FeatureFusionModule(
            d_model=configs.d_model,
            dropout=configs.dropout
        )

        # Task-specific output heads.
        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':
            self.projection = nn.Linear(configs.d_model, configs.pred_len, bias=True)
        if self.task_name == 'imputation':
            self.projection = nn.Linear(configs.d_model, configs.seq_len, bias=True)
        if self.task_name == 'anomaly_detection':
            self.projection = nn.Linear(configs.d_model, configs.seq_len, bias=True)
        if self.task_name == 'classification':
            self.act = F.gelu
            self.dropout = nn.Dropout(configs.dropout)
            self.projection = nn.Linear(configs.d_model * configs.enc_in, configs.num_class)

    def _fused_encoding(self, x_enc, x_mark_enc):
        """Shared trunk: inverted embedding -> encoder, plus the CNN branch on
        the raw (already normalized) input, merged by the gated fusion module.

        Returns [B, N (+ time-mark tokens, if any), d_model].
        """
        enc_out = self.enc_embedding(x_enc, x_mark_enc)
        enc_out, _ = self.encoder(enc_out, attn_mask=None)
        cnn_features = self.cnn_extractor(x_enc)
        return self.fusion_module(enc_out, cnn_features)

    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        """Forecast the next ``pred_len`` steps. Returns [B, pred_len, N]."""
        # Non-stationary normalization (per series, over the time axis).
        means = x_enc.mean(1, keepdim=True).detach()
        x_enc = x_enc - means
        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc /= stdev

        _, _, N = x_enc.shape

        fused_features = self._fused_encoding(x_enc, x_mark_enc)

        # Head maps d_model -> pred_len per token; keep only the first N
        # (variate) tokens, dropping any time-mark tokens.
        dec_out = self.projection(fused_features).permute(0, 2, 1)[:, :, :N]

        # De-normalization back to the original scale.
        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
        return dec_out

    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
        """Reconstruct the series for masked entries (mask == 1 marks observed
        values). Returns [B, seq_len, N]."""
        # Mask-aware normalization: statistics from observed entries only.
        means = torch.sum(x_enc, dim=1) / torch.sum(mask == 1, dim=1)
        means = means.unsqueeze(1).detach()
        x_enc = x_enc - means
        x_enc = x_enc.masked_fill(mask == 0, 0)
        stdev = torch.sqrt(torch.sum(x_enc * x_enc, dim=1) / torch.sum(mask == 1, dim=1) + 1e-5)
        stdev = stdev.unsqueeze(1).detach()
        x_enc /= stdev

        _, L, N = x_enc.shape

        fused_features = self._fused_encoding(x_enc, x_mark_enc)
        dec_out = self.projection(fused_features).permute(0, 2, 1)[:, :, :N]

        # De-normalization.
        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, L, 1))
        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, L, 1))
        return dec_out

    def anomaly_detection(self, x_enc):
        """Reconstruct the input series (no time marks). Returns [B, seq_len, N]."""
        means = x_enc.mean(1, keepdim=True).detach()
        x_enc = x_enc - means
        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc /= stdev

        _, L, N = x_enc.shape

        fused_features = self._fused_encoding(x_enc, None)
        dec_out = self.projection(fused_features).permute(0, 2, 1)[:, :, :N]

        # De-normalization.
        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, L, 1))
        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, L, 1))
        return dec_out

    def classification(self, x_enc, x_mark_enc):
        """Classify the whole series. Returns logits [B, num_class]."""
        fused_features = self._fused_encoding(x_enc, None)
        output = self.act(fused_features)
        output = self.dropout(output)
        # Flatten the variate tokens: [B, N * d_model]. NOTE(review): assumes
        # the encoder emits exactly enc_in tokens here (x_mark is None), so the
        # flattened size matches the d_model * enc_in head — confirm.
        output = output.reshape(output.shape[0], -1)
        return self.projection(output)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        if self.task_name == 'long_term_forecast' or self.task_name == 'short_term_forecast':
            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)
            return dec_out[:, -self.pred_len:, :]  # [B, L, D]
        if self.task_name == 'imputation':
            dec_out = self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)
            return dec_out  # [B, L, D]
        if self.task_name == 'anomaly_detection':
            dec_out = self.anomaly_detection(x_enc)
            return dec_out  # [B, L, D]
        if self.task_name == 'classification':
            dec_out = self.classification(x_enc, x_mark_enc)
            return dec_out  # [B, N]
        return None