import torch
import torch.nn as nn
import torch.nn.functional as F
from layers.Transformer_EncDec import Encoder, EncoderLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding_inverted
import numpy as np

import math
class AttentionFusion(nn.Module):
    """Fuse two feature streams with single-head scaled dot-product attention.

    The two inputs are concatenated on the feature axis, projected to
    queries, keys and values of width ``d_model``, and the attended values
    are returned. Intended as a drop-in alternative to a plain linear fusion.
    """

    def __init__(self, d_model):
        super().__init__()
        self.query = nn.Linear(d_model * 2, d_model)
        self.key = nn.Linear(d_model * 2, d_model)
        self.value = nn.Linear(d_model * 2, d_model)

    def forward(self, transformer_out, cnn_out):
        # Concatenate the streams, then attend over the token axis.
        merged = torch.cat((transformer_out, cnn_out), dim=-1)
        q = self.query(merged)
        k = self.key(merged)
        v = self.value(merged)
        scale = math.sqrt(q.size(-1))
        scores = torch.matmul(q, k.transpose(-2, -1)) / scale
        attn = F.softmax(scores, dim=-1)
        return torch.matmul(attn, v)
class DynamicTanh(nn.Module):
    """Learnable tanh normalization: y = tanh(alpha * x) * weight + bias.

    Used here as a drop-in replacement for LayerNorm. ``alpha`` is a single
    learnable scalar; ``weight``/``bias`` are per-feature affine parameters
    (initialized to ones/zeros).
    """

    def __init__(self, normalized_shape, channels_last=True, alpha_init_value=0.5):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.alpha_init_value = alpha_init_value
        self.channels_last = channels_last
        self.alpha = nn.Parameter(alpha_init_value * torch.ones(1))
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))

    def forward(self, x):
        squashed = torch.tanh(self.alpha * x)
        if not self.channels_last:
            # Channels-first layout: broadcast affine params over (H, W)-style dims.
            return squashed * self.weight[:, None, None] + self.bias[:, None, None]
        return squashed * self.weight + self.bias

    def extra_repr(self):
        return f"normalized_shape={self.normalized_shape}, alpha_init_value={self.alpha_init_value}, channels_last={self.channels_last}"


class CNNBlock(nn.Module):
    """One dilated 1-D conv block: Conv1d -> ReLU -> BatchNorm1d -> Dropout.

    Operates on inputs shaped [batch, length, channels]. The "same"-style
    padding keeps the sequence length unchanged for odd kernel sizes.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, dropout=0.1):
        super(CNNBlock, self).__init__()
        # Padding chosen so that length is preserved when kernel_size is odd.
        same_pad = (kernel_size - 1) * dilation // 2
        self.conv1 = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=same_pad,
            dilation=dilation
        )
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.norm = nn.BatchNorm1d(out_channels)

    def forward(self, x):
        # Conv1d wants [batch, channels, length]; transpose in and back out.
        out = self.conv1(x.transpose(1, 2))
        out = self.dropout(self.norm(self.relu(out)))
        return out.transpose(1, 2)


class CNNEncoder(nn.Module):
    """Stack of dilated CNNBlocks producing [batch, length, d_model] features.

    The first block maps ``input_channels`` -> ``d_model``; subsequent blocks
    keep ``d_model`` channels and use exponentially increasing dilation
    (2, 4, ...) to widen the receptive field.

    Args:
        input_channels: number of input variates (last dim of the input).
        d_model: output feature width per time step.
        seq_len: expected sequence length of input and output.
        kernel_sizes: per-layer kernel sizes; the last entry is reused when
            there are more layers than entries. The default is a tuple (not
            a list) to avoid the shared mutable-default-argument pitfall.
        num_layers: total number of CNNBlocks.
        dropout: dropout probability inside each block.
    """

    def __init__(self, input_channels, d_model, seq_len, kernel_sizes=(3, 5, 7), num_layers=3, dropout=0.1):
        super(CNNEncoder, self).__init__()
        self.layers = nn.ModuleList()
        self.seq_len = seq_len

        # First layer from input dimension to d_model.
        self.layers.append(CNNBlock(
            in_channels=input_channels,
            out_channels=d_model,
            kernel_size=kernel_sizes[0],
            dropout=dropout
        ))

        # Additional d_model -> d_model layers with growing dilation.
        for i in range(1, num_layers):
            ks = kernel_sizes[min(i, len(kernel_sizes) - 1)]
            dilation = 2 ** i  # Exponentially increasing dilation
            self.layers.append(CNNBlock(
                in_channels=d_model,
                out_channels=d_model,
                kernel_size=ks,
                dilation=dilation,
                dropout=dropout
            ))

        # Safety net: with odd kernels each block preserves length, so this
        # projection normally never runs; it only repairs a length drift.
        self.projection = nn.Linear(self.seq_len, self.seq_len)

    def forward(self, x):
        # x shape: [batch, length, channels]
        for layer in self.layers:
            x = layer(x)

        # Re-project along the sequence axis if the length drifted from
        # seq_len (possible only with even kernel sizes).
        if x.size(1) != self.seq_len:
            x = x.transpose(1, 2)   # [batch, channels, length]
            x = self.projection(x)  # Apply projection to sequence dimension
            x = x.transpose(1, 2)   # [batch, length, channels]

        return x


class Model(nn.Module):
    """Dual-path encoder for time series.

    A Transformer with inverted embedding (one token per variate, following
    iTransformer) runs in parallel with a dilated CNN encoder (one token per
    time step). The two streams are concatenated on the feature axis, fused
    by a linear layer, and projected according to the configured task
    (forecasting, imputation, anomaly detection, or classification).

    ``forward`` returns ``(output, 0)``: the second element is a placeholder
    regularization loss kept for trainer compatibility.
    """

    def __init__(self, configs):
        super(Model, self).__init__()
        self.task_name = configs.task_name
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len

        # Inverted embedding: each variate's whole series becomes one token.
        self.enc_embedding = DataEmbedding_inverted(configs.seq_len, configs.d_model, configs.embed, configs.freq,
                                                    configs.dropout)

        # Transformer encoder; DynamicTanh replaces the usual LayerNorm.
        self.transformer_encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(False, configs.factor, attention_dropout=configs.dropout,
                                      output_attention=False), configs.d_model, configs.n_heads),
                    configs.d_model,
                    configs.d_ff,
                    dropout=configs.dropout,
                    activation=configs.activation
                ) for _ in range(configs.e_layers)
            ],
            norm_layer=DynamicTanh(configs.d_model)
        )

        # Parallel CNN path over the raw (normalized) input.
        self.cnn_encoder = CNNEncoder(
            input_channels=configs.enc_in,
            d_model=configs.d_model,
            seq_len=configs.seq_len,
            kernel_sizes=[3, 5, 7],
            num_layers=configs.e_layers,
            dropout=configs.dropout
        )

        # Linear fusion of the concatenated path outputs.
        # (AttentionFusion(configs.d_model) can be swapped in here; its
        # forward takes the two streams as separate arguments.)
        self.fusion = nn.Linear(configs.d_model * 2, configs.d_model)

        # Task-specific output head.
        if self.task_name in ('long_term_forecast', 'short_term_forecast'):
            self.projection = nn.Linear(configs.d_model, configs.pred_len, bias=True)
        elif self.task_name in ('imputation', 'anomaly_detection'):
            self.projection = nn.Linear(configs.d_model, configs.seq_len, bias=True)
        elif self.task_name == 'classification':
            self.act = F.gelu
            self.dropout = nn.Dropout(configs.dropout)
            self.projection = nn.Linear(configs.d_model * configs.enc_in, configs.num_class)

    @staticmethod
    def _normalize(x_enc):
        """Instance-normalize along the time axis; returns (x_norm, means, stdev)."""
        means = x_enc.mean(1, keepdim=True).detach()
        x_enc = x_enc - means
        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc = x_enc / stdev
        return x_enc, means, stdev

    @staticmethod
    def _denormalize(dec_out, means, stdev, length):
        """Undo _normalize on a [batch, length, variates] output."""
        dec_out = dec_out * (stdev[:, 0, :].unsqueeze(1).repeat(1, length, 1))
        dec_out = dec_out + (means[:, 0, :].unsqueeze(1).repeat(1, length, 1))
        return dec_out

    def _encode(self, x_enc, x_mark_enc):
        """Run both paths, align their token counts, and fuse.

        The transformer tokenizes variates while the CNN tokenizes time
        steps, so the token counts differ whenever seq_len != enc_in. The
        CNN output is linearly resampled along its token axis to match;
        previously this alignment existed only in the forecast path, so the
        other tasks crashed at the concatenation on such inputs.
        """
        transformer_input = self.enc_embedding(x_enc, x_mark_enc)
        transformer_out, _ = self.transformer_encoder(transformer_input)

        cnn_out = self.cnn_encoder(x_enc)

        if cnn_out.size(1) != transformer_out.size(1):
            cnn_out = F.interpolate(
                cnn_out.transpose(1, 2),
                size=transformer_out.size(1),
                mode='linear'
            ).transpose(1, 2)

        return self.fusion(torch.cat([transformer_out, cnn_out], dim=-1))

    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        """Forecast pred_len steps; returns [batch, pred_len, variates]."""
        x_enc, means, stdev = self._normalize(x_enc)
        _, _, N = x_enc.shape

        enc_out = self._encode(x_enc, x_mark_enc)

        # [batch, tokens, pred_len] -> [batch, pred_len, variates];
        # keep only the first N tokens (covariate tokens are dropped).
        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]
        return self._denormalize(dec_out, means, stdev, self.pred_len)

    def imputation(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask):
        """Reconstruct the full series; returns [batch, seq_len, variates]."""
        x_enc, means, stdev = self._normalize(x_enc)
        _, L, N = x_enc.shape

        enc_out = self._encode(x_enc, x_mark_enc)

        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]
        return self._denormalize(dec_out, means, stdev, L)

    def anomaly_detection(self, x_enc):
        """Reconstruct the series for anomaly scoring; no time marks used."""
        x_enc, means, stdev = self._normalize(x_enc)
        _, L, N = x_enc.shape

        enc_out = self._encode(x_enc, None)

        dec_out = self.projection(enc_out).permute(0, 2, 1)[:, :, :N]
        return self._denormalize(dec_out, means, stdev, L)

    def classification(self, x_enc, x_mark_enc):
        """Classify the series; returns [batch, num_class] logits."""
        # No instance normalization here (matches the original behavior).
        enc_out = self._encode(x_enc, None)

        output = self.act(enc_out)
        output = self.dropout(output)
        output = output.reshape(output.shape[0], -1)  # flatten token axis
        return self.projection(output)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        # Returns (prediction, regularization_loss); the loss is always 0.
        if self.task_name in ('long_term_forecast', 'short_term_forecast'):
            dec_out = self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)
            return dec_out[:, -self.pred_len:, :], 0

        if self.task_name == 'imputation':
            return self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask), 0

        if self.task_name == 'anomaly_detection':
            return self.anomaly_detection(x_enc), 0

        if self.task_name == 'classification':
            return self.classification(x_enc, x_mark_enc), 0

        return None, None