import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
from layers.Transformer_EncDec import Encoder, EncoderLayer
from layers.SelfAttention_Family import FullAttention, AttentionLayer
from layers.Embed import DataEmbedding_inverted


class CNNFeatureExtractor(nn.Module):
    """Multi-scale 1-D CNN that summarizes a [B, L, C] series into one [B, d_model] vector.

    One Conv1d branch per kernel size runs along the time axis; the branch
    outputs are concatenated per time step, mean-pooled over time, and
    projected to d_model.
    """

    def __init__(self, enc_in, d_model, kernel_sizes=(3, 5, 7), num_filters=32, dropout=0.1):
        """
        Args:
            enc_in: number of input channels (variates).
            d_model: size of the output feature vector.
            kernel_sizes: receptive-field sizes, one conv branch each.
                Tuple default — the original list default was a shared mutable
                default argument.
            num_filters: filters produced by each branch.
            dropout: dropout rate applied after each branch.
        """
        super(CNNFeatureExtractor, self).__init__()
        self.convs = nn.ModuleList()
        for kernel_size in kernel_sizes:
            # "Same" padding for odd kernel sizes keeps the sequence length L.
            padding = (kernel_size - 1) // 2
            self.convs.append(
                nn.Sequential(
                    nn.Conv1d(enc_in, num_filters, kernel_size=kernel_size, padding=padding),
                    nn.BatchNorm1d(num_filters),
                    nn.ReLU(),
                    nn.Dropout(dropout)
                )
            )
        self.projection = nn.Linear(len(kernel_sizes) * num_filters, d_model)

    def forward(self, x):
        """Map x: [B, L, C] -> [B, d_model] global feature vector."""
        x_conv = x.permute(0, 2, 1)  # Conv1d expects [B, C, L]
        conv_results = [conv(x_conv).permute(0, 2, 1) for conv in self.convs]
        combined = torch.cat(conv_results, dim=2)  # [B, L, num_filters * len(kernel_sizes)]
        pooled = torch.mean(combined, dim=1)  # average over time -> [B, num_filters * len(kernel_sizes)]
        output = self.projection(pooled)  # [B, d_model]
        return output


class FeatureFusionModule(nn.Module):
    """Gated fusion of per-step transformer features with a global CNN descriptor.

    The CNN vector is broadcast along the sequence axis, a sigmoid gate blends
    the two sources feature-wise, and the blended projection is added back
    residually under LayerNorm.
    """

    def __init__(self, d_model, dropout=0.1):
        super(FeatureFusionModule, self).__init__()
        # Gate in [0, 1] deciding, per feature, how much of each source to keep.
        self.fusion_gate = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.Sigmoid()
        )
        self.fusion_proj = nn.Linear(d_model * 2, d_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, transformer_feat, cnn_feat):
        """Fuse [B, L, d] transformer features with a [B, d] CNN summary -> [B, L, d]."""
        # Broadcast the global CNN vector to every sequence position.
        cnn_seq = cnn_feat.unsqueeze(1).expand_as(transformer_feat)
        stacked = torch.cat((transformer_feat, cnn_seq), dim=-1)
        gate = self.fusion_gate(stacked)
        # Convex-combination style weighting of the two sources.
        blended = torch.cat((transformer_feat * gate, cnn_seq * (1 - gate)), dim=-1)
        fused = self.dropout(self.fusion_proj(blended))
        # Residual connection + normalization around the transformer path.
        return self.layer_norm(transformer_feat + fused)


class Model(nn.Module):
    """Hybrid forecaster: inverted-embedding Transformer encoder fused with a
    multi-scale CNN branch, with task-specific output heads.

    NOTE(review): only the forecasting path is visible in this chunk; the
    imputation/anomaly_detection/classification methods dispatched by
    forward() are assumed to be defined elsewhere — confirm.
    """

    def __init__(self, configs):
        """Build all sub-modules from the experiment `configs` namespace."""
        super(Model, self).__init__()
        self.task_name = configs.task_name
        self.seq_len = configs.seq_len
        self.pred_len = configs.pred_len
        self.enc_in = configs.enc_in
        self.c_out = configs.c_out
        self.output_attention = getattr(configs, 'output_attention', False)

        d_model = configs.d_model
        dropout = configs.dropout

        # Inverted embedding: each variate's whole series becomes one token.
        self.enc_embedding = DataEmbedding_inverted(
            configs.seq_len, d_model, configs.embed, configs.freq, dropout
        )

        def make_encoder_layer():
            # One full-attention encoder layer, same hyper-parameters for all.
            attention = FullAttention(
                False, configs.factor,
                attention_dropout=dropout,
                output_attention=self.output_attention,
            )
            return EncoderLayer(
                AttentionLayer(attention, d_model, configs.n_heads),
                d_model, configs.d_ff,
                dropout=dropout, activation=configs.activation,
            )

        self.encoder = Encoder(
            [make_encoder_layer() for _ in range(configs.e_layers)],
            norm_layer=nn.LayerNorm(d_model),
        )

        # Parallel multi-scale CNN branch over the (normalized) input window.
        self.cnn_extractor = CNNFeatureExtractor(
            enc_in=configs.enc_in,
            d_model=d_model,
            kernel_sizes=[3, 5, 7],
            num_filters=32,
            dropout=dropout,
        )

        self.fusion_module = FeatureFusionModule(d_model, dropout=dropout)

        # Task-specific heads.
        if self.task_name in ('long_term_forecast', 'short_term_forecast'):
            self.projection = nn.Linear(d_model, configs.c_out)
            self.seq_projection = nn.Linear(configs.seq_len, configs.pred_len)
        elif self.task_name in ('imputation', 'anomaly_detection'):
            self.projection = nn.Linear(d_model, configs.c_out)
        elif self.task_name == 'classification':
            self.act = F.gelu
            self.dropout = nn.Dropout(dropout)
            self.projection = nn.Linear(d_model * configs.seq_len, configs.num_class)

    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        batch_size = x_enc.shape[0]
        means = x_enc.mean(1, keepdim=True).detach()
        stdev = torch.sqrt(torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc = (x_enc - means) / stdev

        transformer_enc = self.enc_embedding(x_enc, x_mark_enc)
        transformer_out, attns = self.encoder(transformer_enc, attn_mask=None)

        cnn_features = self.cnn_extractor(x_enc)
        fused_features = self.fusion_module(transformer_out, cnn_features)

        output = self.projection(fused_features)  # [B, L, C_out]
        output = output.transpose(1, 2)  # [B, C_out, L]
        dec_out = self.seq_projection(output)  # [B, C_out, pred_len]
        dec_out = dec_out.transpose(1, 2)  # [B, pred_len, C_out]

        dec_out = dec_out * stdev[:, 0, :self.c_out].unsqueeze(1).repeat(1, self.pred_len, 1)
        dec_out = dec_out + means[:, 0, :self.c_out].unsqueeze(1).repeat(1, self.pred_len, 1)

        if self.output_attention:
            return dec_out, attns
        else:
            return dec_out

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec, mask=None):
        if self.task_name in ['long_term_forecast', 'short_term_forecast']:
            return self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)
        elif self.task_name == 'imputation':
            return self.imputation(x_enc, x_mark_enc, x_dec, x_mark_dec, mask)
        elif self.task_name == 'anomaly_detection':
            return self.anomaly_detection(x_enc)
        elif self.task_name == 'classification':
            return self.classification(x_enc, x_mark_enc)
        else:
            return None

    # Add new method for one-step prediction and saving results
    def predict_and_save(self, batch_x, batch_y, batch_x_mark, batch_y_mark, save_path):
        """
        Perform one-step prediction and save predictions with actual values

        Args:
            batch_x: Input data
            batch_y: Ground truth
            batch_x_mark: Input timestamps
            batch_y_mark: Output timestamps
            save_path: Path to save results
        """
        self.eval()  # Set model to evaluation mode
        with torch.no_grad():
            # Get predictions
            if self.output_attention:
                outputs, _ = self.forward(batch_x, batch_x_mark, batch_x, batch_y_mark)
            else:
                outputs = self.forward(batch_x, batch_x_mark, batch_x, batch_y_mark)

            # Only take the first step prediction if requested
            if self.pred_len > 1:
                one_step_pred = outputs[:, 0:1, :]
            else:
                one_step_pred = outputs

            # Get first step ground truth
            one_step_true = batch_y[:, 0:1, :]

            # Convert to numpy arrays
            pred_np = one_step_pred.cpu().numpy()
            true_np = one_step_true.cpu().numpy()

            # Create a DataFrame for storing results
            results = []
            for i in range(len(pred_np)):
                for j in range(self.c_out):
                    results.append({
                        'sample_idx': i,
                        'feature_idx': j,
                        'predicted': pred_np[i, 0, j],
                        'actual': true_np[i, 0, j]
                    })

            # Save to CSV
            pd.DataFrame(results).to_csv(save_path, index=False)
            print(f"Results saved to {save_path}")

            return pred_np, true_np