#Data-Driven
import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

# Encoder residual block (standard convolutions), first half of the U-Net
class ResidualBlock(nn.Module):
    """Encoder residual block for 1-D feature maps.

    Stacks three Conv1d+BatchNorm stages (3/3/5 kernels), reweights the
    result with spatial attention, then adds a skip connection and applies
    ReLU.  The skip path uses a 1x1 conv only when the channel counts differ.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.attn = AttentionBlock()

        stages = [
            nn.Conv1d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channels, out_channels, kernel_size=5, padding=2),
            nn.BatchNorm1d(out_channels),
        ]
        self.conv_layers = nn.Sequential(*stages)

        # Match channel counts on the skip path only when necessary.
        if in_channels == out_channels:
            self.residual_conv = nn.Identity()
        else:
            self.residual_conv = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Return relu(attn(conv(x)) + skip(x)); output is (B, out_channels, L)."""
        skip = self.residual_conv(x)
        features = self.attn(self.conv_layers(x))
        return self.relu(features + skip)


# Decoder residual block (dilated convolutions), second half of the U-Net
class DilatedResidualBlock(nn.Module):
    """Decoder residual block built from two dilated Conv1d+BatchNorm stages.

    Padding equals the dilation, so the sequence length is preserved.  A
    skip connection (1x1 conv when channel counts differ) is added before
    the final ReLU.
    """

    def __init__(self, in_channels, out_channels, dilation=2):
        super().__init__()
        self.relu = nn.ReLU(inplace=True)

        stages = [
            nn.Conv1d(in_channels, out_channels, kernel_size=3,
                      padding=dilation, dilation=dilation),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channels, out_channels, kernel_size=3,
                      padding=dilation, dilation=dilation),
            nn.BatchNorm1d(out_channels),
        ]
        self.conv_layers = nn.Sequential(*stages)

        # Project the skip path only when the channel counts differ.
        if in_channels == out_channels:
            self.residual_conv = nn.Identity()
        else:
            self.residual_conv = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Return relu(conv(x) + skip(x)); output is (B, out_channels, L)."""
        skip = self.residual_conv(x)
        return self.relu(self.conv_layers(x) + skip)



# Attention Mechanism: CBAM (Convolutional Block Attention Module)
class AttentionBlock(nn.Module):
    """CBAM-style spatial attention for 1-D feature maps.

    Summarizes the channel dimension with max- and mean-pooling, derives a
    per-position gate in (0, 1) from the pooled pair via a single Conv1d +
    sigmoid, and rescales the input by that gate.
    """

    def __init__(self, kernel_size=7):
        super().__init__()
        self.conv = nn.Conv1d(2, 1, kernel_size=kernel_size,
                              padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Reweight x of shape (B, C, L); output shape matches the input."""
        # Two (B, 1, L) channel summaries stacked into (B, 2, L).
        pooled = torch.cat(
            [x.max(dim=1, keepdim=True).values, x.mean(dim=1, keepdim=True)],
            dim=1,
        )
        gate = self.sigmoid(self.conv(pooled))  # (B, 1, L)
        return x * gate

class Bottleneck(nn.Module):
    """Bi-LSTM bottleneck that models temporal dependencies between the
    convolutional encoder and decoder.

    The LSTM is bidirectional, so the output carries
    2 * (lstm_hidden_size or in_channels) channels.
    NOTE(review): `hidden_channels` and `kernel_size` are accepted but never
    used; they are kept only for interface compatibility with callers.
    """

    def __init__(self, in_channels, hidden_channels, kernel_size=7, lstm_hidden_size=None):
        super(Bottleneck, self).__init__()
        self.lstm = nn.LSTM(
            input_size=in_channels,
            hidden_size=lstm_hidden_size or in_channels,
            batch_first=True,
            bidirectional=True,
        )

    def forward(self, x):
        """Map (B, C, L) to (B, 2*hidden, L) via the Bi-LSTM."""
        # The LSTM wants (B, L, C); the surrounding convs use (B, C, L).
        seq = x.transpose(1, 2)
        seq, _ = self.lstm(seq)
        return seq.transpose(1, 2)

# U-Net built on a circled residual structure and dilated convolutions
class CircledResidualUNet(nn.Module):
    """1-D residual U-Net: conv encoder, Bi-LSTM bottleneck, dilated decoder.

    Args:
        in_channels: channels of the input sequence.
        out_channels: channels of the output sequence.
        hidden_channels: encoder widths, shallow to deep; needs >= 2 entries.
            Defaults to [64, 128, 256].
        lstm_hidden_size: per-direction LSTM width.  The LSTM is
            bidirectional, so 2 * lstm_hidden_size must equal
            hidden_channels[-1] for the bottleneck output to fit the first
            decoder block.

    Raises:
        ValueError: when the widths are inconsistent (previously this
            surfaced only later as an opaque shape mismatch in forward()).
    """

    def __init__(self, in_channels=1, out_channels=1, hidden_channels=None, lstm_hidden_size=128):
        super().__init__()
        # Avoid a mutable default argument; the effective default is unchanged.
        if hidden_channels is None:
            hidden_channels = [64, 128, 256]

        # Fail fast on configurations that could only crash inside forward().
        if len(hidden_channels) < 2:
            raise ValueError("hidden_channels needs at least two entries")
        effective_hidden = lstm_hidden_size or hidden_channels[-1]
        if 2 * effective_hidden != hidden_channels[-1]:
            raise ValueError(
                "2 * lstm_hidden_size must equal hidden_channels[-1] "
                f"(got {2 * effective_hidden} vs {hidden_channels[-1]})"
            )

        # Encoder: standard-convolution residual blocks, shallow to deep.
        self.encoder = nn.ModuleList()
        prev_channels = in_channels
        for ch in hidden_channels:
            self.encoder.append(ResidualBlock(prev_channels, ch))
            prev_channels = ch

        # Bottleneck: Bi-LSTM whose output width is 2 * lstm_hidden_size
        # == hidden_channels[-1] (validated above).
        self.bottleneck = Bottleneck(hidden_channels[-1], hidden_channels[-1],
                                     lstm_hidden_size=lstm_hidden_size)

        # Decoder: dilated residual blocks.  The first consumes the
        # bottleneck output directly; every later block also concatenates the
        # matching encoder output (hence the doubled input channel counts).
        self.decoder = nn.ModuleList()
        self.decoder.append(DilatedResidualBlock(hidden_channels[-1], hidden_channels[-2], dilation=2))
        for i in range(len(hidden_channels) - 2, 0, -1):
            self.decoder.append(DilatedResidualBlock(2 * hidden_channels[i], hidden_channels[i - 1], dilation=2))
        self.decoder.append(DilatedResidualBlock(2 * hidden_channels[0], in_channels, dilation=2))

        # Output head: 1x1 conv over [decoder output, raw input].
        self.final_conv = nn.Conv1d(2 * in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Map (B, in_channels, L) to (B, out_channels, L)."""
        # Encoder pass, keeping every stage's output for the skip connections.
        enc_outputs = []
        e = x
        for enc_layer in self.encoder:
            e = enc_layer(e)
            enc_outputs.append(e)

        # Bottleneck (Bi-LSTM temporal modelling).
        d = self.bottleneck(e)

        # Decoder pass with U-Net skip connections (deepest encoder output
        # is consumed implicitly by the bottleneck, so skips start at -2).
        for i, dec_layer in enumerate(self.decoder):
            if i == 0:
                d = dec_layer(d)
            else:
                d = dec_layer(torch.cat([d, enc_outputs[-(i + 1)]], dim=1))

        # Final projection also sees the raw input (outermost "circle").
        return self.final_conv(torch.cat([d, x], dim=1))

   
# Define L_data (Mean Absolute Error)
def L_data(pred, true):
    """Return the mean absolute error (L1 loss) between pred and true."""
    return F.l1_loss(pred, true)

# Quick smoke test
if __name__ == "__main__":
    # Smoke test: push a random batch through the model and report MAE.
    net = CircledResidualUNet(in_channels=2, out_channels=1).to(device)
    batch = torch.randn(10, 2, 250).to(device)  # (batch_size, channels, sequence_length)
    pred = net(batch)
    print("Output shape:", pred.shape)
    target = torch.randn(10, 1, 250).to(device)
    print(L_data(pred, target))  # L_data (MAE) loss
