import torch
import torch.nn as nn
import torch.nn.functional as F

class FiberVibrationClassifier(nn.Module):
    """Three-branch classifier for fiber-optic vibration signals.

    Encodes the raw time series, a spatial feature vector, and a magnitude
    spectrum into fixed-size feature vectors, fuses them with self-attention
    over the three modality tokens, and classifies the averaged fused token.
    """

    def __init__(self,
                 curve_length=1024,
                 num_classes=3,
                 time_feat_dim=100,
                 space_input_dim=10,
                 space_feat_dim=64,
                 freq_feat_dim=64):
        """
        Args:
            curve_length: length L of the raw time-series input.
            num_classes: number of output classes.
            time_feat_dim: feature width produced by the time branch.
            space_input_dim: width of the spatial feature input.
            space_feat_dim: feature width produced by the space branch.
            freq_feat_dim: feature width produced by the frequency branch.

        Note:
            time_feat_dim + space_feat_dim + freq_feat_dim must be divisible
            by the attention head count (4); the defaults give 228 = 4 * 57.
        """
        super(FiberVibrationClassifier, self).__init__()
        self.time_branch = TimeBranch(curve_length, time_feat_dim)
        self.space_branch = SpaceBranch(space_input_dim, space_feat_dim)
        # The frequency branch consumes the one-sided spectrum (length L // 2).
        self.freq_branch = FreqBranch(curve_length // 2, freq_feat_dim)

        self.total_feat_dim = time_feat_dim + space_feat_dim + freq_feat_dim

        # The three branch outputs have different widths (e.g. 100/64/64 by
        # default), so stacking them directly would raise a RuntimeError.
        # Project each one to the shared fusion width first, so the fused
        # sequence is [B, 3, total_feat_dim] and matches embed_dim below.
        self.time_proj = nn.Linear(time_feat_dim, self.total_feat_dim)
        self.space_proj = nn.Linear(space_feat_dim, self.total_feat_dim)
        self.freq_proj = nn.Linear(freq_feat_dim, self.total_feat_dim)

        # Self-attention over the sequence of 3 modality tokens.
        self.attention_fusion = nn.MultiheadAttention(
            embed_dim=self.total_feat_dim,
            num_heads=4,
            batch_first=True
        )

        self.classifier = nn.Sequential(
            nn.Linear(self.total_feat_dim, 128),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, num_classes)
        )

    def forward(self, time_data, space_data, freq_data=None):
        """Classify a batch of signals.

        Args:
            time_data: raw signals, [B, L].
            space_data: spatial features, [B, space_input_dim].
            freq_data: optional precomputed log-magnitude spectra, [B, L // 2].
                Derived from time_data via FFT when omitted.

        Returns:
            Class logits, [B, num_classes].
        """
        time_feat = self.time_branch(time_data)      # [B, time_feat_dim]
        space_feat = self.space_branch(space_data)   # [B, space_feat_dim]

        if freq_data is None:
            # rfft computes only the non-redundant half of the spectrum for
            # real input; its magnitudes equal fft's over the kept bins.
            freq_data = torch.fft.rfft(time_data).abs()[:, :time_data.size(1) // 2]
            freq_data = torch.log(freq_data + 1e-6)  # compress dynamic range

        freq_feat = self.freq_branch(freq_data)      # [B, freq_feat_dim]

        # One token per modality, all projected to the fusion width: [B, 3, D].
        tokens = torch.stack(
            [self.time_proj(time_feat),
             self.space_proj(space_feat),
             self.freq_proj(freq_feat)],
            dim=1
        )

        fused_tokens, _ = self.attention_fusion(tokens, tokens, tokens)
        fused = fused_tokens.mean(dim=1)             # [B, D]

        return self.classifier(fused)


class TimeBranch(nn.Module):
    """Temporal encoder: stacked 1-D conv blocks followed by a BiLSTM.

    Maps a raw signal [B, L] to a feature vector [B, output_dim] by
    convolving/downsampling the signal, running a bidirectional LSTM over
    the resulting sequence, and projecting the concatenated terminal
    hidden activations of both directions.
    """

    def __init__(self, input_length, output_dim):
        # NOTE: input_length is accepted for interface symmetry with the
        # other branches but the architecture itself is length-agnostic.
        super(TimeBranch, self).__init__()

        def conv_block(c_in, c_out, k):
            # conv -> batchnorm -> relu -> halve the temporal length
            return [nn.Conv1d(c_in, c_out, kernel_size=k, padding=k // 2),
                    nn.BatchNorm1d(c_out),
                    nn.ReLU(),
                    nn.MaxPool1d(2)]

        self.conv_layers = nn.Sequential(
            *conv_block(1, 32, 7),
            *conv_block(32, 64, 5),
            *conv_block(64, 128, 3),
        )
        self.lstm = nn.LSTM(
            input_size=128,
            hidden_size=64,
            num_layers=2,
            batch_first=True,
            dropout=0.2,
            bidirectional=True
        )
        self.output_layer = nn.Linear(128, output_dim)

    def forward(self, x):
        """Encode raw signals [B, L] into features [B, output_dim]."""
        feats = self.conv_layers(x.unsqueeze(1))   # [B, 128, L // 8]
        seq = feats.permute(0, 2, 1)               # [B, T, 128]
        states, _ = self.lstm(seq)                 # [B, T, 2 * 64]
        # The forward direction's last step and the backward direction's
        # first step each summarize the full sequence.
        fwd_final = states[:, -1, :64]
        bwd_final = states[:, 0, 64:]
        summary = torch.cat((fwd_final, bwd_final), dim=-1)
        return self.output_layer(summary)


class SpaceBranch(nn.Module):
    """Spatial encoder: a small MLP mapping [B, input_dim] -> [B, output_dim]."""

    def __init__(self, input_dim, output_dim):
        super(SpaceBranch, self).__init__()
        layers = []
        width_in = input_dim
        for width_out in (64, 128):
            layers += [nn.Linear(width_in, width_out),
                       nn.ReLU(),
                       nn.Dropout(0.2)]
            width_in = width_out
        layers.append(nn.Linear(width_in, output_dim))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        """Encode a batch of spatial feature vectors."""
        return self.mlp(x)


class FreqBranch(nn.Module):
    """Spectral encoder: 1-D conv blocks over a magnitude spectrum.

    Maps a spectrum [B, F] to features [B, output_dim]. AdaptiveAvgPool1d
    squeezes any spectral length to a fixed 16-bin summary before the MLP.
    """

    def __init__(self, freq_length, output_dim):
        super(FreqBranch, self).__init__()

        def conv_block(c_in, c_out, k):
            # length-preserving conv ("same" padding) -> batchnorm -> relu
            return [nn.Conv1d(c_in, c_out, kernel_size=k, padding=k // 2),
                    nn.BatchNorm1d(c_out),
                    nn.ReLU()]

        self.conv_layers = nn.Sequential(
            *conv_block(1, 32, 9), nn.MaxPool1d(2),
            *conv_block(32, 64, 7), nn.MaxPool1d(2),
            *conv_block(64, 128, 5), nn.AdaptiveAvgPool1d(16),
        )
        self.mlp = nn.Sequential(
            nn.Linear(128 * 16, 256),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(256, output_dim),
        )

    def forward(self, x):
        """Encode magnitude spectra [B, F] into features [B, output_dim]."""
        features = self.conv_layers(x.unsqueeze(1))          # [B, 128, 16]
        return self.mlp(torch.flatten(features, start_dim=1))
