import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, BertTokenizer

class AudioEncoder(nn.Module):
    """Two-layer MLP that projects raw audio feature vectors into the
    shared hidden space used by the rest of the model.

    Args:
        input_dim: size of each incoming audio feature vector.
        hidden_dim: size of the hidden/output representation.
    """

    def __init__(self, input_dim, hidden_dim):
        super(AudioEncoder, self).__init__()
        # Submodule names kept stable so existing state_dicts still load.
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        """Return fc2(relu(fc1(x))); output has hidden_dim features."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)

class TextEncoder(nn.Module):
    """Thin wrapper around a pretrained BERT encoder.

    Args:
        pretrained_model_name: HuggingFace model identifier to load.
    """

    def __init__(self, pretrained_model_name='bert-base-uncased'):
        super(TextEncoder, self).__init__()
        self.bert = BertModel.from_pretrained(pretrained_model_name)
        # NOTE(review): this dropout is instantiated but never applied in
        # forward() — confirm whether it was meant to wrap the BERT outputs.
        self.dropout = nn.Dropout(p=0.1)

    def forward(self, input_ids, attention_mask):
        """Run BERT and return (last_hidden_states, pooled_output).

        last_hidden_states: per-token representations from the final layer.
        pooled_output: BERT's [CLS]-derived sentence-level vector.
        """
        bert_out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        return bert_out.last_hidden_state, bert_out.pooler_output

class CrossModalAttention(nn.Module):
    """Single-head scaled dot-product attention: audio queries attend over
    text keys/values, and the resulting context is fused back into the
    audio features via a residual sum and output projection.

    Args:
        hidden_dim: feature size shared by the audio and text representations.
    """

    def __init__(self, hidden_dim):
        super(CrossModalAttention, self).__init__()
        self.query_proj = nn.Linear(hidden_dim, hidden_dim)
        self.key_proj = nn.Linear(hidden_dim, hidden_dim)
        self.value_proj = nn.Linear(hidden_dim, hidden_dim)
        self.out_proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, audio_features, text_features, text_attention_mask):
        """Fuse text context into audio features.

        Args:
            audio_features: audio representations, last dim = hidden_dim.
                # assumes (batch, audio_len, hidden_dim) — TODO confirm;
                # the example caller passes a 2-D (batch, hidden_dim) tensor.
            text_features: (batch, text_len, hidden_dim) text representations.
            text_attention_mask: (batch, text_len); 0 marks padding positions.

        Returns:
            Tensor with the same trailing hidden_dim, combining audio
            features with attended text context.
        """
        audio_q = self.query_proj(audio_features)
        text_k = self.key_proj(text_features)
        text_v = self.value_proj(text_features)

        # FIX: scale logits by sqrt(d_k). Unscaled dot products grow with
        # hidden_dim (768 here), saturating the softmax and killing gradients
        # (standard scaled dot-product attention, Vaswani et al., 2017).
        d_k = audio_q.size(-1)
        attention_scores = torch.matmul(audio_q, text_k.transpose(-2, -1)) / (d_k ** 0.5)

        # Exclude padded text positions from the softmax.
        attention_scores = attention_scores.masked_fill(
            text_attention_mask.unsqueeze(1) == 0, float('-inf')
        )
        attention_probs = F.softmax(attention_scores, dim=-1)

        # Weighted sum of text values, then residual-fuse with the audio path.
        context_vector = torch.matmul(attention_probs, text_v)
        combined_features = self.out_proj(audio_features + context_vector)
        return combined_features

class MultiModalModel(nn.Module):
    """End-to-end audio+text model: encode each modality, fuse them with
    cross-modal attention, then decode to the output space.

    Args:
        audio_encoder: callable mapping audio input -> audio features.
        text_encoder: callable accepting (input_ids=..., attention_mask=...)
            and returning (sequence_features, pooled_output).
        cross_modal_attention: callable(audio_feats, text_feats, mask)
            -> fused features.
        decoder: callable mapping fused features -> final output.
    """

    def __init__(self, audio_encoder, text_encoder, cross_modal_attention, decoder):
        super(MultiModalModel, self).__init__()
        self.audio_encoder = audio_encoder
        self.text_encoder = text_encoder
        self.cross_modal_attention = cross_modal_attention
        self.decoder = decoder

    def forward(self, audio_input, text_input):
        """Run the full pipeline.

        Args:
            audio_input: tensor of audio features for the audio encoder.
            text_input: dict with 'input_ids' and 'attention_mask' tensors
                (the format a HuggingFace tokenizer produces).

        Returns:
            Decoder output over the fused representation.
        """
        audio_features = self.audio_encoder(audio_input)

        # BUG FIX: the text encoder's forward takes (input_ids, attention_mask);
        # the original passed the whole tokenizer dict as input_ids, which
        # raised a TypeError at runtime.
        text_features, _ = self.text_encoder(
            input_ids=text_input['input_ids'],
            attention_mask=text_input['attention_mask'],
        )

        # Fuse text context into the audio stream, then decode.
        combined_features = self.cross_modal_attention(
            audio_features, text_features, text_input['attention_mask']
        )
        return self.decoder(combined_features)

# Example usage. Guarded so that importing this module does not trigger a
# BERT download / model construction as a side effect.
if __name__ == "__main__":
    # BUG FIX: vocab_size was referenced but never defined (NameError).
    # 30522 is the bert-base-uncased vocabulary size; adjust for your tokenizer.
    VOCAB_SIZE = 30522

    # Initialize components
    audio_encoder = AudioEncoder(input_dim=128, hidden_dim=768)  # input_dim must match your audio feature extractor
    text_encoder = TextEncoder()
    cross_modal_attention = CrossModalAttention(hidden_dim=768)
    decoder = nn.Linear(768, VOCAB_SIZE)

    # Initialize full model
    model = MultiModalModel(audio_encoder, text_encoder, cross_modal_attention, decoder)

    # Dummy inputs for demonstration
    audio_input = torch.randn(1, 128)  # replace with actual audio features
    text_input = {
        'input_ids': torch.tensor([[101, 2023, 2003, 102]]),  # example BERT token ids
        'attention_mask': torch.tensor([[1, 1, 1, 1]]),
    }

    # Forward pass — inference-only demo, so skip gradient tracking.
    with torch.no_grad():
        output = model(audio_input, text_input)
    print(output)