import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.models import resnet50
from transformers import BertTokenizer, BertModel
import numpy as np
from PIL import Image
import os

# Hyperparameters
batch_size = 32       # samples per optimizer step
learning_rate = 1e-4  # Adam step size; small because both encoders are pretrained
num_epochs = 10       # full passes over the dataset

# Dataset class
class MultimodalDataset(Dataset):
    """Pairs an image file with a text description and an integer label.

    Each item is ``(image, encoded_text, label)`` where ``encoded_text`` is
    the dict returned by ``tokenizer.encode_plus`` (``input_ids``,
    ``attention_mask``, ... each of shape ``(1, max_length)``).

    Args:
        image_paths: sequence of image file paths.
        texts: sequence of text descriptions, parallel to ``image_paths``.
        labels: sequence of integer class labels, parallel to ``image_paths``.
        tokenizer: a HuggingFace tokenizer providing ``encode_plus``.
        transform: optional callable applied to the loaded PIL image.
        max_length: fixed token length every encoding is padded/truncated to.
    """

    def __init__(self, image_paths, texts, labels, tokenizer, transform=None,
                 max_length=128):
        self.image_paths = image_paths
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.transform = transform
        # Fixed sequence length so the DataLoader's default collate_fn can
        # stack encodings into a batch; without padding/truncation, texts of
        # different lengths produce tensors that cannot be stacked.
        self.max_length = max_length

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Force 3-channel RGB: grayscale or RGBA files would otherwise yield
        # a channel count the image encoder cannot consume.
        image = Image.open(self.image_paths[idx]).convert('RGB')
        if self.transform:
            image = self.transform(image)

        # Tokenize and encode the text to a fixed length.
        text = self.texts[idx]
        encoded_text = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            padding='max_length',
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt'
        )

        # Get the label
        label = self.labels[idx]

        return image, encoded_text, label

# Model class
class MultimodalClassifier(nn.Module):
    """Image+text classifier: ResNet-50 and BERT features fused by linear layers.

    The pooled 2048-d ResNet feature is concatenated with BERT's [CLS] token
    embedding, projected to 512 dims, and classified.

    Args:
        num_classes: number of output classes (default 2, matching the
            original hard-coded value).
    """

    def __init__(self, num_classes=2):
        super(MultimodalClassifier, self).__init__()
        # Image encoder
        self.image_encoder = resnet50(pretrained=True)
        # Keep the 2048-d pooled features. The stock classification head would
        # emit 1000 ImageNet logits, which would not match the fusion layer's
        # expected input width below — so record in_features first, then
        # replace the head with an identity.
        image_feature_dim = self.image_encoder.fc.in_features
        self.image_encoder.fc = nn.Identity()

        # Text encoder
        self.text_encoder = BertModel.from_pretrained('bert-base-uncased')
        self.num_classes = num_classes
        # Fusion and classifier
        self.fusion_layer = nn.Linear(image_feature_dim + self.text_encoder.config.hidden_size, 512)
        self.classifier = nn.Linear(512, self.num_classes)

    def forward(self, images, input_ids, attention_mask):
        """Return raw logits of shape (batch, num_classes)."""
        # Encode images: (batch, 2048) pooled features (fc is Identity).
        image_features = self.image_encoder(images)

        # Encode text
        text_features = self.text_encoder(input_ids=input_ids, attention_mask=attention_mask)

        # Concatenate image features with the [CLS] token embedding.
        combined_features = torch.cat((image_features, text_features.last_hidden_state[:, 0, :]), 1)

        # Fusion and classification
        fused_features = self.fusion_layer(combined_features)
        logits = self.classifier(fused_features)

        return logits

# Define transforms. The pretrained ResNet-50 was trained on ImageNet-normalized
# inputs, so the same per-channel mean/std must be applied here — without
# Normalize, the pretrained weights see out-of-distribution pixel statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# Tokenizer matching the text encoder used in MultimodalClassifier.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Assume you have the following data
# NOTE(review): placeholder paths/texts — replace with real data before running.
image_paths = ['dataset/image1.png', 'dataset/image2.png']
texts = ['Description of image 1', 'Description of image 2']
labels = [0, 1] # labels should be numeric

# Dataset and DataLoader
dataset = MultimodalDataset(image_paths, texts, labels, tokenizer, transform)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Model, loss, and optimizer
model = MultimodalClassifier()
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop: end-to-end supervised fine-tuning of both encoders.
# Use the GPU when available; the original loop ran everything on CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    # 'batch_labels' avoids shadowing the module-level 'labels' list.
    for images, encoded_texts, batch_labels in dataloader:
        # encode_plus returns tensors of shape (1, seq_len); the default
        # collate stacks them to (batch, 1, seq_len), so drop the middle dim.
        input_ids = encoded_texts['input_ids'].squeeze(1).to(device)
        attention_mask = encoded_texts['attention_mask'].squeeze(1).to(device)
        images = images.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        outputs = model(images, input_ids, attention_mask)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")

    # Per-epoch average is a steadier training signal than any single batch.
    print(f"Epoch [{epoch+1}/{num_epochs}] average loss: {running_loss / len(dataloader):.4f}")