
import torch
import torch.nn as nn
import torch.nn.functional as F

class DualInputAlexNet(nn.Module):
    """Two-stream AlexNet-style CNN for paired 128x128 and 256x256 inputs.

    Each input resolution is processed by its own convolutional path; the
    lower-resolution feature map is bilinearly upsampled to match the
    higher-resolution one, the two are concatenated along the channel
    dimension (192 + 192 = 384 channels), passed through a merge block, and
    finally classified by a single fully connected layer.

    Args:
        num_classes: Number of output classes for the final linear layer.
    """

    def __init__(self, num_classes=10):
        super().__init__()

        # First path: for 128x128 images.
        self.path_128 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )

        # Second path: for 256x256 images (same architecture, separate weights).
        self.path_256 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )

        # Merge layer operating on the concatenated 384-channel map.
        self.merge_layer = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )

        # Probe the conv stack with dummy tensors to size the FC layer.
        # Reusing _extract_features here (instead of duplicating the forward
        # logic inline) guarantees the probe can never drift out of sync with
        # the real forward pass.
        with torch.no_grad():
            merged = self._extract_features(
                torch.randn(1, 3, 128, 128),
                torch.randn(1, 3, 256, 256),
            )
        self.fc = nn.Linear(merged.view(1, -1).size(1), num_classes)

    def _extract_features(self, x_128, x_256):
        """Run both conv paths, align spatial sizes, and fuse the features.

        Single source of truth for the feature pipeline, shared by forward()
        and by the size probe in __init__.

        Args:
            x_128: Tensor of shape (N, 3, 128, 128).
            x_256: Tensor of shape (N, 3, 256, 256).

        Returns:
            Merged feature map of shape (N, 384, H, W).
        """
        feats_128 = self.path_128(x_128)
        feats_256 = self.path_256(x_256)

        # Upsample the smaller feature map to the larger one's spatial size
        # so the two can be concatenated channel-wise.
        feats_128 = F.interpolate(
            feats_128, size=feats_256.size()[-2:],
            mode='bilinear', align_corners=False,
        )
        return self.merge_layer(torch.cat((feats_128, feats_256), dim=1))

    def forward(self, x_128, x_256):
        """Classify a pair of views of the same input.

        Args:
            x_128: Tensor of shape (N, 3, 128, 128).
            x_256: Tensor of shape (N, 3, 256, 256).

        Returns:
            Logits of shape (N, num_classes).
        """
        x = self._extract_features(x_128, x_256)
        x = torch.flatten(x, 1)  # flatten all but the batch dimension
        return self.fc(x)

# Quick smoke test: build the model, feed random dual-resolution inputs,
# and confirm a forward pass produces an output.
if __name__ == '__main__':
    net = DualInputAlexNet(num_classes=10)

    # One random sample per resolution stream.
    small_view = torch.randn(1, 3, 128, 128)
    large_view = torch.randn(1, 3, 256, 256)

    logits = net(small_view, large_view)

    # Report the resulting tensor shape.
    print("Output shape:", logits.shape)
