import torch.nn as nn

class ImageEncoder(nn.Module):
    """Wrap a pretrained vision backbone and project its pooled output.

    Produces a fixed-size embedding (e.g. for contrastive image-text
    training) by passing the backbone's ``pooler_output`` through a
    two-layer MLP projection head.

    Args:
        base_model: Vision model whose forward accepts ``pixel_values``
            and returns an object exposing a ``pooler_output`` attribute
            of shape ``(batch, base_model.config.hidden_size)``.
        projection_dim: Output dimensionality of the projection head.
    """

    def __init__(self, base_model, projection_dim=256):
        super().__init__()
        self.base_model = base_model
        # hidden_size -> projection_dim -> projection_dim MLP head.
        self.projection = nn.Sequential(
            nn.Linear(base_model.config.hidden_size, projection_dim),
            nn.ReLU(),
            nn.Linear(projection_dim, projection_dim),
        )

    def forward(self, pixel_values, **kwargs):
        """Encode a batch of images into projected embeddings.

        Args:
            pixel_values: Image tensor in whatever layout ``base_model``
                expects (typically ``(batch, channels, height, width)``).
            **kwargs: Accepted and ignored for backward compatibility with
                callers that unpack a full processor output (e.g. extra
                keys besides ``pixel_values``).

        Returns:
            Tensor of shape ``(batch, projection_dim)``.
        """
        # Explicit keyword call: the vision backbone specifically expects
        # 'pixel_values'; a missing argument now fails with a clear
        # TypeError instead of an opaque KeyError.
        outputs = self.base_model(pixel_values=pixel_values)
        projected = self.projection(outputs.pooler_output)
        return projected