#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
import torch.nn as nn
from PIL import Image
from einops import rearrange

class NativeSiglipVisionEncoder(nn.Module):
    """
    A native implementation of the SigLIP-level vision encoder, improved based on Google's SigLIP 2 architecture.

    Pipeline: mean/std pixel normalization -> Conv2d patch embedding -> learned
    positional embedding -> pre-norm Transformer stack -> final LayerNorm ->
    mean pooling over patches -> linear projection to ``cfg.hidden_size``.
    """
    def __init__(self, cfg):
        """
        Initialize the NativeSiglipVisionEncoder.

        Args:
            cfg: Configuration object. Must provide ``hidden_size`` (the
                projection output width). May optionally provide
                ``vision_image_size``, ``vision_patch_size``,
                ``vision_hidden_size``, ``vision_num_heads`` and
                ``vision_num_layers``; the defaults below match the previously
                hard-coded 400M configuration, so existing callers are
                unaffected.

        Raises:
            ValueError: If the vision hidden size is not divisible by the
                number of attention heads (a hard requirement of
                ``nn.MultiheadAttention``).
        """
        super().__init__()
        self.enabled = True
        self.cfg = cfg
        # Architecture hyper-parameters. Previously hard-coded; now overridable
        # through cfg with identical defaults (backward compatible).
        self.image_size = getattr(cfg, 'vision_image_size', 384)  # Size of the input image
        self.patch_size = getattr(cfg, 'vision_patch_size', 14)  # Size of each patch
        self.hidden_size = getattr(cfg, 'vision_hidden_size', 1152)  # Hidden size corresponding to 400M parameters
        self.num_heads = getattr(cfg, 'vision_num_heads', 18)  # Number of attention heads
        self.num_layers = getattr(cfg, 'vision_num_layers', 24)  # Number of Transformer layers

        # Fail fast with a clear message instead of a cryptic error from
        # nn.MultiheadAttention's constructor.
        if self.hidden_size % self.num_heads != 0:
            raise ValueError(
                f"vision hidden_size ({self.hidden_size}) must be divisible "
                f"by num_heads ({self.num_heads})"
            )

        print(f"🟧	NativeSiglipVisionEncoder: __init__ start ({'enabled' if self.enabled else 'disabled'})")

        # Image preprocessing: register ImageNet mean/std for normalization.
        # Buffers (not Parameters) so they move with .to(device) but are not trained.
        self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        # Patch embedding: a stride==kernel_size convolution splits the image
        # into non-overlapping patches and projects each to hidden_size.
        self.patch_embed = nn.Conv2d(
            in_channels=3,  # Number of input channels (RGB)
            out_channels=self.hidden_size,  # Number of output channels
            kernel_size=self.patch_size,  # Size of the convolutional kernel
            stride=self.patch_size  # Stride of the convolution
        )

        # Positional embedding: one learned vector per patch.
        num_patches = (self.image_size // self.patch_size) ** 2
        self.pos_embed = nn.Parameter(torch.randn(1, num_patches, self.hidden_size))

        # Transformer encoder: pre-norm blocks (LayerNorm before attention/MLP)
        # with residual connections, plus a final LayerNorm.
        self.transformer = nn.ModuleDict({
            'layers': nn.ModuleList([
                nn.ModuleDict({
                    'norm1': nn.LayerNorm(self.hidden_size),  # Pre-attention normalization
                    'attn': nn.MultiheadAttention(
                        embed_dim=self.hidden_size,  # Embedding dimension
                        num_heads=self.num_heads,  # Number of attention heads
                        batch_first=True  # Inputs/outputs are (B, N, C)
                    ),
                    'norm2': nn.LayerNorm(self.hidden_size),  # Pre-MLP normalization
                    'mlp': nn.Sequential(
                        nn.Linear(self.hidden_size, 4 * self.hidden_size),  # Expand 4x
                        nn.GELU(),  # Activation function
                        nn.Linear(4 * self.hidden_size, self.hidden_size)  # Project back
                    )
                }) for _ in range(self.num_layers)
            ]),
            'norm': nn.LayerNorm(self.hidden_size)  # Final layer normalization
        })

        # Projection layer: map pooled vision features to the model width.
        self.proj = nn.Linear(self.hidden_size, cfg.hidden_size)

        print("🟧	NativeSiglipVisionEncoder: __init__ end")

    def process_image(self, image_path):
        """
        Load an image from disk and convert it to a model-ready tensor.

        Fix vs. the original: the tensor is scaled to [0, 1] but deliberately
        NOT mean/std-normalized here, because forward() normalizes its input —
        the old code normalized in both places, silently double-normalizing any
        image routed through this helper. The batch dimension is now added
        explicitly (previously it appeared only as a broadcasting side effect
        of the normalization step).

        Args:
            image_path (str): Path to the image file.

        Returns:
            torch.Tensor: Tensor of shape (1, 3, image_size, image_size) with
            values in [0, 1], or None if loading/conversion fails.
        """
        print(f"🟧	Processing image: {image_path}")
        try:
            # Read the image using PIL, force RGB, resize to the model's input size.
            image = Image.open(image_path).convert('RGB')
            image = image.resize((self.image_size, self.image_size))
            # HWC uint8 -> CHW float in [0, 1], with an explicit batch dim.
            tensor = torch.from_numpy(np.array(image)).permute(2, 0, 1).float() / 255.0
            return tensor.unsqueeze(0)
        except Exception as e:
            # Best-effort loader: report and return None so the caller can fall
            # back to forward()'s no-image path.
            print(f"❌	Image processing error: {e}")
            return None

    def forward(self, pixel_values):
        """
        Forward pass of the NativeSiglipVisionEncoder.

        Args:
            pixel_values (torch.Tensor): Raw pixel values of shape
                (B, 3, image_size, image_size) in [0, 1]. Normalization is
                applied here (and only here).

        Returns:
            torch.Tensor: Output tensor of shape (B, 1, cfg.hidden_size);
            zeros when ``pixel_values`` is None.
        """
        if pixel_values is None:
            # Graceful fallback for samples that carry no image.
            return torch.zeros(1, 1, self.cfg.hidden_size, device=self.proj.weight.device)

        # Normalize the input (the single normalization point for this encoder).
        x = (pixel_values - self.mean) / self.std

        # Patch embedding: (B, 3, H, W) -> (B, C, H/ps, W/ps) -> (B, N, C).
        x = self.patch_embed(x)
        x = x.flatten(2).transpose(1, 2)  # equivalent to rearrange 'b c h w -> b (h w) c'

        # Add positional embedding
        x = x + self.pos_embed

        # Pre-norm Transformer blocks with residual connections.
        for layer in self.transformer['layers']:
            # Self-attention: normalize once and reuse for q, k and v
            # (the original called norm1 three times per layer).
            normed = layer['norm1'](x)
            attn_out, _ = layer['attn'](normed, normed, normed, need_weights=False)
            x = x + attn_out
            # MLP with residual connection.
            x = x + layer['mlp'](layer['norm2'](x))

        # Final normalization
        x = self.transformer['norm'](x)

        # Global average pooling over patches, then project to the model width.
        x = self.proj(x.mean(dim=1))
        return x.unsqueeze(1)  # (B, 1, hidden_size)