#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import numpy as np
from torch import nn
from torch import nn
from PIL import Image
import torch.nn.functional as F
from transformers import ASTFeatureExtractor

class VisionEncoder(nn.Module):
    """
    A ViT-style vision encoder: images are split into non-overlapping patches,
    embedded, prefixed with a [CLS] token, run through a pre-norm Transformer,
    and projected to the model's hidden dimension.
    """
    def __init__(self, cfg):
        """
        Initialize the VisionEncoder module.

        Args:
            cfg: Configuration object. Must provide ``hidden_size`` (the target
                model dimension). The vision hyperparameters may optionally be
                overridden via ``vision_image_size``, ``vision_patch_size``,
                ``vision_hidden_size``, ``vision_num_heads`` and
                ``vision_num_layers``; the defaults reproduce the previous
                hard-coded values, so existing configs behave identically.
        """
        super().__init__()
        self.enabled = True
        self.cfg = cfg
        # Hyperparameters are now read from cfg (with the original constants as
        # backward-compatible defaults) instead of being hard-coded.
        self.image_size = getattr(cfg, 'vision_image_size', 384)
        self.patch_size = getattr(cfg, 'vision_patch_size', 14)
        self.hidden_size = getattr(cfg, 'vision_hidden_size', 1152)
        self.num_heads = getattr(cfg, 'vision_num_heads', 18)
        self.num_layers = getattr(cfg, 'vision_num_layers', 32)

        print(f"🟧\tVisionEncoder: __init__ start ({'enabled' if self.enabled else 'disabled'})")

        # ImageNet normalization statistics; registered as buffers so they
        # follow the module across devices/dtypes without being trained.
        self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        # Patch embedding: stride == kernel == patch_size -> non-overlapping patches.
        self.patch_embed = nn.Conv2d(
            in_channels=3,
            out_channels=self.hidden_size,
            kernel_size=self.patch_size,
            stride=self.patch_size
        )

        # Learned position embeddings for the base-resolution grid (+1 for [CLS]).
        num_patches = (self.image_size // self.patch_size) ** 2
        self.pos_embed = nn.Parameter(torch.randn(1, num_patches + 1, self.hidden_size))
        self.cls_token = nn.Parameter(torch.randn(1, 1, self.hidden_size))

        # Pre-norm Transformer encoder blocks.
        self.transformer = nn.ModuleDict({
            'layers': nn.ModuleList([
                nn.ModuleDict({
                    'norm1': nn.LayerNorm(self.hidden_size),
                    'attn': nn.MultiheadAttention(
                        embed_dim=self.hidden_size,
                        num_heads=self.num_heads,
                        batch_first=True
                    ),
                    'norm2': nn.LayerNorm(self.hidden_size),
                    'mlp': nn.Sequential(
                        nn.Linear(self.hidden_size, 4 * self.hidden_size),
                        nn.GELU(),
                        nn.Linear(4 * self.hidden_size, self.hidden_size)
                    )
                }) for _ in range(self.num_layers)
            ]),
            'norm': nn.LayerNorm(self.hidden_size)
        })

        # Project encoder outputs to the model's hidden dimension.
        self.proj = nn.Linear(self.hidden_size, cfg.hidden_size)

        print("🟧\tVisionEncoder: __init__ end")

    def process_image(self, image_path):
        """
        Load an image file and convert it into a pixel tensor for ``forward``.

        Note: mean/std normalization is deliberately NOT applied here.
        ``forward`` normalizes its input, and the previous implementation also
        normalized here, so images loaded through this helper were normalized
        twice. The returned shape (1, 3, image_size, image_size) is unchanged.

        Args:
            image_path (str): Path to the image file.

        Returns:
            torch.Tensor: Float tensor in [0, 1] of shape
            (1, 3, image_size, image_size), or None on failure.
        """
        print(f"🟧\tProcessing image: {image_path}")
        try:
            # Read image using PIL and convert it to RGB format
            image = Image.open(image_path).convert('RGB')
            # Resize the image to the base model resolution
            image = image.resize((self.image_size, self.image_size))
            # Convert to a float tensor in [0, 1], channels-first
            image = torch.tensor(np.array(image)).permute(2, 0, 1).float() / 255.0
            # Add the batch dimension explicitly (the old code got a leading
            # dim only as a side effect of broadcasting against mean/std).
            return image.unsqueeze(0)
        except Exception as e:
            print(f"❌\tImage processing error: {e}")
            return None

    def _resize_pos_embed(self, H, W):
        """
        Return position embeddings matching an input of size (H, W).

        The learned table covers the base ``image_size`` grid. For any other
        resolution the patch-grid portion is bilinearly interpolated to the
        current grid; the [CLS] position is kept unchanged.
        """
        grid_h = H // self.patch_size
        grid_w = W // self.patch_size
        base = self.image_size // self.patch_size
        if grid_h == base and grid_w == base:
            return self.pos_embed
        cls_pos = self.pos_embed[:, :1]
        patch_pos = self.pos_embed[:, 1:]
        # (1, base*base, D) -> (1, D, base, base) for spatial interpolation.
        patch_pos = patch_pos.reshape(1, base, base, self.hidden_size).permute(0, 3, 1, 2)
        patch_pos = F.interpolate(patch_pos, size=(grid_h, grid_w), mode='bilinear', align_corners=False)
        patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, grid_h * grid_w, self.hidden_size)
        return torch.cat((cls_pos, patch_pos), dim=1)

    def forward(self, pixel_values):
        """
        Forward pass of the VisionEncoder module.

        Args:
            pixel_values (torch.Tensor): (B, 3, H, W) float tensor of raw
                pixel values (un-normalized; normalization happens here).

        Returns:
            torch.Tensor: (B, num_patches + 1, cfg.hidden_size) embeddings,
            or a (1, 1, cfg.hidden_size) zero tensor when input is None.
        """
        if pixel_values is None:
            # No image in this sample: emit a single zero token on the same
            # device as the model weights.
            return torch.zeros(1, 1, self.cfg.hidden_size, device=self.proj.weight.device)

        # Normalize with ImageNet statistics.
        x = (pixel_values - self.mean) / self.std

        # Process dynamic resolution following the NaViT style
        B, C, H, W = x.shape
        patch_size = self.patch_size

        # Round spatial dims up to a multiple of patch_size if needed.
        if H % patch_size != 0 or W % patch_size != 0:
            new_H = ((H + patch_size - 1) // patch_size) * patch_size
            new_W = ((W + patch_size - 1) // patch_size) * patch_size
            x = F.interpolate(x, size=(new_H, new_W), mode='bilinear', align_corners=False)
            H, W = new_H, new_W

        # Patchify: (B, D, H/p, W/p) -> (B, num_patches, D).
        x = self.patch_embed(x)
        x = x.flatten(2).transpose(1, 2)

        # Prepend the [CLS] token.
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # BUG FIX: the old code sliced self.pos_embed to num_patches + 1,
        # which failed (shape mismatch on the add) for inputs larger than
        # image_size and assigned wrong grid positions for smaller inputs.
        # Interpolate the position table to the actual grid instead.
        x = x + self._resize_pos_embed(H, W)

        # Pre-norm Transformer blocks with residual connections.
        for layer in self.transformer['layers']:
            normed = layer['norm1'](x)  # compute once; the old code ran norm1 three times
            x = x + layer['attn'](normed, normed, normed)[0]
            x = x + layer['mlp'](layer['norm2'](x))

        # Apply final normalization
        x = self.transformer['norm'](x)

        # Project the output to the model's hidden dimension
        x = self.proj(x)
        return x

class AudioEncoder(nn.Module):
    """
    An audio encoder: a small 1-D conv front-end pools the extracted audio
    features, and a linear projection maps them to one embedding token.
    """
    def __init__(self, cfg):
        """
        Initialize the AudioEncoder module.

        Args:
            cfg: Configuration object providing ``hidden_size``.
        """
        super().__init__()
        self.enabled = True
        self.cfg = cfg
        print(f"🟧\tAudioEncoder: __init__ start ({'enabled' if self.enabled else 'disabled'})")

        # Audio Spectrogram Transformer feature extractor used by process_audio.
        self.processor = ASTFeatureExtractor()

        # 1-D conv front-end: Conv -> BatchNorm -> ReLU -> MaxPool.
        self.conv1 = nn.Conv1d(1, 64, kernel_size=10, stride=5, padding=3)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=8, stride=8)

        # Projection to the model's hidden dimension.
        # NOTE(review): the Linear expects the pooled feature map to flatten to
        # exactly 64 * 128 values, i.e. a fixed input length — confirm the
        # feature extractor always yields that length.
        self.proj = nn.Sequential(
            nn.Linear(64 * 128, cfg.hidden_size),
            nn.LayerNorm(cfg.hidden_size)
        )

        print("🟧\tAudioEncoder: __init__ end")

    def process_audio(self, audio_path):
        """
        Process audio data from a given file path.

        Args:
            audio_path (str): Path to the audio file.

        Returns:
            torch.Tensor: Processed audio tensor if successful, None otherwise.
        """
        print(f"🟧\tProcessing audio: {audio_path}")
        try:
            audio = self.processor(audio=audio_path, return_tensors="pt")
            return audio['input_values'][0]
        except Exception as e:
            print(f"❌\tAudio processing error: {e}")
            return None

    def forward(self, audio_input):
        """
        Forward pass of the AudioEncoder module.

        Args:
            audio_input (dict): Dict with key ``'input_values'`` holding a
                (B, L) float tensor, or None when the sample has no audio.

        Returns:
            torch.Tensor: (B, 1, cfg.hidden_size) embedding, or a zero
            (1, 1, cfg.hidden_size) tensor when the input is None.
        """
        if audio_input is None:
            # BUG FIX: the old code read ``audio_input.device`` here, which
            # always raised AttributeError because audio_input is None. Use
            # the device of the module's own weights instead.
            device = self.proj[0].weight.device
            return torch.zeros(1, 1, self.cfg.hidden_size, device=device)

        x = self.conv1(audio_input['input_values'].unsqueeze(1))  # (B, 64, L')
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = x.view(x.size(0), -1)  # flatten to (B, 64 * 128)
        x = self.proj(x)
        return x.unsqueeze(1)  # add a sequence axis: (B, 1, hidden)

class DocEncoder(nn.Module):
    """
    A document encoder that projects pre-embedded document features into the
    model's hidden space and adds a sequence axis.
    """
    def __init__(self, cfg):
        """
        Build the document encoder.

        Args:
            cfg: Configuration object providing ``hidden_size``.
        """
        super().__init__()
        self.enabled = True
        self.cfg = cfg
        print(f"🟧\tDocEncoder: __init__ start ({'enabled' if self.enabled else 'disabled'})")

        # hidden_size -> hidden_size linear map followed by LayerNorm.
        projection = nn.Linear(cfg.hidden_size, cfg.hidden_size)
        normalization = nn.LayerNorm(cfg.hidden_size)
        self.doc_proj = nn.Sequential(projection, normalization)

        print("🟧\tDocEncoder: __init__ end")

    def forward(self, doc_input):
        """
        Encode document input.

        Args:
            doc_input (dict): Dict with key ``'input_ids'``. The Linear layer
                requires a float tensor whose last dimension is
                ``cfg.hidden_size`` — NOTE(review): despite the name, these
                cannot be raw integer token ids; confirm against the caller.

        Returns:
            torch.Tensor: Projected features with an extra axis inserted at
            dim 1.
        """
        features = doc_input['input_ids']
        projected = self.doc_proj(features)
        return projected.unsqueeze(1)