#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import torch
from torch import nn
from .moe import MoELayer
import torch.nn.functional as F
from .config import PiscesConfig
from .reasoner import PiscesReasoner
from model.moe_dynamic import DynamicMoELayer
from model.yarn_rope import YaRNRotaryEmbedding
from model.vision_native import NativeSiglipVisionEncoder
from .multimodal import VisionEncoder, AudioEncoder, DocEncoder

def pisces_init_weights(m):
    """
    Weight initializer for Pisces modules, intended for ``nn.Module.apply``.

    Linear layers receive Kaiming-uniform weights (with any bias zeroed);
    embedding tables receive a small normal init (std=0.02). All other
    module types are left untouched.

    Args:
        m (nn.Module): Module visited during the ``apply`` traversal.
    """
    if isinstance(m, nn.Embedding):
        # GPT-style small-variance init for token embeddings.
        nn.init.normal_(m.weight, mean=0, std=0.02)
    elif isinstance(m, nn.Linear):
        # a=sqrt(5) mirrors nn.Linear's own default fan-in scaling.
        nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
        if m.bias is not None:
            nn.init.zeros_(m.bias)

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization (no mean centering, no bias)."""

    def __init__(self, dim, eps=1e-6):
        """
        Create an RMSNorm layer.

        Args:
            dim (int): Size of the last (feature) dimension to normalize.
            eps (float, optional): Stability constant added to the mean
                square before the reciprocal square root. Defaults to 1e-6.
        """
        super().__init__()
        self.eps = eps
        # Learnable per-feature gain, initialized to the identity.
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        """
        Scale ``x`` by the inverse RMS of its last dimension, then apply the gain.

        Args:
            x (torch.Tensor): Tensor whose trailing dimension has size ``dim``.

        Returns:
            torch.Tensor: Normalized tensor with the same shape as ``x``.
        """
        mean_square = x.pow(2).mean(dim=-1, keepdim=True)
        normalized = x * torch.rsqrt(mean_square + self.eps)
        return normalized * self.weight

class RotaryEmbedding(nn.Module):
    """Rotary positional embedding with precomputed cos/sin lookup tables."""

    def __init__(self, dim, max_seq_len=8192, base=1e6, device=None, dtype=None):
        """
        Precompute rotation angles for every position up to ``max_seq_len``.

        Args:
            dim (int): Head dimension (rotations act on pairs, so dim/2 frequencies).
            max_seq_len (int, optional): Maximum sequence length. Defaults to 8192.
            base (float, optional): Base for the frequency geometric series. Defaults to 1e6.
            device (torch.device, optional): Device for the tables. Defaults to None.
            dtype (torch.dtype, optional): Unused; kept for signature compatibility. Defaults to None.
        """
        super().__init__()
        # Per-pair inverse frequencies: base^(-2i/dim) for i in [0, dim/2).
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim))
        positions = torch.arange(max_seq_len, dtype=torch.float32, device=device)
        # Outer product (positions x frequencies) via broadcasting.
        angles = positions[:, None] * inv_freq[None, :]
        self.register_buffer("cos", angles.cos())
        self.register_buffer("sin", angles.sin())

    def forward(self, x, seq_len):
        """
        Rotate interleaved (even, odd) feature pairs of ``x`` by position.

        Args:
            x (torch.Tensor): Tensor whose second-to-last dim indexes positions
                and whose last dim has size ``dim``.
            seq_len (int): Number of positions actually present in ``x``.

        Returns:
            torch.Tensor: Rotated tensor with the same shape as ``x``.
        """
        cos = self.cos[:seq_len]
        sin = self.sin[:seq_len]
        even = x[..., 0::2]
        odd = x[..., 1::2]
        rotated_even = even * cos - odd * sin
        rotated_odd = even * sin + odd * cos
        # Re-interleave the rotated pairs back into the last dimension.
        return torch.stack([rotated_even, rotated_odd], dim=-1).flatten(-2)

class Attention(nn.Module):
    """Multi-head attention with interleaved attention layers for 10M context"""
    def __init__(self, cfg, device=None, dtype=None):
        """
        Initialize the multi-head attention layer (GQA-style: n_kv_head may be
        smaller than n_head, with K/V repeated to match).

        Args:
            cfg: Configuration object providing hidden_size, n_head, n_kv_head,
                max_position_embeddings and rope_theta.
            device (torch.device, optional): Device to place tensors on. Defaults to None.
            dtype (torch.dtype, optional): Data type of tensors. Defaults to None.
        """
        super().__init__()
        self.cfg = cfg
        self.n_head = cfg.n_head
        self.n_kv_head = cfg.n_kv_head
        self.head_dim = cfg.hidden_size // cfg.n_head
        self.scale = self.head_dim ** -0.5
        self.q_proj = nn.Linear(cfg.hidden_size, cfg.n_head * self.head_dim, bias=False, device=device, dtype=dtype)
        self.k_proj = nn.Linear(cfg.hidden_size, cfg.n_kv_head * self.head_dim, bias=False, device=device, dtype=dtype)
        self.v_proj = nn.Linear(cfg.hidden_size, cfg.n_kv_head * self.head_dim, bias=False, device=device, dtype=dtype)
        self.o_proj = nn.Linear(cfg.n_head * self.head_dim, cfg.hidden_size, bias=False, device=device, dtype=dtype)
        self.rope = YaRNRotaryEmbedding(self.head_dim, cfg.max_position_embeddings, cfg.rope_theta, device=device)
        # Number of independent softmax chunks used for long sequences (t > 1024).
        self.interleaved_depth = 4
        # Kept for backward compatibility with external readers; the effective
        # temperature is now computed per call in forward() so a long batch
        # can no longer leave a stale value behind.
        self.temp_scaling = 1.0
        self.apply(pisces_init_weights)

    def forward(self, x, mask):
        """
        Perform multi-head attention calculation.

        Args:
            x (torch.Tensor): Input tensor of shape (batch, seq, hidden).
            mask (torch.Tensor): Additive attention mask whose last two dims
                cover (seq, seq); may be 2-D or 4-D.

        Returns:
            torch.Tensor: Output tensor after attention calculation.
        """
        b, t, _ = x.shape

        # Validate sequence length bounds.
        max_seq_len = self.cfg.max_position_embeddings
        if t > max_seq_len:
            print(f"🟧\tSequence length {t} exceeds max {max_seq_len}, truncating...")
            x = x[:, :max_seq_len]
            # Ellipsis slicing supports both 2-D and 4-D masks; the caller in
            # this file passes a 2-D (t, t) mask.
            mask = mask[..., :max_seq_len, :max_seq_len]
            t = max_seq_len

        q = self.q_proj(x).view(b, t, self.n_head, self.head_dim).transpose(1, 2)
        k = self.k_proj(x).view(b, t, self.n_kv_head, self.head_dim).transpose(1, 2)
        v = self.v_proj(x).view(b, t, self.n_kv_head, self.head_dim).transpose(1, 2)

        # Validate tensor dimensions.
        assert q.shape[-1] == self.head_dim, f"q head_dim mismatch: {q.shape[-1]} != {self.head_dim}"
        assert k.shape[-1] == self.head_dim, f"k head_dim mismatch: {k.shape[-1]} != {self.head_dim}"
        assert v.shape[-1] == self.head_dim, f"v head_dim mismatch: {v.shape[-1]} != {self.head_dim}"

        q, k = self.rope(q, t), self.rope(k, t)
        # GQA: expand the kv heads so each query head has a matching K/V head.
        k = k.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
        v = v.repeat_interleave(self.n_head // self.n_kv_head, dim=1)

        # Temperature damping when running past the RoPE's original training
        # window. Computed locally (not cached on self) so subsequent shorter
        # sequences are unaffected.
        temp_scaling = 1.0
        if t > self.rope.original_max_position_embeddings:
            temp_scaling = (self.rope.original_max_position_embeddings / t) ** 0.25

        # Ensure mask dimensions match the (possibly truncated) sequence.
        if mask.shape[-1] != t or mask.shape[-2] != t:
            mask = mask[..., :t, :t]

        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale * temp_scaling + mask

        # Validate attention scores.
        if torch.isnan(scores).any():
            print("🟧\tNaN detected in attention scores, replacing with zeros")
            scores = torch.where(torch.isnan(scores), torch.zeros_like(scores), scores)

        # Hierarchical attention: softmax applied independently over contiguous
        # key chunks. Striding over the full key axis guarantees the tail
        # columns are kept when t is not divisible by interleaved_depth; the
        # previous fixed-count chunking dropped them, producing scores narrower
        # than v and crashing the matmul below.
        if self.interleaved_depth > 1 and t > 1024:
            chunk_size = t // self.interleaved_depth
            if chunk_size > 0:
                scores = torch.cat(
                    [F.softmax(scores[..., s:s + chunk_size], dim=-1)
                     for s in range(0, t, chunk_size)],
                    dim=-1,
                )
            else:
                scores = F.softmax(scores, dim=-1)
        else:
            scores = F.softmax(scores, dim=-1)

        out = torch.matmul(scores, v).transpose(1, 2).contiguous().view(b, t, -1)
        return self.o_proj(out)

class TransformerBlock(nn.Module):
    """Pre-norm transformer block: attention followed by a dynamic-MoE MLP."""

    def __init__(self, cfg, device=None, dtype=None):
        """
        Build the block's submodules.

        Args:
            cfg: Configuration object containing block parameters.
            device (torch.device, optional): Device for parameters. Defaults to None.
            dtype (torch.dtype, optional): Parameter dtype. Defaults to None.
        """
        super().__init__()
        self.attn = Attention(cfg, device=device, dtype=dtype)
        self.mlp = DynamicMoELayer(cfg, device=device, dtype=dtype)
        self.norm1 = RMSNorm(cfg.hidden_size)
        self.norm2 = RMSNorm(cfg.hidden_size)

    def forward(self, x, mask):
        """
        Run one attention + MoE step with residual connections around each.

        Args:
            x (torch.Tensor): Input hidden states.
            mask (torch.Tensor): Attention mask.

        Returns:
            tuple: (output hidden states, MoE auxiliary loss).
        """
        attn_out = self.attn(self.norm1(x), mask)
        h = x + attn_out
        moe_out, aux_loss = self.mlp(self.norm2(h))
        return h + moe_out, aux_loss

class PiscesModel(nn.Module):
    """Pisces L1 multimodal MoE model (oneflow style)"""
    def __init__(self, cfg, device=None, dtype=None, quantization_config=None, lora_config=None):
        """
        Initialize the Pisces model.

        Args:
            cfg: Configuration object containing model parameters.
            device (torch.device, optional): Device to place tensors on. Defaults to None.
            dtype (torch.dtype, optional): Data type of tensors. Defaults to None.
            quantization_config: Configuration for 4-bit quantization. Defaults to None.
            lora_config: Configuration for LoRA adaptation. Defaults to None.
        """
        super().__init__()
        print("🟧\tPiscesModel: __init__ start")
        self.cfg = cfg
        self.quantization_config = quantization_config
        self.lora_config = lora_config

        print("🟧\tPiscesModel: initializing embedding...")
        self.embed = nn.Embedding(cfg.vocab_size, cfg.hidden_size, device=device, dtype=dtype)
        print(f"🟧\tPiscesModel: initializing {cfg.n_layer} transformer layers...")
        self.layers = nn.ModuleList([])
        for i in range(cfg.n_layer):
            # Log only every 4th layer (and the last) to keep start-up output readable.
            if (i % 4 == 0) or (i == cfg.n_layer-1):
                print(f"🟧\tPiscesModel: initializing TransformerBlock {i+1}/{cfg.n_layer}")
            self.layers.append(TransformerBlock(cfg, device=device, dtype=dtype))
        print("🟧\tPiscesModel: initializing norm...")
        self.norm = RMSNorm(cfg.hidden_size)
        print("🟧\tPiscesModel: initializing multimodal encoders...")
        self.vision = VisionEncoder(cfg)
        self.audio = AudioEncoder(cfg)
        self.doc = DocEncoder(cfg)
        print("🟧\tPiscesModel: initializing output heads...")
        self.lm_head = nn.Linear(cfg.hidden_size, cfg.vocab_size, bias=False, device=device, dtype=dtype)
        self.task_head = nn.Linear(cfg.hidden_size, cfg.task_classes, device=device, dtype=dtype)
        self.eval_head = nn.Linear(cfg.hidden_size, cfg.eval_dims, device=device, dtype=dtype)

        print("🟧\tPiscesModel: initializing reasoner...")
        self.reasoner = PiscesReasoner(cfg)

        self.apply(pisces_init_weights)

        # 4-bit conversion must run AFTER the submodules exist and AFTER weight
        # init: previously it ran before any Linear layer had been created, so
        # it converted nothing while still printing success.
        if quantization_config is not None:
            try:
                import bitsandbytes as bnb
                def convert_linear_to_4bit(module):
                    """
                    Recursively replace every nn.Linear in `module` with a
                    bitsandbytes Linear4bit of the same shape.

                    Args:
                        module (nn.Module): PyTorch module to convert in place.
                    """
                    for name, child in module.named_children():
                        if isinstance(child, nn.Linear):
                            new_mod = bnb.nn.Linear4bit(
                                child.in_features, child.out_features, bias=child.bias is not None,
                                quant_type=getattr(quantization_config, 'bnb_4bit_quant_type', 'nf4'),
                                compute_dtype=getattr(quantization_config, 'bnb_4bit_compute_dtype', torch.bfloat16),
                                compress_statistics=getattr(quantization_config, 'bnb_4bit_use_double_quant', True)
                            )
                            setattr(module, name, new_mod)
                        else:
                            convert_linear_to_4bit(child)
                convert_linear_to_4bit(self)
                print("🟧\tPiscesModel: All Linear layers converted to 4bit (bitsandbytes)")
            except Exception as e:
                print(f"❌\t4bit quantization failed: {e}")

        if lora_config is not None:
            try:
                from peft import get_peft_model
                # NOTE(review): get_peft_model injects adapter layers into this
                # model's submodules, but rebinding the local name `self` has
                # no effect on callers, so the returned PeftModel wrapper is
                # discarded. Wrap externally if the PeftModel API
                # (save/merge adapters) is needed — confirm intent.
                self = get_peft_model(self, lora_config)
                print("🟧\tPiscesModel: LoRA adapters injected (peft)")
            except Exception as e:
                print(f"❌\tLoRA injection failed: {e}")
        total_params = sum(p.numel() for p in self.parameters())
        print(f"🟧\tPiscesModel: total parameters = {total_params/1e6:.2f}M")
        print("🟧\tPiscesModel: __init__ end")

    def resize_token_embeddings(self, new_num_tokens):
        """
        Resizes token embeddings and associated heads to accommodate a new vocabulary size.

        Existing rows are copied over; rows beyond the old vocabulary keep the
        fresh module's default initialization.

        Args:
            new_num_tokens (int): New vocabulary size.
        """
        # 1. Resize main token embedding.
        old_embed = self.embed
        new_embed = nn.Embedding(new_num_tokens, self.cfg.hidden_size, device=old_embed.weight.device, dtype=old_embed.weight.dtype)

        # Copy as many old rows as fit in the new table.
        num_to_copy = min(old_embed.num_embeddings, new_num_tokens)
        new_embed.weight.data[:num_to_copy, :] = old_embed.weight.data[:num_to_copy, :]
        self.embed = new_embed

        # 2. Resize LM head (tied row count, not tied weights).
        old_lm_head = self.lm_head
        new_lm_head = nn.Linear(self.cfg.hidden_size, new_num_tokens, bias=False, device=old_lm_head.weight.device, dtype=old_lm_head.weight.dtype)
        new_lm_head.weight.data[:num_to_copy, :] = old_lm_head.weight.data[:num_to_copy, :]
        self.lm_head = new_lm_head

        # 3. Resize reasoner's thinking head.
        self.reasoner.resize_vocab(new_num_tokens)

        # 4. Update config so later consumers see the new size.
        self.cfg.vocab_size = new_num_tokens
        print(f"✅\tResized token embeddings to {new_num_tokens}. Remember to update special token IDs in the reasoner.")

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        """Compatible with PEFT/Transformers generation interface; extend as needed in practice."""
        return {"input_ids": input_ids, **kwargs}

    def forward(self, input_ids, images=None, audio=None, docs=None, labels=None, **kwargs):
        """
        Perform forward pass through the Pisces model.

        Args:
            input_ids (torch.Tensor): Input token IDs of shape (batch, seq).
            images (torch.Tensor, optional): Input images. Defaults to None.
            audio (torch.Tensor, optional): Input audio. Defaults to None.
            docs (torch.Tensor, optional): Input documents. Defaults to None.
            labels (torch.Tensor, optional): Ground truth labels. Defaults to None.
            **kwargs: May carry a 'correct' supervision signal for the reasoner.

        Returns:
            dict: logits, loss, task_logits, eval_score, aux_loss, reasoner_out.
        """
        b, t = input_ids.shape

        # Guard against empty input: substitute a single padding token so the
        # stack always sees at least one position. Keep the real batch size
        # (the previous code reset b to 1 even for multi-row batches, and
        # zeros((b, 1)) stayed empty when b == 0).
        vocab_size = self.embed.num_embeddings
        if input_ids.numel() == 0:
            print("🟧\tEmpty input_ids detected, adding padding token")
            b = max(b, 1)
            input_ids = torch.zeros((b, 1), dtype=torch.long, device=input_ids.device)
            t = 1

        # Strict boundary check: clamp ids so the embedding lookup cannot fault.
        if torch.any(input_ids < 0):
            print(f"🟧\tWarning: Input IDs contain negative values. Clamping to 0...")
            input_ids = torch.clamp(input_ids, min=0)
        if torch.any(input_ids >= vocab_size):
            max_id = input_ids.max().item()
            print(f"🟧\tWarning: Input IDs contain values >= vocab_size ({vocab_size}). Max ID: {max_id}, clamping...")
            input_ids = torch.clamp(input_ids, max=vocab_size - 1)

        # NaN/Inf checks only matter if callers pass float-typed ids; integer
        # tensors cannot hold NaN/Inf. Kept for defensive compatibility.
        if torch.isnan(input_ids.float()).any():
            print("🟧\tWarning: NaN values in input_ids, replacing with 0")
            input_ids = torch.nan_to_num(input_ids, nan=0)
        if torch.isinf(input_ids.float()).any():
            print("🟧\tWarning: Inf values in input_ids, replacing with 0")
            input_ids = torch.nan_to_num(input_ids, posinf=0, neginf=0)

        # Validate sequence length.
        max_seq_len = getattr(self.cfg, 'max_position_embeddings', 10485760)
        if t > max_seq_len:
            print(f"🟧\tSequence length {t} exceeds max {max_seq_len}, truncating...")
            input_ids = input_ids[:, :max_seq_len]
            t = max_seq_len

        x = self.embed(input_ids)
        # Prepend modality embeddings ahead of the text tokens.
        if images is not None:
            x = torch.cat([self.vision(images), x], dim=1)
        if audio is not None:
            x = torch.cat([self.audio(audio), x], dim=1)
        if docs is not None:
            x = torch.cat([self.doc(docs), x], dim=1)
        # Derive the true length from the tensor itself: the previous `t += 1`
        # per modality assumed each encoder emits exactly one token, which
        # desynchronized the causal mask whenever an encoder emits more.
        t = x.shape[1]

        # Sequence length (modality prefix + text) used to slice logits for the LM loss.
        lm_seq_len = x.shape[1]

        mask = torch.full((t, t), float('-inf'), device=x.device, dtype=x.dtype)
        mask = torch.triu(mask, diagonal=1)
        total_aux_loss = 0.0

        chunk_size = min(getattr(self.cfg, 'max_position_embeddings', 10485760) // 16, 8192)
        chunk_size = max(chunk_size, 1)  # Ensure chunk_size is at least 1
        outputs = []

        # Prefer the modern torch.amp API, falling back for older torch.
        if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
            autocast_ctx = torch.amp.autocast("cuda", dtype=torch.bfloat16)
        else:
            autocast_ctx = torch.cuda.amp.autocast(dtype=torch.bfloat16)
        with autocast_ctx:
            for i in range(0, x.shape[1], chunk_size):
                x_chunk = x[:, i:i+chunk_size, ...]
                # NOTE(review): each chunk only receives its diagonal mask
                # block, so tokens cannot attend across chunk boundaries —
                # confirm this locality trade-off is intended.
                mask_chunk = mask[i:i+chunk_size, i:i+chunk_size]
                def block_fn(xc, msk):
                    """
                    Run the chunk through every transformer layer, summing MoE aux losses.

                    Args:
                        xc (torch.Tensor): Input tensor chunk.
                        msk (torch.Tensor): Attention mask chunk.

                    Returns:
                        tuple: Output tensor and cumulative auxiliary loss.
                    """
                    h = xc
                    aux = 0.0
                    for layer in self.layers:
                        h, aux_loss = layer(h, msk)
                        aux = aux + aux_loss if aux_loss is not None else aux
                    return h, aux

                # Gradient checkpointing intentionally disabled here
                # (torch.utils.checkpoint raised _StopRecomputationError with
                # this chunked loop).
                h_chunk, aux_chunk = block_fn(x_chunk, mask_chunk)
                outputs.append(h_chunk)
                total_aux_loss = total_aux_loss + aux_chunk
            if outputs:
                x = torch.cat(outputs, dim=1)

            if x.shape[1] == 0:
                # Handle empty sequences gracefully to prevent indexing errors in heads.
                return {
                    "logits": self.lm_head(x),
                    "loss": torch.tensor(0.0, device=x.device, requires_grad=True),
                    "task_logits": torch.zeros(x.shape[0], self.cfg.task_classes, device=x.device),
                    "eval_score": torch.zeros(x.shape[0], self.cfg.eval_dims, device=x.device),
                    "aux_loss": total_aux_loss,
                    "reasoner_out": {"loss": torch.tensor(0.0, device=x.device, requires_grad=True)}
                }

            x = self.norm(x)

            # Main model outputs.
            logits = self.lm_head(x)

            # Reasoner outputs; pass the optional 'correct' label through.
            reasoner_kwargs = {"hidden_states": x, "input_ids": input_ids, "labels": labels}
            if "correct" in kwargs:
                reasoner_kwargs["correct"] = kwargs["correct"]
            reasoner_out = self.reasoner(**reasoner_kwargs)

            loss = None
            if labels is not None:
                # NOTE(review): lm_seq_len includes any prepended modality
                # tokens, so logits and labels are only aligned when no
                # modality inputs are given — confirm label construction
                # accounts for the prefix.
                lm_loss = F.cross_entropy(
                    logits[:, :lm_seq_len, :].reshape(-1, logits.size(-1)),
                    labels.view(-1)
                )

                # Combine with reasoner loss.
                reasoner_loss = reasoner_out.get("loss", torch.tensor(0.0, device=x.device))
                loss = lm_loss + reasoner_loss

            # First-token pooling for classification; mean pooling for scoring.
            task_logits = self.task_head(x[:, 0])
            eval_score = self.eval_head(x.mean(1))

        return {
            "logits": logits,
            "loss": loss,
            "task_logits": task_logits,
            "eval_score": eval_score,
            "aux_loss": total_aux_loss,
            "reasoner_out": reasoner_out
        }