#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
# Weight initializer applied to every submodule of an MoE expert.
def moe_init_weights(m):
    """
    Kaiming-initialize the weights of linear layers used in MoE experts.

    Intended to be passed to ``nn.Module.apply``; non-linear modules are
    left untouched. Biases, when present, are zeroed.

    Args:
        m (nn.Module): Module visited during ``apply`` traversal.
    """
    # Only linear layers are (re-)initialized; everything else passes through.
    if not isinstance(m, nn.Linear):
        return
    # Same Kaiming-uniform scheme (a=sqrt(5)) PyTorch uses for Linear resets.
    nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
    if m.bias is not None:
        nn.init.zeros_(m.bias)

# ExpertChoiceRouter implements expert-choice routing: each expert selects
# the tokens it will process (rather than tokens selecting experts).
class ExpertChoiceRouter(nn.Module):
    """
    Expert-choice router for a Mixture-of-Experts layer.

    Each expert picks its own top-``capacity`` tokens by gate score, where
    capacity = tokens * capacity_factor / num_experts.

    Args:
        hidden_size (int): Dimension of the input hidden states.
        num_experts (int): Number of experts in the MoE layer.
        capacity_factor (float, optional): Capacity factor for each expert. Defaults to 1.25.
        top_k (int, optional): Number of top experts to select. Defaults to 2.
    """
    def __init__(self, hidden_size, num_experts, capacity_factor=1.25, top_k=2):
        super().__init__()
        # Gate network producing one routing score per (token, expert) pair
        self.gate = nn.Linear(hidden_size, num_experts, bias=False)
        # Capacity factor for each expert
        self.capacity_factor = capacity_factor
        # Number of experts
        self.num_experts = num_experts
        # Number of top experts to select (kept for interface compatibility;
        # expert-choice selection itself is driven by capacity, not top_k)
        self.top_k = top_k

    def forward(self, x):
        """
        Forward pass of the ExpertChoiceRouter.

        Args:
            x (torch.Tensor): Input tensor with shape (batch*seq, hidden_size).

        Returns:
            tuple:
                - expert_indices (torch.Tensor): (num_experts, capacity) token
                  indices selected by each expert.
                - dispatch_mask (torch.Tensor): (tokens, num_experts) 0/1 matrix;
                  dispatch_mask[t, e] == 1 iff expert e selected token t.
                - load_balancing_loss (torch.Tensor): Scalar load-balancing loss.
        """
        num_tokens = x.shape[0]
        # Capacity: how many tokens each expert handles (at least 1)
        tokens_per_expert = min(int(num_tokens * self.capacity_factor / self.num_experts), num_tokens)
        tokens_per_expert = max(tokens_per_expert, 1)

        # Routing scores, shape (tokens, experts)
        logits = self.gate(x)

        # Expert choice: each expert takes its top-capacity tokens.
        # expert_indices shape: (experts, capacity); values are valid token
        # indices by construction of topk, so no clamping is needed.
        expert_indices = torch.topk(logits.T, tokens_per_expert, dim=1).indices

        # Build the (tokens, experts) dispatch mask.
        # BUG FIX: the column written for the token expert_indices[e, c] must
        # be the expert id e — the previous code used the capacity slot c
        # (clamped into range), scattering assignments into wrong expert
        # columns whenever c != e.
        dispatch_mask = torch.zeros_like(logits)
        expert_ids = torch.arange(
            self.num_experts, device=logits.device
        ).unsqueeze(1).expand_as(expert_indices)
        dispatch_mask[expert_indices.reshape(-1), expert_ids.reshape(-1)] = 1

        # Per-expert load: number of tokens assigned to each expert
        expert_load = dispatch_mask.sum(dim=0)

        # Load-balancing loss: load-weighted log-ratio of load to mean load.
        # clamp_min avoids 0 * log(0) -> NaN if an expert ever has zero load.
        # (With expert choice, every expert selects exactly tokens_per_expert
        # tokens, so this loss is zero by construction — kept for interface
        # compatibility with callers that add it to the training loss.)
        load_balancing_loss = (
            expert_load * torch.log(expert_load.clamp_min(1e-9) / expert_load.mean())
        ).mean()

        return expert_indices, dispatch_mask, load_balancing_loss

# DynamicMoELayer class implements a dynamic Mixture of Experts layer.
class DynamicMoELayer(nn.Module):
    """
    A dynamic Mixture of Experts (MoE) layer with expert device paging.

    Tokens are routed by an ExpertChoiceRouter; each expert is a two-layer
    SiLU MLP (hidden_size -> intermediate_size -> hidden_size). When more
    than 8 experts are configured and the input lives on CUDA, expert
    modules are paged between CPU and GPU so that at most
    ``max_gpu_experts`` are GPU-resident at any time.
    """
    # Counts constructed instances; only the first layer prints the banner.
    _layer_count = 0
    
    def __init__(self, cfg, device=None, dtype=None):
        """
        Initialize the DynamicMoELayer module.

        Args:
            cfg (object): Configuration object. Required attributes:
                hidden_size, intermediate_size. Optional attributes (defaults):
                moe_top_k (2), moe_num_experts (8), moe_capacity_factor (1.25),
                max_gpu_experts (4).
            device (torch.device, optional): Device for expert weights. Defaults to None.
            dtype (torch.dtype, optional): Dtype for expert weights. Defaults to None.
        """
        super().__init__()
        DynamicMoELayer._layer_count += 1
        # Store the configuration object
        self.cfg = cfg
        # Top-k per token; forwarded to the router (the expert-choice path
        # in forward() does not read it directly)
        self.top_k = getattr(cfg, 'moe_top_k', 2)
        # Number of experts in the MoE layer
        self.num_experts = getattr(cfg, 'moe_num_experts', 8)
        # Router that lets each expert pick its own top-capacity tokens
        self.router = ExpertChoiceRouter(
            cfg.hidden_size, 
            self.num_experts, 
            capacity_factor=getattr(cfg, 'moe_capacity_factor', 1.25),
            top_k=self.top_k
        )
        # Expert FFNs: Linear -> SiLU -> Linear, bias-free
        self.experts = nn.ModuleList([
            nn.Sequential(
                nn.Linear(cfg.hidden_size, cfg.intermediate_size, bias=False, device=device, dtype=dtype),
                nn.SiLU(),
                nn.Linear(cfg.intermediate_size, cfg.hidden_size, bias=False, device=device, dtype=dtype)
            ) for _ in range(self.num_experts)
        ])
        # Kaiming-initialize every linear layer inside each expert
        for expert in self.experts:
            expert.apply(moe_init_weights)
        
        # --- Expert device management (CPU <-> GPU paging) ---
        # Maximum number of experts allowed to be GPU-resident at once
        self.max_gpu_experts = getattr(cfg, 'max_gpu_experts', 4)
        # Maps expert_id -> step at which the expert was last marked active.
        # NOTE(review): assigning to an existing key does NOT move it to the
        # end of an OrderedDict, so popitem(last=False) below evicts in
        # insertion order, not true least-recently-used order — confirm
        # whether genuine LRU (move_to_end on touch) was intended.
        self._active_experts = OrderedDict()  # expert_id: last_used_step
        # Forward-pass counter; incremented at the end of forward()
        self._step = 0
        
        # Print configuration once, from the first constructed layer
        if DynamicMoELayer._layer_count == 1:
            print(f"✅	DynamicMoELayer: {self.num_experts} experts, top-{self.top_k} routing, capacity_factor={self.router.capacity_factor}")
    
    # Move an expert module to GPU and evict the oldest one if over budget.
    def _move_expert_to_gpu(self, expert_id):
        """
        Move the specified expert to GPU and manage the active-expert list.

        Args:
            expert_id (int): Index of the expert to move.
        """
        expert = self.experts[expert_id]
        # Only transfer if the expert is not already on a CUDA device
        if next(expert.parameters()).device.type != 'cuda':
            expert.to('cuda')
        # Record the step at which this expert was last used
        self._active_experts[expert_id] = self._step
        
        # Over budget: evict the oldest-inserted active expert back to CPU
        # (see NOTE(review) on _active_experts regarding LRU semantics)
        if len(self._active_experts) > self.max_gpu_experts:
            lru_expert_id, _ = self._active_experts.popitem(last=False)
            self._move_expert_to_cpu(lru_expert_id)
    
    # Move an expert module to CPU.
    def _move_expert_to_cpu(self, expert_id):
        """
        Move the specified expert to CPU (no-op if already there).

        Args:
            expert_id (int): Index of the expert to move.
        """
        expert = self.experts[expert_id]
        # Only transfer if the expert is not already on CPU
        if next(expert.parameters()).device.type != 'cpu':
            expert.to('cpu')
    
    def forward(self, x):
        """
        Forward pass of the DynamicMoELayer.

        Args:
            x (torch.Tensor): Input tensor with shape (batch_size, seq_len, hidden_size).

        Returns:
            tuple:
                - outputs (torch.Tensor): Output tensor with shape (batch_size, seq_len, hidden_size).
                - load_balancing_loss (torch.Tensor): Loss for load balancing among experts.

        Degraded paths (each prints a warning rather than raising): invalid
        or empty input returns the input unchanged with zero loss; NaN/Inf in
        the routing mask zeroes the mask; NaN/Inf in the final output falls
        back to the original input.
        """
        batch_size, seq_len, hidden = x.shape
        # Flatten (batch, seq, hidden) -> (batch*seq, hidden) for routing
        x_flat = x.view(-1, hidden)
        
        # Guard against degenerate dimensions; pass input through unchanged
        if batch_size <= 0 or seq_len <= 0 or hidden <= 0:
            print(f"🟧\tWarning: Invalid input dimensions batch={batch_size}, seq={seq_len}, hidden={hidden}")
            return x, torch.tensor(0.0, device=x.device)
        
        total_tokens = batch_size * seq_len
        if total_tokens == 0:
            print("🟧\tWarning: Empty input tensor")
            return x, torch.tensor(0.0, device=x.device)
        
        # expert_indices: (experts, capacity) token ids per expert;
        # dispatch_mask: (tokens, experts) routing matrix
        expert_indices, dispatch_mask, load_balancing_loss = self.router(x_flat)
        
        # Sanitize the routing mask: a NaN/Inf mask would poison the output
        if torch.isnan(dispatch_mask).any() or torch.isinf(dispatch_mask).any():
            print("🟧\tWarning: dispatch_mask contains NaN/Inf values, using zero mask")
            dispatch_mask = torch.zeros_like(dispatch_mask)
        
        # Accumulator for the weighted expert outputs
        outputs = torch.zeros_like(x_flat)
        
        # Expert device paging: only engaged for large expert counts on CUDA.
        # NOTE(review): topk always returns indices, so numel() > 0 holds for
        # every expert row — effectively all experts are marked "needed".
        if self.num_experts > 8 and x.device.type == 'cuda':
            needed_experts = set()
            for expert_id in range(self.num_experts):
                # Collect the IDs of experts that received any tokens
                if expert_id < expert_indices.shape[0] and expert_indices[expert_id].numel() > 0:
                    needed_experts.add(expert_id)
            for expert_id in needed_experts:
                self._move_expert_to_gpu(expert_id)
        
        # Run each expert on its selected tokens, with bounds checking
        for expert_id, expert in enumerate(self.experts):
            if expert_id < expert_indices.shape[0]:
                valid_indices = expert_indices[expert_id]
                
                # Clamp token indices into [0, num_tokens)
                valid_indices = torch.clamp(valid_indices, 0, x_flat.shape[0] - 1)
                
                # Deduplicate so the scatter-add below writes each row once
                valid_indices = torch.unique(valid_indices)
                
                if valid_indices.numel() > 0:
                    tokens = x_flat[valid_indices]
                    
                    # Defensive cap on the number of gathered tokens
                    if tokens.shape[0] > x_flat.shape[0]:
                        print(f"🟧\tWarning: Abnormal token count {tokens.shape[0]} > {x_flat.shape[0]}")
                        tokens = tokens[:x_flat.shape[0]]
                    
                    try:
                        # Migrate tokens to the expert's device if they differ
                        # (can happen when experts are paged to CPU)
                        expert_device = next(expert.parameters()).device
                        if tokens.device != expert_device:
                            tokens = tokens.to(expert_device)
                            print(f"🟧\tExpert {expert_id} device mismatch, migrated: {tokens.device} -> {expert_device}")
                        
                        # NOTE(review): enable_grad forces autograd tracking
                        # even under torch.no_grad()/eval — presumably a
                        # gradient-checkpointing workaround; confirm it is
                        # intended in inference mode too.
                        with torch.enable_grad():
                            expert_out = expert(tokens)
                        
                        # Expert MLP maps hidden -> hidden, so shapes should match
                        if expert_out.shape != tokens.shape:
                            print(f"🟧\tWarning: Expert {expert_id} output shape mismatch {expert_out.shape} != {tokens.shape}")
                            expert_out = expert_out.view(tokens.shape)
                        
                        # Re-clamp before writing into the accumulator
                        valid_indices = torch.clamp(valid_indices, 0, outputs.shape[0] - 1)
                        
                        # Weighted aggregation (dispatch_mask entries are 0/1)
                        expert_weights = dispatch_mask[valid_indices, expert_id].unsqueeze(-1)
                        outputs[valid_indices] += expert_out * expert_weights
                        
                    except Exception as e:
                        # Matched by type *name* — presumably
                        # torch.utils.checkpoint's internal
                        # _StopRecomputationError; verify against the torch
                        # version in use.
                        if "_StopRecomputationError" in str(type(e)):
                            print(f"🟧\tExpert {expert_id} gradient checkpointing error, using simplified path")
                            # Fallback: pass the tokens through unchanged
                            expert_out = tokens
                            expert_weights = dispatch_mask[valid_indices, expert_id].unsqueeze(-1)
                            outputs[valid_indices] += expert_out * expert_weights
                        else:
                            # Any other failure: log diagnostics and skip
                            # this expert (its tokens get zero contribution)
                            print(f"🟧\tExpert {expert_id} processing failed: {type(e).__name__}: {e}")
                            print(f"🟧\tExpert {expert_id} device: {next(expert.parameters()).device}")
                            print(f"🟧\tInput tensor shape: {tokens.shape}")
                            print(f"🟧\tInput tensor device: {tokens.device}")
                        continue
        
        # Last-resort sanitization of the aggregated output
        if torch.isnan(outputs).any() or torch.isinf(outputs).any():
            print("🟧\tWarning: Output contains NaN/Inf values, using original input")
            outputs = x_flat
        
        self._step += 1
        return outputs.view(batch_size, seq_len, hidden), load_balancing_loss