#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import torch
import torch.nn as nn
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from model.moe import MoELayer
from model.moe_adaptive import AdaptiveMoELayer
import argparse
import json
from dataclasses import dataclass

@dataclass
class BenchmarkConfig:
    """Configuration for MoE benchmark."""
    hidden_size: int = 512          # model hidden dimension fed into each MoE layer
    intermediate_size: int = 2048   # per-expert FFN intermediate dimension
    batch_size: int = 32            # samples per forward pass during benchmarking
    seq_len: int = 128              # tokens per sample
    num_experts: int = 64           # expert count used by test_expert_efficiency
    num_layers: int = 4             # NOTE(review): currently unused by the tests below
    warmup_steps: int = 10          # untimed steps before measurement
    test_steps: int = 100           # timed steps per benchmark run
    # NOTE: evaluated once at import time, not per instance — fine for a
    # script, but the choice is frozen even if CUDA state changes later.
    device: str = 'cuda' if torch.cuda.is_available() else 'cpu'

class MockConfig:
    """Minimal stand-in for a model configuration object.

    Holds the two sizes the MoE layers always need; any additional
    keyword arguments are attached verbatim as instance attributes.
    """

    def __init__(self, hidden_size=512, intermediate_size=2048, **kwargs):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        # Every remaining keyword becomes an attribute of the same name.
        self.__dict__.update(kwargs)

def benchmark_model(model, input_tensor, num_steps, warmup_steps=10):
    """Benchmark a model's forward-pass latency and throughput.

    Args:
        model: Module whose ``forward`` returns ``(output, aux_loss)`` and
            that exposes a ``num_experts`` attribute (used for the rough
            FLOP estimate).
        input_tensor: Input batch; dim 0 is treated as the batch dimension.
        num_steps: Number of timed forward passes; must be >= 1.
        warmup_steps: Untimed passes run first so lazy initialization and
            kernel autotuning do not pollute the measurement.

    Returns:
        dict with 'avg_time' (seconds/step), 'throughput' (samples/second),
        'total_flops' (approximate) and 'aux_loss' (from the last step).

    Raises:
        ValueError: if ``num_steps < 1``. (The previous version would
            instead crash with ZeroDivisionError and leave ``aux_loss``
            unbound when called with 0 steps.)
    """
    if num_steps < 1:
        raise ValueError("num_steps must be >= 1")

    model.eval()

    # Warmup (untimed).
    with torch.no_grad():
        for _ in range(warmup_steps):
            _ = model(input_tensor)

    # Drain any queued GPU work before starting the clock.
    if torch.cuda.is_available():
        torch.cuda.synchronize()

    # The per-step FLOP estimate is loop-invariant, so compute it once.
    flops_per_step = input_tensor.numel() * model.num_experts  # approximate

    # perf_counter is monotonic and higher-resolution than time.time().
    start_time = time.perf_counter()
    with torch.no_grad():
        for _ in range(num_steps):
            output, aux_loss = model(input_tensor)

    # CUDA launches are asynchronous; include pending work in the timing.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    end_time = time.perf_counter()

    elapsed = end_time - start_time
    avg_time = elapsed / num_steps
    throughput = input_tensor.size(0) * num_steps / elapsed

    return {
        'avg_time': avg_time,
        'throughput': throughput,
        'total_flops': flops_per_step * num_steps,
        'aux_loss': aux_loss.item() if isinstance(aux_loss, torch.Tensor) else aux_loss
    }

def memory_usage():
    """Report current GPU memory usage in megabytes.

    Returns a dict with 'allocated' and 'cached' entries; both are zero
    when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        return {'allocated': 0, 'cached': 0}
    mb = 1024 ** 2
    return {
        'allocated': torch.cuda.memory_allocated() / mb,  # MB
        'cached': torch.cuda.memory_reserved() / mb,  # MB
    }

def test_expert_efficiency():
    """Test expert activation efficiency."""
    config = BenchmarkConfig()
    mock_config = MockConfig(
        hidden_size=config.hidden_size,
        intermediate_size=config.intermediate_size,
        moe_num_experts=config.num_experts,
        moe_min_experts=1,
        moe_max_experts=128,
        moe_sparsity_threshold=0.05
    )
    device = torch.device(config.device)

    # Build both layer variants on the same device so timings are comparable.
    static_moe = MoELayer(mock_config, device=device)
    adaptive_moe = AdaptiveMoELayer(mock_config, device=device)

    # One shared random batch for both benchmark runs.
    input_tensor = torch.randn(config.batch_size, config.seq_len, config.hidden_size, device=device)

    print("🔍 Testing Expert Activation Efficiency...")

    # Time the static baseline first, then the adaptive variant.
    static_results = benchmark_model(static_moe, input_tensor, config.test_steps)
    adaptive_results = benchmark_model(adaptive_moe, input_tensor, config.test_steps)

    # Routing statistics are only exposed by the adaptive layer.
    stats = adaptive_moe.get_statistics()
    avg_k = stats['avg_dynamic_k']

    return {
        'static_moe': {
            'avg_time': static_results['avg_time'],
            'throughput': static_results['throughput'],
            'aux_loss': static_results['aux_loss'],
            'activated_experts': 2,  # Fixed for static MoE
        },
        'adaptive_moe': {
            'avg_time': adaptive_results['avg_time'],
            'throughput': adaptive_results['throughput'],
            'aux_loss': adaptive_results['aux_loss'],
            'activated_experts': avg_k,
            'expert_entropy': stats['expert_usage_entropy'],
        },
        'improvement': {
            'speedup': static_results['avg_time'] / adaptive_results['avg_time'],
            'throughput_gain': (adaptive_results['throughput'] - static_results['throughput']) / static_results['throughput'],
            'efficiency_gain': 2 / avg_k if avg_k > 0 else 0
        }
    }

def test_training_convergence():
    """Test training convergence speed."""
    config = BenchmarkConfig()
    mock_config = MockConfig(
        hidden_size=config.hidden_size,
        intermediate_size=config.intermediate_size,
        moe_num_experts=32,
        moe_min_experts=1,
        moe_max_experts=64,
        moe_sparsity_threshold=0.1
    )
    device = torch.device(config.device)

    # One instance of each MoE variant, each with its own optimizer.
    static_moe = MoELayer(mock_config, device=device)
    adaptive_moe = AdaptiveMoELayer(mock_config, device=device)
    static_optimizer = torch.optim.AdamW(static_moe.parameters(), lr=1e-4)
    adaptive_optimizer = torch.optim.AdamW(adaptive_moe.parameters(), lr=1e-4)

    criterion = nn.MSELoss()

    # Fixed regression target; inputs are resampled every step.
    target = torch.randn(config.batch_size, config.seq_len, config.hidden_size, device=device)

    print("🎯 Testing Training Convergence...")

    def _train_step(moe, optimizer):
        # One optimization step on a fresh random input; returns the scalar loss.
        optimizer.zero_grad()
        output, aux_loss = moe(torch.randn_like(target))
        loss = criterion(output, target) + 0.01 * aux_loss
        loss.backward()
        optimizer.step()
        return loss.item()

    static_losses = []
    adaptive_losses = []
    # 50 steps, alternating static then adaptive (preserves RNG call order).
    for _ in range(50):
        static_losses.append(_train_step(static_moe, static_optimizer))
        adaptive_losses.append(_train_step(adaptive_moe, adaptive_optimizer))

    # Relative loss reduction over the run, per model.
    static_improvement = (static_losses[0] - static_losses[-1]) / static_losses[0]
    adaptive_improvement = (adaptive_losses[0] - adaptive_losses[-1]) / adaptive_losses[0]

    return {
        'static_moe': {
            'initial_loss': static_losses[0],
            'final_loss': static_losses[-1],
            'improvement': static_improvement,
        },
        'adaptive_moe': {
            'initial_loss': adaptive_losses[0],
            'final_loss': adaptive_losses[-1],
            'improvement': adaptive_improvement,
        },
        'convergence_speedup': adaptive_improvement / static_improvement if static_improvement > 0 else 1.0
    }

def run_comprehensive_benchmark():
    """Run comprehensive benchmark suite."""
    print("🚀 Pisces L1 Adaptive MoE Benchmark Suite")
    print("=" * 50)

    # Show GPU memory only when a CUDA device is present.
    if torch.cuda.is_available():
        print(f"📊 GPU Memory: {memory_usage()}")

    # --- Expert efficiency -------------------------------------------------
    efficiency_report = test_expert_efficiency()
    imp = efficiency_report['improvement']

    print("\n📈 Efficiency Results:")
    print(f"Speedup: {imp['speedup']:.2f}x")
    print(f"Throughput Gain: {imp['throughput_gain']:.1%}")
    print(f"Expert Efficiency: {imp['efficiency_gain']:.2f}x")
    print(f"Avg Activated Experts: {efficiency_report['adaptive_moe']['activated_experts']:.1f}")

    # --- Training convergence ----------------------------------------------
    convergence_report = test_training_convergence()

    print("\n🎯 Convergence Results:")
    print(f"Training Speedup: {convergence_report['convergence_speedup']:.2f}x")

    # --- Persist the combined results ---------------------------------------
    summary = {
        'efficiency': efficiency_report,
        'convergence': convergence_report,
        'memory_usage': memory_usage(),
        'device': 'cuda' if torch.cuda.is_available() else 'cpu'
    }
    with open('adaptive_moe_benchmark.json', 'w') as f:
        json.dump(summary, f, indent=2)

    print("\n✅ Benchmark complete! Results saved to adaptive_moe_benchmark.json")
    return summary

def _parse_args():
    """Parse command-line options for the benchmark script."""
    parser = argparse.ArgumentParser(description='Benchmark Adaptive MoE')
    parser.add_argument('--device', type=str, default='auto', help='Device to use')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size')
    parser.add_argument('--seq_len', type=int, default=128, help='Sequence length')
    return parser.parse_args()


if __name__ == "__main__":
    args = _parse_args()

    # BUG FIX: the original built torch.device(args.device) and discarded it,
    # and --batch_size / --seq_len were parsed but never used anywhere —
    # run_comprehensive_benchmark() constructs its own BenchmarkConfig
    # internally. Until the CLI is plumbed through, validate the device
    # string and warn about ignored options instead of silently dropping them.
    if args.device != 'auto':
        torch.device(args.device)  # raises RuntimeError on an invalid device string
        print(f"⚠️  --device={args.device} is not applied yet; "
              f"BenchmarkConfig selects cuda/cpu automatically")
    if args.batch_size != 32 or args.seq_len != 128:
        print("⚠️  --batch_size/--seq_len are not applied yet; "
              "BenchmarkConfig defaults are used")

    run_comprehensive_benchmark()