import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

import numpy as np
from ml_lib.core import Tensor
from ml_lib.mtl.config import MTLConfig, TaskConfig, ArchitectureConfig, WeightingConfig, TrainingConfig
from ml_lib.mtl.trainer import MTLTrainer
from ml_lib.mtl.utils import MTLDataset

def debug_mtl_step_by_step():
    """Walk through the MTL framework one training step at a time.

    Builds the smallest possible single-task regression setup (y = 2x),
    then runs a few manual training steps while printing inputs, targets,
    predictions, losses, and model parameters so every stage of the
    pipeline can be inspected by eye.
    """
    print("=" * 60)
    print("逐步调试MTL框架")
    print("=" * 60)

    # Minimal configuration: a single regression task with an MSE loss.
    cfg = MTLConfig()
    cfg.add_task(TaskConfig(
        name='simple_task',
        task_type='regression',
        num_classes=1,
        loss_fn='mse',
        metrics=['mse'],
    ))

    # Smallest possible hard-parameter-sharing architecture.
    cfg.set_architecture(ArchitectureConfig(
        name='HPS',
        shared_layers=[2],        # just two shared neurons
        task_specific_layers=[1]  # just one task-specific neuron
    ))

    cfg.set_weighting(WeightingConfig(name='EW'))
    cfg.set_training(TrainingConfig(
        batch_size=1,
        learning_rate=0.1,  # deliberately large learning rate
        num_epochs=10,
        optimizer='sgd',
        grad_clip=None,
    ))
    cfg.input_dim = 1

    trainer = MTLTrainer(cfg)

    # Simplest possible training data: y = 2x.
    inputs = np.array([[1.0], [2.0]], dtype=np.float32)
    labels = np.array([[2.0], [4.0]], dtype=np.float32)

    print("数据:")
    print(f"  x: {inputs.flatten()}")
    print(f"  y: {labels.flatten()}")

    dataset = MTLDataset(
        data={'input': inputs},
        targets={'simple_task': labels.flatten()},
        batch_size=1,
        shuffle=False,
    )

    # Dump parameter values before any update has happened.
    print(f"\n初始模型参数:")
    for pname, pvalue in trainer.model.named_parameters():
        print(f"  {pname}: {pvalue.data}")

    print(f"\n手动训练步骤:")
    for step in range(5):
        losses_this_step = []

        # The for-loop re-creates a fresh iterator over the dataset each pass.
        for batch_idx, (batch_data, batch_targets) in enumerate(dataset):
            print(f"\n  Step {step}, Batch {batch_idx}:")
            print(f"    输入: {batch_data['input'].data}")
            print(f"    目标: {batch_targets['simple_task'].data}")

            # Forward pass shown for inspection only; train_step repeats it.
            preds = trainer.model(batch_data['input'])
            print(f"    预测: {preds['simple_task'].data}")

            # Loss before the update, computed the same way the trainer does.
            loss = trainer.loss_functions['simple_task'](
                preds['simple_task'], batch_targets['simple_task'])
            print(f"    损失: {float(loss.data)}")

            # The actual optimization step.
            step_loss = trainer.train_step(batch_data, batch_targets)
            losses_this_step.append(step_loss['simple_task'])
            print(f"    训练后损失: {step_loss['simple_task']}")

            # Re-run the forward pass to confirm the parameters moved.
            print(f"    训练后预测: {trainer.model(batch_data['input'])['simple_task'].data}")

        avg_loss = np.mean(losses_this_step)
        print(f"  Step {step} 平均损失: {avg_loss:.6f}")

        # After the very first step, dump the parameters again for comparison.
        if step == 0:
            print(f"  训练后模型参数:")
            for pname, pvalue in trainer.model.named_parameters():
                print(f"    {pname}: {pvalue.data}")


def debug_evaluate_function():
    """Check that the trainer's ``evaluate`` reflects training progress.

    Reuses the same minimal y = 2x regression setup, evaluates before and
    after a single training step, and prints the per-metric change.
    """
    print("\n" + "=" * 60)
    print("调试evaluate函数")
    print("=" * 60)

    # Identical minimal configuration to the step-by-step debug run.
    cfg = MTLConfig()
    cfg.add_task(TaskConfig(
        name='simple_task',
        task_type='regression',
        num_classes=1,
        loss_fn='mse',
        metrics=['mse'],
    ))
    cfg.set_architecture(ArchitectureConfig(
        name='HPS',
        shared_layers=[2],
        task_specific_layers=[1],
    ))
    cfg.set_weighting(WeightingConfig(name='EW'))
    cfg.set_training(TrainingConfig(
        batch_size=1,
        learning_rate=0.1,
        num_epochs=10,
        optimizer='sgd',
        grad_clip=None,
    ))
    cfg.input_dim = 1

    trainer = MTLTrainer(cfg)

    # Tiny dataset implementing y = 2x.
    inputs = np.array([[1.0], [2.0]], dtype=np.float32)
    labels = np.array([[2.0], [4.0]], dtype=np.float32)
    dataset = MTLDataset(
        data={'input': inputs},
        targets={'simple_task': labels.flatten()},
        batch_size=1,
        shuffle=False,
    )

    # Baseline metrics before any parameter update.
    print("初始评估:")
    initial_metrics = trainer.evaluate(dataset)
    print(f"  指标: {initial_metrics}")

    # Run exactly one training step (first batch only).
    print("\n训练一步...")
    for batch_data, batch_targets in dataset:
        step_loss = trainer.train_step(batch_data, batch_targets)
        print(f"  训练损失: {step_loss}")
        break

    # Metrics after the single update.
    print("\n训练后评估:")
    after_metrics = trainer.evaluate(dataset)
    print(f"  指标: {after_metrics}")

    # Report the per-metric deltas.
    print(f"\n指标变化:")
    for task_name, task_metrics in initial_metrics.items():
        for metric_name, initial_val in task_metrics.items():
            after_val = after_metrics[task_name][metric_name]
            change = after_val - initial_val
            print(f"  {task_name}.{metric_name}: {initial_val:.6f} -> {after_val:.6f} (变化: {change:.6f})")


def main():
    """Run both debugging sessions back to back."""
    debug_mtl_step_by_step()
    debug_evaluate_function()


if __name__ == "__main__":
    main()