"""
Multi-Task Learning Framework Demo
多任务学习框架演示

这个示例展示了如何使用自定义的多任务学习框架进行训练和评估。
"""

import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))

import numpy as np
from ml_lib.mtl.config import (
    MTLConfig, TaskConfig, ArchitectureConfig, WeightingConfig, TrainingConfig,
    create_classification_task, create_regression_task, create_default_config
)
from ml_lib.mtl.trainer import MTLTrainer
from ml_lib.mtl.utils import MTLDataset, create_synthetic_mtl_data
from ml_lib.core import Tensor


def create_sample_config():
    """Build the basic demo MTL configuration (one sentiment classifier)."""
    # Task definitions — the extra tasks stay disabled for the single-task demo.
    sentiment_task = create_classification_task("sentiment", num_classes=3, weight=1.0)
    # rating_task = create_regression_task("rating", weight=1.0)
    # category_task = create_classification_task("category", num_classes=5, weight=0.8)

    cfg = create_default_config([sentiment_task])
    # cfg = create_default_config([sentiment_task, rating_task, category_task])

    # Dimensionality of the synthetic input features.
    cfg.input_dim = 50

    # Hard parameter sharing: shared trunk followed by per-task heads.
    cfg.set_architecture(ArchitectureConfig(
        name='HPS',
        shared_layers=[256, 128],
        task_specific_layers=[64, 32],
    ))

    # Equal weighting across all task losses.
    cfg.set_weighting(WeightingConfig(name='EW'))

    # Training hyper-parameters with early stopping enabled.
    cfg.set_training(TrainingConfig(
        batch_size=32,
        num_epochs=50,
        learning_rate=0.001,
        optimizer='adam',
        early_stopping=True,
        patience=10,
    ))

    return cfg


def create_sample_data(config: MTLConfig, num_samples: int = 1000):
    """Generate synthetic inputs/targets for every task in *config*.

    Returns the (data, targets) pair produced by create_synthetic_mtl_data.
    """
    # Describe each configured task so the generator knows what to emit.
    task_configs = {
        name: {'task_type': task.task_type, 'num_classes': task.num_classes}
        for name, task in config.tasks.items()
    }

    return create_synthetic_mtl_data(
        num_samples=num_samples,
        input_dim=config.input_dim,
        task_configs=task_configs,
        noise_level=0.1,
    )


def split_data(data, targets, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15):
    """Randomly split parallel dicts of arrays into train/val/test subsets.

    Args:
        data: dict mapping name -> array; all arrays share the first dimension.
        targets: dict mapping task name -> target array of the same length.
        train_ratio: fraction of samples used for training.
        val_ratio: fraction of samples used for validation.
        test_ratio: nominal test fraction. The test split actually receives
            all remaining samples, so this value is only validated against
            the other two ratios rather than applied directly.

    Returns:
        ((train_data, train_targets), (val_data, val_targets),
         (test_data, test_targets))

    Raises:
        ValueError: if the three ratios do not sum to 1.
    """
    # Previously `test_ratio` was accepted but silently ignored; validate it
    # so inconsistent ratios fail loudly instead of producing a skewed split.
    if abs(train_ratio + val_ratio + test_ratio - 1.0) > 1e-6:
        raise ValueError("train_ratio + val_ratio + test_ratio must sum to 1")

    total_samples = len(next(iter(data.values())))

    # Split boundaries (integer truncation; the test set absorbs the rest).
    train_end = int(total_samples * train_ratio)
    val_end = train_end + int(total_samples * val_ratio)

    # Shuffle once, then carve out contiguous index ranges.
    indices = np.random.permutation(total_samples)
    train_indices = indices[:train_end]
    val_indices = indices[train_end:val_end]
    test_indices = indices[val_end:]  # remainder -> test set

    def split_dict(d, idx):
        # Fancy-index every array with the same index subset.
        return {key: arr[idx] for key, arr in d.items()}

    return (
        (split_dict(data, train_indices), split_dict(targets, train_indices)),
        (split_dict(data, val_indices), split_dict(targets, val_indices)),
        (split_dict(data, test_indices), split_dict(targets, test_indices)),
    )


def demo_equal_weighting():
    """End-to-end demo of training with the equal-weighting (EW) strategy.

    Builds the single-task sample config, generates synthetic data, trains
    an MTLTrainer, evaluates on the held-out test split and prints the
    final per-task weights.
    """
    print("=" * 60)
    print("多任务学习框架演示 - 等权重策略")
    print("=" * 60)
    
    # Build the demo configuration (one sentiment classification task).
    config = create_sample_config()
    print(f"任务配置: {config.get_task_names()}")
    print(f"架构: {config.architecture.name}")
    print(f"权重策略: {config.weighting.name}")
    
    # Generate synthetic inputs/targets for every configured task.
    print("\n生成合成数据...")
    data, targets = create_sample_data(config, num_samples=2000)
    
    # Random 70/15/15 train/val/test split.
    (train_data, train_targets), (val_data, val_targets), (test_data, test_targets) = split_data(data, targets)
    
    print(f"训练集大小: {len(list(train_data.values())[0])}")
    print(f"验证集大小: {len(list(val_data.values())[0])}")
    print(f"测试集大小: {len(list(test_data.values())[0])}")
    
    # Wrap each split as a batched dataset (no shuffling for val/test).
    train_dataset = MTLDataset(train_data, train_targets, batch_size=config.training.batch_size)
    val_dataset = MTLDataset(val_data, val_targets, batch_size=config.training.batch_size, shuffle=False)
    test_dataset = MTLDataset(test_data, test_targets, batch_size=config.training.batch_size, shuffle=False)
    
    # Build the trainer from the config.
    print("\n初始化训练器...")
    trainer = MTLTrainer(config)
    
    # Train, passing the validation set to the trainer.
    print("\n开始训练...")
    history = trainer.train(train_dataset, val_dataset, verbose=True)
    
    # Evaluate on the held-out test set.
    print("\n评估模型...")
    test_metrics = trainer.evaluate(test_dataset)
    
    print("\n测试集结果:")
    for task_name, metrics in test_metrics.items():
        print(f"  {task_name}: {metrics}")
    
    # Report the final per-task weights held by the trainer.
    final_weights = trainer.get_task_weights()
    print(f"\n最终任务权重: {final_weights}")
    
    return trainer, history, test_metrics


def demo_gradnorm_weighting():
    """Demo of the GradNorm task-weighting strategy.

    Same pipeline as the equal-weighting demo, but with the weighting
    strategy switched to GradNorm (alpha=0.12) and a shorter schedule.
    """
    print("\n" + "=" * 60)
    print("多任务学习框架演示 - GradNorm权重策略")
    print("=" * 60)
    
    # Start from the standard demo configuration.
    config = create_sample_config()
    
    # Switch the weighting strategy to GradNorm.
    weight_config = WeightingConfig(name='GradNorm', alpha=0.12)
    config.set_weighting(weight_config)
    
    # Fewer epochs so the demo finishes quickly.
    config.training.num_epochs = 30
    
    print(f"权重策略: {config.weighting.name} (alpha={config.weighting.alpha})")
    
    # Generate and split synthetic data.
    data, targets = create_sample_data(config, num_samples=1500)
    (train_data, train_targets), (val_data, val_targets), (test_data, test_targets) = split_data(data, targets)
    
    # Batched datasets (no shuffling for val/test).
    train_dataset = MTLDataset(train_data, train_targets, batch_size=config.training.batch_size)
    val_dataset = MTLDataset(val_data, val_targets, batch_size=config.training.batch_size, shuffle=False)
    test_dataset = MTLDataset(test_data, test_targets, batch_size=config.training.batch_size, shuffle=False)
    
    # Build the trainer.
    trainer = MTLTrainer(config)
    
    # Train.
    print("\n开始训练...")
    history = trainer.train(train_dataset, val_dataset, verbose=True)
    
    # Evaluate on the test split.
    test_metrics = trainer.evaluate(test_dataset)
    
    print("\n测试集结果:")
    for task_name, metrics in test_metrics.items():
        print(f"  {task_name}: {metrics}")
    
    # Report the task weights after training.
    final_weights = trainer.get_task_weights()
    print(f"\n最终任务权重: {final_weights}")
    
    return trainer, history, test_metrics


def demo_config_save_load():
    """Round-trip a config through JSON to show save/load support."""
    print("\n" + "=" * 60)
    print("配置保存和加载演示")
    print("=" * 60)

    cfg = create_sample_config()

    # Persist to a temporary JSON file next to the script.
    path = "mtl_config_demo.json"
    cfg.save(path)
    print(f"配置已保存到: {path}")

    # Read it back.
    restored = MTLConfig.load(path)
    print(f"配置已从 {path} 加载")

    # Sanity-check the round trip.
    print(f"原始任务数量: {cfg.get_num_tasks()}")
    print(f"加载任务数量: {restored.get_num_tasks()}")
    print(f"任务名称匹配: {cfg.get_task_names() == restored.get_task_names()}")

    # Remove the temporary file.
    if os.path.exists(path):
        os.remove(path)
        print(f"已删除临时文件: {path}")


def demo_prediction():
    """Train briefly, then run inference on a handful of random samples."""
    print("\n" + "=" * 60)
    print("模型预测演示")
    print("=" * 60)

    # Lightweight config for a quick run.
    cfg = create_sample_config()
    cfg.training.num_epochs = 20  # keep training short

    # Build data; only the training portion is needed here.
    data, targets = create_sample_data(cfg, num_samples=1000)
    (train_data, train_targets), _, _ = split_data(data, targets)

    train_dataset = MTLDataset(train_data, train_targets, batch_size=32)
    trainer = MTLTrainer(cfg)

    print("快速训练模型...")
    trainer.train(train_dataset, verbose=False)

    # Predict on 5 fresh random inputs.
    print("\n进行预测...")
    sample_input = Tensor(np.random.randn(5, cfg.input_dim).astype(np.float32))
    predictions = trainer.predict(sample_input)

    print(f"输入形状: {sample_input.shape}")
    print("预测结果:")
    for task_name, pred in predictions.items():
        print(f"  {task_name}: 形状 {pred.shape}, 样例值 {pred.data[0]}")


def create_improved_config():
    """Build a three-task configuration with a deeper network.

    Compared to the basic demo config: adds a down-weighted regression task
    and a second classifier, deepens the shared trunk, enlarges the batch
    size, raises the learning rate and enables gradient clipping.
    """
    # Tasks: two classifiers plus a regression head with reduced weight.
    sentiment = create_classification_task("sentiment", num_classes=3, weight=1.0)
    rating = create_regression_task("rating", weight=0.1)  # regression weighted down
    category = create_classification_task("category", num_classes=5, weight=1.0)

    cfg = create_default_config([sentiment, rating, category])
    cfg.input_dim = 50

    # Deeper shared trunk than the basic demo config.
    cfg.set_architecture(ArchitectureConfig(
        name='HPS',
        shared_layers=[512, 256, 128],
        task_specific_layers=[64, 32],
    ))

    # Equal weighting on top of the per-task weights above.
    cfg.set_weighting(WeightingConfig(name='EW'))

    # Longer schedule, bigger batches, clipping for stability.
    cfg.set_training(TrainingConfig(
        batch_size=64,
        num_epochs=100,
        learning_rate=0.01,
        optimizer='adam',
        early_stopping=True,
        patience=20,
        grad_clip=1.0,
    ))

    return cfg


def create_better_synthetic_data(config: MTLConfig, num_samples: int = 1000):
    """创建更好的合成数据"""
    np.random.seed(42)
    
    # 生成更有结构的输入数据
    X = np.random.randn(num_samples, config.input_dim).astype(np.float32)
    
    # 添加一些结构化特征
    # 前20维用于sentiment和category，后30维用于rating
    sentiment_features = X[:, :20]
    category_features = X[:, 10:30]  # 有重叠
    rating_features = X[:, 20:]
    
    data = {'input': X}
    targets = {}
    
    # Sentiment分类任务 (3类)
    sentiment_weights = np.array([
        [1.5, -0.5, -1.0],  # 第一个特征对类别0有利
        [-0.5, 1.5, -1.0],  # 第二个特征对类别1有利
        [-1.0, -0.5, 1.5],  # 第三个特征对类别2有利
    ] + [[0.1 * np.random.randn() for _ in range(3)] for _ in range(17)])  # 其他特征噪声
    
    sentiment_logits = sentiment_features @ np.array(sentiment_weights)
    sentiment_logits += 0.1 * np.random.randn(*sentiment_logits.shape)  # 少量噪声
    targets['sentiment'] = np.argmax(sentiment_logits, axis=1)
    
    # Category分类任务 (5类)
    category_weights = np.random.randn(20, 5) * 0.5
    category_logits = category_features @ category_weights
    category_logits += 0.1 * np.random.randn(*category_logits.shape)
    targets['category'] = np.argmax(category_logits, axis=1)
    
    # Rating回归任务 - 缩放到合理范围
    rating_weights = np.random.randn(30, 1) * 0.1  # 小权重
    rating_values = rating_features @ rating_weights
    rating_values += 0.05 * np.random.randn(*rating_values.shape)  # 小噪声
    # 缩放到[0, 5]范围
    rating_values = 2.5 + 2.0 * np.tanh(rating_values)
    targets['rating'] = rating_values.flatten()
    
    return data, targets


def demo_improved_training():
    """Demo of the three-task setup using the improved config and data.

    Combines create_improved_config() with create_better_synthetic_data(),
    prints dataset statistics, trains, evaluates and reports the final
    per-task weights.
    """
    print("=" * 60)
    print("改进的多任务学习训练演示")
    print("=" * 60)
    
    # Deeper architecture, three tasks, gradient clipping enabled.
    config = create_improved_config()
    print(f"任务配置: {config.get_task_names()}")
    print(f"架构: {config.architecture.name}")
    print(f"共享层: {config.architecture.shared_layers}")
    print(f"任务层: {config.architecture.task_specific_layers}")
    print(f"学习率: {config.training.learning_rate}")
    print(f"批次大小: {config.training.batch_size}")
    
    # Structured synthetic data with per-task feature slices.
    print("\n生成改进的合成数据...")
    data, targets = create_better_synthetic_data(config, num_samples=3000)
    
    # Show the label/value distributions before training.
    print(f"Sentiment标签分布: {np.bincount(targets['sentiment'])}")
    print(f"Category标签分布: {np.bincount(targets['category'])}")
    print(f"Rating值范围: [{targets['rating'].min():.2f}, {targets['rating'].max():.2f}]")
    print(f"Rating均值: {targets['rating'].mean():.2f}, 标准差: {targets['rating'].std():.2f}")
    
    # Random 70/15/15 train/val/test split.
    (train_data, train_targets), (val_data, val_targets), (test_data, test_targets) = split_data(data, targets)
    
    print(f"\n训练集大小: {len(list(train_data.values())[0])}")
    print(f"验证集大小: {len(list(val_data.values())[0])}")
    print(f"测试集大小: {len(list(test_data.values())[0])}")
    
    # Batched datasets (no shuffling for val/test).
    train_dataset = MTLDataset(train_data, train_targets, batch_size=config.training.batch_size)
    val_dataset = MTLDataset(val_data, val_targets, batch_size=config.training.batch_size, shuffle=False)
    test_dataset = MTLDataset(test_data, test_targets, batch_size=config.training.batch_size, shuffle=False)
    
    # Build the trainer.
    print("\n初始化训练器...")
    trainer = MTLTrainer(config)
    
    # Train with the validation set.
    print("\n开始训练...")
    history = trainer.train(train_dataset, val_dataset, verbose=True)
    
    # Evaluate on the held-out test set.
    print("\n评估模型...")
    test_metrics = trainer.evaluate(test_dataset)
    
    print("\n测试集结果:")
    for task_name, metrics in test_metrics.items():
        print(f"  {task_name}: {metrics}")
    
    # Report the final per-task weights.
    final_weights = trainer.get_task_weights()
    print(f"\n最终任务权重: {final_weights}")
    
    return trainer, history, test_metrics


def create_trigonometric_data(num_samples: int = 2000):
    """Build a three-task regression dataset from a single angle input.

    Column 0 of the input is an angle x drawn uniformly from [-2π, 2π];
    the remaining four columns are low-variance distractor features.
    Targets are sin(x), cos(x) and sin(x)*cos(x).
    """
    np.random.seed(42)  # reproducible data

    # Angle column plus four noise columns -> a 5-dimensional input.
    angles = np.random.uniform(-2 * np.pi, 2 * np.pi, (num_samples, 1)).astype(np.float32)
    distractors = np.random.randn(num_samples, 4).astype(np.float32) * 0.1
    X = np.concatenate([angles, distractors], axis=1)

    flat_angles = angles.flatten()
    targets = {
        # Task 1: sin(x)
        'sin_task': np.sin(flat_angles),
        # Task 2: cos(x)
        'cos_task': np.cos(flat_angles),
        # Task 3: sin(x)*cos(x) == 0.5*sin(2x)
        'sincos_task': (np.sin(angles) * np.cos(angles)).flatten(),
    }

    return {'input': X}, targets


def create_trigonometric_config():
    """Configuration for the three trigonometric regression tasks."""
    # One regression head per target function.
    sin_task = create_regression_task("sin_task", weight=1.0)
    cos_task = create_regression_task("cos_task", weight=1.0)
    sincos_task = create_regression_task("sincos_task", weight=1.0)

    cfg = create_default_config([sin_task, cos_task, sincos_task])

    # Angle feature plus four distractor features.
    cfg.input_dim = 5

    # Small hard-parameter-sharing network.
    cfg.set_architecture(ArchitectureConfig(
        name='HPS',
        shared_layers=[64, 32],
        task_specific_layers=[16],
    ))

    # Equal weighting across the three regression losses.
    cfg.set_weighting(WeightingConfig(name='EW'))

    # Long schedule with gradient clipping and patient early stopping.
    cfg.set_training(TrainingConfig(
        batch_size=32,
        num_epochs=200,
        learning_rate=0.01,
        optimizer='adam',
        early_stopping=True,
        patience=30,
        grad_clip=1.0,
    ))

    return cfg


def demo_trigonometric_mtl():
    """Demo of multi-task regression on sin(x), cos(x) and sin(x)*cos(x).

    Trains on the trigonometric dataset, evaluates on the test split and
    prints a table comparing predictions to exact values at a few angles.
    """
    print("=" * 60)
    print("三角函数多任务学习演示")
    print("任务1: 预测sin(x)")
    print("任务2: 预测cos(x)")  
    print("任务3: 预测sin(x)*cos(x)")
    print("=" * 60)
    
    # Build the three-regression-task configuration.
    config = create_trigonometric_config()
    print(f"任务配置: {config.get_task_names()}")
    print(f"输入维度: {config.input_dim}")
    print(f"共享层: {config.architecture.shared_layers}")
    print(f"任务层: {config.architecture.task_specific_layers}")
    print(f"学习率: {config.training.learning_rate}")
    
    # Generate the angle-based dataset.
    print("\n生成三角函数数据...")
    data, targets = create_trigonometric_data(num_samples=3000)
    
    # Show the value ranges of the three targets.
    print(f"Sin值范围: [{targets['sin_task'].min():.3f}, {targets['sin_task'].max():.3f}]")
    print(f"Cos值范围: [{targets['cos_task'].min():.3f}, {targets['cos_task'].max():.3f}]")
    print(f"Sin*Cos值范围: [{targets['sincos_task'].min():.3f}, {targets['sincos_task'].max():.3f}]")
    
    # Random 70/15/15 train/val/test split.
    (train_data, train_targets), (val_data, val_targets), (test_data, test_targets) = split_data(data, targets)
    
    print(f"\n训练集大小: {len(list(train_data.values())[0])}")
    print(f"验证集大小: {len(list(val_data.values())[0])}")
    print(f"测试集大小: {len(list(test_data.values())[0])}")
    
    # Batched datasets (no shuffling for val/test).
    train_dataset = MTLDataset(train_data, train_targets, batch_size=config.training.batch_size)
    val_dataset = MTLDataset(val_data, val_targets, batch_size=config.training.batch_size, shuffle=False)
    test_dataset = MTLDataset(test_data, test_targets, batch_size=config.training.batch_size, shuffle=False)
    
    # Build the trainer.
    print("\n初始化训练器...")
    trainer = MTLTrainer(config)
    
    # Train with the validation set.
    print("\n开始训练...")
    history = trainer.train(train_dataset, val_dataset, verbose=True)
    
    # Evaluate on the held-out test set.
    print("\n评估模型...")
    test_metrics = trainer.evaluate(test_dataset)
    
    print("\n测试集结果:")
    for task_name, metrics in test_metrics.items():
        print(f"  {task_name}: {metrics}")
    
    # Probe the model at a few angles with known exact values.
    # Distractor columns are zeroed so only the angle feature varies.
    print("\n测试具体数值:")
    test_x = np.array([[0.0, 0, 0, 0, 0],      # x=0
                       [np.pi/2, 0, 0, 0, 0],   # x=π/2  
                       [np.pi, 0, 0, 0, 0],     # x=π
                       [-np.pi/2, 0, 0, 0, 0]], dtype=np.float32)  # x=-π/2
    
    test_input = Tensor(test_x)
    predictions = trainer.predict(test_input)
    
    # Exact values of sin, cos and sin*cos at the probe angles above.
    expected_sin = [0.0, 1.0, 0.0, -1.0]
    expected_cos = [1.0, 0.0, -1.0, 0.0]
    expected_sincos = [0.0, 0.0, 0.0, 0.0]
    
    print("x值\t\t预测sin\t\t真实sin\t\t预测cos\t\t真实cos\t\t预测sin*cos\t真实sin*cos")
    print("-" * 90)
    
    # One table row per probe angle: prediction next to the exact value.
    x_values = [0, "π/2", "π", "-π/2"]
    for i in range(4):
        pred_sin = predictions['sin_task'].data[i, 0]
        pred_cos = predictions['cos_task'].data[i, 0] 
        pred_sincos = predictions['sincos_task'].data[i, 0]
        
        print(f"{x_values[i]}\t\t{pred_sin:.3f}\t\t{expected_sin[i]:.3f}\t\t"
              f"{pred_cos:.3f}\t\t{expected_cos[i]:.3f}\t\t"
              f"{pred_sincos:.3f}\t\t{expected_sincos[i]:.3f}")
    
    return trainer, history, test_metrics


def main():
    """Entry point: run the enabled demos, reporting any failure."""
    print("自定义多任务学习框架演示")
    print("基于自定义张量库实现")

    try:
        # Demo 1: equal weighting (the only demo enabled by default).
        demo_equal_weighting()

        # Other demos, left disabled to keep the run short:
        # demo_gradnorm_weighting()   # Demo 2: GradNorm weighting
        # demo_config_save_load()     # Demo 3: config serialization round trip
        # demo_prediction()           # Demo 4: inference on fresh inputs
        # demo_improved_training()    # Demo 5: deeper net, three tasks
        # demo_trigonometric_mtl()    # Demo 6: sin/cos regression tasks

        print("\n" + "=" * 60)
        print("所有演示完成!")
        print("=" * 60)

    except Exception as e:
        # Demo script: surface the failure on stdout instead of crashing.
        print(f"演示过程中出现错误: {e}")
        import traceback
        traceback.print_exc()


# Run the demo suite when executed as a script.
if __name__ == "__main__":
    main()