"""
PyTorch 练习示例
包含各种练习题目和解决方案
"""

from pytorch_practice_tools import *
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt

def exercise_1_linear_regression():
    """Exercise 1: linear regression implemented from scratch with autograd."""
    print("=" * 60)
    print("练习1：线性回归从零实现")
    print("=" * 60)

    # Build a noisy 1-D regression dataset and move it into tensors.
    X, y = DataGenerator.generate_regression_data(n_samples=100, n_features=1, noise=0.1)
    features = torch.FloatTensor(X)
    targets = torch.FloatTensor(y)

    # Trainable scalar parameters of the line y = w*x + b.
    weight = torch.tensor(0.0, requires_grad=True)
    bias = torch.tensor(0.0, requires_grad=True)

    # Training configuration.
    learning_rate = 0.01
    num_epochs = 100

    loss_history = []
    for epoch in range(num_epochs):
        # Forward pass: predict and measure mean squared error.
        predictions = weight * features.squeeze() + bias
        loss = torch.mean((predictions - targets) ** 2)

        # Backward pass: accumulate d(loss)/d(param) into .grad.
        loss.backward()

        # Manual SGD step, then clear gradients for the next epoch.
        with torch.no_grad():
            for param in (weight, bias):
                param -= learning_rate * param.grad
                param.grad.zero_()

        loss_history.append(loss.item())

        if epoch % 20 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.4f}, w: {weight.item():.4f}, b: {bias.item():.4f}")

    # Plot the loss curve and the fitted line over the data.
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(loss_history)
    plt.title('训练损失')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.scatter(X, y, alpha=0.6, label='数据点')
    plt.plot(X, weight.item() * X.squeeze() + bias.item(), 'r-',
             label=f'拟合直线: y = {weight.item():.2f}x + {bias.item():.2f}')
    plt.title('线性回归结果')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.show()

    print(f"\n最终参数: w = {weight.item():.4f}, b = {bias.item():.4f}")

def exercise_2_model_comparison():
    """Exercise 2: train several classifiers on one dataset and compare them."""
    print("=" * 60)
    print("练习2：模型比较")
    print("=" * 60)

    # One shared dataset and loaders for every candidate model.
    X, y = DataGenerator.generate_classification_data(n_samples=1000, n_features=2)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    comparator = ModelComparator()

    # (display name, model class, constructor kwargs) per candidate.
    candidates = [
        ("简单逻辑回归", SimpleLogisticRegression, {'input_size': 2, 'num_classes': 2}),
        ("浅层MLP", MLP, {'input_size': 2, 'hidden_sizes': [10], 'num_classes': 2}),
        ("中等MLP", MLP, {'input_size': 2, 'hidden_sizes': [20, 10], 'num_classes': 2}),
        ("深度MLP", DeepMLP, {'input_size': 2, 'num_classes': 2})
    ]

    for name, model_cls, kwargs in candidates:
        print(f"\n训练 {name}...")

        # Fresh model and optimizer for each candidate so runs are independent.
        model = model_cls(**kwargs)
        trainer = ModelTrainer(model, nn.CrossEntropyLoss(),
                               optim.Adam(model.parameters(), lr=0.001))

        # Train, evaluate, and record the result.
        train_time = trainer.train(train_loader, test_loader, num_epochs=50, verbose=False)
        test_loss, test_accuracy, _, _ = trainer.evaluate(test_loader)
        comparator.add_model(name, model, train_time, test_accuracy, test_loss)

    # Tabulate and chart the collected results.
    comparator.compare_models()
    comparator.plot_comparison()

def _mean_accuracy_by(results, key):
    """Group search results by *key* and return mean accuracy per group.

    Args:
        results: list of result dicts, each with *key* and an 'accuracy' entry.
        key: result-dict field to group by (e.g. 'lr' or 'dropout').

    Returns:
        (labels, means): labels in first-seen order, and the mean accuracy
        for each label.
    """
    grouped = {}
    for r in results:
        grouped.setdefault(r[key], []).append(r['accuracy'])
    labels = list(grouped)
    return labels, [np.mean(grouped[label]) for label in labels]


def exercise_3_hyperparameter_tuning():
    """Exercise 3: hyperparameter tuning via exhaustive grid search.

    Trains an MLP for every (learning rate, hidden layout, dropout)
    combination, reports the best setting, and plots how each
    hyperparameter relates to test accuracy.
    """
    print("=" * 60)
    print("练习3：超参数调优")
    print("=" * 60)

    # Data shared by every configuration in the grid.
    X, y = DataGenerator.generate_classification_data(n_samples=1000, n_features=2)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    # Hyperparameter grid.
    learning_rates = [0.001, 0.01, 0.1]
    hidden_sizes = [[10], [20], [10, 5], [20, 10]]
    dropouts = [0.1, 0.2, 0.3]

    best_accuracy = 0
    best_params = None

    results = []

    for lr in learning_rates:
        for hidden in hidden_sizes:
            for dropout in dropouts:
                print(f"\n测试参数: LR={lr}, Hidden={hidden}, Dropout={dropout}")

                # Build, train, and evaluate one configuration.
                model = MLP(input_size=2, hidden_sizes=hidden, num_classes=2, dropout=dropout)
                criterion = nn.CrossEntropyLoss()
                optimizer = optim.Adam(model.parameters(), lr=lr)

                trainer = ModelTrainer(model, criterion, optimizer)
                train_time = trainer.train(train_loader, test_loader, num_epochs=30, verbose=False)
                test_loss, test_accuracy, _, _ = trainer.evaluate(test_loader)

                results.append({
                    'lr': lr,
                    'hidden': hidden,
                    'dropout': dropout,
                    'accuracy': test_accuracy,
                    'loss': test_loss,
                    'time': train_time
                })

                # Track only the best settings; the model object itself is
                # never used after the search, so it is not retained.
                if test_accuracy > best_accuracy:
                    best_accuracy = test_accuracy
                    best_params = {'lr': lr, 'hidden': hidden, 'dropout': dropout}

                print(f"准确率: {test_accuracy:.2f}%")

    print(f"\n最佳参数: {best_params}")
    print(f"最佳准确率: {best_accuracy:.2f}%")

    # --- Visualization -------------------------------------------------
    plt.figure(figsize=(15, 5))

    # Mean accuracy per learning rate.
    plt.subplot(1, 3, 1)
    lrs, lr_means = _mean_accuracy_by(results, 'lr')
    plt.bar([str(lr) for lr in lrs], lr_means)
    plt.title('学习率 vs 平均准确率')
    plt.xlabel('学习率')
    plt.ylabel('准确率 (%)')

    # Mean accuracy per dropout rate.
    plt.subplot(1, 3, 2)
    dos, do_means = _mean_accuracy_by(results, 'dropout')
    plt.bar([str(do) for do in dos], do_means)
    plt.title('Dropout vs 平均准确率')
    plt.xlabel('Dropout')
    plt.ylabel('准确率 (%)')

    # Training time versus achieved accuracy.
    plt.subplot(1, 3, 3)
    times = [r['time'] for r in results]
    accs = [r['accuracy'] for r in results]
    plt.scatter(times, accs, alpha=0.6)
    plt.title('训练时间 vs 准确率')
    plt.xlabel('训练时间 (秒)')
    plt.ylabel('准确率 (%)')

    plt.tight_layout()
    plt.show()

def exercise_4_different_datasets():
    """Exercise 4: compare model performance across datasets of varying difficulty."""
    print("=" * 60)
    print("练习4：不同数据集上的表现")
    print("=" * 60)

    # Datasets of increasing difficulty: (label, (X, y)).
    datasets = [
        ("简单线性", DataGenerator.generate_classification_data(n_samples=500, noise=0.1)),
        ("中等难度", DataGenerator.generate_classification_data(n_samples=500, noise=0.3)),
        ("圆形数据", DataGenerator.generate_circle_data(n_samples=500, noise=0.1)),
        ("复杂数据", DataGenerator.generate_classification_data(n_samples=500, n_features=4, noise=0.2))
    ]

    results_by_dataset = {}

    for dataset_name, (X, y) in datasets:
        print(f"\n测试数据集: {dataset_name}")

        train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

        # Candidate models sized to this dataset's feature count.
        candidates = [
            ("逻辑回归", SimpleLogisticRegression, {'input_size': X.shape[1], 'num_classes': 2}),
            ("浅层MLP", MLP, {'input_size': X.shape[1], 'hidden_sizes': [10], 'num_classes': 2}),
            ("深度MLP", MLP, {'input_size': X.shape[1], 'hidden_sizes': [20, 10, 5], 'num_classes': 2})
        ]

        per_model = {}
        for model_name, model_cls, kwargs in candidates:
            # Train one fresh model per (dataset, architecture) pair.
            model = model_cls(**kwargs)
            trainer = ModelTrainer(model, nn.CrossEntropyLoss(),
                                   optim.Adam(model.parameters(), lr=0.001))
            train_time = trainer.train(train_loader, test_loader, num_epochs=50, verbose=False)
            test_loss, test_accuracy, _, _ = trainer.evaluate(test_loader)

            per_model[model_name] = {
                'accuracy': test_accuracy,
                'loss': test_loss,
                'time': train_time
            }
            print(f"  {model_name}: {test_accuracy:.2f}%")

        results_by_dataset[dataset_name] = per_model

    # --- Visualization -------------------------------------------------
    plt.figure(figsize=(12, 8))

    # All datasets were evaluated with the same model names.
    model_names = list(next(iter(results_by_dataset.values())))
    x = np.arange(len(model_names))
    width = 0.2

    # Grouped bars: accuracy per model, one bar group per dataset.
    plt.subplot(2, 2, 1)
    for offset, dataset_name in enumerate(results_by_dataset):
        heights = [results_by_dataset[dataset_name][m]['accuracy'] for m in model_names]
        plt.bar(x + offset * width, heights, width, label=dataset_name)
    plt.title('不同数据集上的模型准确率')
    plt.xlabel('模型')
    plt.ylabel('准确率 (%)')
    plt.xticks(x + width * 1.5, model_names)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Grouped bars: training time per model.
    plt.subplot(2, 2, 2)
    for offset, dataset_name in enumerate(results_by_dataset):
        heights = [results_by_dataset[dataset_name][m]['time'] for m in model_names]
        plt.bar(x + offset * width, heights, width, label=dataset_name)
    plt.title('不同数据集上的训练时间')
    plt.xlabel('模型')
    plt.ylabel('训练时间 (秒)')
    plt.xticks(x + width * 1.5, model_names)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # For the first dataset (2 features), retrain a small MLP and draw
    # its decision boundary.
    if len(datasets) >= 2:
        plt.subplot(2, 2, 3)
        X, y = datasets[0][1]  # the "simple linear" dataset
        if X.shape[1] == 2:
            train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)
            model = MLP(input_size=2, hidden_sizes=[10, 5], num_classes=2)
            trainer = ModelTrainer(model, nn.CrossEntropyLoss(),
                                   optim.Adam(model.parameters(), lr=0.001))
            trainer.train(train_loader, test_loader, num_epochs=50, verbose=False)

            visualize_decision_boundary(model, X, y, "简单线性数据决策边界")

    plt.tight_layout()
    plt.show()

def exercise_5_regularization():
    """Exercise 5: effect of regularization on an over-parameterized MLP."""
    print("=" * 60)
    print("练习5：正则化技术")
    print("=" * 60)

    # Deliberately small dataset so a large network can overfit.
    X, y = DataGenerator.generate_classification_data(n_samples=200, n_features=2, noise=0.1)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    # Regularization settings to compare: (label, {weight_decay, dropout}).
    regularization_methods = [
        ("无正则化", {'weight_decay': 0, 'dropout': 0}),
        ("L2正则化", {'weight_decay': 0.01, 'dropout': 0}),
        ("Dropout", {'weight_decay': 0, 'dropout': 0.3}),
        ("L2 + Dropout", {'weight_decay': 0.01, 'dropout': 0.3})
    ]

    results = []

    for method_name, settings in regularization_methods:
        print(f"\n测试: {method_name}")

        # Large network relative to the dataset; dropout goes into the
        # model, L2 (weight decay) goes into the optimizer.
        model = MLP(input_size=2, hidden_sizes=[50, 30, 20],
                    num_classes=2, dropout=settings['dropout'])
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001,
                               weight_decay=settings['weight_decay'])

        trainer = ModelTrainer(model, criterion, optimizer)
        trainer.train(train_loader, test_loader, num_epochs=100, verbose=False)

        # Evaluate on both splits to quantify overfitting.
        train_loss, train_acc, _, _ = trainer.evaluate(train_loader)
        test_loss, test_acc, _, _ = trainer.evaluate(test_loader)

        results.append({
            'method': method_name,
            'train_acc': train_acc,
            'test_acc': test_acc,
            'train_loss': train_loss,
            'test_loss': test_loss,
            'trainer': trainer
        })

        print(f"  训练准确率: {train_acc:.2f}%")
        print(f"  测试准确率: {test_acc:.2f}%")
        print(f"  过拟合程度: {train_acc - test_acc:.2f}%")

    # --- Visualization -------------------------------------------------
    plt.figure(figsize=(15, 10))

    methods = [r['method'] for r in results]
    train_accs = [r['train_acc'] for r in results]
    test_accs = [r['test_acc'] for r in results]

    # Side-by-side train/test accuracy bars per method.
    plt.subplot(2, 3, 1)
    x = np.arange(len(methods))
    width = 0.35
    plt.bar(x - width/2, train_accs, width, label='训练准确率', alpha=0.8)
    plt.bar(x + width/2, test_accs, width, label='测试准确率', alpha=0.8)
    plt.title('正则化方法比较 - 准确率')
    plt.xlabel('正则化方法')
    plt.ylabel('准确率 (%)')
    plt.xticks(x, methods, rotation=45)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Train/test accuracy gap as a proxy for overfitting.
    plt.subplot(2, 3, 2)
    gaps = [train - test for train, test in zip(train_accs, test_accs)]
    plt.bar(methods, gaps, color='red', alpha=0.7)
    plt.title('过拟合程度')
    plt.xlabel('正则化方法')
    plt.ylabel('过拟合程度 (%)')
    plt.xticks(rotation=45)
    plt.grid(True, alpha=0.3)

    # One loss-curve panel per method (subplots 3..6).
    for panel, result in enumerate(results, start=3):
        plt.subplot(2, 3, panel)
        trainer = result['trainer']
        plt.plot(trainer.train_losses, label='训练损失')
        plt.plot(trainer.val_losses, label='验证损失')
        plt.title(f'{result["method"]} - 损失曲线')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

def run_all_exercises():
    """Run every exercise in order, pausing for user input between them."""
    print("PyTorch 练习示例")
    print("=" * 60)

    # Fixed seed so every exercise run is reproducible.
    set_random_seed(42)

    exercises = [
        ("线性回归从零实现", exercise_1_linear_regression),
        ("模型比较", exercise_2_model_comparison),
        ("超参数调优", exercise_3_hyperparameter_tuning),
        ("不同数据集表现", exercise_4_different_datasets),
        ("正则化技术", exercise_5_regularization)
    ]

    for i, (name, run) in enumerate(exercises, 1):
        print(f"\n{'='*20} 练习 {i}: {name} {'='*20}")
        # A failing exercise is reported but does not abort the tour.
        try:
            run()
        except Exception as e:
            print(f"练习 {i} 出错: {e}")

        input("\n按回车键继续下一个练习...")

    print("\n所有练习完成！")

if __name__ == "__main__":
    # Interactive menu: pick a single exercise or run all of them.
    print("选择要运行的练习:")
    print("1. 线性回归从零实现")
    print("2. 模型比较")
    print("3. 超参数调优")
    print("4. 不同数据集表现")
    print("5. 正则化技术")
    print("6. 运行所有练习")

    # Dispatch table keyed by the menu choice.
    menu = {
        "1": exercise_1_linear_regression,
        "2": exercise_2_model_comparison,
        "3": exercise_3_hyperparameter_tuning,
        "4": exercise_4_different_datasets,
        "5": exercise_5_regularization,
        "6": run_all_exercises,
    }

    choice = input("\n请输入选择 (1-6): ").strip()
    selected = menu.get(choice)
    if selected is not None:
        selected()
    else:
        # Any unrecognized input falls back to the full tour.
        print("无效选择，运行所有练习...")
        run_all_exercises()
