"""
PyTorch 快速练习脚本
适合快速测试和学习的小例子
"""

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from pytorch_practice_tools import *

# Configure matplotlib for Chinese labels: use the SimHei font and keep the
# minus sign rendering correctly (a CJK font would otherwise show a box).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def quick_linear_regression():
    """Fit y = w*x + b with a single nn.Linear layer and visualize the fit.

    Draws noisy samples from y = 2x + 1, trains with SGD on MSE for 100
    epochs (printing progress every 20), then shows the loss curve and
    the fitted line next to the data.
    """
    print("快速线性回归示例")
    print("-" * 30)

    # Synthetic data: y = 2x + 1 plus Gaussian noise.
    inputs = torch.linspace(0, 10, 100).reshape(-1, 1)
    targets = 2 * inputs.squeeze() + 1 + torch.randn(100) * 0.5

    # Single-feature linear model, plain SGD on mean-squared error.
    model = nn.Linear(1, 1)
    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    loss_history = []
    for epoch in range(100):
        optimizer.zero_grad()
        predictions = model(inputs).squeeze()
        loss = criterion(predictions, targets)
        loss.backward()
        optimizer.step()
        loss_history.append(loss.item())

        if epoch % 20 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.4f}")

    # Compare learned parameters against the generating ones.
    w = model.weight.item()
    b = model.bias.item()
    print(f"学习到的参数: w = {w:.2f}, b = {b:.2f}")
    print(f"真实参数: w = 2.0, b = 1.0")

    # Left panel: training loss; right panel: data with the fitted line.
    plt.figure(figsize=(10, 4))

    ax = plt.subplot(1, 2, 1)
    ax.plot(loss_history)
    ax.set_title('训练损失')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.grid(True)

    ax = plt.subplot(1, 2, 2)
    ax.scatter(inputs.numpy(), targets.numpy(), alpha=0.6, label='数据点')
    ax.plot(inputs.numpy(), model(inputs).detach().numpy(), 'r-',
            label=f'拟合直线: y = {w:.2f}x + {b:.2f}')
    ax.set_title('线性回归结果')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.legend()
    ax.grid(True)

    plt.tight_layout()
    plt.show()

def quick_classification():
    """Train a small MLP on synthetic 2-D data and plot its decision boundary."""
    print("\n快速分类示例")
    print("-" * 30)

    # Two-feature synthetic classification data from the shared toolbox.
    features, labels = DataGenerator.generate_classification_data(
        n_samples=200, n_features=2)

    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(
        features, labels, batch_size=32)

    # Two hidden layers, cross-entropy loss, Adam optimizer.
    net = MLP(input_size=2, hidden_sizes=[10, 5], num_classes=2)
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(net.parameters(), lr=0.001)

    trainer = ModelTrainer(net, loss_fn, opt)
    trainer.train(train_loader, test_loader, num_epochs=50, verbose=True)

    test_loss, test_accuracy, predictions, targets = trainer.evaluate(test_loader)
    print(f"\n测试准确率: {test_accuracy:.2f}%")

    trainer.plot_training_history()
    visualize_decision_boundary(net, features, labels, "分类决策边界")

def quick_model_comparison():
    """Train three architectures on identical data and compare their metrics."""
    print("\n快速模型比较")
    print("-" * 30)

    features, labels = DataGenerator.generate_classification_data(
        n_samples=500, n_features=2)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(
        features, labels)

    comparator = ModelComparator()

    # (display name, model class, constructor kwargs) for each candidate.
    candidates = [
        ("逻辑回归", SimpleLogisticRegression, {'input_size': 2, 'num_classes': 2}),
        ("浅层MLP", MLP, {'input_size': 2, 'hidden_sizes': [10], 'num_classes': 2}),
        ("深度MLP", MLP, {'input_size': 2, 'hidden_sizes': [20, 10, 5], 'num_classes': 2}),
    ]

    for name, model_class, kwargs in candidates:
        print(f"训练 {name}...")

        net = model_class(**kwargs)
        trainer = ModelTrainer(net, nn.CrossEntropyLoss(),
                               optim.Adam(net.parameters(), lr=0.001))
        train_time = trainer.train(train_loader, test_loader,
                                   num_epochs=30, verbose=False)

        test_loss, test_accuracy, _, _ = trainer.evaluate(test_loader)
        comparator.add_model(name, net, train_time, test_accuracy, test_loss)

    # Tabulate and chart the collected results.
    comparator.compare_models()
    comparator.plot_comparison()

def quick_hyperparameter_test():
    """Sweep three learning rates and chart resulting accuracy and train time."""
    print("\n快速超参数测试")
    print("-" * 30)

    features, labels = DataGenerator.generate_classification_data(
        n_samples=300, n_features=2)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(
        features, labels)

    results = []
    for lr in (0.001, 0.01, 0.1):
        print(f"测试学习率: {lr}")

        net = MLP(input_size=2, hidden_sizes=[10, 5], num_classes=2)
        trainer = ModelTrainer(net, nn.CrossEntropyLoss(),
                               optim.Adam(net.parameters(), lr=lr))
        elapsed = trainer.train(train_loader, test_loader,
                                num_epochs=30, verbose=False)

        test_loss, test_accuracy, _, _ = trainer.evaluate(test_loader)
        results.append({'lr': lr, 'accuracy': test_accuracy, 'time': elapsed})

        print(f"  准确率: {test_accuracy:.2f}%")

    # Bar charts: accuracy (left) and wall-clock time (right) per learning rate.
    bar_labels = [str(r['lr']) for r in results]

    plt.figure(figsize=(10, 4))

    plt.subplot(1, 2, 1)
    plt.bar(bar_labels, [r['accuracy'] for r in results])
    plt.title('学习率 vs 准确率')
    plt.xlabel('学习率')
    plt.ylabel('准确率 (%)')
    plt.grid(True, alpha=0.3)

    plt.subplot(1, 2, 2)
    plt.bar(bar_labels, [r['time'] for r in results])
    plt.title('学习率 vs 训练时间')
    plt.xlabel('学习率')
    plt.ylabel('训练时间 (秒)')
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

def quick_regularization_demo():
    """Compare regularization strategies on a small, overfitting-prone dataset.

    Trains the same MLP under four configurations (none, L2 weight decay,
    dropout, both), prints train/test accuracy and the overfitting gap for
    each, then plots a grouped-bar accuracy comparison and the gap per method.
    """
    print("\n快速正则化演示")
    print("-" * 30)

    # Small dataset on purpose so overfitting is easy to provoke.
    X, y = DataGenerator.generate_classification_data(n_samples=100, n_features=2)
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    # (display name, {weight_decay for L2, dropout probability}) per config.
    # Distinct name from the label list built later — the original rebound
    # `methods` mid-function, shadowing this list.
    configs = [
        ("无正则化", {'weight_decay': 0, 'dropout': 0}),
        ("L2正则化", {'weight_decay': 0.01, 'dropout': 0}),
        ("Dropout", {'weight_decay': 0, 'dropout': 0.3}),
        ("L2 + Dropout", {'weight_decay': 0.01, 'dropout': 0.3})
    ]

    results = []

    for method_name, params in configs:
        print(f"测试: {method_name}")

        # Deliberately over-capacity network (50/30 hidden units, 100 samples).
        model = MLP(input_size=2, hidden_sizes=[50, 30],
                    num_classes=2, dropout=params['dropout'])
        criterion = nn.CrossEntropyLoss()
        # weight_decay implements L2 regularization in the optimizer.
        optimizer = optim.Adam(model.parameters(), lr=0.001,
                               weight_decay=params['weight_decay'])

        trainer = ModelTrainer(model, criterion, optimizer)
        trainer.train(train_loader, test_loader, num_epochs=50, verbose=False)

        # Evaluate on both splits; the train/test gap measures overfitting.
        _, train_acc, _, _ = trainer.evaluate(train_loader)
        _, test_acc, _, _ = trainer.evaluate(test_loader)

        results.append({
            'method': method_name,
            'train_acc': train_acc,
            'test_acc': test_acc,
            'overfitting': train_acc - test_acc
        })

        print(f"  训练准确率: {train_acc:.2f}%")
        print(f"  测试准确率: {test_acc:.2f}%")
        print(f"  过拟合程度: {train_acc - test_acc:.2f}%")

    # Visualization: grouped train/test accuracy bars, then the gap per method.
    method_names = [r['method'] for r in results]
    train_accs = [r['train_acc'] for r in results]
    test_accs = [r['test_acc'] for r in results]

    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    x = np.arange(len(method_names))
    width = 0.35

    plt.bar(x - width/2, train_accs, width, label='训练准确率', alpha=0.8)
    plt.bar(x + width/2, test_accs, width, label='测试准确率', alpha=0.8)

    plt.title('正则化方法比较')
    plt.xlabel('正则化方法')
    plt.ylabel('准确率 (%)')
    plt.xticks(x, method_names, rotation=45)
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.subplot(1, 2, 2)
    overfitting = [r['overfitting'] for r in results]
    plt.bar(method_names, overfitting, color='red', alpha=0.7)
    plt.title('过拟合程度')
    plt.xlabel('正则化方法')
    plt.ylabel('过拟合程度 (%)')
    plt.xticks(rotation=45)
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

def _read_param(prompt, default, cast):
    """Read one hyperparameter from stdin.

    Returns *default* when the user enters nothing, and — instead of letting
    a ValueError crash the whole menu loop, as the previous inline
    `int(input(...))` calls did — falls back to *default* with a notice when
    the input cannot be parsed by *cast*.
    """
    raw = input(prompt).strip()
    if not raw:
        return default
    try:
        return cast(raw)
    except ValueError:
        print(f"输入无效，使用默认值: {default}")
        return default


def interactive_playground():
    """Interactive playground: train an MLP with user-chosen hyperparameters.

    Prompts for hidden-layer width, depth, learning rate and epoch count
    (falling back to defaults on empty or invalid input), trains on synthetic
    2-D classification data, reports accuracy and training time, then plots
    the training history and decision boundary.
    """
    print("\n交互式练习场")
    print("-" * 30)
    print("在这里你可以自由实验！")

    X, y = DataGenerator.generate_classification_data(n_samples=500, n_features=2)

    # Collect hyperparameters; bad input falls back to the shown default.
    print("\n选择模型参数:")
    hidden_size = _read_param("隐藏层大小 (默认10): ", 10, int)
    num_layers = _read_param("隐藏层数量 (默认2): ", 2, int)
    learning_rate = _read_param("学习率 (默认0.001): ", 0.001, float)
    num_epochs = _read_param("训练轮数 (默认50): ", 50, int)

    # All hidden layers share the same width.
    hidden_sizes = [hidden_size] * num_layers

    model = MLP(input_size=2, hidden_sizes=hidden_sizes, num_classes=2)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    print(f"\n模型结构:")
    print(model)

    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    trainer = ModelTrainer(model, criterion, optimizer)
    train_time = trainer.train(train_loader, test_loader, num_epochs=num_epochs, verbose=True)

    test_loss, test_accuracy, predictions, targets = trainer.evaluate(test_loader)
    print(f"\n最终结果:")
    print(f"测试准确率: {test_accuracy:.2f}%")
    print(f"训练时间: {train_time:.2f}秒")

    trainer.plot_training_history()
    visualize_decision_boundary(model, X, y, "你的模型决策边界")

def main():
    """Entry point: menu loop dispatching to the quick-practice demos."""
    print("PyTorch 快速练习")
    print("=" * 50)

    # Fixed seed so every demo launched from the menu is reproducible.
    set_random_seed(42)

    # Menu choice -> demo function; "0" (quit) is handled separately.
    actions = {
        "1": quick_linear_regression,
        "2": quick_classification,
        "3": quick_model_comparison,
        "4": quick_hyperparameter_test,
        "5": quick_regularization_demo,
        "6": interactive_playground,
    }

    while True:
        print("\n选择练习:")
        print("1. 快速线性回归")
        print("2. 快速分类")
        print("3. 快速模型比较")
        print("4. 快速超参数测试")
        print("5. 快速正则化演示")
        print("6. 交互式练习场")
        print("0. 退出")

        choice = input("\n请输入选择 (0-6): ").strip()

        if choice == "0":
            print("再见！")
            break

        action = actions.get(choice)
        if action is None:
            print("无效选择，请重试！")
        else:
            action()

        input("\n按回车键继续...")

if __name__ == "__main__":
    main()