"""
PyTorch 学习练习工具集
包含各种实用的函数和类，帮助你更好地学习和练习PyTorch
"""

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import time
import random

# Configure matplotlib for CJK output: use the SimHei font for labels and
# disable the unicode minus glyph (which SimHei cannot render).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class DataGenerator:
    """Dataset factory for practice experiments.

    Every generator returns plain NumPy arrays ``(X, y)`` so the caller
    decides how to split / tensorize them.
    """

    @staticmethod
    def generate_classification_data(n_samples=1000, n_features=2, n_classes=2, noise=0.1):
        """Generate a classification dataset with all-informative features.

        Args:
            n_samples: number of rows.
            n_features: number of feature columns (all informative).
            n_classes: number of target classes.
            noise: stddev of Gaussian noise added on top of the features.

        Returns:
            (X, y): features of shape (n_samples, n_features), int labels.
        """
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_redundant=0,
            n_informative=n_features,
            n_clusters_per_class=1,
            n_classes=n_classes,
            random_state=42
        )

        # Perturb the features so the task is not trivially separable.
        X += np.random.normal(0, noise, X.shape)

        return X, y

    @staticmethod
    def generate_regression_data(n_samples=1000, n_features=1, noise=0.1):
        """Generate a linear regression dataset (sklearn ``make_regression``)."""
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            noise=noise,
            random_state=42
        )
        return X, y

    @staticmethod
    def generate_circle_data(n_samples=1000, noise=0.1):
        """Generate a radially separated 2-class dataset inside the unit disk.

        Points with (clean) radius < 0.5 get label 0, the rest label 1;
        Gaussian noise is added to the coordinates afterwards, so labels
        near the boundary are slightly fuzzy on purpose.
        """
        angles = np.random.uniform(0, 2*np.pi, n_samples)
        radius = np.random.uniform(0, 1, n_samples)

        # Polar -> Cartesian in one vectorized step.  The original code
        # applied this identical formula separately to an inner-mask and an
        # outer-mask slice, which was pure duplication.
        X = radius[:, None] * np.column_stack([np.cos(angles), np.sin(angles)])
        y = (radius >= 0.5).astype(float)

        # Add coordinate noise (labels were assigned from the clean radius).
        X += np.random.normal(0, noise, X.shape)

        return X, y

class ModelTrainer:
    """模型训练器 - 提供各种训练和评估功能"""
    
    def __init__(self, model, criterion, optimizer, device='cpu'):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = device
        self.model.to(device)
        
        # 训练历史
        self.train_losses = []
        self.train_accuracies = []
        self.val_losses = []
        self.val_accuracies = []
    
    def train_epoch(self, train_loader, is_classification=True):
        """训练一个epoch"""
        self.model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        
        for data, target in train_loader:
            data, target = data.to(self.device), target.to(self.device)
            
            # 清零梯度
            self.optimizer.zero_grad()
            
            # 前向传播
            outputs = self.model(data)
            loss = self.criterion(outputs, target)
            
            # 反向传播
            loss.backward()
            
            # 更新参数
            self.optimizer.step()
            
            # 统计
            running_loss += loss.item()
            if is_classification:
                _, predicted = torch.max(outputs.data, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()
        
        epoch_loss = running_loss / len(train_loader)
        epoch_acc = 100 * correct / total if is_classification else 0
        
        self.train_losses.append(epoch_loss)
        self.train_accuracies.append(epoch_acc)
        
        return epoch_loss, epoch_acc
    
    def evaluate(self, test_loader, is_classification=True):
        """评估模型"""
        self.model.eval()
        test_loss = 0.0
        correct = 0
        total = 0
        all_predictions = []
        all_targets = []
        
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                outputs = self.model(data)
                loss = self.criterion(outputs, target)
                
                test_loss += loss.item()
                if is_classification:
                    _, predicted = torch.max(outputs.data, 1)
                    total += target.size(0)
                    correct += (predicted == target).sum().item()
                    all_predictions.extend(predicted.cpu().numpy())
                    all_targets.extend(target.cpu().numpy())
        
        test_loss /= len(test_loader)
        accuracy = 100 * correct / total if is_classification else 0
        
        return test_loss, accuracy, all_predictions, all_targets
    
    def train(self, train_loader, test_loader=None, num_epochs=100, 
              is_classification=True, verbose=True):
        """完整训练过程"""
        start_time = time.time()
        
        for epoch in range(num_epochs):
            # 训练
            train_loss, train_acc = self.train_epoch(train_loader, is_classification)
            
            # 验证
            if test_loader is not None:
                val_loss, val_acc, _, _ = self.evaluate(test_loader, is_classification)
                self.val_losses.append(val_loss)
                self.val_accuracies.append(val_acc)
            
            # 打印进度
            if verbose and epoch % 20 == 0:
                if test_loader is not None:
                    print(f'Epoch [{epoch}/{num_epochs}], '
                          f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%, '
                          f'Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%')
                else:
                    print(f'Epoch [{epoch}/{num_epochs}], '
                          f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%')
        
        training_time = time.time() - start_time
        if verbose:
            print(f'训练完成！用时: {training_time:.2f}秒')
        
        return training_time
    
    def plot_training_history(self):
        """绘制训练历史"""
        fig, axes = plt.subplots(1, 2, figsize=(12, 4))
        
        # 损失曲线
        axes[0].plot(self.train_losses, label='训练损失')
        if self.val_losses:
            axes[0].plot(self.val_losses, label='验证损失')
        axes[0].set_title('训练损失')
        axes[0].set_xlabel('Epoch')
        axes[0].set_ylabel('Loss')
        axes[0].legend()
        axes[0].grid(True)
        
        # 准确率曲线
        axes[1].plot(self.train_accuracies, label='训练准确率')
        if self.val_accuracies:
            axes[1].plot(self.val_accuracies, label='验证准确率')
        axes[1].set_title('训练准确率')
        axes[1].set_xlabel('Epoch')
        axes[1].set_ylabel('Accuracy (%)')
        axes[1].legend()
        axes[1].grid(True)
        
        plt.tight_layout()
        plt.show()

class ModelComparator:
    """Collects per-model metrics and reports / plots a comparison."""

    def __init__(self):
        # name -> {'model', 'train_time', 'test_accuracy', 'test_loss'}
        self.results = {}

    def add_model(self, name, model, train_time, test_accuracy, test_loss):
        """Record one trained model's metrics under ``name``."""
        self.results[name] = dict(
            model=model,
            train_time=train_time,
            test_accuracy=test_accuracy,
            test_loss=test_loss,
        )

    def compare_models(self):
        """Print a table of all recorded models plus the best/fastest ones."""
        if not self.results:
            print("没有模型可以比较！")
            return

        print("=" * 60)
        print("模型性能比较")
        print("=" * 60)
        print(f"{'模型名称':<20} {'训练时间(s)':<12} {'测试准确率(%)':<15} {'测试损失':<10}")
        print("-" * 60)

        for name, result in self.results.items():
            print(f"{name:<20} {result['train_time']:<12.2f} "
                  f"{result['test_accuracy']:<15.2f} {result['test_loss']:<10.4f}")

        # Pick winners by keying directly on the model names.
        best_name = max(self.results, key=lambda k: self.results[k]['test_accuracy'])
        fastest_name = min(self.results, key=lambda k: self.results[k]['train_time'])

        print("\n最佳准确率模型:", best_name)
        print("最快训练模型:", fastest_name)

    def plot_comparison(self):
        """Bar-chart accuracy and training time for every recorded model."""
        if len(self.results) < 2:
            print("需要至少2个模型才能比较！")
            return

        palette = ['skyblue', 'lightcoral', 'lightgreen', 'gold']
        names = list(self.results)
        bar_colors = palette[:len(names)]
        accuracies = [self.results[n]['test_accuracy'] for n in names]
        times = [self.results[n]['train_time'] for n in names]

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

        # Accuracy panel.
        ax1.bar(names, accuracies, color=bar_colors)
        ax1.set_title('模型准确率比较')
        ax1.set_ylabel('准确率 (%)')
        ax1.set_ylim(0, 100)

        # Training-time panel.
        ax2.bar(names, times, color=bar_colors)
        ax2.set_title('模型训练时间比较')
        ax2.set_ylabel('训练时间 (秒)')

        plt.tight_layout()
        plt.show()

# Predefined model architectures
class SimpleLinearRegression(nn.Module):
    """Linear regression: one affine layer mapping features to a scalar."""

    def __init__(self, input_size=1):
        super().__init__()
        self.linear = nn.Linear(input_size, 1)

    def forward(self, x):
        prediction = self.linear(x)
        return prediction

class SimpleLogisticRegression(nn.Module):
    """Logistic-regression head: one affine layer producing class logits
    (softmax is left to the loss, e.g. nn.CrossEntropyLoss)."""

    def __init__(self, input_size=2, num_classes=2):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        logits = self.linear(x)
        return logits

class MLP(nn.Module):
    """Multi-layer perceptron: Linear -> ReLU -> Dropout per hidden layer,
    followed by a final Linear head that emits raw logits (no softmax)."""

    def __init__(self, input_size=2, hidden_sizes=(10, 5), num_classes=2, dropout=0.2):
        """
        Args:
            input_size: number of input features.
            hidden_sizes: iterable of hidden-layer widths.  The default is a
                tuple rather than the original mutable list literal, which
                was a shared-mutable-default pitfall; callers may still pass
                a list.
            num_classes: output dimension (class logits).
            dropout: dropout probability applied after every hidden layer.
        """
        super().__init__()

        layers = []
        prev_size = input_size

        # One Linear/ReLU/Dropout block per hidden width.
        for hidden_size in hidden_sizes:
            layers.extend([
                nn.Linear(prev_size, hidden_size),
                nn.ReLU(),
                nn.Dropout(dropout)
            ])
            prev_size = hidden_size

        # Classification head.
        layers.append(nn.Linear(prev_size, num_classes))
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)

class DeepMLP(nn.Module):
    """Deeper MLP with a fixed width schedule 64 -> 32 -> 16 -> 8, each
    hidden layer followed by ReLU and Dropout, ending in a logits head."""

    def __init__(self, input_size=2, num_classes=2, dropout=0.3):
        super().__init__()

        # Same layer sequence as spelling it out literally, built in a loop.
        widths = [input_size, 64, 32, 16, 8]
        blocks = []
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            blocks += [nn.Linear(fan_in, fan_out), nn.ReLU(), nn.Dropout(dropout)]
        blocks.append(nn.Linear(widths[-1], num_classes))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)

# Utility functions
def set_random_seed(seed=42):
    """Seed every RNG used in this toolkit (torch CPU/CUDA, NumPy, stdlib).

    Call before generating data or building models to make runs repeatable.
    NOTE: full CUDA determinism may additionally require cuDNN deterministic
    flags, which are deliberately not forced here.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if torch.cuda.is_available():
        # Seed *all* visible GPUs, not just the current device, so
        # multi-GPU runs are reproducible too.
        torch.cuda.manual_seed_all(seed)

def prepare_data(X, y, test_size=0.2, batch_size=32, normalize=True):
    """Split, (optionally) standardize, tensorize, and wrap data in loaders.

    Args:
        X: feature array, shape (n_samples, n_features).
        y: targets; integer dtype is treated as class labels, float dtype
           as regression targets.
        test_size: fraction held out for the test split.
        batch_size: DataLoader batch size.
        normalize: fit a StandardScaler on the training split and apply it
           to both splits (fit on train only, to avoid test leakage).

    Returns:
        (train_loader, test_loader, X_train, X_test, y_train, y_test)
        with X_* as FloatTensor and y_* as LongTensor (classification) or
        FloatTensor (regression).
    """
    # Train/test split (fixed seed for reproducibility).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=42
    )

    # Standardize using training statistics only.
    if normalize:
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

    # Features are always float.
    X_train = torch.FloatTensor(X_train)
    X_test = torch.FloatTensor(X_test)

    # Pick the target dtype from the data.  The previous version cast every
    # target to LongTensor, which silently truncated float regression
    # targets and broke losses like nn.MSELoss.
    if np.issubdtype(np.asarray(y_train).dtype, np.integer):
        y_train = torch.LongTensor(y_train)
        y_test = torch.LongTensor(y_test)
    else:
        y_train = torch.FloatTensor(y_train)
        y_test = torch.FloatTensor(y_test)

    # Wrap in DataLoaders; only the training split is shuffled.
    from torch.utils.data import DataLoader, TensorDataset

    train_dataset = TensorDataset(X_train, y_train)
    test_dataset = TensorDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader, X_train, X_test, y_train, y_test

def visualize_decision_boundary(model, X, y, title="决策边界"):
    """Plot a 2-D classifier's decision regions plus the data points.

    Assumes exactly two feature columns in ``X`` and a model that returns
    one logit per class (argmax is taken over dim 1).  NOTE(review): the
    grid is fed to the model as a CPU FloatTensor — this will fail if the
    model lives on a GPU; move it to CPU first.
    """
    model.eval()

    # Build a dense grid covering the data range with a margin of 1 unit.
    h = 0.02  # grid step
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Classify every grid point; argmax over the logits is the region label.
    with torch.no_grad():
        Z = model(torch.FloatTensor(np.c_[xx.ravel(), yy.ravel()]))
        Z = torch.max(Z, 1)[1].numpy()
        Z = Z.reshape(xx.shape)

    # Filled contour = decision regions.
    plt.figure(figsize=(8, 6))
    plt.contourf(xx, yy, Z, alpha=0.3, cmap=plt.cm.RdYlBu)

    # Scatter the samples, one color per class (palette cycles past 5).
    unique_classes = np.unique(y)
    colors = ['red', 'blue', 'green', 'orange', 'purple']
    for i, cls in enumerate(unique_classes):
        mask = y == cls
        plt.scatter(X[mask, 0], X[mask, 1], 
                   c=colors[i % len(colors)], 
                   label=f'类别 {cls}', alpha=0.7)

    plt.title(title)
    plt.xlabel('特征 1')
    plt.ylabel('特征 2')
    plt.legend()
    plt.grid(True)
    plt.show()

def quick_experiment(model_class, model_params, X, y, 
                    num_epochs=100, learning_rate=0.001, 
                    is_classification=True, verbose=True):
    """Run one end-to-end experiment.

    Splits the data, instantiates ``model_class(**model_params)``, trains it
    with Adam, evaluates on the held-out split, and returns every artifact
    (model, trainer, timings, metrics, predictions) in a single dict.
    """
    # Split / scale / tensorize the raw arrays.
    train_loader, test_loader, X_train, X_test, y_train, y_test = prepare_data(X, y)

    # Fresh model instance per experiment.
    model = model_class(**model_params)

    # Cross-entropy for class labels, mean-squared error otherwise.
    criterion = nn.CrossEntropyLoss() if is_classification else nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Train with per-epoch validation on the test split.
    trainer = ModelTrainer(model, criterion, optimizer)
    train_time = trainer.train(train_loader, test_loader, num_epochs,
                               is_classification, verbose)

    # Final held-out evaluation.
    test_loss, test_accuracy, predictions, targets = trainer.evaluate(
        test_loader, is_classification)

    if verbose:
        print(f"\n最终结果:")
        print(f"测试损失: {test_loss:.4f}")
        print(f"测试准确率: {test_accuracy:.2f}%")

    return {
        'model': model,
        'trainer': trainer,
        'train_time': train_time,
        'test_loss': test_loss,
        'test_accuracy': test_accuracy,
        'predictions': predictions,
        'targets': targets,
    }

# Example usage / smoke test: generate data, train an MLP, plot the boundary.
if __name__ == "__main__":
    print("PyTorch 学习练习工具集")
    print("=" * 50)
    
    # Fix all RNG seeds so the demo is reproducible.
    set_random_seed(42)
    
    # Build a 2-feature binary classification dataset.
    print("1. 生成分类数据...")
    X, y = DataGenerator.generate_classification_data(n_samples=1000, n_features=2)
    
    # Train a small MLP end-to-end via the helper.
    print("\n2. 运行快速实验...")
    result = quick_experiment(
        model_class=MLP,
        model_params={'input_size': 2, 'hidden_sizes': [10, 5], 'num_classes': 2},
        X=X, y=y,
        num_epochs=50
    )
    
    # Show the learned decision regions (opens a matplotlib window).
    print("\n3. 可视化决策边界...")
    visualize_decision_boundary(result['model'], X, y, "MLP决策边界")
    
    print("\n实验完成！")
