import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np

# Seed both torch and numpy RNGs so runs are reproducible
# (numpy drives the shuffle in SimpleDataLoader below).
torch.manual_seed(42)
np.random.seed(42)

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# A small fully-connected regression network.
class SimpleNet(nn.Module):
    """Three-layer MLP: input -> hidden -> hidden -> output."""

    def __init__(self, input_size=10, hidden_size=20, output_size=1):
        super().__init__()
        # Attribute names fc1/fc2/fc3 are kept so state_dict keys stay stable.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run two ReLU-activated hidden layers, then the linear head."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)

# Generate random regression data for the demo.
def create_sample_data(num_samples=1000, input_size=10, output_size=1):
    """Return a (features, targets) pair of standard-normal tensors."""
    print(f"创建 {num_samples} 个样本数据...")
    features = torch.randn(num_samples, input_size)
    targets = torch.randn(num_samples, output_size)
    return features, targets

# Minimal batching iterator that keeps inputs and targets paired.
class SimpleDataLoader:
    """Yield (X_batch, y_batch) pairs sliced with one shared index list.

    Using the same indices for both tensors is what guarantees the
    input and target batch sizes always match.
    """

    def __init__(self, X, y, batch_size=32, shuffle=True):
        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.num_samples = X.shape[0]
        self.indices = list(range(self.num_samples))

    def __iter__(self):
        # Reshuffle in place at the start of each pass, if requested.
        if self.shuffle:
            np.random.shuffle(self.indices)

        for start in range(0, self.num_samples, self.batch_size):
            # One slice selects both tensors, so batch dims always agree.
            selected = self.indices[start:start + self.batch_size]
            yield self.X[selected], self.y[selected]

    def __len__(self):
        # Ceiling division: the final batch may hold fewer samples.
        return -(-self.num_samples // self.batch_size)

# Training function demonstrating how to avoid batch_size mismatches.
def train_model_fixed():
    """Train a small MLP while defensively checking batch-size alignment.

    Builds random data, wraps it in SimpleDataLoader (which slices inputs
    and targets with the same indices), then runs 3 epochs of MSE training.
    Each batch is validated: mismatched batch sizes are skipped, and
    output/target shapes are reconciled before the loss is computed.
    """
    print("=== 修复batch_size不匹配问题的训练示例 ===")

    # Create the demo dataset.
    X, y = create_sample_data(num_samples=1000, input_size=10, output_size=1)
    print(f"数据形状: X={X.shape}, y={y.shape}")

    # Paired loader guarantees X/y batches are sliced with identical indices.
    train_loader = SimpleDataLoader(X, y, batch_size=32, shuffle=True)

    # Build the model on the globally selected device.
    model = SimpleNet(input_size=10, hidden_size=20, output_size=1).to(device)
    print(f"模型已创建并移动到 {next(model.parameters()).device}")

    # Loss and optimizer.
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    model.train()
    # Fixed: the old names total_loss/num_batches were misleading —
    # they actually accumulate per-EPOCH average losses and the epoch count.
    sum_epoch_avg_loss = 0.0
    num_epochs_run = 0

    print("\n开始训练...")
    for epoch in range(3):  # train for 3 epochs
        epoch_loss = 0.0
        batch_count = 0

        for batch_idx, (inputs, targets) in enumerate(train_loader):
            # Move both tensors to the same device as the model.
            inputs = inputs.to(device)
            targets = targets.to(device)

            # Skip any batch whose input/target sample counts disagree.
            if inputs.size(0) != targets.size(0):
                print(f"警告: batch {batch_idx} 中输入和目标的batch_size不匹配!")
                print(f"  输入batch_size: {inputs.size(0)}, 目标batch_size: {targets.size(0)}")
                continue

            # Debug info for the very first batch only.
            if epoch == 0 and batch_idx == 0:
                print(f"第一个batch信息:")
                print(f"  输入形状: {inputs.shape}")
                print(f"  目标形状: {targets.shape}")
                print(f"  输入设备: {inputs.device}")
                print(f"  目标设备: {targets.device}")

            # Reset gradients, run the forward pass.
            optimizer.zero_grad()
            outputs = model(inputs)

            # Reconcile a missing trailing dimension, e.g. (N,) vs (N, 1).
            if outputs.shape != targets.shape:
                print(f"形状不匹配警告:")
                print(f"  输出形状: {outputs.shape}")
                print(f"  目标形状: {targets.shape}")
                if outputs.size(0) == targets.size(0):
                    if len(targets.shape) == 1:
                        targets = targets.unsqueeze(1)
                    elif len(outputs.shape) == 1:
                        outputs = outputs.unsqueeze(1)

            # Loss, backward pass, parameter update.
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            batch_count += 1

            # Progress report every 10 batches.
            if batch_idx % 10 == 0:
                print(f'Epoch: {epoch+1} | Batch: {batch_idx} | Loss: {loss.item():.6f}')

        avg_epoch_loss = epoch_loss / batch_count if batch_count > 0 else 0
        print(f'Epoch {epoch+1} 完成 | 平均损失: {avg_epoch_loss:.6f}')
        sum_epoch_avg_loss += avg_epoch_loss
        num_epochs_run += 1

    # Fixed: guard the summary against division by zero (cannot happen with
    # range(3), but keeps the report robust if the epoch count changes).
    overall = sum_epoch_avg_loss / num_epochs_run if num_epochs_run else 0.0
    print(f"\n训练完成! 总平均损失: {overall:.6f}")

# Show what a batch_size mismatch looks like and two ways to repair it.
def demonstrate_batch_size_issue():
    """Build deliberately mismatched X/y tensors and print the fixes."""
    print("\n=== batch_size不匹配问题演示 ===")

    # Intentionally mismatched sample counts.
    features = torch.randn(32, 10)  # 32 samples
    labels = torch.randn(8, 1)      # 8 samples (mismatch on purpose)

    print(f"输入数据形状: {features.shape}")
    print(f"目标数据形状: {labels.shape}")
    print(f"输入batch_size: {features.size(0)}")
    print(f"目标batch_size: {labels.size(0)}")

    # Training directly on these would raise.
    print("\n如果直接使用这些不匹配的数据进行训练，会出现ValueError:")
    print("ValueError: Expected input batch_size (32) to match target batch_size (8)")

    # Fix 1: truncate both tensors to the shorter length.
    print("\n修复方法1: 调整数据使其匹配")
    common = min(features.size(0), labels.size(0))
    trimmed_x, trimmed_y = features[:common], labels[:common]
    print(f"修复后输入形状: {trimmed_x.shape}")
    print(f"修复后目标形状: {trimmed_y.shape}")

    # Fix 2: load the data correctly in the first place.
    print("\n修复方法2: 使用正确的数据加载方式")
    print("确保在数据加载过程中输入和目标始终具有相同的样本数")

# Explain why input/target batch sizes must match.
def explain_batch_size_matching():
    """Print the reasons batch-size alignment matters and common causes."""
    print("\n=== batch_size匹配的重要性 ===")

    # Iterate the points directly rather than via a named list.
    for point in (
        "1. 神经网络在计算损失时需要输入和目标具有相同的样本数",
        "2. 每个输入样本必须对应一个目标值",
        "3. batch_size不匹配会导致维度错误",
        "4. PyTorch的损失函数要求输入和目标的第0维(样本维)必须相同",
        "5. 在自定义数据加载器中需要确保正确配对输入和目标",
    ):
        print(point)

    print("\n常见导致batch_size不匹配的原因:")
    print("- 数据加载器实现错误")
    print("- 输入和目标来自不同的数据源且未正确对齐")
    print("- 数据预处理过程中改变了数据形状")
    print("- 使用了错误的索引方式")

# Describe how a correct data loader should be implemented.
def explain_correct_data_loading():
    """Print guidelines and a code sketch for a correct data loader."""
    print("\n=== 正确的数据加载实现 ===")

    print("正确的数据加载器应该:")
    print("1. 确保输入和目标数据来自相同的样本集合")
    print("2. 在每个batch中保持输入和目标的样本数一致")
    print("3. 正确处理最后一个batch(可能样本数不足)")
    print("4. 确保输入和目标在相同的设备上")

    # Emit the code sketch line by line so stdout matches a sequence
    # of individual print calls exactly.
    sketch = (
        "\n示例实现要点:",
        "# 在数据加载器中",
        "def __iter__(self):",
        "    for i in range(0, num_samples, batch_size):",
        "        batch_indices = indices[i:i+batch_size]",
        "        X_batch = X[batch_indices]  # 确保使用相同的索引",
        "        y_batch = y[batch_indices]  # 确保使用相同的索引",
        "        yield X_batch, y_batch      # 返回配对的数据",
        "\n在训练循环中:",
        "for inputs, targets in dataloader:",
        "    # 确保在相同设备上",
        "    inputs = inputs.to(device)",
        "    targets = targets.to(device)",
        "    # 检查batch_size是否匹配",
        "    assert inputs.size(0) == targets.size(0)",
    )
    for line in sketch:
        print(line)

# Program driver.
def main():
    """Run the explanation, demonstration, and training steps in order."""
    print("PyTorch batch_size匹配问题解决方案")
    print("=" * 50)

    # The steps are independent; run them in the documented order.
    steps = (
        explain_batch_size_matching,   # why matching matters
        explain_correct_data_loading,  # how to load data correctly
        demonstrate_batch_size_issue,  # show the failure and its fixes
        train_model_fixed,             # end-to-end training example
    )
    for step in steps:
        step()

    print("\n程序执行完成!")

# Script entry point: only run the demo when executed directly.
if __name__ == '__main__':
    main()