import os
import torch
from models.CnnNet import CnnNet
from torchvision import datasets,transforms
import torch.optim as optim #导入优化器
import torch.nn as nn
from tqdm import tqdm

# Automatically select the device: use the GPU when available, else the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def train():
    """Train the CNN on MNIST, evaluate after every epoch, keep the best model.

    Side effects: triggers a dataset download when missing (via ``download()``),
    writes the best weights to ``./models/cnnnet.pt`` and prints per-epoch
    loss/accuracy. Returns None.
    """
    download()
    # 1. Preprocessing: convert to Tensor in [0, 1], then standardize with the
    # canonical MNIST statistics (mean=0.1307, std=0.3081).
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    # 2. Datasets and loaders.
    train_loader, test_loader = _build_loaders(transform)
    # 3. Model, optimizer, LR schedule and loss.
    model = CnnNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    # mode='max' because the scheduler is stepped on test accuracy
    # (higher is better); halves the LR after 3 stagnant epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=3, factor=0.5)
    criterion = nn.CrossEntropyLoss()

    # Per-epoch history (kept for parity with the original; not returned).
    train_losses = []
    train_accuracys = []
    test_accuracys = []

    # 4. Training loop with early stopping on test accuracy.
    epochs = 5
    max_accuracy = 0.0          # best test accuracy observed so far
    early_stop_patience = 5     # epochs without improvement before stopping
    no_improve = 0
    model_path = './models/cnnnet.pt'
    # BUGFIX: make sure the checkpoint directory exists before the first save,
    # otherwise torch.save raises FileNotFoundError on a fresh checkout.
    os.makedirs(os.path.dirname(model_path), exist_ok=True)

    for epoch in range(epochs):
        avg_loss, train_accuracy = _run_train_epoch(
            model, train_loader, optimizer, criterion, epoch, epochs)
        train_losses.append(avg_loss)
        train_accuracys.append(train_accuracy)
        print(f'Epoch {epoch+1}/{epochs} Loss: {avg_loss:.4f} Train accuracy: {train_accuracy:.2f}')

        test_accuracy = _evaluate(model, test_loader, epoch, epochs)
        test_accuracys.append(test_accuracy)
        # BUGFIX: accuracy is a fraction in [0, 1]; the original printed the raw
        # fraction followed by a literal '%' (e.g. "0.98%"). ':.2%' scales it.
        print(f'\nEpoch {epoch+1}/{epochs} Test Accuracy:{test_accuracy:.2%}')
        scheduler.step(test_accuracy)

        # 5. Save the best model; the epsilon avoids re-saving on float noise.
        if test_accuracy > max_accuracy + 1e-6:
            no_improve = 0
            max_accuracy = test_accuracy
            torch.save(model.state_dict(), model_path)
            print(f'Model saved with accuracy:{max_accuracy:.4f}')
        else:
            no_improve += 1
            if no_improve >= early_stop_patience:
                print(f"Early stopping at epoch {epoch}")
                break
    print(f"Best accuracy on test set:{max_accuracy:.2f}")


def _build_loaders(transform):
    """Return (train_loader, test_loader) for the MNIST data under ./data."""
    train_dataset = datasets.MNIST(root='./data', train=True, download=False, transform=transform)
    test_dataset = datasets.MNIST(root='./data', train=False, download=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1000, shuffle=False)
    return train_loader, test_loader


def _run_train_epoch(model, loader, optimizer, criterion, epoch, epochs):
    """Run one training epoch; return (average batch loss, train accuracy)."""
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for inputs, labels in tqdm(loader, desc=f'Epoch {epoch+1}/{epochs} - Training'):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        # Clip gradients to guard against gradient explosion.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        running_loss += loss.item()
        # outputs is [batch, num_classes]; argmax over dim 1 is the prediction.
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    return running_loss / len(loader), correct / total


def _evaluate(model, loader, epoch, epochs):
    """Evaluate on the test loader; return accuracy as a fraction in [0, 1]."""
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in tqdm(loader, desc=f"Epoch {epoch+1}/{epochs} - Testing"):
            data, target = data.to(device), target.to(device)
            output = model(data)
            _, pred = torch.max(output, 1)
            total += target.size(0)
            correct += pred.eq(target).sum().item()
    return correct / total

def saveOnnxModel(model):
    """Export *model* to ONNX format so it can be consumed from Go.

    Requires the onnx library: ``conda install -c conda-forge onnx``.

    Args:
        model: a network accepting input of shape (N, 1, 28, 28) on ``device``.

    Side effects: writes ``./models/cnnnet.onnx`` and prints the output path.
    """
    onnx_path = './models/cnnnet.onnx'
    # BUGFIX: ensure the output directory exists so the export cannot fail
    # with FileNotFoundError on a fresh checkout.
    os.makedirs(os.path.dirname(onnx_path), exist_ok=True)
    # Dummy input matching the model's expected input shape; the batch
    # dimension is declared dynamic below, so any batch size works at runtime.
    dummy_input = torch.randn(1, 1, 28, 28).to(device)
    torch.onnx.export(
        model,
        dummy_input,
        onnx_path,
        input_names=['input'],
        output_names=['output'],
        dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}},
        opset_version=11,
    )
    print(f"Model exported to ONNX format at {onnx_path}")

def download():
    """Download the MNIST dataset into ./data unless it is already there."""
    data_path = "./data/MNIST"
    # Only hit the network when the local copy is missing.
    if not os.path.exists(data_path):
        print("开始下载数据集...")
        datasets.MNIST(root='./data', download=True)
    else:
        print(f"已找到数据集：{os.path.abspath(data_path)}")

def main():
    """Entry point: run the full training pipeline."""
    train()

if __name__ == "__main__":
    main() # script entry point: only runs when executed directly, not on import