import sys
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets
from torch.utils.data import DataLoader, random_split, Subset
import torchvision.transforms as transforms
from usr import Model, EarlyStop, Model_train, PlotSave
import json
import h5py
import time
# 添加模块所在的目录到 sys.path
module_path = r"D:\work\witin\Witin-NN-main\wwitinnntool-main\wwitinnntool-main\py38_win"
sys.path.append(module_path)
import witin_nn
import numpy as np
from witin_nn import GlobalConfigFactory, LayerConfigFactory, HandleNegInType, NoiseModel
from witin_nn import WitinLinear, WitinConv2d, WitinPReLU

# # 测试是否可以导入 witin_nn
# try:
#     import witin_nn
#     print("witin_nn 模块加载成功")
# except ImportError as e:
#     print(f"模块加载失败: {e}")

class ImageFolderWithPaths(datasets.ImageFolder):
    """ImageFolder variant whose ``__getitem__`` also returns the image's file path.

    Yields ``(image, label, path)`` instead of ``(image, label)`` — handy for
    logging or debugging which file produced a given sample.

    Note: the redundant ``__init__`` override was removed; it only forwarded
    ``(root, transform)`` to the parent and hid the rest of ImageFolder's
    constructor arguments (``target_transform``, ``loader``, ...). Dropping it
    is backward-compatible: existing ``ImageFolderWithPaths(root, transform)``
    calls still work.
    """

    def __getitem__(self, index):
        """Return ``(image, label, path)`` for the sample at *index*."""
        # Image and label come from the standard ImageFolder pipeline
        # (loader + transform applied).
        img, label = super().__getitem__(index)

        # self.samples holds (path, class_index) tuples in the same order
        # __getitem__ indexes into; ``self.imgs`` (used before) is just a
        # legacy alias of it.
        img_path = self.samples[index][0]

        return img, label, img_path

if __name__ == '__main__':
    # Fix RNG seeds for reproducible splits/initialization.
    seed = 32
    torch.manual_seed(seed)
    np.random.seed(seed)

    # Training-set preprocessing (includes data augmentation).
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(10),
        transforms.RandomResizedCrop(32),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4121, 0.1405, 0.3770), std=(0.2998, 0.1353, 0.1654)),  # voxforge
        # transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),
    ])

    # Test-set preprocessing (no augmentation, resize + normalization only).
    test_transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.4121, 0.1406, 0.3763), std=(0.3004, 0.1356, 0.1659)),
        # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])

    data_root = r'D:\work\witin\Witin-NN-main\wwitinnntool-main\wwitinnntool-main\test_demo\dataset\Voxforge-mini\Voxforge-mini-2'

    # Load once (transform-less) only to determine the split sizes and indices.
    base_dataset = datasets.ImageFolder(root=data_root)

    # 80% of the data for training, the remainder for testing.
    train_size = int(0.8 * len(base_dataset))
    test_size = len(base_dataset) - train_size

    # Randomly split the dataset into index sets.
    train_split, test_split = random_split(base_dataset, [train_size, test_size])

    # BUG FIX: the original code did
    #     train_dataset.dataset.transform = train_transform
    #     test_dataset.dataset.transform = test_transform
    # but both random_split subsets share the SAME underlying ImageFolder, so
    # the second assignment overwrote the first and training silently ran with
    # the (augmentation-free) test transform. Build one independent ImageFolder
    # per transform and reuse the split indices via Subset instead.
    train_dataset = Subset(
        datasets.ImageFolder(root=data_root, transform=train_transform),
        train_split.indices)
    test_dataset = Subset(
        datasets.ImageFolder(root=data_root, transform=test_transform),
        test_split.indices)

    # # Create data loaders
    # # Load the dataset using the custom ImageFolderWithPaths
    # dataset = ImageFolderWithPaths(root=r'D:\work\witin\Witin-NN-main\wwitinnntool-main\wwitinnntool-main\test_demo\dataset\Voxforge-mini\Voxforge-mini', transform=transform)

    # train_dataset = torchvision.datasets.CIFAR10(root='./dataset', train=True, download=True, transform=train_transform)
    # test_dataset = torchvision.datasets.CIFAR10(root='./dataset', train=False, download=True, transform=test_transform)

    print('train_dataset', len(train_dataset))
    print('test_dataset', len(test_dataset))

    # Create data loaders.
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

    num_epochs = 250
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Model.Lenet5().to(device)

    # NOTE(review): label_smoothing=0.5 is unusually aggressive — confirm this
    # is intentional (typical values are 0.05–0.2).
    criterion = nn.CrossEntropyLoss(label_smoothing=0.5)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # Halve the LR when the monitored metric plateaus for 10 epochs.
    # (`verbose` is deprecated in newer torch releases but kept for behavior parity.)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10, verbose=True, min_lr=1e-8)

    # Output directory for checkpoints, config dump, logs and plots.
    save_filepath = 'test_save/Voxforge/mini-2/quanti-auto-batchnorm-decay-niose8-3'
    model_save_path = f'{save_filepath}/final_model.pth'
    os.makedirs(save_filepath, exist_ok=True)
    early_stopping = EarlyStop.EarlyStopping(patience=20, verbose=True, save_path=save_filepath)

    # Persist the model configuration for later inspection.
    # NOTE(review): `configuerations` looks misspelled but must match the
    # attribute name defined on the project's Model class — verify there.
    model_set_json_file = f'{save_filepath}/model_set_config.json'
    with open(model_set_json_file, 'w') as json_file:
        json.dump(model.configuerations, json_file, indent=4)

    # Train, evaluate, then save metrics/plots.
    Model_train.train(model, train_loader, test_loader, criterion, optimizer, scheduler, early_stopping, device, num_epochs=num_epochs, log_path=save_filepath)
    model_acc = Model_train.evaluate_model(model, test_loader, model_save_path, device)
    PlotSave.Save_Plot(model, model_acc, save_filepath)
