import os
from torchvision import datasets
import torchvision.transforms as transforms
from torch.optim import Adam
from utils.networkHelper import *

from DiffusionModels.noisePredictModels.Unet.UNet import Unet
from utils.trainNetworkHelper import SimpleDiffusionTrainer
from DiffusionModels.diffusionModels.simpleDiffusion.simpleDiffusion import DiffusionModel

import matplotlib.pyplot as plt

# ---- Dataset -------------------------------------------------------------
# 128x128 RGB images, random horizontal flip for augmentation; ToTensor maps
# to [0, 1] and the (0.5, 0.5, 0.5) normalization rescales to [-1, 1].
transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# Every sample's label is collapsed to 0 — class labels are irrelevant for
# unconditional diffusion training.
imagenet_data = datasets.ImageFolder(
    root="./dataset/train_ddpm/train4",
    transform=transform,
    target_transform=lambda x: 0,
)

# Run tag; embedded in the checkpoint/output directory names further below.
train = 4

image_size = 128
channels = 3
batch_size = 1

data_loader = torch.utils.data.DataLoader(
    imagenet_data,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,
)


# ---- Model / optimizer / trainer -----------------------------------------
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Per-stage channel multipliers for the U-Net.
dim_mults = (1, 2, 4,)

# Noise-prediction network; base width equals the image resolution here.
denoise_model = Unet(
    dim=image_size,
    channels=channels,
    dim_mults=dim_mults,
)

# Forward diffusion: 1000 steps under a linear beta schedule from 1e-4 to 2e-2.
timesteps = 1000
schedule_name = "linear_beta_schedule"
DDPM = DiffusionModel(
    schedule_name=schedule_name,
    timesteps=timesteps,
    beta_start=0.0001,
    beta_end=0.02,
    denoise_model=denoise_model,
).to(device)

optimizer = Adam(DDPM.parameters(), lr=1e-3)
epoches = 30

Trainer = SimpleDiffusionTrainer(
    epoches=epoches,
    train_loader=data_loader,
    optimizer=optimizer,
    device=device,
    timesteps=timesteps,
)

# ---- Checkpoint directory ------------------------------------------------
root_path = "./saved_train_models"
# Encode every run hyperparameter in the directory name so checkpoints from
# different configurations never collide.
setting = "train{}_imageSize{}_epoch{}_channels{}_dimMults{}_timeSteps{}_scheduleName{}".format(train, image_size, epoches, channels, dim_mults, timesteps, schedule_name)

saved_path = os.path.join(root_path, setting)
# exist_ok=True avoids the check-then-create race of the former
# `if not os.path.exists(...)` guard and matches the makedirs call used for
# the generated-image directory later in this script.
os.makedirs(saved_path, exist_ok=True)


# Loading an already-trained model: if training has already been done,
# uncomment the two lines below to restore the best checkpoint.
# best_model_path = saved_path + '/' + 'BestModel.pth'
# DDPM.load_state_dict(torch.load(best_model_path))

# If the model is already trained, comment out the line below (and uncomment
# the two lines above); otherwise run it to train from scratch. The trainer
# returns the trained model and writes checkpoints under saved_path.
DDPM = Trainer(DDPM, model_save_path=saved_path)

# ---- Sampling ------------------------------------------------------------
# Draw 64 images from the trained diffusion model.
samples = DDPM(mode="generate", image_size=image_size, batch_size=64, channels=channels)

# ---- Save every generated image ------------------------------------------
root_path = "./dataset/generated"
setting = "train{}_imageSize{}_epoch{}".format(train, image_size, epoches)
save_path = os.path.join(root_path, setting)
os.makedirs(save_path, exist_ok=True)

# samples[-1] is the output of the final denoising step, one entry per image.
# NOTE(review): entries appear to be numpy arrays (torch.from_numpy below) —
# confirm against DiffusionModel's generate path.
for idx, arr in enumerate(samples[-1]):
    chw = arr.reshape(channels, image_size, image_size)
    pil_image = reverse_transform(torch.from_numpy(chw))
    pil_image.save(os.path.join(save_path, f"generated_image_{idx}.png"))

print(f"All generated images saved to {save_path}")



