import torch
from torch import nn
import torch.optim.optimizer
from model.unet import UNet
import math
from data.load_data import show
from tqdm import tqdm
# 定义我的 α和β以及连乘积
STEP = 1000
MIN_BETA, MAX_BETA=1e-4, 2e-2
beta=torch.arange(start=MIN_BETA, end=MAX_BETA, step=(MAX_BETA-MIN_BETA)/STEP)
alpha=1-beta
alpha_tandem=[]
for i in range(STEP):
    alpha_tandem.append(alpha[i]*alpha_tandem[i-1] if i>0 else alpha[i])

# Shared hyper-parameters
P_SIZE=128  # spatial size (height == width) of the square training images

# Network and training utilities
net=UNet(P_SIZE)
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-8, weight_decay=0)

# Loss function: MSE averaged over all elements of the batch
loss_func=nn.MSELoss(reduction='mean')

# 定义训练过程
# Training loop
def train(data_loader, epoch, device=None):
    """Train the global `net` for `epoch` passes over `data_loader`.

    For every batch and every diffusion step s, a noised sample x_t is built
    with the closed-form forward process and the network is trained to
    reconstruct the clean image from it.
    NOTE(review): DDPM conventionally trains the network to predict the
    injected noise (`rand`), not the clean image — confirm which target
    `UNet` is meant to learn.

    Args:
        data_loader: iterable yielding image batches shaped (N, 3, P_SIZE, P_SIZE)
            — assumed from the noise tensor built below; TODO confirm.
        epoch: number of passes over the data.
        device: 'cuda' or 'cpu'; auto-detected when None.

    Side effects: updates `net` in place and saves its state_dict to disk.
    """
    global net, STEP, P_SIZE, optimizer
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net.to(device)
    net.train()  # make the training mode explicit (dropout/BN, if any)
    for e in range(epoch):
        for b, data in enumerate(tqdm(data_loader)):
            data = data.to(device)  # move the batch to the training device
            for s in range(STEP):
                rand = torch.randn(size=[len(data), 3, P_SIZE, P_SIZE], dtype=torch.float32, device=device)
                # Forward process: x_t = sqrt(a_bar_s)*x0 + sqrt(1-a_bar_s)*eps
                x_t = math.sqrt(alpha_tandem[s]) * data + math.sqrt(1 - alpha_tandem[s]) * rand
                generated = net(x_t)
                loss = loss_func(generated, data)
                # reduction='mean' already yields a scalar; the original
                # `.sum()` was a no-op and is dropped.
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
    print("Training completed.")
    # Save the final weights. The original used the loop variable `e`, which
    # is undefined (NameError) when epoch == 0; `epoch - 1` is the same value
    # for epoch >= 1 and never crashes.
    torch.save(net.state_dict(), f"model_epoch_{epoch - 1}.pth")
    
def test(data_loader, epoch, device=None):
    """Report the mean reconstruction loss of `net` over `data_loader`.

    Averages the MSE across all diffusion steps per batch, then across
    batches, and prints the result.

    Args:
        data_loader: iterable yielding image batches shaped (N, 3, P_SIZE, P_SIZE)
            — assumed from the noise tensor built below; TODO confirm.
        epoch: unused; kept for signature compatibility with `train`.
        device: 'cuda' or 'cpu'; auto-detected when None.
    """
    global net, STEP, P_SIZE
    # Bug fix: the original `error, loss1, loss2 = 0.0` raised TypeError
    # (cannot unpack a float into three names).
    error = 0.0
    loss1 = 0.0
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net.to(device)
    net.eval()  # NOTE(review): disables dropout/BN updates, if UNet has any
    with torch.no_grad():  # evaluation needs no autograd graph
        for b, data in enumerate(tqdm(data_loader)):
            data = data.to(device)  # move the batch to the evaluation device
            loss2 = 0.0
            for s in range(STEP):
                rand = torch.randn(size=[len(data), 3, P_SIZE, P_SIZE], dtype=torch.float32, device=device)
                x_t = math.sqrt(alpha_tandem[s]) * data + math.sqrt(1 - alpha_tandem[s]) * rand
                generated = net(x_t)
                loss = loss_func(generated, data)
                loss2 += loss.item()
            loss1 += loss2 / STEP
    error += loss1 / len(data_loader)
    # `error` is a plain Python float: the original `error.to('cpu')` would
    # have raised AttributeError, so it is removed.
    print("loss is: ", error)
    
def generate(label=None, pth=None):
    """Sample one image by running the reverse process from pure noise.

    Args:
        label: unused placeholder (presumably for class-conditional
            generation later; verify intent).
        pth: optional path to a saved state_dict to load before sampling.
    """
    global net, STEP
    if pth is not None:
        net.load_state_dict(torch.load(pth))
    net.eval()
    # Start from Gaussian noise in the shape the network was trained on:
    # (batch=1, channels=3, P_SIZE, P_SIZE). The original (P_SIZE, P_SIZE)
    # tensor did not match the training input shape and would fail inside
    # the UNet. Put it on the same device as the model's parameters.
    device = next(net.parameters()).device
    x = torch.randn(size=[1, 3, P_SIZE, P_SIZE], device=device)
    # NOTE(review): a proper DDPM reverse step combines the model output
    # with the current sample via beta/alpha and adds fresh noise each step;
    # repeatedly applying the net alone is a crude approximation — confirm
    # the intended sampler.
    with torch.no_grad():
        for i in range(STEP):
            x = net(x)
    # Display the generated image.
    show(x)
    