import torch 
import torchvision
from   text_encoder import build_encoder
from   vae          import build_vae
from   unet         import build_unet
from   diffusers    import DiffusionPipeline
from   datasets     import load_dataset
from   transformers import PreTrainedModel, PretrainedConfig

# Borrow only the noise scheduler and the tokenizer from a pretrained
# pipeline; its networks are discarded (we build and train our own below).
device = 'cuda' if torch.cuda.is_available() else 'cpu'

_pipeline = DiffusionPipeline.from_pretrained(
    'lansinuote/diffsion_from_scratch.params', safety_checker=None)
scheduler, tokenizer = _pipeline.scheduler, _pipeline.tokenizer

# Free the pipeline's weights — only scheduler/tokenizer are kept.
del _pipeline

#############################################################
#! 1. Dataset loading and preprocessing
# Load the training split of the paired image/text dataset.
dataset = load_dataset(path='lansinuote/diffsion_from_scratch', split='train')


# Image preprocessing: resize + center-crop to 512x512, then scale to [-1, 1].
compose = torchvision.transforms.Compose([
    torchvision.transforms.Resize(
        512,
        interpolation=torchvision.transforms.InterpolationMode.BILINEAR),
    torchvision.transforms.CenterCrop(512),
    torchvision.transforms.ToTensor(),               # HWC image -> CHW float in [0, 1]
    torchvision.transforms.Normalize([0.5], [0.5]),  # [0, 1] -> [-1, 1]
])

def preprocess(data):
    """Turn a batch of raw examples into model inputs.

    Applies the image transform pipeline to every image and tokenizes every
    caption to a fixed length of 77 ids (padded/truncated).
    """
    encoded = tokenizer.batch_encode_plus(
        data['text'],
        padding='max_length',
        truncation=True,
        max_length=77,
    )
    return {
        'pixel_values': [compose(image) for image in data['image']],
        'input_ids': encoded.input_ids,
    }


# Preprocess the whole dataset up front (drops the raw columns) and have
# __getitem__ return torch tensors.
dataset = dataset.map(
    preprocess,
    batched=True,
    batch_size=100,
    num_proc=1,
    remove_columns=['image', 'text'],
)
dataset.set_format(type='torch')


# DataLoader collation: stack the per-example tensors into a batch and move
# both fields to `device`.
def collate_fn(data):
    stacked = {
        'pixel_values': torch.stack([example['pixel_values'] for example in data]),
        'input_ids': torch.stack([example['input_ids'] for example in data]),
    }
    return {key: value.to(device) for key, value in stacked.items()}


# Micro-batch size of 1; gradient accumulation in train() builds the
# effective batch.
loader = torch.utils.data.DataLoader(
    dataset,
    shuffle=True,
    collate_fn=collate_fn,
    batch_size=1,
)


#############################################################
#! 2. Model setup

encoder = build_encoder()
vae = build_vae()
unet = build_unet()

# Only the UNet is trained; the text encoder and the VAE stay frozen in
# eval mode.
for frozen in (encoder, vae):
    frozen.requires_grad_(False)
    frozen.eval()
unet.requires_grad_(True)
unet.train()

for model in (encoder, vae, unet):
    model.to(device)

optimizer = torch.optim.AdamW(
    unet.parameters(),
    lr=1e-5,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    eps=1e-8,
)

criterion = torch.nn.MSELoss()

def get_loss(data):
    """Compute the diffusion training loss for one batch.

    1. Encode the caption and the image under no_grad (both nets are frozen).
    2. Draw Gaussian noise and one random timestep per example.
    3. Noise the latents with the scheduler and have the UNet predict the
       added noise; the loss is the MSE between prediction and true noise.
    """
    with torch.no_grad():
        out_encoder = encoder(data['input_ids'])        # text embedding
        out_vae = vae.encoder(data['pixel_values'])     # image -> latent distribution
        out_vae = vae.sample(out_vae) * 0.18215         # VAE latent scaling factor

    noise = torch.randn_like(out_vae)                   # target noise to predict

    # One random timestep per example.  The original hard-coded 1000 steps
    # and a batch size of 1; read both from the scheduler config and the
    # actual batch dimension instead (backward compatible at batch_size=1).
    batch_size = out_vae.shape[0]
    num_train_timesteps = scheduler.config.num_train_timesteps
    noise_step = torch.randint(0, num_train_timesteps, (batch_size, ),
                               device=out_vae.device).long()

    # Forward-diffuse the latents to `noise_step`, then predict the noise.
    out_vae_noise = scheduler.add_noise(out_vae, noise, noise_step)
    out_unet = unet(out_vae=out_vae_noise, out_encoder=out_encoder, time=noise_step)
    return criterion(out_unet, noise)


ACCUMULATE_STEPS = 4  # micro-batches summed into one optimizer step


def train():
    """Train the UNet for 400 epochs with gradient accumulation.

    Gradients from ACCUMULATE_STEPS consecutive micro-batches are summed
    before each optimizer step.  Each loss is divided by ACCUMULATE_STEPS so
    the accumulated gradient is the mean over the group rather than the sum
    (this answers the old "TODO: why / 4?").
    """
    loss_sum = 0
    for epoch in range(400):
        for i, data in enumerate(loader):
            # Scale so the sum of ACCUMULATE_STEPS backward passes equals
            # one averaged gradient.
            loss = get_loss(data) / ACCUMULATE_STEPS
            loss.backward()
            loss_sum += loss.item()

            # Step once per ACCUMULATE_STEPS micro-batches.  The original
            # tested `global_step % 4 == 0`, which fired on the FIRST
            # micro-batch of every group (the very first step updated after
            # a single backward); `+ 1` steps after the group completes.
            if (epoch * len(loader) + i + 1) % ACCUMULATE_STEPS == 0:
                # Clip the global gradient norm to 1.0 before the update to
                # guard against exploding gradients.
                torch.nn.utils.clip_grad_norm_(unet.parameters(), 1.0)
                optimizer.step()
                optimizer.zero_grad()

        if epoch % 10 == 0:
            print(epoch, loss_sum)
            loss_sum = 0


train()


# class Model(PreTrainedModel):
#     config_class = PretrainedConfig
    
#     def __init__(self, config):
#         super(Model, self).__init__(config)
#         self.unet = unet.to("cpu")
        
# # Push the trained UNet to the Hugging Face Hub (disabled)
# Model(PretrainedConfig()).push_to_hub(
#     repo_id='lansinuote/diffsion_from_scratch.unet',
#     use_auth_token=open('/root/hub_token.txt').read().strip())