import torch 
import torchvision
from   text_encoder import build_encoder
from   vae          import build_vae
from   unet         import build_unet
from   diffusers    import DiffusionPipeline
from   datasets     import load_dataset
from   transformers import PreTrainedModel, PretrainedConfig
from   matplotlib   import pyplot as plt

# Run on GPU when available; every model and tensor below is moved here.
device    = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load the pretrained pipeline only to borrow its scheduler and tokenizer;
# the heavy sub-models are rebuilt from the local from-scratch modules below.
pipeline  = DiffusionPipeline.from_pretrained('lansinuote/diffsion_from_scratch.params', safety_checker=None)

scheduler = pipeline.scheduler
tokenizer = pipeline.tokenizer

# Free the pipeline's own sub-models — only scheduler + tokenizer are kept.
del pipeline

# Hand-written re-implementations of the three Stable Diffusion components.
encoder = build_encoder()
vae     = build_vae()
unet    = build_unet()

encoder.to(device)
vae.to(device)
unet.to(device)

# Inference only: switch off dropout / batch-norm training behavior.
encoder.eval()
vae.eval()
unet.eval()

@torch.no_grad()
def generate(text):
    """Generate one 512x512 image from a text prompt.

    Uses classifier-free guidance: each denoising step runs the UNet on a
    duplicated latent (unconditional + conditional) and combines the two noise
    predictions with guidance scale 7.5 before the scheduler advances the
    latent.

    Args:
        text: the prompt string.

    Returns:
        A float numpy array of shape (512, 512, 3) with values in [0, 1].
    """
    # Tokenize the prompt and an empty (negative) prompt, padded to a fixed
    # length of 77 tokens. NOTE(fix): the kwarg is `truncation`, not
    # `truncate` — the misspelled kwarg was silently ignored, so over-long
    # prompts were never actually truncated.
    pos = tokenizer(text, padding="max_length", max_length=77,
                    truncation=True, return_tensors="pt"
                ).input_ids.to(device=device)

    neg = tokenizer("", padding="max_length", max_length=77,
                    truncation=True, return_tensors="pt"
                ).input_ids.to(device=device)

    pos = encoder(pos)                                      # [1, 77, 768]
    neg = encoder(neg)

    # Batch [negative, positive] embeddings so one UNet pass covers both.
    out_encoder = torch.cat((neg, pos), dim=0)              # [2, 77, 768]
    # Start from pure Gaussian noise in the VAE latent space.
    out_vae     = torch.randn(1, 4, 64, 64, device=device)

    scheduler.set_timesteps(50, device=device)              # 50 denoising steps (timesteps run ~980 down to 0)
    for time in scheduler.timesteps:
        #! 1. Duplicate the latent to match the [neg, pos] conditioning batch.
        noise = torch.cat((out_vae, out_vae), dim=0)        # [2, 4, 64, 64]
        noise = scheduler.scale_model_input(noise, time)

        #! 2. Predict noise for the unconditional and conditional branches.
        pred_noise = unet(out_vae=noise, out_encoder=out_encoder, time=time)  # [2, 4, 64, 64],[2, 77, 768],scalar -> [2, 4, 64, 64]

        #! 3. Classifier-free guidance: uncond + scale * (cond - uncond).
        pred_noise = pred_noise[0] + 7.5 * (pred_noise[1] - pred_noise[0])    # -> [4, 64, 64] (broadcasts against the [1, 4, 64, 64] latent)

        #! 4. Scheduler step: remove predicted noise, get next latent.
        out_vae = scheduler.step(pred_noise, time, out_vae).prev_sample       # [1, 4, 64, 64]

    #! 5. Decode the latent back to pixel space. NOTE(fix): `vae.decoer` was a
    # typo for `vae.decoder` and raised AttributeError at runtime.
    out_vae = 1 / 0.18215 * out_vae    # undo the SD latent scaling factor
    image   = vae.decoder(out_vae)                                            # [1, 4, 64, 64] -> [1, 3, 512, 512]

    # Map from [-1, 1] to [0, 1] and convert to HWC numpy.
    image = image.cpu()
    image = (image + 1) / 2
    image = image.clamp(0, 1)
    image = image.permute(0, 2, 3, 1)
    return image.numpy()[0]

def show():
    """Generate images for six fixed sample prompts and display them in a 2x3 grid."""
    prompts = [
        'a drawing of a star with a jewel in the center',   # Staryu (presumably — from the original Chinese note)
        'a drawing of a woman in a red cape',               # Jynx
        'a drawing of a dragon sitting on its hind legs',   # Dragonite
        'a drawing of a blue sea turtle holding a rock',    # Lapras
        'a blue and white bird with its wings spread',      # Articuno
        'a blue and white stuffed animal sitting on top of a white surface',  # Snorlax
    ]

    # Generate everything first, then lay the results out on one figure.
    images = [generate(prompt) for prompt in prompts]

    plt.figure(figsize=(10, 5))
    for idx, img in enumerate(images):
        plt.subplot(2, 3, idx + 1)
        plt.imshow(img)
        plt.axis('off')

    plt.show()

# Module-level entry point: generates and displays the sample grid on import.
show()


# Wrapper class (kept for reference): adapts the local UNet to the
# HuggingFace PreTrainedModel interface so trained weights can be loaded
# from the hub.
# class Model(PreTrainedModel):
#     config_class = PretrainedConfig

#     def __init__(self, config):
#         super().__init__(config)
#         self.unet = unet.to('cpu')


# # Load the trained model
# unet = Model.from_pretrained('lansinuote/diffsion_from_scratch.unet').unet
# unet.eval().to(device)

# show()