#
import sys

# Make the vendored MAR project importable before its modules are loaded.
sys.path.append('./anns/mar')

import os
from typing import Dict, Optional

import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from torchvision.utils import save_image

from models import mar
from models.vae import AutoencoderKL
from util import download
from util.crop import center_crop_arr
import util.misc as misc

class MarApp(object):
    """Demo application for the MAR (Masked Autoregressive) image model.

    Entry points:
      * ``vae_demo`` — round-trips one dataset image through the KL-16 VAE
        (encode to a latent, decode back, save both images).
      * ``sample``   — generates class-conditional images with a pretrained
        MAR model and saves them as a grid.
    """

    def __init__(self):
        # Fully-qualified name used by the surrounding app framework.
        self.name = 'apps.mar.mar_app.MarApp'

    @staticmethod
    def startup(params: Optional[Dict] = None) -> None:
        """App entry point: print a banner and dispatch to a demo.

        Args:
            params: optional configuration dict forwarded to the demo.
                Defaults to an empty dict (fixed mutable-default anti-pattern).
        """
        params = {} if params is None else params
        print('Mar app v0.0.1 001')
        # MarApp.sample(params=params)
        MarApp.vae_demo(params=params)

    @staticmethod
    def vae_demo(params: Optional[Dict] = None) -> None:
        """Compress one image with the pretrained VAE and decompress it again.

        Loads the KL-16 VAE checkpoint, encodes the first batch of an
        ImageNet-Mini folder into a latent, decodes it back, and writes the
        original and the reconstruction to ``./work/images``.

        Args:
            params: unused; accepted for a uniform entry-point signature.
        """
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cpu":
            print("GPU not found. Using CPU instead.")
        # BUG FIX: the original called .cuda() unconditionally, which crashes
        # on CPU-only machines despite the fallback message above.
        vae = AutoencoderKL(
            embed_dim=16, ch_mult=(1, 1, 2, 2, 4),
            ckpt_path="anns/mar/work/pretrained_models/vae/kl16.ckpt",
        ).to(device).eval()
        # augmentation following DiT and ADM
        img_size = 256
        transform_train = transforms.Compose([
            transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, img_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        dataset_train = datasets.ImageFolder('anns/mar/work/datasets/ImageNet-Mini', transform=transform_train)
        print(dataset_train)
        # Single-process setup: one replica, rank 0 (kept from the original
        # distributed-training scaffolding).
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=1, rank=0, shuffle=True
        )
        print("### Sampler_train = %s" % str(sampler_train))
        data_loader_train = torch.utils.data.DataLoader(
            dataset_train, sampler=sampler_train,
            batch_size=1,
            num_workers=8,
            pin_memory=False,
            drop_last=True,
        )
        # Ensure the output directory exists before save_image writes to it.
        os.makedirs('./work/images', exist_ok=True)
        for samples, labels in data_loader_train:
            samples = samples.to(device, non_blocking=True)  # (batch_size, 3, 256, 256)
            labels = labels.to(device, non_blocking=True)  # (batch_size,) class indices
            with torch.no_grad():
                # DiagonalGaussianDistribution: a samplable latent distribution.
                posterior = vae.encode(samples)
                # normalize the std of latent to be 1. Change it if you use a different tokenizer
                x = posterior.sample()  # .mul_(0.2325) # (batch_size, 16, 16, 16)
                print(f'压缩后图片: x: {x.shape};')
                x_ = vae.decode(x)
                print(f'恢复后图片: x_: {x_.shape};')
                save_image(samples[0], './work/images/org_001.png')
                save_image(x_[0], './work/images/restored_001.png')
            # BUG FIX: was exit(0), which killed the whole process and skipped
            # the closing message below; the demo only needs the first batch.
            break
        print('^_^ The End! ^_^')

    @staticmethod
    def sample(params: Optional[Dict] = None) -> None:
        """Generate class-conditional images with a pretrained MAR model.

        Args:
            params: expects key ``official_pt`` (1 -> use the official
                pretrained checkpoint; anything else / missing -> use the
                locally trained one).
        """
        params = {} if params is None else params
        torch.set_grad_enabled(False)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cpu":
            print("GPU not found. Using CPU instead.")
        model_type = "mar_base"  # one of: mar_base / mar_large / mar_huge
        # Robustness: .get() so a missing key falls back to the local
        # checkpoint instead of raising KeyError.
        if params.get('official_pt') == 1:
            # BUG FIX: the original combined an f-string with a redundant
            # .format() call; the f-string alone performs the interpolation.
            pt_fn = f"anns/mar/work/pretrained_models/mar/{model_type}/checkpoint-last.pth"
        else:
            pt_fn = 'anns/mar/work/outputs/checkpoint-last.pth'
        print(f'权值文件为：{pt_fn};')
        diffloss_d = 6
        diffloss_w = 1024
        batch_size = 64
        num_sampling_steps_diffloss = 100  # valid range: 1..1000
        model = mar.__dict__[model_type](
            buffer_size=batch_size,
            diffloss_d=diffloss_d,
            diffloss_w=diffloss_w,
            num_sampling_steps=str(num_sampling_steps_diffloss)
        ).to(device)
        # BUG FIX: map_location so a CUDA-trained checkpoint also loads on CPU.
        state_dict = torch.load(pt_fn, map_location=device)["model_ema"]
        model.load_state_dict(state_dict)
        model.eval()  # important!
        # BUG FIX: .to(device) instead of unconditional .cuda() (CPU fallback).
        vae = AutoencoderKL(
            embed_dim=16, ch_mult=(1, 1, 2, 2, 4),
            ckpt_path="anns/mar/work/pretrained_models/vae/kl16.ckpt",
        ).to(device).eval()
        # Set user inputs:
        seed = 0
        torch.manual_seed(seed)
        np.random.seed(seed)
        num_ar_steps = 64  # valid range: 1..256
        cfg_scale = 4  # classifier-free-guidance scale, 1..10
        cfg_schedule = "constant"  # "linear" or "constant"
        temperature = 1.0  # 0.9..1.1
        class_labels = 8, 61, 88, 300, 39, 918, 23, 99  # ImageNet class ids
        samples_per_row = 4
        # Ensure the output directory exists before save_image writes to it.
        os.makedirs('./work/images', exist_ok=True)
        # Autocast only applies on CUDA; disable it explicitly on CPU.
        with torch.cuda.amp.autocast(enabled=(device == "cuda")):
            sampled_tokens = model.sample_tokens(
                bsz=len(class_labels), num_iter=num_ar_steps,
                cfg=cfg_scale, cfg_schedule=cfg_schedule,
                # BUG FIX: torch.tensor(..., device=device) instead of
                # torch.Tensor(...).long().cuda(), which crashed on CPU.
                labels=torch.tensor(class_labels, dtype=torch.long, device=device),
                temperature=temperature, progress=True)
            # Un-normalize the latent std (0.2325) before decoding.
            sampled_images = vae.decode(sampled_tokens / 0.2325)
        # Save and display images:
        save_image(sampled_images, "./work/images/a001.png", nrow=int(samples_per_row),
                   normalize=True, value_range=(-1, 1))
        print('^_^ The End! ^_^')
