from reflow.diffusers import AltDiffusionPipeline, StableDiffusionPipeline
from diffusers import DPMSolverMultistepScheduler,EulerDiscreteScheduler,EulerAncestralDiscreteScheduler 
from reflow.diffusers.schedulers.scheduling_euler_dummy import EulerDummyScheduler
import random
import numpy as np
import torch
from loguru import logger
import random
import os

from reflow.data.utils import scale_image
from diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer, XLMRobertaTokenizer
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation

# Registry mapping config keys (e.g. config.diffusers.tokenizer) to the model
# classes instantiated by create_models() via from_pretrained / from_config.
_MODELS = {
    # text encoders + tokenizers for Stable Diffusion (CLIP) ...
    'clip_text_model': CLIPTextModel,
    'clip_tokenizer': CLIPTokenizer,

    # ... and for AltDiffusion (XLM-Roberta)
    'xlm_roberta_text_model': RobertaSeriesModelWithTransformation,
    'xlm_roberta_tokenizer': XLMRobertaTokenizer,

    # image autoencoder and denoising backbone
    'autoencoder_kl': AutoencoderKL,
    'unet_2d_condition_model': UNet2DConditionModel,
}


def create_models(config):
    """Build (tokenizer, text_encoder, vae, score_model) from a diffusers checkpoint.

    tokenizer / text_encoder / vae are fixed and loaded with the HuggingFace
    ``from_pretrained`` mechanism.  The score model (nn.Module -> ModelMixin ->
    UNet2DConditionModel) is compatible with reflow's load/save scheme and can
    be created in two ways:

    1. built fresh from the unet config, without loading weights::

        config = UNet2DConditionModel.load_config(
            'checkpoints/AltDiffusion', subfolder='unet')
        unet = UNet2DConditionModel.from_config(config)

    2. initialised with pretrained text-to-image unet weights
       (sd or altDiff unet weights)::

        unet = UNet2DConditionModel.from_pretrained(
            'checkpoints/AltDiffusion', subfolder='unet')
    """
    cfg = config.diffusers

    def _build(model_key, subfolder, ckpt_path, load_weights=True):
        # model_key selects the class from the _MODELS registry;
        # subfolder is the component directory inside the checkpoint.
        model_cls = _MODELS[model_key]
        if not load_weights:
            # architecture only, randomly initialised
            model_config = model_cls.load_config(ckpt_path, subfolder=subfolder)
            return model_cls.from_config(model_config)
        return model_cls.from_pretrained(ckpt_path, subfolder=subfolder)

    # frozen components
    tokenizer = _build(cfg.tokenizer, 'tokenizer', cfg.ckpt_path)
    text_encoder = _build(cfg.text_encoder, 'text_encoder', cfg.ckpt_path)
    vae = _build(cfg.vae, 'vae', cfg.ckpt_path)

    # trainable denoiser; weights loaded only when load_score_model is set
    score_model = _build(cfg.score_model, 'unet', cfg.ckpt_path,
                         load_weights=cfg.load_score_model)
    if cfg.score_model_ckpt is not None:
        # an explicit score-model checkpoint overrides the unet weights above
        state_dict = torch.load(cfg.score_model_ckpt, map_location='cpu')
        score_model.load_state_dict(state_dict, strict=True)
        logger.info(
            f"use score model ckpt <<{cfg.score_model_ckpt}>>")

    # freeze vae and text_encoder
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)

    if cfg.gradient_checkpointing:
        score_model.enable_gradient_checkpointing()

    if cfg.use_xformers:
        try:
            score_model.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
            )

    return tokenizer, text_encoder, vae, score_model


def devide_to_groups(seq: list, num_groups):
    """Split `seq` into consecutive chunks of size ceil(len(seq) / num_groups).

    Returns (groups, groups_se): the chunks themselves and their
    (start, end) index pairs in the original sequence.  The final chunk may be
    shorter, and fewer than `num_groups` chunks can result when the remainder
    is small.
    """
    total = len(seq)
    if total == 0:
        return [], []
    # ceiling division: bump the quotient when there is a remainder
    chunk = total // num_groups + (1 if total % num_groups else 0)
    groups = []
    groups_se = []
    for start in range(0, total, chunk):
        end = min(start + chunk, total)
        groups.append(seq[start:end])
        groups_se.append((start, end))
    return groups, groups_se


def nothing(obj):
    """Return True if `obj` is "empty": None or the empty string.

    Everything else (including 0, False, and empty containers) is considered
    non-empty, matching the original behavior.
    """
    # `is None` instead of `== None`: identity check never invokes a custom
    # __eq__ (which e.g. numpy arrays override to return element-wise results).
    return obj is None or obj == ''


def decode_latents(vae, latents, float=True, cpu=True, permute=False) -> torch.Tensor:
    """Decode VAE latents into images clamped to [0, 1].

    Args:
        vae: autoencoder exposing `.dtype` and `.decode(...)` (e.g. AutoencoderKL).
        latents: latent tensor; rescaled by 1/0.18215 (the SD VAE scaling
            factor) before decoding.
        float: cast the result to float32 — this does not cause significant
            overhead and is compatible with bfloat16.  (NOTE: the parameter
            shadows the builtin `float`; name kept for caller compatibility.)
        cpu: move the result to CPU.
        permute: reorder from NCHW to NHWC.
    """
    scaled = 1 / 0.18215 * latents.to(vae.dtype)
    image = vae.decode(scaled).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    if float:
        image = image.float()
    if cpu:
        image = image.cpu()
    if permute:
        image = image.permute(0, 2, 3, 1)
    return image


def get_model_fn(model, train=False):
    """Create a function to give the output of the score-based model.

    Args:
      model: The score model (a torch.nn.Module).
      train: `True` for training and `False` for evaluation.

    Returns:
      A model function that switches the module into the requested mode on
      every call and forwards all arguments to it unchanged.
    """

    def model_fn(*args, **kwargs):
        """Compute the output of the score-based model.

        Args:
          x: A mini-batch of input data.
          labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently
                for different models.

        Returns:
          A tuple of (model output, new mutable states)
        """
        # set train/eval mode before each forward pass
        set_mode = model.train if train else model.eval
        set_mode()
        return model(*args, **kwargs)

    return model_fn


def to_flattened_numpy(x):
    """Flatten a torch tensor `x` and convert it to a 1-D numpy array."""
    detached = x.detach().cpu().numpy()
    return detached.reshape(-1)


def from_flattened_numpy(x, shape):
    """Form a torch tensor with the given `shape` from a flattened numpy array `x`."""
    tensor = torch.from_numpy(x)
    return tensor.reshape(shape)


def set_seed(seed: int,):
    """
    Helper function for reproducible behavior: seeds `random`, `numpy`, and
    `torch` (CPU and every visible CUDA device; the CUDA call is a no-op
    without a GPU).

    Args:
            seed (`int`): The seed to set.
    """
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)


def to_device(data, device):
    """Best-effort move of every dict value to `device`, in place.

    Values without a `.to` method (strings, ints, ...) are left untouched.
    Non-dict `data` is returned unchanged.

    Args:
        data: typically a batch dict whose values may be tensors/modules.
        device: target device passed to each value's `.to`.

    Returns:
        The same `data` object (mutated in place when it is a dict).
    """
    if isinstance(data, dict):
        for k, v in data.items():
            try:
                data[k] = v.to(device)
            except AttributeError:
                # value has no .to() — keep it as-is. Unlike a bare `except:`,
                # real transfer errors (e.g. an invalid device) now propagate
                # instead of being silently swallowed.
                pass
    return data


def cycle(dl):
    """Yield from `dl` forever, re-iterating it after each exhaustion.

    Unlike itertools.cycle this does not cache items, so a dataloader that
    reshuffles per epoch keeps reshuffling.
    """
    while True:
        yield from dl


# Registries mapping config names to diffusers pipeline / scheduler classes.
_PIPELINES = {
    'alt_diffusion': AltDiffusionPipeline,
    'stable_diffusion': StableDiffusionPipeline,
}
_SCHEDULERS = {
    'euler_dummy': EulerDummyScheduler,  # project-local scheduler (reflow)
    'euler': EulerDiscreteScheduler,
    'euler_a': EulerAncestralDiscreteScheduler,
    'dpm_solver_multi': DPMSolverMultistepScheduler,
}


# # ! deprecated
# def restore_checkpoint(ckpt_path, state, device):
#     ckpt_path = Path(ckpt_path)
#     if not ckpt_path.exists():
#         logger.warning(
#             f"No checkpoint found at {str(ckpt_path)}. Returned the same state as input")
#         return state
#     else:
#         loaded_state = torch.load(str(ckpt_path), map_location=device)
#         state['model'].load_state_dict(loaded_state['model'], strict=False)
#         state['ema'].load_state_dict(loaded_state['ema'])
#         state['step'] = loaded_state['step']

#         if loaded_state.get('optimizer', None) and state.get('optimizer', None):
#             state['optimizer'].load_state_dict(loaded_state['optimizer'])

#         return state

# # ! deprecated
# def save_checkpoint(ckpt_path, state):
#     saved_state = {
#         # 'optimizer': state['optimizer'].state_dict(),
#         'model': state['model'].state_dict(),
#         'ema': state['ema'].state_dict(),
#         'step': state['step']
#     }
#     if state.get('optimizer', None):
#         saved_state['optimizer'] = state['optimizer'].state_dict()
#     torch.save(saved_state, ckpt_path)

# # ! deprecated
# def optimization_manager(config):
#     """Returns an optimize_fn based on `config`."""

#     def optimize_fn(
#         optimizer,
#         params,
#         step,
#         lr=config.optim.lr,
#         warmup=config.optim.warmup,
#         grad_clip=config.optim.grad_clip
#     ):
#         """Optimizes with warmup and gradient clipping (disabled if negative)."""
#         if warmup > 0:
#             for g in optimizer.param_groups:
#                 g['lr'] = lr * np.minimum(step / warmup, 1.0)
#         if grad_clip >= 0:
#             torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)
#         optimizer.step()

#     return optimize_fn
