# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
import torch.distributed
from diffusers import CogView4Pipeline
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.utils import is_torch_xla_available
from diffusers.pipelines.cogview4.pipeline_output import CogView4PipelineOutput
from diffusers.pipelines.cogview4.pipeline_cogview4 import retrieve_timesteps, calculate_shift

from xfuser.config import EngineConfig, InputConfig
from xfuser.core.distributed import (
    get_cfg_group,
    get_classifier_free_guidance_world_size,
    get_pipeline_parallel_world_size,
    get_runtime_state,
    get_pp_group,
    get_sequence_parallel_world_size,
    get_sequence_parallel_rank,
    get_sp_group,
    is_pipeline_first_stage,
    is_pipeline_last_stage,
    is_dp_last_group,
    get_world_group,
    get_vae_parallel_group,
    get_dit_world_size,
)
from xfuser.core.distributed.group_coordinator import GroupCoordinator
from .base_pipeline import xFuserPipelineBaseWrapper
from .register import xFuserPipelineWrapperRegister
from torchao.quantization import int8_weight_only, quantize_
# from optimum.quanto import freeze, qint8, quantize
from transformers import GlmModel
from diffusers.models import CogView4Transformer2DModel, AutoencoderKL

# Record once whether a torch-XLA runtime is present; import the XLA model
# helpers only when it is, so non-TPU environments never touch torch_xla.
XLA_AVAILABLE = is_torch_xla_available()

if XLA_AVAILABLE:
    import torch_xla.core.xla_model as xm


@xFuserPipelineWrapperRegister.register(CogView4Pipeline)
class xFuserCogView4Pipeline(xFuserPipelineBaseWrapper):
    """xDiT parallel wrapper around diffusers' ``CogView4Pipeline``.

    Keeps the original pipeline's call signature while distributing the
    denoising loop across the classifier-free-guidance (CFG) group, the
    sequence-parallel (SP) group, and the data-parallel groups managed by
    :func:`get_runtime_state`.  Registered so that
    ``xFuserPipelineWrapperRegister`` resolves ``CogView4Pipeline`` to this
    wrapper.
    """

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
        engine_config: EngineConfig,
        quantize_8bit: bool = False,
        return_org_pipeline: bool = False,
        **kwargs,
    ):
        """Build a (optionally int8-quantized) CogView4 pipeline and wrap it.

        The text encoder, transformer and VAE are loaded individually so they
        can be quantized *before* ``CogView4Pipeline.from_pretrained``
        assembles them.

        Args:
            pretrained_model_name_or_path: Local checkpoint directory.
            engine_config: xfuser engine/parallelism configuration.
            quantize_8bit: If True, quantize text-encoder and transformer
                weights to int8 via torchao (VAE left in full precision).
            return_org_pipeline: If True, return the plain diffusers pipeline
                instead of the xfuser wrapper.
            **kwargs: Forwarded to ``CogView4Pipeline.from_pretrained``;
                ``torch_dtype`` is popped and applied to each sub-model.
        """
        dtype = kwargs.pop("torch_dtype", None)
        print(f"torch_dtype: {dtype}")
        # NOTE(review): the `path + "/subfolder"` pattern only works for local
        # checkout directories, not Hub model ids (those would need the
        # `subfolder=` argument) — confirm callers always pass a local path.
        text_encoder = GlmModel.from_pretrained(
            pretrained_model_name_or_path + "/text_encoder",
            torch_dtype=dtype
        )
        transformer = CogView4Transformer2DModel.from_pretrained(
            pretrained_model_name_or_path + "/transformer",
            torch_dtype=dtype
        )
        vae = AutoencoderKL.from_pretrained(
            pretrained_model_name_or_path + "/vae",
            torch_dtype=dtype
        )

        if quantize_8bit:
            print("quantize weight to 8bit")
            # torchao in-place weight-only int8 quantization; the VAE is kept
            # unquantized (decode quality is sensitive to its precision).
            quantize_(text_encoder, int8_weight_only())
            quantize_(transformer, int8_weight_only())
            # quantize(text_encoder, weights=qint8)
            # freeze(text_encoder)
            # quantize(transformer, weights=qint8)
            # freeze(transformer)
            # quantize(vae, weights=qint8)
            # freeze(vae)
            print("quantize weight to 8bit done")

        pipeline = CogView4Pipeline.from_pretrained(pretrained_model_name_or_path, 
                                                    text_encoder=text_encoder,
                                                    transformer=transformer,
                                                    vae = vae,
                                                    **kwargs)
        
        if return_org_pipeline:
            return pipeline
        return cls(pipeline, engine_config)

    @property
    def guidance_scale(self):
        # Set per-call in __call__; cached so helpers read a stable value.
        return self._guidance_scale

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        # Number of denoising timesteps resolved by retrieve_timesteps().
        return self._num_timesteps

    @property
    def interrupt(self):
        # When True the denoising loop skips remaining steps.
        return self._interrupt

    @property
    def attention_kwargs(self):
        # Extra kwargs forwarded to the transformer for the conditional pass.
        return self._attention_kwargs
    
    @property
    def cfg_attention_kwargs(self):
        # Extra kwargs forwarded to the transformer for the unconditional
        # (negative-prompt) pass.
        return self._cfg_attention_kwargs
    
    def rope_warpper(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Thin wrapper over the transformer's rotary-embedding computation so
        # RoPE can be precomputed once, before latents are sharded for SP.
        # NOTE(review): name is a typo for "rope_wrapper" — kept for
        # backward compatibility with any external callers.
        return self.transformer.rope(hidden_states)

    @torch.no_grad()
    @xFuserPipelineBaseWrapper.enable_data_parallel
    @xFuserPipelineBaseWrapper.check_to_use_naive_forward
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 5.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        output_type: str = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 1024,
        use_taylorseer: bool = False,
    ) -> Union[CogView4PipelineOutput, Tuple]:
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. If not provided, it is set to 1024.
            width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. If not provided it is set to 1024.
            num_inference_steps (`int`, *optional*, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            sigmas (`List[float]`, *optional*):
                Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
                their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
                will be used.
            guidance_scale (`float`, *optional*, defaults to `5.0`):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to `1`):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that calls at the end of each denoising steps during the inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `1024`):
                Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results.
            use_taylorseer (`bool`, defaults to `False`):
                Forwarded to the transformer on every step; enables the
                TaylorSeer feature-caching path when the transformer supports it.

        Examples:

        Returns:
            [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] or `tuple`:
            [`~pipelines.cogview4.pipeline_CogView4.CogView4PipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
            On ranks that are not in the last data-parallel group, `None` is returned instead.
        """

        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = (height, width)

        # Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        # Cache per-call state read back through the properties above.
        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._cfg_attention_kwargs = attention_kwargs
        self._current_timestep = None
        self._interrupt = False

        if self.attention_kwargs is None:
            self._attention_kwargs = {}

        if self.cfg_attention_kwargs is None:
            self._cfg_attention_kwargs = {}

        # Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        #! ---------------------------------------- ADDED BELOW (compute runtime metadata) ----------------------------------------
        # * set runtime state input parameters
        # Text embeddings are only sharded across SP when there is no pipeline
        # parallelism (PP ranks each need the full text sequence).
        get_runtime_state().set_input_parameters(
            height=height,
            width=width,
            batch_size=batch_size,
            num_inference_steps=num_inference_steps,
            max_condition_sequence_length=max_sequence_length,
            split_text_embed_in_sp=get_pipeline_parallel_world_size() == 1,
        )
        #! ---------------------------------------- ADDED ABOVE ----------------------------------------

        # Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            self.do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )

        # With two CFG ranks each rank carries one guidance branch, so pack
        # the negative/positive embeddings into the per-rank batch.
        if self.do_classifier_free_guidance and get_classifier_free_guidance_world_size() == 2:
            prompt_embeds = self._process_cfg_split_batch(negative_prompt_embeds, prompt_embeds)

        # Prepare latents
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            torch.float32,
            device,
            generator,
            latents,
        )

        # Prepare additional timestep conditions
        original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype, device=device)
        target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=device)
        crops_coords_top_left = torch.tensor([crops_coords_top_left], dtype=prompt_embeds.dtype, device=device)

        original_size = original_size.repeat(batch_size * num_images_per_prompt, 1)
        target_size = target_size.repeat(batch_size * num_images_per_prompt, 1)
        crops_coords_top_left = crops_coords_top_left.repeat(batch_size * num_images_per_prompt, 1)

        # Prepare timesteps
        image_seq_len = ((height // self.vae_scale_factor) * (width // self.vae_scale_factor)) // (
            self.transformer.config.patch_size**2
        )
        # Default schedule: linearly spaced from num_train_timesteps down to 1.
        timesteps = (
            np.linspace(self.scheduler.config.num_train_timesteps, 1.0, num_inference_steps)
            if timesteps is None
            else np.array(timesteps)
        )
        timesteps = timesteps.astype(np.int64).astype(np.float32)
        sigmas = timesteps / self.scheduler.config.num_train_timesteps if sigmas is None else sigmas
        # Resolution-dependent timestep shift (flow-matching `mu`), scaled by
        # the image token count relative to the scheduler's base sequence length.
        mu = calculate_shift(
            image_seq_len,
            self.scheduler.config.get("base_image_seq_len", 256),
            self.scheduler.config.get("base_shift", 0.25),
            self.scheduler.config.get("max_shift", 0.75),
        )
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas, mu=mu
        )
        self._num_timesteps = len(timesteps)

        # Denoising loop
        transformer_dtype = self.transformer.dtype
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        
        # new RoPE
        # Compute rotary embeddings on the full latents once, then shard
        # latents / prompt embeds / RoPE tables for sequence parallelism.
        image_rotary_emb = self.rope_warpper(latents.to(transformer_dtype))
        latents, prompt_embeds, image_rotary_emb = self._init_sync_pipeline(latents, prompt_embeds, image_rotary_emb)

        #! ---------------------------------------- MODIFIED BELOW ----------------------------------------
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t
                latent_model_input = latents.to(transformer_dtype)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latents.shape[0])
                noise_pred_cond = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    original_size=original_size,
                    target_size=target_size,
                    crop_coords=crops_coords_top_left,
                    attention_kwargs=self.attention_kwargs,
                    image_rotary_emb = image_rotary_emb,
                    use_taylorseer = use_taylorseer,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    # noise_pred = self.transformer(
                    #     hidden_states=latent_model_input,
                    #     encoder_hidden_states=negative_prompt_embeds,
                    #     timestep=timestep,
                    #     original_size=original_size,
                    #     target_size=target_size,
                    #     crop_coords=crops_coords_top_left,
                    #     attention_kwargs=attention_kwargs,
                    #     return_dict=False,
                    # )[0]

                    if get_classifier_free_guidance_world_size() == 1:
                        # Single CFG rank: run a second forward pass for the
                        # unconditional (negative-prompt) branch locally.
                        # noise_pred_uncond, noise_pred_cond = noise_pred_cond.chunk(2)
                        noise_pred_uncond = self.transformer(
                            hidden_states=latent_model_input,
                            encoder_hidden_states=negative_prompt_embeds,
                            timestep=timestep,
                            original_size=original_size,
                            target_size=target_size,
                            crop_coords=crops_coords_top_left,
                            attention_kwargs=self.cfg_attention_kwargs,
                            image_rotary_emb = image_rotary_emb,
                            use_taylorseer = use_taylorseer,
                            return_dict=False,
                        )[0]
                    elif get_classifier_free_guidance_world_size() == 2:
                        # Two CFG ranks: each computed one branch; exchange
                        # them so both ranks hold (uncond, cond).
                        noise_pred_uncond, noise_pred_cond = get_cfg_group().all_gather(
                            noise_pred_cond, separate_tensors=True
                        )

                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_cond - noise_pred_uncond)
                else:
                    noise_pred = noise_pred_cond

                latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    # NOTE(review): the "timestep" argument passed here is
                    # `self.scheduler.sigmas[i]`, not `t` — confirm this
                    # matches what registered callbacks expect.
                    callback_outputs = callback_on_step_end(self, i, self.scheduler.sigmas[i], callback_kwargs)
                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        # 8. Decode latents
        #! ---------------------------------------- ADD BELOW ----------------------------------------
        # Re-assemble the full latent sequence from the SP shards before decoding.
        if get_sequence_parallel_world_size() > 1:
            latents = get_sp_group().all_gather(latents, dim=-2)
            
        # Only the last data-parallel group decodes and returns images; all
        # other ranks return None.
        image = None
        if self.is_dp_last_group():
            if not output_type == "latent":
                latents = latents.to(self.vae.dtype) / self.vae.config.scaling_factor
                image = self.vae.decode(latents, return_dict=False, generator=generator)[0]
                image = self.image_processor.postprocess(image, output_type=output_type)
            else:
                image = latents

            # Offload all models
            self.maybe_free_model_hooks()

            if not return_dict:
                return (image,)

            return CogView4PipelineOutput(images=image)
        else:
            return image
        #! ---------------------------------------- ADD ABOVE ----------------------------------------

    def _init_sync_pipeline(
        self,
        latents: torch.Tensor,
        prompt_embeds: torch.Tensor,
        image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ):
        """Shard per-rank inputs for the synchronous (non-PP) pipeline.

        The base class shards ``latents`` along the sequence dimension; this
        override additionally shards ``prompt_embeds`` across the SP group
        (only when the text sequence length divides evenly — otherwise text
        sharding is disabled in the runtime state) and slices the cos/sin
        RoPE tables to this rank's image-token ranges.

        Returns:
            Tuple of (sharded latents, sharded prompt_embeds, sliced RoPE pair).
        """
        latents = super()._init_sync_pipeline(latents)

        if get_runtime_state().split_text_embed_in_sp:
            if prompt_embeds.shape[-2] % get_sequence_parallel_world_size() == 0:
                prompt_embeds = torch.chunk(prompt_embeds, get_sequence_parallel_world_size(), dim=-2)[
                    get_sequence_parallel_rank()
                ]
            else:
                # Uneven split: fall back to replicating the text embeddings
                # on every SP rank.
                get_runtime_state().split_text_embed_in_sp = False

        if get_runtime_state().split_text_embed_in_sp:
            if image_rotary_emb is not None:
                # Keep only the RoPE rows for the image-token ranges owned by
                # this rank (element 0: cos table, element 1: sin table).
                image_rotary_emb = (
                    torch.cat(
                        [
                            image_rotary_emb[0][start_token_idx:end_token_idx, ...]
                            for start_token_idx, end_token_idx in get_runtime_state().pp_patches_token_start_end_idx_global
                        ],
                        dim=0,
                    ),
                    torch.cat(
                        [
                            image_rotary_emb[1][start_token_idx:end_token_idx, ...]
                            for start_token_idx, end_token_idx in get_runtime_state().pp_patches_token_start_end_idx_global
                        ],
                        dim=0,
                    ),
                )
            
        return latents, prompt_embeds, image_rotary_emb