import os

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from einops import rearrange
from rotary_embedding_torch import RotaryEmbedding
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from transformers import PretrainedConfig, PreTrainedModel

from opensora.acceleration.checkpoint import auto_grad_checkpoint
from opensora.acceleration.communications import gather_forward_split_backward, split_forward_gather_backward
from opensora.acceleration.parallel_states import get_sequence_parallel_group
from opensora.models.layers.blocks import (
    Attention,
    CaptionEmbedder,
    MultiHeadCrossAttention,
    PatchEmbed3D,
    PositionEmbedding2D,
    SeqParallelAttention,
    SeqParallelMultiHeadCrossAttention,
    SizeEmbedder,
    T2IFinalLayer,
    TimestepEmbedder,
    approx_gelu,
    get_layernorm,
    t2i_modulate,
)
from opensora.registry import MODELS
from opensora.utils.ckpt_utils import load_checkpoint


def get_dynamic_size(x, patch_size=(1, 2, 2)):
    """Return the patch-grid size ``(T, H, W)`` for a video latent.

    Each temporal/spatial extent of ``x`` is divided by the matching patch
    size, rounding up (extents that are not multiples of the patch size are
    padded up to the next multiple before dividing).

    Args:
        x: tensor of shape ``(B, C, T, H, W)``.
        patch_size: ``(t, h, w)`` patch extents.

    Returns:
        Tuple ``(T, H, W)`` of patch counts along each axis.
    """
    _, _, t_in, h_in, w_in = x.size()
    # Ceil-division of each extent by its patch size.
    return tuple(
        (extent + patch - 1) // patch
        for extent, patch in zip((t_in, h_in, w_in), patch_size)
    )

def modified_forward(
    dit,
    x,
    timestep,
    y,
    mask=None,
    x_mask=None,
    fps=None,
    height=None,
    width=None,
    dis_layers=(4, 8, 12, 16, 20, 24, 28),
    **kwargs,
):
    """Run ``dit``'s forward pass, returning intermediate block features.

    Mirrors the STDiT-style forward path (patch embedding, timestep/fps and
    caption conditioning, alternating spatial/temporal transformer blocks)
    except that, instead of decoding a final output, it collects the hidden
    state after each 1-indexed block pair listed in ``dis_layers``.

    Args:
        dit: the diffusion transformer used as a feature extractor.
        x: latent video tensor of shape ``[B, C, T, H, W]``.
        timestep: diffusion timesteps, shape ``[B]``.
        y: text conditioning (pre-embedded when ``dit.config.skip_y_embedder``).
        mask: text mask / token lengths.
        x_mask: optional per-frame mask; masked frames get a t=0 embedding.
        height, width: original resolutions used to scale the pos. embedding.
        dis_layers: 1-indexed block pairs after which features are recorded.
            Default is a tuple (fixes the original's mutable list default).
        **kwargs: ignored; accepted for interface compatibility.

    Returns:
        List of feature tensors, one per matched entry of ``dis_layers``,
        each of shape ``[B, T*S, C]`` (S still sharded under seq. parallelism).
    """
    dtype = dit.x_embedder.proj.weight.dtype
    B = x.size(0)
    x = x.to(dtype)
    timestep = timestep.to(dtype)
    y = y.to(dtype)

    # === get pos embed ===
    T, H, W = get_dynamic_size(x)
    S = H * W
    base_size = round(S**0.5)
    # Scale the positional embedding relative to the 512px base resolution.
    resolution_sq = (height[0].item() * width[0].item()) ** 0.5
    scale = resolution_sq / 512
    pos_emb = dit.pos_embed(x, H, W, scale=scale, base_size=base_size)

    # === get timestep embed ===
    t = dit.t_embedder(timestep, dtype=x.dtype)  # [B, C]
    fps = dit.fps_embedder(fps.unsqueeze(1), B)
    t = t + fps
    t_mlp = dit.t_block(t)
    t0 = t0_mlp = None
    if x_mask is not None:
        # Masked frames are conditioned as if at timestep 0.
        t0_timestep = torch.zeros_like(timestep)
        t0 = dit.t_embedder(t0_timestep, dtype=x.dtype)
        t0 = t0 + fps
        t0_mlp = dit.t_block(t0)

    # === get y embed ===
    if dit.config.skip_y_embedder:
        y_lens = mask
        if isinstance(y_lens, torch.Tensor):
            y_lens = y_lens.long().tolist()
    else:
        y, y_lens = dit.encode_text(y, mask)

    # === get x embed ===
    x = dit.x_embedder(x)  # [B, N, C]
    x = rearrange(x, "B (T S) C -> B T S C", T=T, S=S)
    x = x + pos_emb

    # Shard over the sequence dim if sequence parallelism is enabled.
    if dit.enable_sequence_parallelism:
        x = split_forward_gather_backward(x, get_sequence_parallel_group(), dim=2, grad_scale="down")
        S = S // dist.get_world_size(get_sequence_parallel_group())

    x = rearrange(x, "B T S C -> B (T S) C", T=T, S=S)

    # === blocks ===
    # NOTE: under sequence parallelism the collected features are NOT
    # gathered back — they stay sharded along S.
    output_features = []
    for layer_idx, (spatial_block, temporal_block) in enumerate(
        zip(dit.spatial_blocks, dit.temporal_blocks), start=1
    ):
        x = auto_grad_checkpoint(spatial_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)
        x = auto_grad_checkpoint(temporal_block, x, y, t_mlp, y_lens, x_mask, t0_mlp, T, S)
        if layer_idx in dis_layers:
            output_features.append(x)
    return output_features
        
# Copied from https://github.com/G-U-N/Phased-Consistency-Model/blob/5910b03fcf07d8d7d806cbeb75ef0fb51a93ff6c/code/text_to_image_sd15/discriminator_sd15.py#L348C1-L368C17

class DiscriminatorHead(nn.Module):
    """A small 1-D convolutional head that maps features to logits.

    Two Conv1d + GroupNorm + LeakyReLU stages — the second wrapped in a
    residual connection — followed by a 1x1 projection down to
    ``output_channel``. LeakyReLU is used instead of the GELU shown in the
    paper to save memory.
    """

    def __init__(self, input_channel, output_channel=1):
        super().__init__()

        def make_stage():
            # Conv1d(kernel=3, stride=1, pad=1) keeps the length unchanged.
            return nn.Sequential(
                nn.Conv1d(input_channel, input_channel, 3, 1, 1),
                nn.GroupNorm(32, input_channel),
                nn.LeakyReLU(inplace=True),  # LeakyReLU instead of GELU to save memory
            )

        self.conv1 = make_stage()
        self.conv2 = make_stage()
        self.conv_out = nn.Conv1d(input_channel, output_channel, 1, 1, 0)

    def forward(self, x):
        hidden = self.conv1(x)
        hidden = hidden + self.conv2(hidden)  # residual around the second stage
        return self.conv_out(hidden)

class Discriminator(nn.Module):
    """Multi-head hinge-loss discriminator over frozen DiT block features.

    Features are extracted from ``dit`` under ``torch.no_grad`` (the DiT acts
    as a frozen feature extractor); each probed layer's feature is scored by
    ``num_h_per_head`` independent :class:`DiscriminatorHead` modules.
    """

    def __init__(self, dit, num_h_per_head=1):
        super().__init__()
        # One 1280-channel head group per probed layer (matches the default
        # 7-entry ``dis_layers`` of ``modified_forward``).
        adapter_channel_dims = [1280] * 7
        self.dit = dit
        self.num_h_per_head = num_h_per_head
        self.head_num = len(adapter_channel_dims)
        self.heads = nn.ModuleList(
            [
                nn.ModuleList(
                    [DiscriminatorHead(adapter_channel) for _ in range(self.num_h_per_head)]
                )
                for adapter_channel in adapter_channel_dims
            ],
        ).to(dtype=dit.dtype, device=dit.device)

    def device(self):
        # Kept as a plain method (not a property) to preserve the interface.
        return self.dit.device

    def _forward(self, x, timestep, y, **kwargs):
        """Return the flat list of head outputs for latents ``x``."""
        with torch.no_grad():
            features = modified_forward(self.dit, x, timestep, y, **kwargs)
        assert self.head_num == len(features)
        # NOTE(review): features are [B, N, C] while Conv1d heads expect
        # channels at dim 1 — confirm no transpose is needed upstream.
        outputs = []
        for feature, head_group in zip(features, self.heads):
            for head in head_group:
                outputs.append(head(feature))
        return outputs

    def forward(self, flag, *args):
        """Dispatch to ``d_loss`` or ``g_loss`` depending on ``flag``."""
        if flag == "d_loss":
            return self.d_loss(*args)
        if flag == "g_loss":
            return self.g_loss(*args)
        # A real exception instead of `assert 0`: asserts are stripped
        # under `python -O`, which would let bad flags fall through.
        raise ValueError(f"flag '{flag}' not supported")

    def d_loss(self, sample_fake, sample_real, timestep, weight, kwargs):
        """Hinge discriminator loss: push fake scores below -1, real above +1.

        ``kwargs`` is a dict of extra args for ``modified_forward`` (passed
        positionally by ``forward``); ``weight`` broadcasts over the outputs.
        """
        loss = 0.0
        fake_outputs = self._forward(
            sample_fake.detach(), timestep, **kwargs
        )
        real_outputs = self._forward(
            sample_real.detach(), timestep, **kwargs
        )
        for fake_output, real_output in zip(fake_outputs, real_outputs):
            # Average the per-head hinge terms over all heads.
            loss += (
                torch.mean(weight * torch.relu(fake_output.float() + 1))
                + torch.mean(weight * torch.relu(1 - real_output.float()))
            ) / (self.head_num * self.num_h_per_head)
        return loss

    def g_loss(self, sample_fake, timestep, weight, kwargs):
        """Hinge generator loss: push fake scores above +1."""
        loss = 0.0
        fake_outputs = self._forward(sample_fake, timestep, **kwargs)
        for fake_output in fake_outputs:
            loss += torch.mean(weight * torch.relu(1 - fake_output.float())) / (
                self.head_num * self.num_h_per_head
            )
        return loss

@MODELS.register_module("Discriminator_3B/2")
def Discriminator_3B_2(from_pretrained=None, **kwargs):
    """Registry factory for the 3B/2 discriminator.

    Args:
        from_pretrained: optional checkpoint path. Previously accepted but
            silently ignored; now loaded via ``load_checkpoint`` (which was
            imported but unused).
        **kwargs: forwarded to :class:`Discriminator` (must include ``dit``).

    Returns:
        A :class:`Discriminator` instance.
    """
    # Renamed the local from `dist`, which shadowed the module-level
    # `torch.distributed as dist` alias.
    model = Discriminator(**kwargs)
    if from_pretrained is not None:
        load_checkpoint(model, from_pretrained)
    return model
    
# if __name__ == "__main__":
   

#     discriminator = Discriminator(teacher_unet).cuda()

#     sample = torch.randn((1, 4, 64, 64)).cuda()
#     encoder_hidden_states = torch.randn((1, 77, 768)).cuda()
#     timesteps = torch.randn((1,)).cuda()

#     features = discriminator(sample, timesteps, encoder_hidden_states)