import torch
from torch import nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange

from compressai.models import JointAutoregressiveHierarchicalPriors
from compressai.models.sensetime import ResidualBottleneckBlock, conv, deconv
from compressai.layers import AttentionBlock

from .transformers import TransformerEncoder, EncoderBlock, RmsNorm, PatchEmbed


def make_res_units(N, layers = 3):
    """Build a sequential stack of `layers` residual bottleneck blocks, each mapping N -> N channels."""
    blocks = [ResidualBottleneckBlock(N, N) for _ in range(layers)]
    return nn.Sequential(*blocks)


class FusionBlock(nn.Module):
    """Cross-view feature fusion via a transformer encoder.

    Per-view feature maps are flattened into one joint token sequence,
    summed with ray (camera geometry) embeddings, mixed by self-attention
    across all views, and projected back to per-view feature maps.
    """

    def __init__(self, 
        n_views = 4,
        depth = 5,
        d_feat = 192, d_model = 512, num_heads = 8, 
        qkv_bias = True, qk_norm = True, 
        mlp_bias = True
    ):
        super().__init__()

        # Tokenize feature maps: one d_model token per (view, h, w) position.
        self.feat_embed = nn.Sequential(
            Rearrange("b v c h w -> b (v h w) c"),
            RmsNorm(d_feat),
            nn.Linear(d_feat, d_model),
            RmsNorm(d_model),
        )

        # Patch-embed the 6-channel ray maps with a 16x16 conv.
        # NOTE(review): the stride-16 patching assumes the ray maps' spatial
        # size is 16x that of the feature maps so token counts line up with
        # feat_embed's -- confirm against callers.
        self.ray_embed = nn.Sequential(
            Rearrange("b v c h w -> (b v) c h w "), 
            nn.Conv2d(6, d_model, kernel_size = 16, stride = 16),
            Rearrange("(b v) c h w -> b (v h w) c", v = n_views),
            RmsNorm(d_model),
        )

        # Project fused tokens back down to the feature width.
        self.reproj = nn.Sequential(
            RmsNorm(d_model),
            nn.Linear(d_model, d_feat),
        )

        self.encoder = TransformerEncoder(
            EncoderBlock(
                d_model, num_heads, 
                qkv_bias = qkv_bias, qk_norm = qk_norm, 
                mlp_bias = mlp_bias,
            ), 
            depth,
        )

    def forward(self, x, rays):
        """
        x:    [B, V, C, H, W] per-view feature maps
        rays: [B, V, 6, H', W'] ray maps (presumably H' = 16*H, W' = 16*W
              given the 16x16 patch embedding -- TODO confirm)
        returns [B, V, d_feat, H, W]
        """
        B, V, _, H, W = x.shape

        tokens = self.feat_embed(x) + self.ray_embed(rays)
        tokens = self.reproj(self.encoder(tokens))

        # [B, V*H*W, d_feat] -> [B, V, d_feat, H, W]
        return tokens.view(B, V, H, W, -1).permute(0, 1, 4, 2, 3)


class RayFIC(JointAutoregressiveHierarchicalPriors):
    """Multi-view learned image codec with ray-conditioned cross-view fusion.

    Each view is encoded independently by a convolutional analysis transform
    (g_a, 16x downsampling), then a FusionBlock mixes information across
    views before the autoregressive entropy model, and again before the
    synthesis transform (g_s).
    """

    def __init__(self, 
        N = 192, 
        n_views = 4, depth = 5, d_model = 512, n_heads = 8
    ):
        super().__init__(N = N, M = N)
        self.n_views = n_views

        # Per-view analysis transform: four stride-2 convs -> 16x downsampling.
        self.g_a = nn.Sequential(
            conv(3, N, kernel_size = 5, stride = 2),
            make_res_units(N, 3),

            conv(N, N, kernel_size = 5, stride = 2),
            make_res_units(N, 3),
            AttentionBlock(N),

            conv(N, N, kernel_size = 5, stride = 2),
            make_res_units(N, 3),

            conv(N, N, kernel_size = 5, stride = 2),
        )
        # Cross-view fusion on the encoder side.
        self.fusion_enc = FusionBlock(
            n_views = n_views,
            depth = depth,
            d_feat = N, d_model = d_model, num_heads = n_heads
        )

        # Per-view synthesis transform, mirroring g_a.
        self.g_s = nn.Sequential(
            deconv(N, N, kernel_size = 5, stride = 2),
            make_res_units(N, 3),
            deconv(N, N, kernel_size = 5, stride = 2),
            AttentionBlock(N),
            make_res_units(N, 3),
            deconv(N, N, kernel_size = 5, stride = 2),
            make_res_units(N, 3),
            deconv(N, 3, kernel_size = 5, stride = 2),
        )
        # Cross-view fusion on the decoder side.
        self.fusion_dec = FusionBlock(
            n_views = n_views,
            depth = depth,
            d_feat = N, d_model = d_model, num_heads = n_heads
        )

    def forward(self, x, rays):
        """
        x:    [B, V, 3, H, W] multi-view images
        rays: [B, V, 6, H, W] per-pixel ray maps for each view
        returns dict with reconstructed images ("x_hat"), fused latents
        ("y_hat") and entropy-model likelihoods ("likelihoods"), each with
        the views split back out into a [B, V, ...] leading shape.
        """
        B, V, _, H, W = x.shape
        # Explicit validation: `assert` is stripped under `python -O`.
        if V != self.n_views:
            raise ValueError(f"expected {self.n_views} views, got {V}")

        # encoder
        x = merge_views(x)  # [B * V, C, H, W]
        x = self.g_a(x)

        x = split_views(x, V)  # [B, V, C, H, W]
        y = self.fusion_enc(x, rays)
        y = merge_views(y)  # [B, V, C, H, W] -> [B * V, C, H, W]

        # entropy models
        z = self.h_a(y)
        z_hat, z_likelihoods = self.entropy_bottleneck(z)
        params = self.h_s(z_hat)

        # Additive-noise proxy during training, hard rounding at eval time.
        y_hat = self.gaussian_conditional.quantize(
            y, "noise" if self.training else "dequantize"
        )
        ctx_params = self.context_prediction(y_hat)
        gaussian_params = self.entropy_parameters(
            torch.cat((params, ctx_params), dim=1)
        )
        scales_hat, means_hat = gaussian_params.chunk(2, 1)
        _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)

        # decoder
        y_hat = split_views(y_hat, V)
        y_hat = self.fusion_dec(y_hat, rays)
        y_hat = merge_views(y_hat)
        x_hat = self.g_s(y_hat)

        # output: restore the [B, V, ...] view axis everywhere
        x_hat = split_views(x_hat, V)
        y_hat = split_views(y_hat, V)
        y_likelihoods = split_views(y_likelihoods, V)
        z_likelihoods = split_views(z_likelihoods, V)

        return {
            "x_hat": x_hat,
            "y_hat": y_hat,
            "likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
        }


def split_views(x, v):
    """Split a merged-view batch [B*V, C, H, W] back into [B, V, C, H, W].

    Uses ``reshape`` rather than ``view`` so that non-contiguous inputs
    (e.g. results of a ``permute``) are handled transparently; for
    contiguous tensors the two are equivalent and no copy is made.
    """
    bv, c, h, w = x.shape
    b = bv // v

    return x.reshape(b, v, c, h, w)

def merge_views(x):
    """Fold the view axis into the batch: [B, V, C, H, W] -> [B*V, C, H, W].

    Uses ``reshape`` rather than ``view``: FusionBlock.forward returns a
    ``permute``d (non-contiguous) tensor, on which ``view`` raises a
    RuntimeError. ``reshape`` copies only when required and is otherwise
    identical.
    """
    b, v, c, h, w = x.shape

    return x.reshape(b * v, c, h, w)


