import torch
import torch.nn as nn

from typing import List, Tuple
from . import model_utils
from .model_opts import ModelOpts
from .MVF import SpatialMultiViewFusion, SimpleMultiViewFusion
from .model_utils import apply_ftl_to_feature_maps, make_eye


class FeatureExtractor(nn.Module):
    """Multi-view feature extractor, V1.

    All transform computation lives inside ``forward`` (it contains ops
    that QNN does not support), and the per-view features are fused with
    an attention-based spatial fusion module.
    """

    def __init__(
            self,
            input_size: Tuple[int, int],
            model_opts: ModelOpts,
    ) -> None:
        """
        Args:
            input_size: spatial size of the input images fed to the backbone.
            model_opts: model configuration options.
        """
        super().__init__()
        nc_img_features = model_opts.nImageFeatureChannels

        self._input_img_sizes = input_size
        self._ftl_ratio = float(model_opts.spatialFTLRatio)
        # Canonical focal length used for depth normalization across cameras.
        self._canonical_focal_length = float(model_opts.canonicalFocalLength)

        # Create the image backbone.
        self.backbone, backbone_outshape = model_utils.create_backbone(
            model_opts.network,
            input_size,
            nc_img_features,
        )

        self._backbone_out_feature_sizes: List[int] = backbone_outshape[-2:]
        self.multi_view_fusion = SpatialMultiViewFusion(nc_img_features, 4, 16, 4)

    def forward(
            self,
            images: torch.Tensor,
            extrinsic_matrices: torch.Tensor,
            intrinsic_matrices: torch.Tensor,
            masks: torch.Tensor,
    ) -> torch.Tensor:
        """Extract per-view features, warp them via FTL, and fuse the views.

        Args:
            images: (B, V, C, H, W) input images.
            extrinsic_matrices: (B, V, 4, 4) world-to-camera transforms.
            intrinsic_matrices: (B, V, 3, 3) intrinsics (after cropping).
            masks: (B, V) view-validity mask; entries equal to 1 mark
                valid views.

        Returns:
            The fused multi-view feature tensor produced by the fusion
            module.
        """
        B, V, C, H, W = images.shape

        # 1) Flatten batch and view dimensions so the backbone sees a
        #    plain image batch.
        images_flat = images.reshape(B * V, C, H, W)
        mask_flat = masks.reshape(B * V)

        valid_mask = mask_flat.eq(1)
        valid_images = images_flat[valid_mask]

        # 2) Backbone feature extraction on the valid views only.
        features = self.backbone(valid_images)

        # Scatter back to the full (B*V) batch; invalid views stay zero.
        # Allocate with the backbone output dtype (not the input dtype)
        # so this also works under autocast/mixed precision without a
        # silent cast on assignment.
        _, C_out, H_out, W_out = features.shape
        feats_flat = torch.zeros(
            (B * V, C_out, H_out, W_out),
            dtype=features.dtype,
            device=features.device,
        )
        feats_flat[valid_mask] = features

        # 3) Compute composite transforms for FTL.
        #    NOTE(review): resize-ratio compensation was removed from this
        #    pipeline; the intrinsics are assumed to already account for
        #    any image resizing — confirm against the caller.
        extrinsics_flat = extrinsic_matrices.reshape(-1, 4, 4)
        intrinsics_flat = intrinsic_matrices.reshape(-1, 3, 3)

        # 3.1 Inverse intrinsics embedded in a 4x4 homogeneous matrix.
        K_inv = torch.inverse(intrinsics_flat)
        K4_inv = make_eye(4, device=K_inv.device, dtype=K_inv.dtype)
        K4_inv = K4_inv.unsqueeze(0).repeat(B * V, 1, 1)
        K4_inv[:, :3, :3] = K_inv

        # 3.2 Focal normalization: rescale z so depth is expressed
        #     relative to the canonical focal length.
        f_orig = intrinsics_flat[:, 0, 0]
        scale_z = self._canonical_focal_length / f_orig
        S_z_inv = make_eye(4, device=f_orig.device, dtype=f_orig.dtype)
        S_z_inv = S_z_inv.unsqueeze(0).repeat(B * V, 1, 1)
        S_z_inv[:, 2, 2] = scale_z

        # 3.3 Camera-to-world transform.
        T_cam2world = torch.inverse(extrinsics_flat)

        # Composite transform: cam2world @ S_z_inv @ K4_inv.
        total_xfs = T_cam2world @ S_z_inv @ K4_inv

        # 4) Apply FTL using the composite transforms.
        warped_feats = apply_ftl_to_feature_maps(
            total_xfs,
            feats_flat,
            self._ftl_ratio,
        )

        # Reshape back and zero out invalid views; (B, V, 1, 1, 1)
        # broadcasts against (B, V, C_out, H_out, W_out).
        warped_feats = warped_feats.reshape(B, V, C_out, H_out, W_out)
        warped_feats = warped_feats * masks.view(B, V, 1, 1, 1)

        # 5) Fuse the multi-view features. Call the module itself rather
        #    than .forward() so registered hooks are honored.
        fused = self.multi_view_fusion(
            warped_feats,
            extrinsic_matrices,
            intrinsic_matrices,
            masks,
        )
        return fused


class FeatureExtractorV2(nn.Module):
    """Multi-view feature extractor, V2.

    Operators unsupported by QNN (matrix inverses, transform composition)
    are computed outside the model and passed in as ready-made transforms;
    per-view features are fused with a convolutional fusion module.
    """

    def __init__(
            self,
            input_size: Tuple[int, int],
            model_opts: ModelOpts,
    ) -> None:
        """
        Args:
            input_size: spatial size of the input images fed to the backbone.
            model_opts: model configuration options.
        """
        super().__init__()
        nc_img_features = model_opts.nImageFeatureChannels

        self._input_img_sizes = input_size
        self._ftl_ratio = float(model_opts.spatialFTLRatio)
        # Canonical focal length used for depth normalization across cameras.
        self._canonical_focal_length = float(model_opts.canonicalFocalLength)

        # Create the image backbone.
        self.backbone, backbone_outshape = model_utils.create_backbone(
            model_opts.network,
            input_size,
            nc_img_features,
        )
        self.backbone_out_feature_sizes: List[int] = backbone_outshape[-2:]
        self.multi_view_fusion = SimpleMultiViewFusion(nc_img_features, 4, 3)

    def forward(
            self,
            images: torch.Tensor,
            masks: torch.Tensor,
            total_xfs: torch.Tensor,
            world_to_ref_camera: torch.Tensor,
    ) -> torch.Tensor:
        """Extract, FTL-warp, fuse, and map features back to the reference camera.

        Args:
            images: (B, V, C, H, W) input images.
            masks: per-view validity mask, consumed by the fusion module.
            total_xfs: precomputed composite per-view transforms, reshaped
                to (-1, 4, 4) before use.
            world_to_ref_camera: precomputed inverse transform(s), reshaped
                to (-1, 4, 4); maps the fused features back into the
                reference-camera space.

        Returns:
            The fused feature tensor expressed in the reference-camera space.
        """
        B, V, C, H, W = images.shape

        # 1) Flatten batch and view dimensions for the backbone.
        images_flat = images.reshape(B * V, C, H, W)

        # 2) Backbone feature extraction on all views; view masking is
        #    handled downstream by the fusion module.
        features = self.backbone(images_flat)
        _, C_out, H_out, W_out = features.shape

        # 3) Apply FTL using the externally computed composite transforms.
        warped_feats = apply_ftl_to_feature_maps(
            total_xfs.reshape(-1, 4, 4),
            features,
            self._ftl_ratio,
        )
        warped_feats = warped_feats.reshape(B, V, C_out, H_out, W_out)

        # 4) Fuse the multi-view features. Call the module itself rather
        #    than .forward() so registered hooks are honored.
        fused = self.multi_view_fusion(warped_feats, masks)

        # 5) Apply the inverse transform to map the fused features back
        #    into the reference-camera space.
        warped_back = apply_ftl_to_feature_maps(
            world_to_ref_camera.reshape(-1, 4, 4),
            fused,
            self._ftl_ratio,
        )
        return warped_back
