import re
import torch
import torch.nn as nn

from typing import List, Tuple
from . import backbone_resnet as resnet


# torch.eye is not supported by QNN, so build the identity matrix by hand
def make_eye(n: int,
             dtype: torch.dtype = torch.float32,
             device: torch.device = torch.device("cpu")) -> torch.Tensor:
    """Build an n x n identity matrix without calling torch.eye (QNN-friendly)."""
    idx = torch.arange(n, device=device)
    # Comparing a column of indices against a row of indices broadcasts to a
    # boolean matrix that is True exactly on the diagonal; cast to the dtype.
    return (idx.unsqueeze(-1) == idx.unsqueeze(0)).to(dtype)


def create_backbone(
        arch_name: str,
        input_size: Tuple[int, int],
        n_out_channels: int,
) -> Tuple[nn.Module, List[int]]:
    """
    Build a resnet-based backbone from an arch string like "resnet18-f32".

    Args:
        arch_name: "<resnet arch>-f<start planes>"; the arch part is passed
            straight to resnet.create_model so new architectures need no
            hard-coding here.
        input_size: (H, W) of the single-channel input image.
        n_out_channels: channels of the final 1x1 projection.
    Returns:
        (backbone module, output shape as [n_out_channels, H', W']).
    """
    assert arch_name.startswith("resnet")
    arch, planes_str = arch_name.split("-f")
    start_planes = int(planes_str)

    # Stem: conv + BN + ReLU, then a 2x downsample before the resnet body.
    stem = nn.Sequential(
        nn.Conv2d(1, start_planes, kernel_size=3, padding=1),
        nn.BatchNorm2d(start_planes),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=2, stride=2),
    )
    halved_size = (input_size[0] // 2, input_size[1] // 2)

    resnet_model = resnet.create_model(arch, stem, halved_size, start_planes)

    # The base network ends in a ReLU, so its output has no negative values;
    # add a 1x1 conv so the backbone output is not clamped to [0, inf).
    proj_layer = nn.Conv2d(
        resnet_model.outshape[0], n_out_channels, kernel_size=1, padding=0
    )
    backbone = nn.Sequential(resnet_model, proj_layer)
    output_shape = [n_out_channels, *resnet_model.outshape[1:]]

    return backbone, output_shape


def apply_ftl_to_feature_maps(
        xfs: torch.Tensor,
        feature_maps: torch.Tensor,
        ftl_ratio: float,
) -> torch.Tensor:
    """
    Transform feature maps to 3D points and back using FTL
    (feature transform layer).

    The first ``nc_ftl = int(n_channels * ftl_ratio)`` channels are treated
    as stacked 3D coordinates (via reshape, channels [0, nc_ftl/3) are the
    x coordinate, the next third y, the last third z), rigidly transformed
    by ``xfs``, and concatenated back with the untouched remaining channels.

    Args:
        xfs: (n_images, 4, 4) affine transformation matrices
        feature_maps: (n_images, n_channels, H, W) input feature maps
        ftl_ratio: ratio of channels to apply FTL on, in [0, 1]
    Returns:
        Transformed feature maps of same shape
    """
    assert 0 <= ftl_ratio <= 1

    n_images, n_channels, H, W = feature_maps.shape
    # number of FTL channels (must be divisible by 3)
    nc_ftl = int(n_channels * ftl_ratio)
    # Nothing to transform: ftl_ratio is 0 or small enough that it rounds
    # down to zero channels. (Previously a tiny positive ratio crashed in
    # the reshape below: reshape(n, 3, -1) is ambiguous on an empty tensor.)
    if nc_ftl == 0:
        return feature_maps
    assert nc_ftl % 3 == 0, "FTL channels must be divisible by 3"

    # Split out FTL portion
    ftl_maps = feature_maps[:, :nc_ftl]  # (n_images, nc_ftl, H, W)
    # Treat FTL channels as 3D coords
    point_feats = ftl_maps.reshape(n_images, 3, -1)  # (n_images, 3, H*W*(nc_ftl/3))

    # Get rotation and translation
    R = xfs[:, :3, :3]  # (n_images, 3, 3)
    t = xfs[:, :3, 3].unsqueeze(-1)  # (n_images, 3, 1)

    # Apply transformation
    transformed = torch.matmul(R, point_feats) + t  # (n_images, 3, *)

    # Reshape back to feature maps
    ftl_transformed = transformed.reshape(ftl_maps.shape)

    # Concatenate back with remaining channels
    if nc_ftl < n_channels:
        out = torch.cat([ftl_transformed, feature_maps[:, nc_ftl:]], dim=1)
    else:
        out = ftl_transformed
    return out


def create_pose_regression_layers(
        n_in_channels: int,
        n_blocks: int,
        n_out_channels: int,
) -> nn.Module:
    """
    Stack of residual blocks followed by a 1x1 channel projection and a
    global average pool down to a (1, 1) spatial output.
    """
    layers: List[nn.Module] = [
        resnet.BasicBlock(n_in_channels, n_in_channels) for _ in range(n_blocks)
    ]
    layers.append(nn.Conv2d(n_in_channels, n_out_channels, kernel_size=1))
    layers.append(nn.AdaptiveAvgPool2d((1, 1)))
    return nn.Sequential(*layers)


def procrustes_align(
        from_points: torch.Tensor,
        to_points: torch.Tensor,
) -> torch.Tensor:
    """
    Find the rigid transform (rotation + translation) that best maps
    `from_points` onto `to_points` in the least-squares sense
    (orthogonal Procrustes / Kabsch via SVD).

    Inputs have same shape `(batch_size, n_points, 3)`.
    Within each sample of the batch, `from_points` and `to_points`
    implicitly correspond to each other along dim=1.

    Returns:
        Homogeneous transformation matrices of shape `(batch_size, 4, 4)`:
        `[:, :3, :3]` is the rotation, `[:, :3, 3]` the translation.
    """
    device = from_points.device
    batch_size = from_points.shape[0]

    # Center both point clouds on their per-sample means.
    from_mean = from_points.mean(dim=1)
    to_mean = to_points.mean(dim=1)
    from_centered = from_points - from_mean.reshape(-1, 1, 3)
    to_centered = to_points - to_mean.reshape(-1, 1, 3)

    # Cross-covariance between the centered clouds.
    outer_prod = torch.matmul(torch.transpose(from_centered, 1, 2), to_centered)

    u, _, v = outer_prod.svd()

    # If det(V U^T) is negative the closest orthogonal matrix is a
    # reflection; flip the sign of the last singular direction via `w`
    # so the result is a proper rotation (det = +1).
    v_m_ut = torch.matmul(v, torch.transpose(u, 1, 2))
    w = make_eye(3, device=device).unsqueeze(0).repeat(batch_size, 1, 1)
    w[:, 2, 2] = torch.det(v_m_ut)

    xfs = make_eye(4, device=device).unsqueeze(0).repeat(batch_size, 1, 1)
    xfs[:, 0:3, 0:3] = torch.matmul(torch.matmul(v, w), torch.transpose(u, 1, 2))
    # squeeze(-1) (not squeeze()) so a batch of size 1 keeps its batch dim.
    xfs[:, 0:3, 3] = (
        to_mean
        - torch.matmul(xfs[:, 0:3, 0:3], from_mean.unsqueeze(-1)).squeeze(-1)
    )

    return xfs
