from typing import List, Optional, Tuple, Union
import warnings
import copy

import numpy as np
import cv2
import torch
import torch.nn as nn

from mmcv.utils import build_from_cfg
from mmcv.cnn import Linear, bias_init_with_prob
from mmcv.runner import BaseModule, force_fp32
from mmcv.cnn.bricks.registry import (
    ATTENTION,
    PLUGIN_LAYERS,
    POSITIONAL_ENCODING,
    FEEDFORWARD_NETWORK,
    NORM_LAYERS,
)
from mmdet.core import reduce_mean
from mmdet.models import HEADS
from mmdet.core.bbox.builder import BBOX_SAMPLERS, BBOX_CODERS
from mmdet.models import build_loss

from projects.mmdet3d_plugin.datasets.utils import box3d_to_corners
from projects.mmdet3d_plugin.core.box3d import *

from ..attention import gen_sineembed_for_position
from ..blocks import linear_relu_ln
from ..instance_bank import topk


@HEADS.register_module()
class EgoHead(BaseModule):
    """Head that regresses ego-vehicle status values from a feature map.

    The incoming feature map is downsampled by a small conv encoder,
    average-pooled to one vector per sample, and projected to ``out_dims``
    status values by a 1x1-conv decoder.

    Args:
        ego_from (str): Source view of the feature map; one of
            ``'front'`` or ``'bev'``.
        embed_dims (int): Channel width of the incoming feature map.
        out_dims (int): Number of predicted ego-status values.
        feature_map_scale (tuple[int, int]): Spatial size (H, W) of the
            feature map. The average-pool kernel is half of each dimension,
            matching the stride-2 conv in the encoder.
        init_cfg (dict): Weight-initialization config forwarded to
            ``BaseModule`` (the original accepted it but never passed it on,
            so the declared Xavier init was silently ignored).
        loss_ego_status (dict): Loss config, built via ``build_loss``.
        decoder (dict | None): Optional coder config (``BBOX_CODERS``
            registry) used by :meth:`post_process`. The original dropped this
            argument, leaving ``self.decoder`` undefined.
    """

    def __init__(
        self,
        ego_from='front',
        embed_dims=256,
        out_dims=12,
        feature_map_scale=(128, 128),
        init_cfg=dict(
            type='Xavier', layer='Conv2d', distribution='uniform'),
        loss_ego_status=None,
        decoder=None,
    ):
        # Forward init_cfg so BaseModule actually applies the weight init.
        super(EgoHead, self).__init__(init_cfg=init_cfg)
        assert ego_from in ['front', 'bev']
        self.ego_from = ego_from
        # Pool kernel covers the full post-stride-2 spatial extent, so the
        # encoder output is 1x1 per sample.
        kernel_size = tuple(int(x / 2) for x in feature_map_scale)
        self.ego_encoder = nn.Sequential(
            nn.Conv2d(embed_dims, embed_dims, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(embed_dims),
            nn.Conv2d(embed_dims, embed_dims, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(embed_dims),
            nn.ReLU(),
            nn.AvgPool2d(kernel_size),
        )
        self.ego_decoder = nn.Sequential(
            nn.Conv2d(embed_dims, embed_dims, 1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(embed_dims),
            nn.Conv2d(embed_dims, out_dims, 1, stride=1, padding=0),
        )
        self.loss_ego_status = build_loss(loss_ego_status)
        # Fix: the original never stored the decoder, so post_process could
        # not work. Keep it optional for configs that never call post_process.
        self.decoder = (
            build_from_cfg(decoder, BBOX_CODERS) if decoder is not None else None
        )

    def forward(
        self,
        feature_maps: Union[torch.Tensor, List],
        metas: dict,
    ):
        """Predict ego status from ``feature_maps``.

        Args:
            feature_maps: A 4D feature tensor, or a list whose first entry
                is used (only the highest-resolution level is consumed).
            metas: Unused here; kept for interface parity with other heads.

        Returns:
            dict: ``{"status": [status_tensor]}`` — a single-element list so
            :meth:`loss` can iterate per-stage predictions uniformly.
        """
        if isinstance(feature_maps, list):
            feature_maps = feature_maps[0]
        ego_features = self.ego_encoder(feature_maps)
        ego_status = self.ego_decoder(ego_features)
        return {"status": [ego_status]}

    def loss(
        self,
        model_outs,
        data,
    ):
        """Compute the ego-status regression loss per prediction stage.

        Args:
            model_outs (dict): Output of :meth:`forward`; ``"status"`` holds
                a list of per-stage prediction tensors.
            data (dict): Ground truth; ``data['ego_status']`` is the target.

        Returns:
            dict: ``loss_ego_status_{i}`` for each stage ``i``.
        """
        output = {}
        for decoder_idx, status in enumerate(model_outs["status"]):
            # NOTE(review): squeeze() drops *all* singleton dims, which also
            # removes the batch dim when batch_size == 1 — confirm the target
            # shape still lines up in that case.
            status_loss = self.loss_ego_status(
                status.squeeze(), data['ego_status'])
            output[f"loss_ego_status_{decoder_idx}"] = status_loss
        return output

    # apply_to must be a tuple of *parameter names*; the original passed the
    # bare string ("model_outs"), which is not a tuple and does not match
    # any parameter of this method.
    @force_fp32(apply_to=("output",))
    def post_process(
        self,
        output,
        data,
    ):
        """Decode raw status predictions into the final ego result.

        Args:
            output (dict): Output of :meth:`forward`.
            data (dict): Unused; kept for interface parity.

        Returns:
            The decoder's ego result.
        """
        # Fix: the original referenced an undefined name ``det_output`` and a
        # key ("ego_status") that forward() never emits; forward() stores the
        # prediction list under "status". TODO(review): confirm whether
        # decode expects the full list or only the last stage.
        return self.decoder.decode(
            output["status"],
        )