from typing import List, Optional, Tuple, Union
import warnings
import copy

import numpy as np
import cv2
import torch
import torch.nn as nn

from mmcv.utils import build_from_cfg
from mmcv.cnn import Linear, bias_init_with_prob
from mmcv.runner import BaseModule, force_fp32
from mmcv.cnn.bricks.registry import (
    ATTENTION,
    PLUGIN_LAYERS,
    POSITIONAL_ENCODING,
    FEEDFORWARD_NETWORK,
    NORM_LAYERS,
)
from mmdet.core import reduce_mean
from mmdet.models import HEADS
from mmdet.core.bbox.builder import BBOX_SAMPLERS, BBOX_CODERS
from mmdet.models import build_loss

from projects.mmdet3d_plugin.datasets.utils import box3d_to_corners
from projects.mmdet3d_plugin.core.box3d import *

from ..attention import gen_sineembed_for_position
from ..blocks import linear_relu_ln
from ..instance_bank import topk
from ..detection3d.detection3d_head import Sparse4DHead

@HEADS.register_module()
class PlanningHead(Sparse4DHead):
    """Ego-trajectory planning head built on top of ``Sparse4DHead``.

    Reuses the parent's construction (sampler, losses, decoder, etc. come
    from the config passed through ``**kwargs``) and overrides only the
    loss computation and post-processing for the planning task.
    """

    def __init__(self, **kwargs):
        """Forward all configuration straight to ``Sparse4DHead``."""
        super(PlanningHead, self).__init__(**kwargs)

    # NOTE: apply_to must be a tuple of argument names; the previous
    # ``("model_outs")`` was a plain string and only worked by accident
    # via mmcv's substring membership test.
    @force_fp32(apply_to=("model_outs",))
    def loss(self, model_outs, data):
        """Compute classification and regression losses per decoder layer.

        Args:
            model_outs (dict): Must contain ``"classification"`` and
                ``"prediction"``, each a sequence with one entry per
                decoder layer.
            data (dict): Ground-truth inputs; ``"gt_ego_fut_trajs"`` and
                ``"gt_ego_fut_masks"`` are consumed by the sampler.

        Returns:
            dict: Loss tensors keyed as
            ``f"{task_prefix}_loss_{cls|reg|yaw}_{decoder_idx}"``.
        """
        cls_scores = model_outs["classification"]
        reg_preds = model_outs["prediction"]
        output = {}
        for decoder_idx, (cls, reg) in enumerate(zip(cls_scores, reg_preds)):
            (
                cls,
                cls_target,
                cls_weight,
                reg_pred,
                reg_target,
                reg_weight,
            ) = self.sampler.sample(
                cls,
                reg,
                data['gt_ego_fut_trajs'],
                data['gt_ego_fut_masks'],
                data,
            )
            # Merge the leading (batch, anchor) dims so the loss modules
            # operate on flat tensors.
            cls = cls.flatten(end_dim=1)
            cls_target = cls_target.flatten(end_dim=1)
            cls_weight = cls_weight.flatten(end_dim=1)
            cls_loss = self.loss_cls(cls, cls_target, weight=cls_weight)

            reg_pred = reg_pred.flatten(end_dim=1)
            reg_target = reg_target.flatten(end_dim=1)
            # Broadcast the per-point weight over the regressed channels.
            reg_weight = reg_weight.flatten(end_dim=1).unsqueeze(-1)

            # Channels 0:2 are the planar trajectory offsets
            # (presumably x/y — TODO confirm against the sampler).
            reg_loss = self.loss_reg(
                reg_pred[..., :2], reg_target[..., :2], weight=reg_weight
            )

            output.update(
                {
                    f"{self.task_prefix}_loss_cls_{decoder_idx}": cls_loss,
                    f"{self.task_prefix}_loss_reg_{decoder_idx}": reg_loss,
                }
            )
            # Channels 2:4 (presumably a yaw encoding — TODO confirm) get
            # their own loss when the prediction carries them.
            if reg_pred.shape[-1] >= 4:
                # NOTE(review): this uses ``self.plan_loss_reg`` whereas
                # the x/y branch above uses ``self.loss_reg`` — confirm
                # the attribute name is intentional and configured.
                reg_loss_yaw = self.plan_loss_reg(
                    reg_pred[..., 2:4], reg_target[..., 2:4], weight=reg_weight
                )
                output.update(
                    {f"{self.task_prefix}_loss_yaw_{decoder_idx}": reg_loss_yaw}
                )

        return output

    @force_fp32(apply_to=("model_outs",))
    def post_process(self, model_outs, data, output_idx=-1):
        """Decode planning results via the configured decoder.

        Args:
            model_outs (dict): Raw head outputs.
            data (dict): Accompanying input/ground-truth data.
            output_idx (int): Which decoder layer's output to decode;
                defaults to the last (-1).
        """
        return self.decoder.decode(
            model_outs,
            data,
            output_idx=output_idx,
        )