import sys
import numpy as np
import math
import cv2

import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.nn.init import normal_

from mmengine.model import bias_init_with_prob
# from mmdet.models.builder import build_loss
from mmdet.registry import MODELS
# TODO:
# from builder import build_transformer

from mmdet.models.utils import multi_apply

from mmengine.config import Config
from sparse_ins import SparseInsDecoder
from utils import inverse_sigmoid
# from transformer_bricks import *
from transformer_bricks import ground2img
from transformer import FFN, MultiheadAttention, build_positional_encoding, build_transformer
from transformer_bricks import  LATRTransformer
from positional_encoding import SinePositionalEncoding
from mmengine.model import kaiming_init, xavier_init


class LATRHead(nn.Module):
    """Transformer-based lane head for LATR-style 3D lane detection.

    Pipeline (see ``forward``): a ``SparseInsDecoder`` produces per-lane
    instance features from the 2D image feature map; each lane feature is
    combined with learned per-point embeddings to form point-level queries;
    a transformer decoder refines them and predicts, for every decoder
    layer, per-lane class scores and (x, z, visibility) values at fixed
    y-axis anchor steps. Training losses (regression, visibility,
    classification, and a 2D projection consistency loss) are computed in
    ``get_loss`` / ``get_project_loss``.
    """

    def __init__(self, args,
                 dim=128,
                 num_group=1,
                 num_convs=4,
                 in_channels=128,
                 kernel_dim=128,
                 positional_encoding=dict(
                    type='SinePositionalEncoding',
                    num_feats=128 // 2, normalize=True),
                 num_classes=21,
                 num_query=30,
                 embed_dims=128,
                 transformer=None,
                 num_reg_fcs=2,
                 depth_num=50,
                 depth_start=3,
                 top_view_region=None,
                 position_range=[-50, 3, -10, 50, 103, 10.],
                 pred_dim=10,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=2.0),
                 loss_reg=dict(type='L1Loss', loss_weight=2.0),
                 loss_vis=dict(type='BCEWithLogitsLoss', reduction='mean'),
                 sparse_ins_decoder=Config(
                    dict(
                        encoder=dict(
                            out_dims=64),# neck output feature channels
                        decoder=dict(
                            num_group=1,
                            output_iam=True,
                            scale_factor=1.),
                        sparse_decoder_weight=1.0,
                        )),
                 xs_loss_weight=1.0,
                 zs_loss_weight=5.0,
                 vis_loss_weight=1.0,
                 cls_loss_weight=20,
                 project_loss_weight=1.0,
                 trans_params=dict(
                     init_z=0, bev_h=250, bev_w=100),
                 pt_as_query=False,
                 num_pt_per_line=5,
                 num_feature_levels=1,
                 gt_project_h=20,
                 gt_project_w=30,
                 project_crit=dict(
                     type='SmoothL1Loss',
                     reduction='none'),
                 ):
        """Build sub-modules, prediction branches, and loss criteria.

        Args:
            args: experiment config; must provide ``num_y_steps`` and
                ``anchor_y_steps`` and support ``get('anchor_y_steps_dense',
                default)`` — presumably an mmengine ``Config``; confirm
                against the caller.
            position_range: [x_min, y_min, z_min, x_max, y_max, z_max]
                metric range used to de-normalize predictions.
            pred_dim: number of y anchor points per lane (``code_size``).
            num_pt_per_line: number of point queries per lane query.
        """
        super().__init__()
        self.trans_params = dict(
            top_view_region=top_view_region,
            z_region=[position_range[2], position_range[5]])
        self.trans_params.update(trans_params)
        self.gt_project_h = gt_project_h
        self.gt_project_w = gt_project_w

        self.num_y_steps = args.num_y_steps
        # self.register_buffer('anchor_y_steps',
        #     torch.from_numpy(args.anchor_y_steps).float())
        self.anchor_y_steps = np.linspace(*args.anchor_y_steps)
        # self.register_buffer('anchor_y_steps_dense',
        #     torch.from_numpy(args.anchor_y_steps_dense).float())
        # Dense y anchors used only for the projection loss target.
        self.anchor_y_steps_dense = torch.from_numpy(args.get(
            'anchor_y_steps_dense',
            np.linspace(3, 103, 200)))

        # Per-element loss is required: masking/reduction is done manually
        # in get_project_loss.
        project_crit['reduction'] = 'none'
        self.project_crit = getattr(
            nn, project_crit.pop('type'))(**project_crit)

        self.num_classes = num_classes
        self.embed_dims = embed_dims
        # points num along y-axis.
        self.code_size = pred_dim # 20
        self.num_query = num_query # 40
        self.num_group = num_group # 1
        self.num_pred = transformer['decoder']['num_layers'] # 6
        self.pc_range = position_range # [-30, -17, -5, 30, 123, 5.0]
        self.xs_loss_weight = xs_loss_weight
        self.zs_loss_weight = zs_loss_weight
        self.vis_loss_weight = vis_loss_weight
        self.cls_loss_weight = cls_loss_weight
        self.project_loss_weight = project_loss_weight

        # Per-element loss: visibility masking/reduction is done in get_loss.
        loss_reg['reduction'] = 'none'
        self.reg_crit = MODELS.build(loss_reg)
        self.cls_crit = MODELS.build(loss_cls)
        self.bce_loss = build_nn_loss(loss_vis)
        self.sparse_ins = SparseInsDecoder(cfg=sparse_ins_decoder)

        self.depth_num = depth_num
        self.position_dim = 3 * self.depth_num
        self.position_range = position_range # [-30, -17, -5, 30, 123, 5.0]
        self.depth_start = depth_start
        self.adapt_pos3d = nn.Sequential(
            nn.Conv2d(self.embed_dims, self.embed_dims*4, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(self.embed_dims*4, self.embed_dims, kernel_size=1, stride=1, padding=0),
        )
        # print("positional_encoding: ", positional_encoding)
        self.positional_encoding = build_positional_encoding(positional_encoding)
        self.position_encoder = nn.Sequential(
            nn.Conv2d(self.position_dim, self.embed_dims*4, kernel_size=1, stride=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(self.embed_dims*4, self.embed_dims, kernel_size=1, stride=1, padding=0),
        )
        # print("transformer: ", transformer)
        self.transformer = build_transformer(transformer)
        # print("self.transformer= ", self.transformer)
        self.query_embedding = nn.Sequential(
            nn.Linear(self.embed_dims, self.embed_dims),
            nn.ReLU(),
            nn.Linear(self.embed_dims, self.embed_dims),
        )

        # build pred layer: cls, reg, vis
        self.num_reg_fcs = num_reg_fcs # 2
        cls_branch = []
        for _ in range(self.num_reg_fcs):
            cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
            cls_branch.append(nn.LayerNorm(self.embed_dims))
            cls_branch.append(nn.ReLU(inplace=True))
        cls_branch.append(nn.Linear(self.embed_dims, self.num_classes)) # [256,21]
        fc_cls = nn.Sequential(*cls_branch)

        reg_branch = []
        for _ in range(self.num_reg_fcs): # range(2)
            reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims)) # [256,256]
            reg_branch.append(nn.ReLU()) 
        reg_branch.append(
            nn.Linear(
                self.embed_dims,
                3 * self.code_size // num_pt_per_line)) # [256,3]
        reg_branch = nn.Sequential(*reg_branch)

        # NOTE: the SAME module instance is repeated for every decoder layer
        # (no deepcopy), so cls/reg weights are shared across all prediction
        # layers.
        self.cls_branches = nn.ModuleList(
            [fc_cls for _ in range(self.num_pred)]) # [fc_cls, ...], len=6
        self.reg_branches = nn.ModuleList(
            [reg_branch for _ in range(self.num_pred)]) # [reg_branch, ...], len=6
        

        self.num_pt_per_line = num_pt_per_line # 20
        self.point_embedding = nn.Embedding(
            self.num_pt_per_line, self.embed_dims) # [20,256]

        self.reference_points = nn.Sequential(
            nn.Linear(self.embed_dims, self.embed_dims),
            nn.ReLU(True),
            nn.Linear(self.embed_dims, self.embed_dims),
            nn.ReLU(True),
            nn.Linear(self.embed_dims, 2 * self.code_size // num_pt_per_line)) # [256,2]
        self.num_feature_levels = num_feature_levels # 1
        self.level_embeds = nn.Parameter(torch.Tensor(
            self.num_feature_levels, self.embed_dims)) # [1,256]

        self._init_weights()

    def _init_weights(self):
        """Initialize the transformer, reference-point MLP, classifier
        bias (focal-loss prior when sigmoid is used), and level embeddings."""
        self.transformer.init_weights()
        xavier_init(self.reference_points, distribution='uniform', bias=0)
        if self.cls_crit.use_sigmoid:
            bias_init = bias_init_with_prob(0.01)
            for m in self.cls_branches:
                nn.init.constant_(m[-1].bias, bias_init)
        normal_(self.level_embeds)

    def forward(self, input_dict, is_training=True):
        """Run the head on one batch.

        Args:
            input_dict (dict): expects at least 'x' (image features),
                'lane_idx', 'seg', 'lidar2img', 'pad_shape', 'image',
                and during training 'ground_lanes' / 'ground_lanes_dense'.
            is_training (bool): if True, losses are computed and added to
                the returned dict.

        Returns:
            dict: 'all_cls_scores' [num_layers, B, num_query, num_classes],
            'all_line_preds' [num_layers, B, num_query, 3*code_size],
            plus the sparse-instance outputs and (if training) losses.
        """
        output_dict = {}
        img_feats = input_dict['x'] # [1, 256, 90, 120], 1/8 downsampling

        if not isinstance(img_feats, (list, tuple)): # True
            img_feats = [img_feats]
            
        # print("img_feats[0].shape= ", img_feats[0].shape), #[1, 256, 90, 120]
        sparse_output = self.sparse_ins(
            img_feats[0], # [1,256,90,120]
            lane_idx_map=input_dict['lane_idx'].cuda(), # [1,20,720,960]
            input_shape=input_dict['seg'].shape[-2:], # [1,1,720,960]
            is_training=is_training)
        # generate 2d pos emb
        B, C, H, W = img_feats[0].shape
        masks = img_feats[0].new_zeros((B, H, W))

        # TODO use actual mask if using padding or other aug
        sin_embed = self.positional_encoding(masks) # [1, 256, 90, 120]
        sin_embed = self.adapt_pos3d(sin_embed) # [1, 256, 90, 120]

        # init query and reference pt
        query = sparse_output['inst_features'] # BxNxC
        # print("query: ", query.shape), [1, 40, 256]
        # B, N, C -> B, N, num_anchor_per_line, C
        # [1,40,1,256]+[1,1,20,256] -> [1, 40, 20, 256]
        # lane embedding + point embedding -> query embedding
        query = query.unsqueeze(2) + self.point_embedding.weight[None, None, ...] # [1, 40, 20, 256]
        
        query_embeds = self.query_embedding(query).flatten(1, 2) # [1,800,256]
        # Decoder content queries start at zero; information enters via
        # query_embeds (positional part) and cross-attention.
        query = torch.zeros_like(query_embeds) # [1,800,256]
        reference_points = self.reference_points(query_embeds) # [1,800,2]
        reference_points = reference_points.sigmoid()
        mlvl_feats = img_feats
        feat_flatten = []
        spatial_shapes = []
        mlvl_masks = []
        assert self.num_feature_levels == len(mlvl_feats)
        for lvl, feat in enumerate(mlvl_feats):
            bs, c, h, w = feat.shape # [1, 256, 90, 120]
            spatial_shape = (h, w) # [90,120]
            feat = feat.flatten(2).permute(2, 0, 1) # NxBxC, [10800,1,256]
            # self.level_embeds[None, lvl:lvl+1, :], [1,1,256]
            feat = feat + self.level_embeds[None, lvl:lvl+1, :].to(feat.device) # [10800,1,256]
            spatial_shapes.append(spatial_shape)
            feat_flatten.append(feat)
            mlvl_masks.append(torch.zeros((bs, *spatial_shape),
                                           dtype=torch.bool,
                                           device=feat.device))
        if self.transformer.with_encoder: # None
            mlvl_positional_encodings = []
            pos_embed2d = []
            for lvl, feat in enumerate(mlvl_feats):
                mlvl_positional_encodings.append(
                    self.positional_encoding(mlvl_masks[lvl]))
                pos_embed2d.append(
                    mlvl_positional_encodings[-1].flatten(2).permute(2, 0, 1))
            pos_embed2d = torch.cat(pos_embed2d, 0)
        else:
            mlvl_positional_encodings = None
            pos_embed2d = None
        feat_flatten = torch.cat(feat_flatten, 0) # [10800,1,256]

        spatial_shapes = torch.as_tensor(
            spatial_shapes, dtype=torch.long, device=query.device)
        # print("spatial_shapes: ", spatial_shapes.shape), [1,2]
        # print("spatial_shapes.new_zeros((1, )): ", spatial_shapes.new_zeros((1, ))), [0]
        # print("spatial_shapes.prod(1).cumsum(0)[:-1]: ", spatial_shapes.prod(1).cumsum(0)[:-1])
        # Start offset of each level inside the flattened feature tensor.
        level_start_index = torch.cat(
            (spatial_shapes.new_zeros((1, )),
             spatial_shapes.prod(1).cumsum(0)[:-1])
        )
        # print("level_start_index= ", level_start_index), [0]

        # head
        # print("feat_flatten: ", feat_flatten.shape), [10800, 1, 256]
        # print("query: ", query.shape), [1, 800, 256]
        # print("query_embeds: ", query_embeds.shape), [1, 800, 256]
        # print("reference_points: ", reference_points.shape), [1, 800, 2]
        # print("img_feats[0]: ", img_feats[0].shape), [1, 256, 90, 120]
        # print("sin_embed: ", sin_embed.shape), [1, 256, 90, 120]
        pos_embed = None
        outs_dec, project_results, outputs_classes, outputs_coords = \
            self.transformer( # class LATRTransformer...
                feat_flatten, None, # x, mask
                query, query_embeds, pos_embed, # query, query_embed, pos_embed=None,
                reference_points=reference_points, 
                reg_branches=self.reg_branches,
                cls_branches=self.cls_branches,
                img_feats=img_feats,
                lidar2img=input_dict['lidar2img'],
                pad_shape=input_dict['pad_shape'],
                sin_embed=sin_embed,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                mlvl_masks=mlvl_masks,
                mlvl_positional_encodings=mlvl_positional_encodings,
                pos_embed2d=pos_embed2d,
                image=input_dict['image'],
                **self.trans_params)
        # print("project_results, outputs_classes, outputs_coords",
        #       len(project_results), len(outputs_classes), len(outputs_coords)) # [5,6,6]
        # print("project_results, outputs_classes, outputs_coords",
        #       project_results[4].shape, outputs_classes[5].shape, outputs_coords[5].shape)
        # torch.Size([1, 4, 90, 120]) torch.Size([1, 40, 21]) torch.Size([1, 40, 20, 1, 3])
        all_cls_scores = torch.stack(outputs_classes) # [6,1,40,21]
        all_line_preds = torch.stack(outputs_coords) # [6,1,40,20,1,3]
        # De-normalize x (channel 0) and z (channel 1) from [0, 1] back to
        # metric coordinates using pc_range.
        all_line_preds[..., 0] = (all_line_preds[..., 0]
            * (self.pc_range[3] - self.pc_range[0]) + self.pc_range[0])
        all_line_preds[..., 1] = (all_line_preds[..., 1]
            * (self.pc_range[5] - self.pc_range[2]) + self.pc_range[2])
        # print("all_line_preds: ", all_line_preds.shape), [6, 1, 40, 20, 1, 3]

        # reshape to original format
        all_line_preds = all_line_preds.view(
            len(outputs_classes), bs, self.num_query,
            self.transformer.decoder.num_anchor_per_query,
            self.transformer.decoder.num_points_per_anchor, 2 + 1 # x,z,vis
        ) # [6,1,40,20,1,3]
        all_line_preds = all_line_preds.permute(0, 1, 2, 5, 3, 4) # [6,1,40,3,20,1]
        all_line_preds = all_line_preds.flatten(3, 5) # [6, 1, 40, 60]

        output_dict.update({
            'all_cls_scores': all_cls_scores, # [6,1,40,21]
            'all_line_preds': all_line_preds, # [6, 1, 40, 60]
        })
        output_dict.update(sparse_output)

        if is_training:
            losses = self.get_loss(output_dict, input_dict)
            project_loss = self.get_project_loss(
                project_results, input_dict,
                h=self.gt_project_h, w=self.gt_project_w)
            losses['project_loss'] = \
                self.project_loss_weight * project_loss
            output_dict.update(losses)
        return output_dict

    def get_project_loss(self, results, input_dict, h=20, w=30):
        """2D projection consistency loss.

        Projects dense ground-truth lane points to the image plane via
        ``ground2img`` and penalizes the per-layer projection maps
        (``results``) where both GT and prediction are valid. Only the
        y and z channels contribute (y down-weighted by 0.1).

        Returns:
            Tensor: scalar loss averaged over the decoder layers.
        """
        gt_lane = input_dict['ground_lanes_dense'] # [1,20,600]
        gt_ys = self.anchor_y_steps_dense.clone() # np.linspace(3, 103, 200)
        code_size = gt_ys.shape[0] # 200
        gt_xs = gt_lane[..., :code_size].cuda() # [1,20,200]
        gt_zs = gt_lane[..., code_size : 2*code_size].cuda() # [1,20,200]
        gt_vis = gt_lane[..., 2*code_size:3*code_size] # [1,20,200]
        gt_ys = gt_ys[None, None, :].expand_as(gt_xs).cuda() # [1,20,200]
        gt_points = torch.stack([gt_xs, gt_ys, gt_zs], dim=-1) # [1,20,200,3]

        B = results[0].shape[0] # 1
        # [1, 20, 200, 4]; pad a column of ones on the right to form
        # homogeneous coordinates.
        ref_3d_home = F.pad(gt_points, (0, 1), value=1)
        coords_img = ground2img(
            ref_3d_home,
            h, w,
            input_dict['lidar2img'],
            input_dict['pad_shape'], mask=gt_vis) # [1, 4, 20, 30]

        all_loss = 0.
        for projct_result in results:
            projct_result = F.interpolate(
                projct_result,
                size=(h, w),
                mode='nearest')
            gt_proj = coords_img.clone() # [1, 4, 20, 30]

            # Last channel acts as a validity flag on both sides.
            mask = (gt_proj[:, -1, ...] > 0) * (projct_result[:, -1, ...] > 0)
            diff_loss = self.project_crit(
                projct_result[:, :3, ...],
                gt_proj[:, :3, ...],
            )
            diff_y_loss = diff_loss[:, 1, ...]
            diff_z_loss = diff_loss[:, 2, ...]
            diff_loss = diff_y_loss * 0.1 + diff_z_loss
            # clamp(…, 1) guards against division by zero when no pixel
            # is valid.
            diff_loss = (diff_loss * mask).sum() / torch.clamp(mask.sum(), 1)
            all_loss = all_loss + diff_loss

        return all_loss / len(results)

    def get_loss(self, output_dict, input_dict):
        """Compute matched regression / visibility / classification losses
        for every decoder layer and average them.

        Relies on ``output_dict['matched_indices']`` — presumably produced
        by a Hungarian matcher elsewhere; format assumed to be
        ``matched_indices[group][batch] = (pred_idx, gt_idx)`` — confirm
        against the matcher implementation.

        Returns:
            dict: weighted 'all_xs_loss', 'all_zs_loss', 'all_vis_loss',
            'all_cls_loss'.
        """
        all_cls_pred = output_dict['all_cls_scores'] # [6,1,40,21]
        all_lane_pred = output_dict['all_line_preds'] # [6,1,40,60]
        gt_lanes = input_dict['ground_lanes']  # [20,81]
        all_xs_loss = 0.0
        all_zs_loss = 0.0
        all_vis_loss = 0.0
        all_cls_loss = 0.0
        matched_indices = output_dict['matched_indices']
        num_layers = all_lane_pred.shape[0] # 6

        def single_layer_loss(layer_idx):
            # Loss for one decoder layer; returns (xs, zs, vis, cls) averaged
            # over batch and groups.
            gcls_pred = all_cls_pred[layer_idx] # [1,40,21]
            glane_pred = all_lane_pred[layer_idx] # [1,40,60]

            glane_pred = glane_pred.view(
                glane_pred.shape[0],
                self.num_group,
                self.num_query,
                glane_pred.shape[-1]) # [1,1,40,60]
            gcls_pred = gcls_pred.view(
                gcls_pred.shape[0],
                self.num_group,
                self.num_query,
                gcls_pred.shape[-1]) # [1,1,40,21]

            per_xs_loss = 0.0
            per_zs_loss = 0.0
            per_vis_loss = 0.0
            per_cls_loss = 0.0
            batch_size = len(matched_indices[0])

            for b_idx in range(len(matched_indices[0])):
                for group_idx in range(self.num_group): # 1
                    pred_idx = matched_indices[group_idx][b_idx][0]
                    gt_idx = matched_indices[group_idx][b_idx][1]

                    cls_pred = gcls_pred[:, group_idx, ...]
                    lane_pred = glane_pred[:, group_idx, ...]

                    if gt_idx.shape[0] < 1:
                        # No GT lanes matched: all queries are background;
                        # the 0.0 * mean() term keeps the graph connected
                        # so DDP does not see unused parameters.
                        cls_target = cls_pred.new_zeros(cls_pred[b_idx].shape[0]).long()
                        cls_loss = self.cls_crit(cls_pred[b_idx], cls_target)
                        per_cls_loss = per_cls_loss + cls_loss
                        per_xs_loss = per_xs_loss + 0.0 * lane_pred[b_idx].mean()
                        continue

                    pos_lane_pred = lane_pred[b_idx][pred_idx]
                    gt_lane = gt_lanes[b_idx][gt_idx]

                    # Layout per lane: [xs | zs | vis | one-hot class].
                    pred_xs = pos_lane_pred[:, :self.code_size]
                    pred_zs = pos_lane_pred[:, self.code_size : 2*self.code_size]
                    pred_vis = pos_lane_pred[:, 2*self.code_size:]
                    gt_xs = gt_lane[:, :self.code_size]
                    gt_zs = gt_lane[:, self.code_size : 2*self.code_size]
                    gt_vis = gt_lane[:, 2*self.code_size:3*self.code_size]

                    # Regression is only supervised at visible points.
                    loc_mask = gt_vis > 0
                    xs_loss = self.reg_crit(pred_xs, gt_xs)
                    zs_loss = self.reg_crit(pred_zs, gt_zs)
                    xs_loss = (xs_loss * loc_mask).sum() / torch.clamp(loc_mask.sum(), 1)
                    zs_loss = (zs_loss * loc_mask).sum() / torch.clamp(loc_mask.sum(), 1)
                    vis_loss = self.bce_loss(pred_vis, gt_vis)

                    cls_target = cls_pred.new_zeros(cls_pred[b_idx].shape[0]).long()
                    cls_target[pred_idx] = torch.argmax(
                        gt_lane[:, 3*self.code_size:], dim=1)
                    cls_loss = self.cls_crit(cls_pred[b_idx], cls_target)

                    per_xs_loss += xs_loss
                    per_zs_loss += zs_loss
                    per_vis_loss += vis_loss
                    per_cls_loss += cls_loss

            return tuple(map(lambda x: x / batch_size / self.num_group,
                             [per_xs_loss, per_zs_loss, per_vis_loss, per_cls_loss]))

        all_xs_loss, all_zs_loss, all_vis_loss, all_cls_loss = multi_apply(
            single_layer_loss, range(all_lane_pred.shape[0]))
        # print("all_xs_loss: ", len(all_xs_loss)), 6
        all_xs_loss = sum(all_xs_loss) / num_layers
        all_zs_loss = sum(all_zs_loss) / num_layers
        all_vis_loss = sum(all_vis_loss) / num_layers
        all_cls_loss = sum(all_cls_loss) / num_layers

        return dict(
            all_xs_loss=self.xs_loss_weight * all_xs_loss,
            all_zs_loss=self.zs_loss_weight * all_zs_loss,
            all_vis_loss=self.vis_loss_weight * all_vis_loss,
            all_cls_loss=self.cls_loss_weight * all_cls_loss,
        )

    @staticmethod
    def get_reference_points(H, W, bs=1, device='cuda', dtype=torch.float):
        """Return a [bs, H*W, 2] grid of pixel-center reference points,
        normalized to (0, 1) in (x, y) order."""
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(
                0.5, H - 0.5, H, dtype=dtype, device=device),
            torch.linspace(
                0.5, W - 0.5, W, dtype=dtype, device=device)
        )
        ref_y = ref_y.reshape(-1)[None] / H
        ref_x = ref_x.reshape(-1)[None] / W
        ref_2d = torch.stack((ref_x, ref_y), -1)
        ref_2d = ref_2d.repeat(bs, 1, 1) 
        return ref_2d

def build_nn_loss(loss_cfg):
    """Instantiate a loss module from ``torch.nn`` by config dict.

    Args:
        loss_cfg (dict): must contain a ``'type'`` key naming a class in
            ``torch.nn`` (e.g. ``'BCEWithLogitsLoss'``); all remaining
            keys are forwarded to the class constructor as kwargs.

    Returns:
        nn.Module: the constructed loss criterion.
    """
    # Work on a shallow copy: the previous implementation popped 'type'
    # from the caller's dict in place, corrupting shared/default config
    # dicts on reuse (e.g. the module-level loss_vis default).
    cfg = dict(loss_cfg)
    crit_t = cfg.pop('type')
    return getattr(nn, crit_t)(**cfg)