# Copyright (c) OpenMMLab. All rights reserved.
import copy
import gc
import os

import numpy as np
import torch
from mmcv.runner import BaseModule
from PIL import Image
from torch import distributed as dist
from torch import nn
from torch.cuda.amp import autocast
from torch.distributed import get_rank
from torch.nn import functional as F

from mmdet.models import HEADS
from mmdet.models import builder as build_mm
# from ..builder import build_loss
# NOTE(review): conv_bn_relu and init__model_parameter are referenced below but
# their imports were commented out, which makes OccupancyHead raise NameError at
# construction. Restored from the original commented lines — verify these
# relative paths still resolve in the current package layout.
from ..base_model.dense.base_model import conv_bn_relu
from ..tools.init_mode import init__model_parameter
from .occ_loss_utils import geo_scal_loss, sem_scal_loss, CE_ssc_loss, lovasz_softmax
def show_occupancy_se(image, flag, name):
    """Save an [H, W, 3] uint8-compatible tensor as an RGB debug image.

    The file is written to ``buffer/<flag>/<name>.png``.

    Args:
        image (torch.Tensor): color image tensor of shape [H, W, 3] with
            values in [0, 255] (cast to uint8 below).
        flag (str): sub-directory name under ``buffer/``.
        name (str): file name without extension.
    """
    # exist_ok avoids the exists()/makedirs() race under multi-process use.
    os.makedirs("buffer/{}/".format(flag), exist_ok=True)
    # .cpu() is required before .numpy(): numpy conversion raises on CUDA tensors.
    image = image.detach().cpu()
    image = Image.fromarray(np.uint8(image.numpy()), 'RGB')
    image.save("buffer/{}/{}.png".format(flag, name))

@HEADS.register_module()
class OccupancyHead(nn.Module):
    """Multi-scale 3D semantic-occupancy head on top of a BEV feature map.

    Pipeline:
      1. Crop the incoming BEV feature to the XY window of ``occupany_range``.
      2. A stem conv plus ``log2(up_sample)`` (bilinear upsample + conv)
         stages build a coarse-to-fine feature pyramid.
      3. At each level ``head_i`` predicts per-voxel class logits (the z-extent
         and the class dim are folded into the 2D channel dim); the finest
         level additionally predicts a 2-channel per-voxel flow (``flow_i``).
      4. Optionally (``det_reg_loss``), small MLPs regress per-box attributes
         (class / lwh / yaw sin-cos / velocity / z) from features bilinearly
         sampled at ground-truth box centers.

    Losses (see :meth:`loss` / :meth:`forward_one_level`): flow L1,
    semantic-scal loss, class-balanced focal-style CE, foreground/background
    geometry terms, lovasz-softmax and a dice loss.

    Args:
        up_sample (int): total upsample factor; must be a power of two.
        in_channels (int): channels of the incoming BEV feature map.
        out_shape (dict): target voxel grid sizes with keys "X", "Y", "Z".
        num_classes (int): number of occupancy classes (including 'free').
        det_reg_loss (bool): enable the auxiliary detection regression branch.
        color_map (list | None): per-class RGB colors for debug images.
        names (list[str]): class names; must contain 'free'.
        class_weights (list | None): per-class frequency counts used to derive
            loss weights; defaults to hard-coded statistics.
        point_cloud_range (list): full BEV range [x0, y0, z0, x1, y1, z1].
        occupany_range (list): sub-range covered by the occupancy grid.
        flag: tag used in debug image file names.
        num_det_classes (int): classes of the detection classification MLP.
    """
    def __init__(self,
                 up_sample,
                 in_channels,
                 out_shape,
                 num_classes,
                 det_reg_loss=True,
                 color_map=None,
                 names=[],
                 class_weights=None,
                 point_cloud_range=[],
                 occupany_range=[],
                 flag=None,
                 num_det_classes=10,
                 **kwargs):
        super(OccupancyHead, self).__init__()
        self.up_sample = up_sample
        self.out_shape = out_shape
        self.flag = flag
        in_ch = in_channels
        self.num_classes = num_classes
        self.det_reg_loss = det_reg_loss
        self.num_det_classes = num_det_classes
        self.occupany_range = occupany_range
        # Fractional XY crop window of the occupancy range inside the full
        # point-cloud range: [x_min, y_min, x_max, y_max], each in [0, 1].
        self.crop = [
            (occupany_range[0] - point_cloud_range[0])/(point_cloud_range[3] - point_cloud_range[0]),
            (occupany_range[1] - point_cloud_range[1])/(point_cloud_range[4] - point_cloud_range[1]),
            (occupany_range[3] - point_cloud_range[0])/(point_cloud_range[3] - point_cloud_range[0]),
            (occupany_range[4] - point_cloud_range[1])/(point_cloud_range[4] - point_cloud_range[1]),
        ]
        if self.det_reg_loss:
            # Normalized voxel-center grid in [-1, 1], used by grid_sample to
            # resample coarse predictions to the full-resolution grid.
            X, Y, Z = out_shape["X"], out_shape["Y"], out_shape["Z"]
            xs = torch.linspace(0.5, X-0.5, X, dtype=torch.float32).view(X, 1, 1).expand(X, Y, Z)/X
            ys = torch.linspace(0.5, Y-0.5, Y, dtype=torch.float32).view(1, Y, 1).expand(X, Y, Z)/Y
            zs = torch.linspace(0.5, Z-0.5, Z, dtype=torch.float32).view(1, 1, Z).expand(X, Y, Z)/Z
            # grid_sample expects the last dim ordered (z, y, x) for 5-D input.
            ref_3d = torch.stack((zs, ys, xs), -1)  # [X, Y, Z, 3]
            # NOTE(review): eager .cuda() assumes GPU training and pins the
            # tensor to the default device; register_buffer would be safer.
            self.ref_3d = (2*ref_3d - 1).cuda()

        self.free_idx = names.index('free')
        out_ch = 512  # fixed stem width (was int(out_shape["Z"]/up_sample)*32)
        self.conv2d = conv_bn_relu(in_channels=in_ch,
                                   out_channels=out_ch,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1)
        UP = int(np.log2(up_sample))
        self.UP = UP
        in_ch_for_head = [out_ch]
        for i in range(UP):
            in_ch = out_ch
            out_ch = 512
            up_conv = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
                conv_bn_relu(in_channels=in_ch,
                             out_channels=out_ch,
                             kernel_size=3,
                             stride=1,
                             padding=1),
            )
            in_ch_for_head.append(out_ch)
            self.__setattr__("up_{}".format(i+1), up_conv)
        for i in range(UP+1):
            # Per-level occupancy head. The output channel count folds the
            # level's z-extent and the class count:
            # Z/(2**(UP-i)) * num_classes.
            head_occ = nn.Sequential(conv_bn_relu(in_channels=in_ch_for_head[i],
                                    out_channels=2*int(out_shape["Z"]*num_classes/(2**(UP-i))),
                                    nonlinear=nn.Softplus(),
                                    kernel_size=3,
                                    stride=1,
                                    padding=1),
                                     nn.Conv2d(
                                         in_channels=2*int(out_shape["Z"]*num_classes/(2**(UP-i))),
                                         out_channels=int(out_shape["Z"]*num_classes/(2**(UP-i))),
                                         kernel_size=1,
                                         stride=1,
                                         padding=0,
                                         bias=True
                                     ))
            self.__setattr__("head_{}".format(i), head_occ)
            if det_reg_loss:
                # Five parallel box-attribute regressors per level; built in a
                # fixed order so parameter-init RNG and state_dict keys match
                # the original layout.
                self.__setattr__("det_cls{}".format(i), self._det_mlp(in_ch_for_head[i], num_det_classes))
                self.__setattr__("det_lwh{}".format(i), self._det_mlp(in_ch_for_head[i], 3))
                self.__setattr__("det_angle{}".format(i), self._det_mlp(in_ch_for_head[i], 2))
                self.__setattr__("det_vel{}".format(i), self._det_mlp(in_ch_for_head[i], 2))
                self.__setattr__("det_z{}".format(i), self._det_mlp(in_ch_for_head[i], 1))

            if i==UP:
                # Flow head only at the finest level: 2 channels per z-slice.
                head_flow = nn.Sequential(nn.Conv2d(in_channels=in_ch_for_head[i],
                                         out_channels=128,
                                         kernel_size=3,
                                         stride=1,
                                         padding=1),
                                          nn.Softplus(),
                                          nn.Conv2d(in_channels=128,
                                                    out_channels=int(out_shape["Z"]*2/(2**(UP-i))),
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0
                                                    ))
                self.__setattr__("flow_{}".format(i), head_flow)
            # Focal-loss modules are keyed by the level's downsample ratio
            # 2**(UP-i) (matching the int(X/X_) lookup in forward_one_level).
            # They are built but currently unused by loss(); kept so existing
            # checkpoints / configs stay compatible.
            focal_loss = build_mm.build_loss(dict(type='CustomFocalLoss',
                                                  H=out_shape["X"],
                                                  W=out_shape["Y"],
                                                  cx=100,
                                                  cy=100,
                                                  ))
            self.__setattr__("focal_loss{}".format(int((2**(UP-i)))), focal_loss)

        self.ALL = out_shape["X"]*out_shape["Y"]*out_shape["Z"]
        if color_map is None:
            # Default debug palette. NOTE(review): 17 entries, which does not
            # match the 14-class default weighting below — confirm num_classes
            # before relying on the default palette.
            self.color_map = torch.tensor(
                [
                    [0.0, 0, 0],  # 0 free
                    [0, 200, 0],  # 1 vegetation
                    [255, 0, 0],  # 2 car (dark slate grey / trailer in legacy palette)
                    [220, 20, 60],  # 3 bus (crimson)
                    [255, 69, 0],  # 4 construction vehicle (orange-red)
                    [255, 140, 0],  # 5 bicycle (dark orange)
                    [233, 150, 70],  # 6 motorcycle (dark salmon)
                    [255, 61, 99],  # 7 pedestrian (red)
                    [112, 128, 144],  # 8 traffic cone (slate grey)
                    [222, 184, 135],  # 9 barrier (burlywood)
                    [165, 42, 42],  # 11 other_flat (green in legacy palette)
                    [75, 0, 75],  # 13 terrain
                    [175, 174, 0],  # 12 sidewalk
                    [75, 75, 75],  # 10 driveable_surface
                    [255, 158, 0],  # 14 manmade
                    [0, 0, 255],  # 1 truck (blue)
                    [0, 0, 0]  # 16 free
                ]
            )
        else:
            self.color_map = torch.tensor(
                [color_map[names.index(key)] for key in names]
                ,dtype=torch.float32
            )
        self.count = 0
        self.step = 0
        if class_weights is None:
            # Per-class voxel counts (frequencies) used to derive loss weights.
            class_weights = np.array([
                9.19753308e+06, 6.91161799e+04,
                2.51505232e+03, 3.36162317e+02,
                7.09276790e+01, 1.65816995e+02,
                3.02964558e+01, 1.48460680e+01,
                1.70726573e+02, 1.06688053e+02,
                3.28348309e+01, 2.00146738e+01,
                1.36071519e+01, 6.87537715e+04])
        else:
            class_weights = np.array(class_weights)

        # Inverse-log weighting for the (currently unused) focal loss.
        self.focal_weights = torch.from_numpy(1/np.log(class_weights + 0.001))

        self.class_weights = torch.from_numpy(np.log2(class_weights))
        self.class_weights = self.class_weights/torch.sum(self.class_weights)
        # NOTE(review): the slice [0:self.free_idx-1] excludes the class just
        # before 'free' as well — confirm whether [0:self.free_idx] was meant.
        self.sem_weights = self.class_weights/torch.sum(self.class_weights[0:self.free_idx-1])

        init__model_parameter(self)

    @staticmethod
    def _det_mlp(in_features, out_features):
        """Two-layer MLP (Linear-LayerNorm-Softplus-Linear) used by the
        detection regression branch."""
        return nn.Sequential(
            nn.Linear(in_features=in_features, out_features=128),
            nn.LayerNorm(128),
            nn.Softplus(),
            nn.Linear(in_features=128, out_features=out_features)
        )

    def init_weights(self):
        # mmdet hook; parameters are already initialized in __init__ via
        # init__model_parameter, so nothing to do here.
        pass

    def forward(self, bev_feature_maps, metas):
        """Training-time forward.

        Args:
            bev_feature_maps (Tensor | list | tuple): BEV feature of shape
                [B, C, Y, X] (first element is used if a list/tuple).
            metas: unused here; kept for interface compatibility.

        Returns:
            dict with:
              - 'occupancy': list of per-level logits, each
                [B, num_classes, X_l, Y_l, Z_l] (only the finest level in eval).
              - 'flow': [B, X, Y, Z, 2] flow prediction.
              - 'multi_feature': pyramid features (training + det branch only).
        """
        if isinstance(bev_feature_maps, list) or isinstance(bev_feature_maps, tuple):
            bev_feature_maps = bev_feature_maps[0]
        B, C, Y, X = bev_feature_maps.shape
        # Crop to the occupancy XY window.
        bev_feature_maps = bev_feature_maps[:, :, int(Y*self.crop[1]):int(Y*self.crop[3]), int(X*self.crop[0]):int(X*self.crop[2])]
        feature = self.conv2d(bev_feature_maps)
        multi_feature = [feature]
        multi_out = []

        for i in range(self.UP):
            feature = self.__getattr__("up_{}".format(i+1))(feature)
            multi_feature.append(feature)

        # Fuse coarse levels into the finest one.
        # NOTE(review): range(self.UP-1) skips multi_feature[UP-1] — confirm
        # whether that level was meant to be fused as well.
        for i in range(self.UP-1):
            u = F.interpolate(multi_feature[i], size=multi_feature[-1].shape[2:], mode='bilinear', align_corners=False)
            multi_feature[-1] = multi_feature[-1] + u

        for i in range(self.UP+1):
            if not self.training and i<self.UP:
                continue  # only the finest level is needed at inference
            out = self.__getattr__("head_{}".format(i))(multi_feature[i])
            B, C, Y, X = out.shape
            # [B, C, Y, X] -> [B, num_classes, X, Y, Z_l]
            out = out.reshape(B, self.num_classes, int(C/self.num_classes), Y, X).permute(0, 1, 4, 3, 2).contiguous()
            multi_out.append(out)

        # i == self.UP here; C/num_classes == Z at the finest level.
        flow_ = self.__getattr__("flow_{}".format(i))(multi_feature[i])
        flow_ = flow_.reshape(B, 2, int(C/self.num_classes), Y, X).permute(0, 4, 3, 2, 1).contiguous()
        if not self.training or not self.det_reg_loss:
            return {'occupancy': multi_out, "flow": flow_}
        else:
            return {'occupancy': multi_out, "flow": flow_, "multi_feature": multi_feature}

    def loss(self, occupancy_out, metas):
        """Compute all training losses.

        Args:
            occupancy_out (dict): output of :meth:`forward`.
            metas (dict): must contain 'occupancy' [B, X, Y, Z] labels and
                'flow' targets; 'gt_bboxes_3d'/'gt_labels_3d' when the
                detection branch is on.

        Returns:
            dict of scalar loss tensors.
        """
        occupancy = metas.get("occupancy")
        flow = metas.get("flow")
        flow_ = occupancy_out["flow"]
        B = occupancy.shape[0]
        if self.det_reg_loss:
            gt_bboxes_3d_b = metas["gt_bboxes_3d"]
            gt_labels_3d_b = metas["gt_labels_3d"]
            multi_feature = occupancy_out["multi_feature"]
            reference_points, z, lwh, sin, cos, vel, gt_labels_3d = [], [], [], [], [], [], []
            for i in range(B):
                gt_bboxes_3d = gt_bboxes_3d_b[i]
                if len(gt_bboxes_3d)<1:
                    # Keep per-sample alignment for grid_sample; the other
                    # lists stay compact (concatenated across samples).
                    reference_points.append(None)
                    continue
                gt_labels_3d.append(gt_labels_3d_b[i])
                # Normalize box centers into [0, 1] within the occupancy range,
                # then into grid_sample's [-1, 1] coordinates.
                # NOTE(review): this mutates the caller's gt_bboxes_3d in place.
                gt_bboxes_3d[:, 0] = (gt_bboxes_3d[:, 0] - self.occupany_range[0])/(self.occupany_range[3] - self.occupany_range[0])
                # BUGFIX: y must be normalized from column 1 with the y-range;
                # the original reused column 0 and occupany_range[0] here,
                # corrupting every sampled reference point.
                gt_bboxes_3d[:, 1] = (gt_bboxes_3d[:, 1] - self.occupany_range[1])/(self.occupany_range[4] - self.occupany_range[1])
                reference_points.append((2*gt_bboxes_3d[:,0:2]-1).unsqueeze(0))
                z.append(gt_bboxes_3d[:, 2])
                lwh.append(gt_bboxes_3d[:, 3:6])
                sin.append(torch.sin(gt_bboxes_3d[:, 6]))
                cos.append(torch.cos(gt_bboxes_3d[:, 6]))
                vel.append(gt_bboxes_3d[:,7:])
            # NOTE(review): torch.cat raises if every sample has zero boxes —
            # confirm the dataloader guarantees at least one GT box per batch.
            gt_labels_3d = torch.cat(gt_labels_3d, dim=0)
            z = torch.cat(z, dim=0)
            lwh = torch.cat(lwh, dim=0)
            sin = torch.cat(sin, dim=0)
            cos = torch.cat(cos, dim=0)
            vel = torch.cat(vel, dim=0)
            det_gt = [reference_points, z, lwh, sin, cos, vel, gt_labels_3d]
        else:
            det_gt = None
            multi_feature = [None for i in range(len(occupancy_out["occupancy"]))]
        with torch.no_grad():
            mask = (flow!=0)
            static = flow[~mask]
            flow = flow[mask]
        # Losses are computed in fp32 regardless of AMP.
        with autocast(False):
            if len(flow)>0:
                # Moving voxels weighted 4x against static background.
                loss_flow = F.l1_loss(flow, flow_[mask], reduction="mean")*4 + F.l1_loss(static, flow_[~mask], reduction="mean")
            else:
                loss_flow = 0*flow_.mean()  # keep the graph connected
            loss_dict = {"loss_flow":loss_flow}

            for p, feature in zip(occupancy_out["occupancy"], multi_feature):
                self.forward_one_level(p, feature, occupancy, det_gt, loss_dict)
        return loss_dict

    def dice_loss(self, prob, target, weight=None):
        """Soft dice loss over the classes present in ``target``.

        Args:
            prob (Tensor): softmax probabilities, [B, C, X, Y, Z].
            target (Tensor): integer labels, [B, X, Y, Z].
            weight (sequence | None): per-class weights; defaults to 1/C.

        Returns:
            Tensor: scalar dice loss (classes absent from target are skipped).
        """
        C = prob.shape[1]
        prob_ = prob.permute(1, 0, 2, 3, 4).contiguous()
        dice = 0
        for c in range(C):
            if weight is None:
                w = 1.0/C
            else:
                w = weight[c]
            num_gt = len(target[target==c])
            if num_gt == 0:
                continue
            p = prob_[c]
            u = torch.sum(p[target==c])
            # 0.1 smoothing avoids division by zero for tiny classes.
            dice += (1 - (2*u + 0.1)/(u + torch.sum(p) + 0.1))*w
        return dice

    def forward_one_level(self, pred, feature, occupancy, det_gt, loss_dict):
        """Accumulate all per-level losses into ``loss_dict`` (in place).

        Args:
            pred (Tensor): level logits [B, C, X_l, Y_l, Z_l].
            feature (Tensor | None): level BEV feature (det branch only).
            occupancy (Tensor): full-resolution labels [B, X, Y, Z].
            det_gt (list | None): packed GT from :meth:`loss`.
            loss_dict (dict): mutated in place with this level's losses.
        """
        B, X, Y, Z = occupancy.shape
        B, C, X_, Y_, Z_ = pred.shape
        # Detection-loss keys use the log2 level index; voxel-loss keys below
        # use the raw downsample ratio — both conventions kept as-is.
        level = int(X/X_)
        level = int(np.log2(level))
        if self.det_reg_loss:
            loss_dict['loss_occ_det_cls{}'.format(level)] = 0
            loss_dict['loss_occ_det_angle{}'.format(level)] = 0
            loss_dict['loss_occ_det_vel{}'.format(level)] = 0
            loss_dict['loss_occ_det_z{}'.format(level)] = 0
            loss_dict['loss_occ_det_lwh{}'.format(level)] = 0
            sample = []
            for i in range(B):
                reference_points = det_gt[0][i]
                if reference_points is None:
                    continue  # sample had no GT boxes
                # Bilinearly sample level features at GT box centers -> [N, C].
                sample.append(nn.functional.grid_sample(feature[i].unsqueeze(dim=0),
                                                        reference_points.unsqueeze(dim=0), mode='bilinear',
                                                        align_corners=False).squeeze(dim=0).squeeze(dim=1).permute(1, 0))
            sample = torch.cat(sample, dim=0)
            reference_points, z, lwh, sin, cos, vel, gt_labels_3d = det_gt
            det_cls = self.__getattr__("det_cls{}".format(level))(sample)
            det_lwh = self.__getattr__("det_lwh{}".format(level))(sample)
            det_angle = self.__getattr__("det_angle{}".format(level))(sample)
            det_vel = self.__getattr__("det_vel{}".format(level))(sample)
            det_z = self.__getattr__("det_z{}".format(level))(sample)

            # Focal-style classification: (1-p)^2 modulated cross-entropy.
            ce = F.cross_entropy(det_cls, gt_labels_3d, reduction='none')
            p = torch.exp(-ce)
            loss_dict['loss_occ_det_cls{}'.format(level)] += torch.mean(torch.pow(1 - p, 2) * ce)
            # Sizes regressed in log-space, compared in linear space.
            loss_dict['loss_occ_det_lwh{}'.format(level)] += F.l1_loss(torch.exp(det_lwh), lwh)
            loss_dict['loss_occ_det_angle{}'.format(level)] += F.l1_loss(det_angle[:, 0], sin) + F.l1_loss(det_angle[:, 1], cos)
            loss_dict['loss_occ_det_vel{}'.format(level)] += F.l1_loss(det_vel, vel)
            loss_dict['loss_occ_det_z{}'.format(level)] += F.l1_loss(det_z.squeeze(dim=-1), z)
        # Switch to the ratio-keyed level for the voxel losses.
        level = int(X/X_)
        if level>1:
            # Upsample coarse logits to the full grid via trilinear sampling.
            output_voxels = nn.functional.grid_sample(
                input=pred,
                grid=self.ref_3d.unsqueeze(dim=0).repeat(B, 1, 1, 1, 1),
                mode = "bilinear",
                align_corners=False
            ).contiguous()
        else:
            output_voxels = pred

        target_voxels = occupancy.long()
        prob = torch.softmax(output_voxels, dim=1)

        loss_dict['loss_voxel_sem_{}'.format(level)] = 5*sem_scal_loss(output_voxels,
                                                                       target_voxels,
                                                                       start_idx=0,
                                                                       n_classes=output_voxels.shape[1]-1,
                                                                       weights=self.sem_weights)

        # Class-balanced focal-style CE over classes present in the target.
        ce = 0
        for i in range(self.num_classes):
            idx = target_voxels == i
            num = len(target_voxels[idx])
            if num > 0:
                p = prob[:, i][idx]
                ce += -self.class_weights[i]*torch.mean(torch.log(p)*((1-p)**2))
        loss_dict['loss_voxel_ce_{}'.format(level)] = ce

        # Geometry terms: occupied-vs-free, weighted by the fg/bg ratio.
        # NOTE(review): torch.mean over an empty selection yields NaN if a
        # batch is all-free or all-occupied — confirm that cannot happen.
        empty = prob[:, self.free_idx]
        fg = -torch.mean(torch.log(1 - empty[target_voxels != self.free_idx]))
        bg = -torch.mean(torch.log(empty[target_voxels == self.free_idx]))
        num_bg = (target_voxels == self.free_idx).sum()
        loss_dict['loss_geo_fg_{}'.format(level)] = fg*(1- num_bg/(B*X*Y*Z))
        loss_dict['loss_geo_bg_{}'.format(level)] = bg*num_bg/(B*X*Y*Z)
        loss_dict['loss_voxel_lovasz_{}'.format(level)] = lovasz_softmax(prob, target_voxels, ignore=255, weight=self.class_weights)
        loss_dict['loss_voxel_dice_{}'.format(level)] = self.dice_loss(prob, target_voxels)*5

        # Debug dump of the last sample in the batch (rank 0 only, inside show).
        self.show(output_voxels[-1], target_voxels[-1], level)

        # Explicitly release the full-resolution tensors; they dominate memory.
        del output_voxels, target_voxels, prob
        torch.cuda.empty_cache()
        gc.collect()

    def show(self, pred, target, level):
        """Dump side-by-side prediction/GT z-slice images (rank 0 only).

        Args:
            pred (Tensor): logits [C, X, Y, Z] for one sample.
            target (Tensor): labels [X, Y, Z] for the same sample.
            level: level tag embedded in the output file name.
        """
        with torch.no_grad():
            if dist.get_rank() == 0:
                C, X, Y, Z = pred.shape
                pred_occ = torch.argmax(pred.detach(), dim=0)
                grid_size = [X, Y, Z, 3]
                pred_occ = pred_occ.cpu()
                occupancy_ = target.cpu().long()
                voxels = torch.zeros(grid_size)
                pred_voxels = torch.zeros(grid_size)
                voxels[occupancy_ > -1, :] = self.color_map[occupancy_[occupancy_ > -1]]
                pred_voxels[pred_occ > -1, :] = self.color_map[pred_occ[pred_occ > -1]]

                # Pick three representative z-slices for the grid height.
                if Z < 5:
                    l1, l2, l3 = 0, 1, 3
                elif Z < 11:
                    l1, l2, l3 = 1, 3, 5
                elif Z < 21:
                    l1, l2, l3 = 5, 6, 7
                elif Z < 41:
                    l1, l2, l3 = 4, 5, 7
                else:
                    # BUGFIX: the original left l1/l2/l3 unbound for Z >= 41,
                    # which raised UnboundLocalError below.
                    l1, l2, l3 = Z//8, Z//4, Z//2

                check_image1 = torch.cat([pred_voxels[:, :, l1], pred_voxels[:, :, l2], pred_voxels[:, :, l3]], dim=1)
                check_image2 = torch.cat([voxels[:, :, l1], voxels[:, :, l2], voxels[:, :, l3]], dim=1)

                check_image = torch.cat([check_image1, check_image2], dim=1)
                show_occupancy_se(check_image, self.__class__.__name__, "occupancy_{}_{}".format(self.flag, level))

    def forward_test(self, feature, S):
        """Inference forward over S sweeps folded into the batch dim.

        Args:
            feature (Tensor): BEV features of shape [B*S, C, Y, X]
                (already cropped — NOTE(review): unlike forward(), no crop is
                applied here; confirm the caller crops beforehand).
            S (int): number of sweeps per sample.

        Returns:
            dict with hard-argmax 'occupancy' [B, S, X, Y, Z] and
            'flow' [B, S, X, Y, Z, 2].
        """
        BS = feature.shape[0]
        B = int(BS//S)
        X, Y, Z = self.out_shape["X"], self.out_shape["Y"], self.out_shape["Z"]
        feature = self.conv2d(feature)
        multi_feature = [feature]
        multi_out = []
        for i in range(self.UP):
            feature = self.__getattr__("up_{}".format(i+1))(feature)
            multi_feature.append(feature)
        for i in range(self.UP-1):
            u = F.interpolate(multi_feature[i], size=multi_feature[-1].shape[2:], mode='bilinear', align_corners=False)
            multi_feature[-1] = multi_feature[-1] + u

        for i in range(self.UP+1):
            if i<self.UP: continue  # only the finest level at test time
            out = self.__getattr__("head_{}".format(i))(multi_feature[i])
            B_, C, Y, X = out.shape

            out = out.view(B, S, C, Y, X).reshape(B, S, self.num_classes, int(Z/2**(self.UP-i)), Y, X).permute(0, 1, 2, 5, 4, 3)
            out = torch.argmax(out, dim=2)

            multi_out.append(out)
        flow_ = self.__getattr__("flow_{}".format(i))(multi_feature[i])
        B_, C, Y, X = flow_.shape
        # BUGFIX: the flow head emits 2 channels per z-slice (C == 2*Z), so the
        # original reshape with a leading 3 could never match and raised a
        # runtime error; use 2, mirroring forward()'s flow reshape.
        flow_ = flow_.view(B, S, C, Y, X).reshape(B, S, 2, int(Z/2**(self.UP-i)), Y, X).permute(0, 1, 5, 4, 3, 2)

        return {"flow": flow_,
                "occupancy": multi_out[-1]}