import argparse
import copy
import os
import mindspore
import numpy as np
from pathlib import Path
from mind3d.utils.sim_builder import build_dataset
from mind3d.utils.sim_center_utils import (draw_gaussian, gaussian_radius)
from mind3d.utils.sim_box_np_ops import center_to_corner_box2d
from mind3d.utils.sim_geometry import points_in_convex_polygon_jit
from mind3d.models.build_sim_model import build_model, get_config
from nuscenes.nuscenes import NuScenes

from mindspore import load_checkpoint, ops, Tensor
from mindspore import load_param_into_net
from mindspore import dataset as de
from mindspore import context

def simtrack_infer(args):
    """Run SimTrack joint detection/tracking inference over the nuScenes val split.

    Builds the model and validation dataset from the YAML config, loads the
    checkpoint, then iterates frames in dataset order.  For the first frame of
    a scene the model runs as a plain detector; for subsequent frames the
    previous frame's detections are rendered into a class heatmap /
    tracking-id map (see render_trackmap) and fed to the tracking head so
    existing tracks are propagated.  Results are keyed by sample token in
    ``detections``.

    Args:
        args: parsed CLI namespace with ``config`` (YAML path), ``checkpoint``
            (ckpt path) and ``device_target`` attributes.
    """
    device_id = int(os.getenv('DEVICE_ID', '1'))
    context.set_context(mode=context.PYNATIVE_MODE, device_target=args.device_target, device_id=device_id)
    cfg = get_config(Path(args.config))

    # BEV grid geometry, shared with render_trackmap() through module globals.
    global voxel_size, downsample, voxel_range, num_classes, size_h, size_w
    voxel_size = np.array(cfg['_voxel_size'])[:2]
    downsample = cfg['assigner']['out_size_factor']
    voxel_range = np.array(cfg['_pc_range'])
    num_classes = sum(t['num_class'] for t in cfg['tasks'])
    size_w, size_h = ((voxel_range[3:5] - voxel_range[:2]) / voxel_size / downsample).astype(np.int32)

    dataset = build_dataset(cfg['data']['val'])
    ms_model = build_model(model_cfg=cfg['model'])

    ds = de.GeneratorDataset(dataset, column_names=cfg['eval_column_names'], shuffle=False)
    # NOTE(review): `detections` is accumulated but never persisted or returned
    # here — presumably consumed in a fuller version of this script; verify.
    detections = {}

    print(args.checkpoint)
    ms_checkpoint = load_checkpoint(args.checkpoint)
    load_param_into_net(ms_model, ms_checkpoint)

    prev_detections = {}
    # NOTE(review): dataset root is hard-coded; consider promoting to a CLI arg.
    nusc = NuScenes(version='v1.0-trainval', dataroot='/data0/HR_dataset/nuscenes/data/nuscenes/', verbose=True)
    grids = meshgrid(size_w, size_h)
    start_id = 0  # next unused tracking id (unique across the whole run)
    expand_dims = ops.ExpandDims()

    for _, data_batch in enumerate(ds.create_dict_iterator()):
        # Prepend a zero batch-index column to points/coordinates and add a
        # leading batch dimension to the calibration matrices (batch size 1).
        points = ops.zeros((data_batch['points'].shape[0], 6), mindspore.float32)
        points[:, 1:6] = data_batch['points']
        data_batch['points'] = points
        coor = ops.zeros((data_batch['coordinates'].shape[0], 4), mindspore.float32)
        coor[:, 1:4] = data_batch['coordinates']
        data_batch['coordinates'] = coor
        data_batch['shape'] = data_batch['shape'].view(1, 3).astype('int32')
        data_batch['ref_from_car'] = expand_dims(data_batch['ref_from_car'], 0)
        data_batch['car_from_global'] = expand_dims(data_batch['car_from_global'], 0)
        data_batch['num_voxels'] = data_batch['num_voxels'].astype('int32')

        prev_token = nusc.get('sample', str(data_batch['token']))['prev']
        track_outputs = None
        if prev_token != '':  # non-first frame: condition on the previous frame's tracks
            assert prev_token in prev_detections
            # Move the previous frame's global-frame box centres into the
            # current lidar frame via the current calibration matrices.
            box3d = prev_detections[prev_token]['box3d_global']
            box3d = (data_batch['ref_from_car'][0].asnumpy() @ data_batch['car_from_global'][
                0].asnumpy()) @ box3d
            box3d = box3d.T
            prev_detections[prev_token]['box3d_lidar'] = np.concatenate((box3d[:, :3],
                                                                         prev_detections[prev_token]['box3d_lidar'][
                                                                         :, 3:]), axis=1)

            # Rasterize previous detections, then split the flat class axis
            # into one chunk per task head.
            prev_hm_, prev_track_id_ = render_trackmap(prev_detections[prev_token], grids, cfg)
            prev_hm_ = prev_hm_.transpose((0, 2, 3, 1)).view(1, int(size_h * size_w), int(num_classes))
            prev_track_id_ = prev_track_id_.transpose((0, 2, 3, 1)).view(1, int(size_h * size_w),
                                                                     int(num_classes))
            prev_hm = []
            prev_track_id = []
            class_id = 0
            for task in cfg['tasks']:
                prev_hm.append(prev_hm_[..., class_id: class_id + task['num_class']])
                prev_track_id.append(prev_track_id_[..., class_id: class_id + task['num_class']])
                class_id += task['num_class']

            preds = ms_model(data_batch, return_loss=False,
                          return_feature=True)
            outputs, track_outputs = ms_model.bbox_head.predict_tracking(data_batch, preds, ms_model.test,
                                                                      prev_hm=prev_hm, prev_track_id=prev_track_id,
                                                                      new_only=False)
        else:  # first frame of a scene: plain detection, every box starts a track
            ms_model.set_train(False)
            outputs = ms_model(data_batch, return_loss=False, return_feature=False)

        print('scores:', outputs[0]['scores'])
        print('label_preds:', outputs[0]['label_preds'])
        # Newly detected boxes get fresh, globally unique tracking ids.
        outputs[0]['tracking_id'] = mindspore.numpy.arange(start_id, start_id + outputs[0]['scores'].shape[0]).astype('int')
        start_id += outputs[0]['scores'].shape[0]

        # Merge new detections with propagated tracks into one result dict.
        output = outputs[0].copy()
        token = output["token"]
        cat = ops.Concat(axis=0)
        for k, v in output.items():
            if k not in ["token"]:
                if track_outputs is not None:
                    output[k] = cat([v.copy().astype('float'), track_outputs[0][k].copy().astype('float')])
                else:
                    output[k] = v.copy()

        detections[str(token)] = output

        # Cache this frame's boxes (centres also lifted to homogeneous global
        # coordinates) so the next frame can re-render them in its own frame.
        prev_output = {}
        box3d_lidar = output['box3d_lidar'].copy().asnumpy()
        box3d = np.concatenate((box3d_lidar[:, :3], np.ones((box3d_lidar.shape[0], 1))), axis=1).T
        box3d = (np.linalg.inv(data_batch['car_from_global'][0].asnumpy()) @ np.linalg.inv(
            data_batch['ref_from_car'][0].asnumpy())) @ box3d
        prev_output['box3d_lidar'] = box3d_lidar
        prev_output['box3d_global'] = box3d
        prev_output['label_preds'] = output['label_preds'].asnumpy()
        prev_output['scores'] = output['scores'].asnumpy()
        prev_output['tracking_id'] = output['tracking_id'].asnumpy()
        prev_detections[str(output['token'])] = prev_output




def render_trackmap(preds_dicts, grids, cfg):
    """Rasterize the previous frame's detections into per-class BEV maps.

    Builds two (1, num_classes, size_h, size_w) arrays from one frame's
    detections (already transformed into the current lidar frame): a Gaussian
    confidence heatmap and a tracking-id map (-1 marks background).  Relies on
    the module globals set by simtrack_infer(): num_classes, size_h, size_w,
    voxel_range, voxel_size, downsample.

    Args:
        preds_dicts: dict with numpy entries 'label_preds', 'box3d_lidar',
            'scores' and 'tracking_id' for one frame.
        grids: (size_h*size_w, 2) integer (x, y) grid coordinates produced by
            meshgrid().
        cfg: parsed model config; provides cfg['assigner']['min_radius'].

    Returns:
        Tuple of (heatmap Tensor, float32; tracking-id Tensor, int64), both
        shaped (1, num_classes, size_h, size_w).
    """
    prev_hm = np.zeros((1, num_classes, size_h, size_w), dtype=np.float32)
    prev_tracking_map = np.zeros((1, num_classes, size_h, size_w), dtype=np.int64) - 1
    label_preds = preds_dicts['label_preds'].astype('int')
    box3d_lidar = preds_dicts['box3d_lidar']
    scores = preds_dicts['scores']
    tracking_ids = preds_dicts['tracking_id'].astype('int')
    # BEV box corners converted into feature-map (cell) coordinates.
    box_corners = center_to_corner_box2d(box3d_lidar[:, :2], box3d_lidar[:, 3:5], box3d_lidar[:, -1])
    box_corners = (box_corners - voxel_range[:2].reshape(1, 1, 2)) / voxel_size[:2].reshape(1, 1, 2) / downsample
    # masks[i, j] is True when grid cell i lies inside box j.
    masks = points_in_convex_polygon_jit(grids, box_corners)

    for obj in range(label_preds.shape[0]):
        cls_id = label_preds[obj]
        score = scores[obj]
        tracking_id = tracking_ids[obj]
        # Box extent (width, length) measured in feature-map cells.
        size_x, size_y = box3d_lidar[obj, 3] / voxel_size[0] / downsample, box3d_lidar[obj, 4] / voxel_size[
            1] / downsample
        if size_x > 0 and size_y > 0:
            radius = gaussian_radius((size_y, size_x), min_overlap=0.1)
            # NOTE(review): min() caps the radius AT assigner.min_radius, which
            # contradicts the name "min_radius" (CenterPoint-style code uses
            # max() to enforce a lower bound) — confirm this is intentional.
            radius = min(cfg['assigner']['min_radius'], int(radius))
            coor_x = (box3d_lidar[obj, 0] - voxel_range[0]) / voxel_size[0] / downsample
            coor_y = (box3d_lidar[obj, 1] - voxel_range[1]) / voxel_size[1] / downsample
            ct = np.array([coor_x, coor_y], dtype=np.float32)
            ct_int = ct.astype(np.int32)
            # Skip out-of-range centres to avoid indexing outside the heatmap.
            if not (0 <= ct_int[0] < size_w and 0 <= ct_int[1] < size_h):
                continue
            # Render the centre Gaussian as in CenterTrack.
            draw_gaussian(prev_hm[0, cls_id], ct, radius, score)

            # Tracking-ID map: cells covered by this box.
            mask = masks[:, obj].nonzero()[0]
            coord_in_box = grids[mask, :]
            # Only overwrite cells that are still background (-1) or whose
            # current heatmap value is lower than this box's score.
            mask1 = prev_tracking_map[0, cls_id][coord_in_box[:, 1], coord_in_box[:, 0]] == -1
            mask2 = prev_hm[0, cls_id][coord_in_box[:, 1], coord_in_box[:, 0]] < score
            mask = mask[np.logical_or(mask1, mask2)]
            coord_in_box = grids[mask, :]
            prev_tracking_map[0, cls_id][coord_in_box[:, 1], coord_in_box[:, 0]] = tracking_id
            # The centre cell always takes this box's id.
            prev_tracking_map[0, cls_id][ct_int[1], ct_int[0]] = tracking_id
    return Tensor(prev_hm, mindspore.float32), Tensor(prev_tracking_map, mindspore.int64)


def meshgrid(w, h):
    """Return an (h*w, 2) integer array of (x, y) grid coordinates.

    Coordinates are enumerated row-major: x varies fastest, y slowest, i.e.
    (0,0), (1,0), ..., (w-1,0), (0,1), ...
    """
    xs = np.tile(np.arange(w), h)
    ys = np.repeat(np.arange(h), w)
    return np.stack([xs, ys], axis=1)



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="train config file path",
                        default='/mind3d/configs/simtrack/simtrack.yaml')
    # Help text previously said 'device id'; this flag selects the backend.
    parser.add_argument('--device_target', default='GPU',
                        help='device target (e.g. GPU, Ascend, CPU)')
    parser.add_argument("--work_dir", help="the dir to save logs and models",
                        default='/mind3d/word_dirs/train/new2_copy2')
    parser.add_argument(
        "--checkpoint", help="the dir to checkpoint which the model read from",
        default='/mind3d/word_dirs/train/new2_copy2/epoch_20.ckpt')
    # Do not evaluate detection.  NOTE(review): a CLI-supplied value arrives as
    # a string, so any non-empty value (even "False") is truthy — consider
    # action='store_true' if this flag is ever read.
    parser.add_argument("--eval_det", default=False)

    args = parser.parse_args()
    simtrack_infer(args)
