import torch
import torch_npu
from long_tail_bench.core.executer import Executer
import mmcv
from mmcv.ops import batched_nms

# def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
#     """Performs non-maximum suppression in a batched fashion.

#     Modified from https://github.com/pytorch/vision/blob
#     /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
#     In order to perform NMS independently per class, we add an offset to all
#     the boxes. The offset is dependent only on the class idx, and is large
#     enough so that boxes from different classes do not overlap.

#     Arguments:
#         boxes (torch.Tensor): boxes in shape (N, 4).
#         scores (torch.Tensor): scores in shape (N, ).
#         idxs (torch.Tensor): each index value corresponds to a bbox cluster,
#             and NMS will not be applied between elements of different idxs,
#             shape (N, ).
#         nms_cfg (dict): specify nms type and other parameters like iou_thr.
#             Possible keys include the following.

#             - iou_thr (float): IoU threshold used for NMS.
#             - split_thr (float): threshold number of boxes. In some cases the
#                 number of boxes is large (e.g., 200k). To avoid OOM during
#                 training, the users could set `split_thr` to a small value.
#                 If the number of boxes is greater than the threshold, it will
#                 perform NMS on each group of boxes separately and sequentially.
#                 Defaults to 10000.
#         class_agnostic (bool): if true, nms is class agnostic,
#             i.e. IoU thresholding happens over all boxes,
#             regardless of the predicted class.

#     Returns:
#         tuple: kept dets and indice.
#     """
#     nms_cfg_ = nms_cfg.copy()
#     class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
#     if class_agnostic:
#         boxes_for_nms = boxes
#     else:
#         max_coordinate = boxes.max()
#         offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
#         boxes_for_nms = boxes + offsets[:, None]

#     nms_type = nms_cfg_.pop('type', 'nms')
#     nms_op = eval(nms_type)

#     split_thr = nms_cfg_.pop('split_thr', 10000)
#     # Won't split to multiple nms nodes when exporting to onnx
#     if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export():
#         dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
#         boxes = boxes[keep]
#         # -1 indexing works abnormal in TensorRT
#         # This assumes `dets` has 5 dimensions where
#         # the last dimension is score.
#         # TODO: more elegant way to handle the dimension issue.
#         # Some type of nms would reweight the score, such as SoftNMS
#         scores = dets[:, 4]
#     else:
#         max_num = nms_cfg_.pop('max_num', -1)
#         total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
#         # Some type of nms would reweight the score, such as SoftNMS
#         scores_after_nms = scores.new_zeros(scores.size())
#         for id in torch.unique(idxs):
#             mask = (idxs == id).nonzero(as_tuple=False).view(-1)
#             dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
#             total_mask[mask[keep]] = True
#             scores_after_nms[mask[keep]] = dets[:, -1]
#         keep = total_mask.nonzero(as_tuple=False).view(-1)

#         scores, inds = scores_after_nms[keep].sort(descending=True)
#         keep = keep[inds]
#         boxes = boxes[keep]

#         if max_num > 0:
#             keep = keep[:max_num]
#             boxes = boxes[:max_num]
#             scores = scores[:max_num]

#     return torch.cat([boxes, scores[:, None]], -1), keep


def args_adaptor(np_args):
    """Convert raw numpy inputs into the argument list for ``batched_nms``.

    Args:
        np_args (sequence): three numpy arrays — boxes (N, 4), scores (N,),
            and class indices idxs (N,) — in that order.

    Returns:
        list: ``[boxes, scores, idxs, nms_cfg]`` with the tensors moved to
        the Ascend NPU device and a fixed plain-NMS config
        (``iou_threshold=0.5``).
    """
    # Move each input tensor onto the NPU device.
    boxes_npu, scores_npu, idxs_npu = (
        torch.from_numpy(arr).npu() for arr in np_args[:3]
    )
    cfg = dict(type='nms', iou_threshold=0.5)
    return [boxes_npu, scores_npu, idxs_npu, cfg]


def executer_creator():
    """Build the benchmark Executer wiring ``batched_nms`` to its adaptor.

    Returns:
        Executer: runs ``mmcv.ops.batched_nms`` with inputs prepared by
        :func:`args_adaptor`.
    """
    executer = Executer(batched_nms, args_adaptor)
    return executer