
import torch
import torch.backends.cudnn as cudnn
import time
import os
import random


@torch.no_grad()
def topk_test_org(count=1000, loop_count=10, device='cuda'):
    """Benchmark two strategies for building a SimOTA-style matching matrix
    (set 1 at the smallest-k cost entries of each ground-truth row).

    Strategy 1: per-row torch.topk, with all k values pulled once via tolist().
    Strategy 2: hybrid — when num_gt > 3, a single batched topk at
        k = max(dynamic_ks) followed by a validity mask and one flat
        index_fill_; otherwise the per-row loop from Strategy 1.
    Prints the average per-sample time of both strategies in nanoseconds.
    """
    # Synthetic workload sizes drawn from statistics measured on real
    # assignments:
    # num_gts: min 1, max 99, mean 13.024460138875936, std 10.216212645089183
    # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
    # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
    num_gts = torch.normal(13.024460138875936, 10.216212645089183, (count,)).round_().int().clamp_(1, 99)
    fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count,)).round_().int().clamp_(41, 8400)
    cases = []
    for num_gt, fg_count in zip(num_gts, fg_counts):
        dks = torch.normal(4.281798025772753, 2.1530076669472873, (num_gt,), device=device).round_().int().clamp_(1, 9)
        cost = torch.empty((num_gt, fg_count), dtype=torch.float, device=device).uniform_(0, 1)
        match = torch.zeros(cost.shape, dtype=torch.uint8, device=cost.device)  # [num_gt, fg_count]
        cases.append((dks, cost, match, num_gt))

    t0 = time.time_ns()
    # Strategy 1: one topk call per GT row.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            k_vals = dks.tolist()
            for row in range(num_gt):
                # Indices of the k smallest costs for this GT row (positive anchors).
                _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                match[row][pos] = 1
    t1 = time.time_ns()
    # Strategy 2: batched topk + mask for larger num_gt, per-row fallback otherwise.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            if num_gt > 3:
                kmax = dks.max()
                # Row start offsets into the flattened matching matrix.
                offsets = torch.arange(0, match.shape[0] * match.shape[1], step=match.shape[1], dtype=dks.dtype, device=dks.device)[:, None]
                # keep[i, j] is True only for the first dynamic_ks[i] columns of row i.
                keep = (torch.arange(0, kmax, dtype=dks.dtype, device=dks.device)[None, :].expand(num_gt, kmax) < dks[:, None])
                _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
                top_idx.add_(offsets)
                match.view(-1).index_fill_(0, torch.masked_select(top_idx, keep), 1)
            else:
                k_vals = dks.tolist()
                for row in range(num_gt):
                    _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                    match[row][pos] = 1
    t2 = time.time_ns()

    total_count = count * loop_count
    print('AvgTime1 {}ns, AvgTime2 {}ns'.format((t1 - t0) / total_count, (t2 - t1) / total_count))


@torch.no_grad()
def topk_test1(count=2000, loop_count=5, device='cuda'):
    """Benchmark three ways of extracting the smallest-k cost indices per
    ground-truth row:

    1. per-row topk, fetching each k with Tensor.item();
    2. per-row topk, pulling all k values once with tolist();
    3. one batched topk per matrix at k = max(dynamic_ks).

    Prints the average per-sample time of each variant in nanoseconds.
    """
    # Workload statistics measured on real assignments:
    # num_gts: min 1, max 99, mean 13.024460138875936, std 10.216212645089183
    # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
    # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
    num_gts = torch.normal(13.024460138875936, 10.216212645089183, (count,)).round_().int().clamp_(1, 99)
    fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count,)).round_().int().clamp_(41, 8400)
    cases = []
    matrices = []
    for num_gt, fg_count in zip(num_gts, fg_counts):
        dks = torch.normal(4.281798025772753, 2.1530076669472873, (num_gt,), device=device).round_().int().clamp_(1, 9)
        cost = torch.empty((num_gt, fg_count), dtype=torch.float, device=device).uniform_(0, 1)
        # Allocated to mirror the real workload's memory footprint; the
        # timed loops below do not read it.
        matrices.append(torch.zeros(cost.shape, dtype=torch.uint8, device=cost.device))  # [num_gt, fg_count]
        cases.append((dks, cost, num_gt))

    stamps = [time.time_ns()]
    for _ in range(loop_count):
        for dks, cost, num_gt in cases:
            for row in range(num_gt):
                # Indices of the k smallest costs for this GT row (positive anchors).
                _, pos = torch.topk(cost[row], k=dks[row].item(), largest=False)
    stamps.append(time.time_ns())
    for _ in range(loop_count):
        for dks, cost, num_gt in cases:
            k_vals = dks.tolist()
            for row in range(num_gt):
                _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
    stamps.append(time.time_ns())
    for _ in range(loop_count):
        for dks, cost, num_gt in cases:
            kmax = dks.max()
            _, pos = torch.topk(cost, k=kmax, dim=1, largest=False)
    stamps.append(time.time_ns())

    total_count = count * loop_count
    d1, d2, d3 = stamps[1] - stamps[0], stamps[2] - stamps[1], stamps[3] - stamps[2]
    print('AvgTime1 {}ns, AvgTime2 {}ns, AvgTime3 {}ns'.format(d1 / total_count, d2 / total_count, d3 / total_count))


@torch.no_grad()
def topk_test2(count=2000, loop_count=5, device='cuda'):
    """Benchmark four ways of filling a matching matrix with 1s at the
    smallest-k cost entries of each ground-truth row:

    1. per-row topk, k fetched with Tensor.item();
    2. per-row topk, all k values pulled once with tolist();
    3. one batched topk per matrix, rows trimmed in Python and
       concatenated into flat indices for a single index_fill_;
    4. one batched topk per matrix, rows trimmed with a boolean mask and
       masked_select before index_fill_.

    Prints the average per-sample time of each variant in nanoseconds.
    """
    # Workload statistics measured on real assignments:
    # num_gts: min 1, max 99, mean 13.024460138875936, std 10.216212645089183
    # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
    # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
    num_gts = torch.normal(13.024460138875936, 10.216212645089183, (count,)).round_().int().clamp_(1, 99)
    fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count,)).round_().int().clamp_(41, 8400)
    cases = []
    for num_gt, fg_count in zip(num_gts, fg_counts):
        dks = torch.normal(4.281798025772753, 2.1530076669472873, (num_gt,), device=device).round_().int().clamp_(1, 9)
        cost = torch.empty((num_gt, fg_count), dtype=torch.float, device=device).uniform_(0, 1)
        match = torch.zeros(cost.shape, dtype=torch.uint8, device=cost.device)  # [num_gt, fg_count]
        cases.append((dks, cost, match, num_gt))

    stamps = [time.time_ns()]
    # Variant 1: per-row topk, per-row .item().
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            for row in range(num_gt):
                _, pos = torch.topk(cost[row], k=dks[row].item(), largest=False)
                match[row][pos] = 1
    stamps.append(time.time_ns())
    # Variant 2: per-row topk, one tolist() per matrix.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            k_vals = dks.tolist()
            for row in range(num_gt):
                _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                match[row][pos] = 1
    stamps.append(time.time_ns())
    # Variant 3: batched topk, Python-side row trimming + cat.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            kmax = dks.max()
            _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
            row_len = match.shape[1]
            base = 0
            chunks = []
            for row, k in enumerate(dks.tolist()):
                chunk = top_idx[row, :k]
                chunk += base  # shift row-local indices into the flattened matrix
                base += row_len
                chunks.append(chunk)
            match.view(-1).index_fill_(0, torch.cat(chunks, dim=0), 1)
    stamps.append(time.time_ns())
    # Variant 4: batched topk, mask + masked_select.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            kmax = dks.max()
            offsets = torch.arange(0, match.shape[0] * match.shape[1], step=match.shape[1], dtype=dks.dtype, device=dks.device)[:, None]
            keep = (torch.arange(0, kmax, dtype=dks.dtype, device=dks.device)[None, :].expand(num_gt, kmax) < dks[:, None])
            _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
            top_idx.add_(offsets)
            match.view(-1).index_fill_(0, torch.masked_select(top_idx, keep), 1)
    stamps.append(time.time_ns())

    total_count = count * loop_count
    avg = [(stamps[i + 1] - stamps[i]) / total_count for i in range(4)]
    print('AvgTime1 {}ns, AvgTime2 {}ns, AvgTime3 {}ns, AvgTime4 {}ns'.format(avg[0], avg[1], avg[2], avg[3]))


@torch.no_grad()
def topk_test3(count=2000, loop_count=5, device='cuda'):
    """Sweep num_gt from 1 to 13 and, for each value, benchmark the
    per-row topk fill against the batched masked fill, to locate the
    crossover point between the two strategies.

    Prints one 'AvgTime1/AvgTime2' line per num_gt value.
    """
    def bench(num_gt, count):
        # Workload statistics measured on real assignments:
        # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
        # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
        fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count,)).round_().int().clamp_(41, 8400)
        cases = []
        for fg_count in fg_counts:
            dks = torch.normal(4.281798025772753, 2.1530076669472873, (num_gt,), device=device).round_().int().clamp_(1, 9)
            cost = torch.empty((num_gt, fg_count), dtype=torch.float, device=device).uniform_(0, 1)
            match = torch.zeros(cost.shape, dtype=torch.uint8, device=cost.device)  # [num_gt, fg_count]
            cases.append((dks, cost, match))

        t0 = time.time_ns()
        # Per-row topk fill.
        for _ in range(loop_count):
            for dks, cost, match in cases:
                k_vals = dks.tolist()
                for row in range(num_gt):
                    _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                    match[row][pos] = 1
        t1 = time.time_ns()
        # Batched topk + mask fill.
        for _ in range(loop_count):
            for dks, cost, match in cases:
                kmax = dks.max().item()
                offsets = torch.arange(0, match.shape[0] * match.shape[1], step=match.shape[1], dtype=dks.dtype, device=dks.device)[:, None]
                keep = (torch.arange(0, kmax, dtype=dks.dtype, device=dks.device)[None, :].expand(num_gt, kmax) < dks[:, None])
                _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
                top_idx.add_(offsets)
                match.view(-1).index_fill_(0, torch.masked_select(top_idx, keep), 1)
        t2 = time.time_ns()

        total_count = count * loop_count
        print('AvgTime1 {}ns, AvgTime2 {}ns'.format((t1 - t0) / total_count, (t2 - t1) / total_count))

    for num_gt in range(1, 14):
        print('num_gt:', num_gt)
        bench(num_gt, count)
        torch.cuda.empty_cache()


@torch.no_grad()
def topk_test4(count=2000, loop_count=5, device='cuda'):
    """Seeded benchmark of five strategies for filling a matching matrix
    with 1s at the smallest-k cost entries of each ground-truth row:

    1. per-row topk, k via Tensor.item();
    2. per-row topk, k values via tolist();
    3. batched topk, rows trimmed in Python and concatenated for index_fill_;
    4. batched topk, rows trimmed with a mask + masked_select (offsets
       built as torch.int);
    5. hybrid — variant 4 when num_gt > 3, variant 2 otherwise.

    Seeds both random and torch RNGs for reproducible workloads, then
    prints the average per-sample time of each variant in nanoseconds.
    """
    seed = 12345678
    random.seed(seed)
    torch.manual_seed(seed)
    # Workload statistics measured on real assignments:
    # num_gts: min 1, max 99, mean 13.024460138875936, std 10.216212645089183
    # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
    # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
    num_gts = torch.normal(13.024460138875936, 10.216212645089183, (count,)).round_().int().clamp_(1, 99)
    fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count,)).round_().int().clamp_(41, 8400)
    cases = []
    for num_gt, fg_count in zip(num_gts, fg_counts):
        dks = torch.normal(4.281798025772753, 2.1530076669472873, (num_gt,), device=device).round_().int().clamp_(1, 9)
        cost = torch.empty((num_gt, fg_count), dtype=torch.float, device=device).uniform_(0, 1)
        match = torch.zeros(cost.shape, dtype=torch.uint8, device=cost.device)  # [num_gt, fg_count]
        cases.append((dks, cost, match, num_gt))

    stamps = [time.time_ns()]
    # Variant 1: per-row topk, per-row .item().
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            for row in range(num_gt):
                _, pos = torch.topk(cost[row], k=dks[row].item(), largest=False)
                match[row][pos] = 1
    stamps.append(time.time_ns())
    # Variant 2: per-row topk, one tolist() per matrix.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            k_vals = dks.tolist()
            for row in range(num_gt):
                _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                match[row][pos] = 1
    stamps.append(time.time_ns())
    # Variant 3: batched topk, Python-side row trimming + cat.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            kmax = dks.max().item()
            _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
            row_len = match.shape[1]
            base = 0
            chunks = []
            for row, k in enumerate(dks.tolist()):
                chunk = top_idx[row, :k]
                chunk += base  # shift row-local indices into the flattened matrix
                base += row_len
                chunks.append(chunk)
            match.view(-1).index_fill_(0, torch.cat(chunks, dim=0), 1)
    stamps.append(time.time_ns())
    # Variant 4: batched topk, mask + masked_select; offsets as torch.int.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            kmax = dks.max().item()
            offsets = torch.arange(0, match.shape[0] * match.shape[1], step=match.shape[1], dtype=torch.int, device=dks.device)[:, None]
            keep = (torch.arange(0, kmax, dtype=dks.dtype, device=dks.device)[None, :].expand(num_gt, kmax) < dks[:, None])
            _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
            top_idx.add_(offsets)
            match.view(-1).index_fill_(0, torch.masked_select(top_idx, keep), 1)
    stamps.append(time.time_ns())
    # Variant 5: hybrid, switching on num_gt.
    for _ in range(loop_count):
        for dks, cost, match, num_gt in cases:
            if num_gt > 3:
                kmax = dks.max().item()
                offsets = torch.arange(0, match.shape[0] * match.shape[1], step=match.shape[1], dtype=dks.dtype, device=dks.device)[:, None]
                keep = (torch.arange(0, kmax, dtype=dks.dtype, device=dks.device)[None, :].expand(num_gt, kmax) < dks[:, None])
                _, top_idx = torch.topk(cost, k=kmax, dim=1, largest=False)
                top_idx.add_(offsets)
                match.view(-1).index_fill_(0, torch.masked_select(top_idx, keep), 1)
            else:
                k_vals = dks.tolist()
                for row in range(num_gt):
                    _, pos = torch.topk(cost[row], k=k_vals[row], largest=False)
                    match[row][pos] = 1
    stamps.append(time.time_ns())

    total_count = count * loop_count
    avg = [(stamps[i + 1] - stamps[i]) / total_count for i in range(5)]
    print('AvgTime1 {}ns, AvgTime2 {}ns, AvgTime3 {}ns, AvgTime4 {}ns, AvgTime5 {}ns'.format(avg[0], avg[1], avg[2], avg[3], avg[4]))


def select_test1(count=2000, loop_count=1, dim=(13, 3235), fg_count=13, device='cuda'):
    """Benchmark advanced indexing (`t[:, idx]`) vs Tensor.index_select
    along dim 1.

    Args:
        count: number of (matrix, index) pairs to prepare.
        loop_count: repetitions of each op per pair.
        dim: shape of each random matrix.
        fg_count: number of column indices selected per call.
        device: torch device for all tensors.

    Prints the speed ratio of the two approaches followed by the average
    per-call time of each, in nanoseconds.
    """
    pair_wise_ious_list = []
    fg_mask_inboxes_inds_list = []
    for _ in range(count):
        pair_wise_ious_list.append(torch.randn(dim, device=device))
        fg_mask_inboxes_inds_list.append(torch.randint(0, dim[1], (fg_count, ), device=device))

    time1 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pred_ious_this_matching = pair_wise_ious[:, fg_mask_inboxes_inds]
    time2 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pred_ious_this_matching = pair_wise_ious.index_select(1, fg_mask_inboxes_inds)
    time3 = time.time_ns()
    total_count = count * loop_count
    d1, d2 = time2 - time1, time3 - time2
    # Guard against a zero-length second window (time_ns can return the same
    # stamp twice on coarse clocks / tiny workloads), which previously raised
    # ZeroDivisionError.
    ratio = d1 / d2 if d2 else float('inf')
    print('{} {} {}'.format(ratio, d1 / total_count, d2 / total_count))


def select_test2(count=2000, loop_count=1, dim=(3235, ), fg_count=13, device='cuda'):
    """Benchmark advanced indexing (`t[idx]`) vs Tensor.index_select along
    dim 0 of a 1-D tensor.

    Args:
        count: number of (vector, index) pairs to prepare.
        loop_count: repetitions of each op per pair.
        dim: shape of each ones vector.
        fg_count: number of indices selected per call.
        device: torch device for all tensors.

    Prints the speed ratio of the two approaches followed by the average
    per-call time of each, in nanoseconds.
    """
    pair_wise_ious_list = []
    fg_mask_inboxes_inds_list = []
    for _ in range(count):
        pair_wise_ious_list.append(torch.ones(dim, device=device))
        fg_mask_inboxes_inds_list.append(torch.randint(0, dim[0], (fg_count, ), device=device))

    time1 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pred_ious_this_matching = pair_wise_ious[fg_mask_inboxes_inds]
    time2 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pred_ious_this_matching = pair_wise_ious.index_select(0, fg_mask_inboxes_inds)
    time3 = time.time_ns()
    total_count = count * loop_count
    d1, d2 = time2 - time1, time3 - time2
    # Guard against a zero-length second window (time_ns can return the same
    # stamp twice on coarse clocks / tiny workloads), which previously raised
    # ZeroDivisionError.
    ratio = d1 / d2 if d2 else float('inf')
    print('{} {} {}'.format(ratio, d1 / total_count, d2 / total_count))


def select_fill_test(count=2000, loop_count=1, dim=(13, 3235), fg_count=2, device='cuda'):
    """Benchmark column assignment (`t[:, idx] = 0`) vs Tensor.index_fill_
    along dim 1.

    Args:
        count: number of (matrix, index) pairs to prepare.
        loop_count: repetitions of each op per pair.
        dim: shape of each ones matrix.
        fg_count: number of column indices filled per call.
        device: torch device for all tensors.

    Prints the speed ratio of the two approaches followed by the average
    per-call time of each, in nanoseconds.
    """
    pair_wise_ious_list = []
    fg_mask_inboxes_inds_list = []
    for _ in range(count):
        pair_wise_ious_list.append(torch.ones(dim, device=device))
        fg_mask_inboxes_inds_list.append(torch.randint(0, dim[1], (fg_count, ), device=device))

    time1 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pair_wise_ious[:, fg_mask_inboxes_inds] = 0
    time2 = time.time_ns()
    for pair_wise_ious, fg_mask_inboxes_inds in zip(pair_wise_ious_list, fg_mask_inboxes_inds_list):
        for _ in range(loop_count):
            pair_wise_ious.index_fill_(1, fg_mask_inboxes_inds, 0)
    time3 = time.time_ns()
    total_count = count * loop_count
    d1, d2 = time2 - time1, time3 - time2
    # Guard against a zero-length second window (time_ns can return the same
    # stamp twice on coarse clocks / tiny workloads), which previously raised
    # ZeroDivisionError.
    ratio = d1 / d2 if d2 else float('inf')
    print('{} {} {}'.format(ratio, d1 / total_count, d2 / total_count))


def gather_test(count=2000, loop_count=1, dim=(13, 13), device='cuda'):
    """Benchmark fancy indexing (`t[rows, arange]`) vs Tensor.gather along
    dim 0 for picking one element per column.

    Args:
        count: number of (matrix, index) pairs to prepare.
        loop_count: repetitions of each op per pair.
        dim: shape of each random matrix.
        device: torch device for all tensors.

    Prints the speed ratio of the two approaches followed by the average
    per-call time of each, in nanoseconds.
    """
    pair_wise_ious_list = []
    matched_gt_inds_list = []
    for _ in range(count):
        pair_wise_ious_list.append(torch.randn(dim, device=device))
        matched_gt_inds_list.append(torch.randint(0, dim[0], (dim[1], ), device=device))

    time1 = time.time_ns()
    for pair_wise_ious, matched_gt_inds in zip(pair_wise_ious_list, matched_gt_inds_list):
        for _ in range(loop_count):
            # arange built inside the timed region on purpose: it is part of
            # the cost of this indexing approach.
            pred_ious_this_matching = pair_wise_ious[matched_gt_inds, torch.arange(0, matched_gt_inds.shape[0], device=device)]
    time2 = time.time_ns()
    for pair_wise_ious, matched_gt_inds in zip(pair_wise_ious_list, matched_gt_inds_list):
        for _ in range(loop_count):
            pred_ious_this_matching = pair_wise_ious.gather(dim=0, index=matched_gt_inds[None, :])
    time3 = time.time_ns()
    total_count = count * loop_count
    d1, d2 = time2 - time1, time3 - time2
    # Guard against a zero-length second window (time_ns can return the same
    # stamp twice on coarse clocks / tiny workloads), which previously raised
    # ZeroDivisionError.
    ratio = d1 / d2 if d2 else float('inf')
    print('{} {} {}'.format(ratio, d1 / total_count, d2 / total_count))


def IOU(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
    """Return the IoU of two axis-aligned boxes given as corner coordinates."""
    area_a = abs((ax2 - ax1) * (ay2 - ay1))
    area_b = abs((bx2 - bx1) * (by2 - by1))
    # Intersection rectangle: max of the top-left corners, min of the bottom-right.
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    iw = ix2 - ix1
    ih = iy2 - iy1
    if iw < 0 or ih < 0:
        # No overlap along at least one axis.
        return 0.0
    inter = iw * ih
    return 1.0 * inter / (area_a + area_b - inter)


def bboxes_iou1(bboxes_a, bboxes_b):
    """Element-wise IoU of two equally long tensors of [x1, y1, x2, y2] boxes.

    Converts both tensors to Python lists and delegates each pair to IOU();
    returns a plain list of floats.
    """
    return [
        IOU(*box_a, *box_b)
        for box_a, box_b in zip(bboxes_a.tolist(), bboxes_b.tolist())
    ]


def rotate_test():
    """Visually verify bbox recomputation after an affine image rotation.

    Rotates an image (and its boxes) by 5 and 10 degrees around the origin,
    draws the recomputed boxes against hand-measured ground-truth boxes,
    prints their IoUs, and overlays everything with OpenCV windows.
    Requires './00347.jpg' on disk and a GUI-capable OpenCV build.
    """
    import numpy as np
    import cv2

    def draw_bboxes(image, bboxes, color=(0, 0, 255), thickness=2):
        # Draw each [x1, y1, x2, y2] box as a rectangle, in place.
        for bbox in bboxes:
            cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color=color, thickness=thickness)

    def draw_ious(image, ious, bboxes, color=(0, 0, 255), thickness=2):
        # Annotate each box's top-left corner with its IoU value.
        for iou, bbox in zip(ious, bboxes):
            text = 'IOU: {:.03f}'.format(iou)
            cv2.putText(image, text, (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_PLAIN, thickness, color, thickness=thickness)

    def rotation(img, targets, degree=5):
        # Rotation and Scale: rotate around the origin, no scaling.
        M = np.eye(3)
        M[:2] = cv2.getRotationMatrix2D(angle=degree, center=(0, 0), scale=1.0)

        height, width = img.shape[:2]
        new_img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

        # Transform label coordinates
        n = len(targets)
        # warp all four corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
            n * 4, 2
        )  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        xy = xy[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the rotated corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # clip boxes to the image bounds
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        new_targets = xy
        return new_img, new_targets

    def bboxes_iou(bboxes_a, bboxes_b):
        # Vectorized element-wise IoU of two equally shaped [N, 4] corner arrays.
        tl = np.maximum(bboxes_a[:, :2], bboxes_b[:, :2])
        br = np.minimum(bboxes_a[:, 2:], bboxes_b[:, 2:])
        area_a = np.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)
        area_b = np.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)
        hw = (br - tl)  # [rows, 2]
        hw = np.maximum(hw, 0)  # clamp: disjoint boxes get 0 intersection
        area_i = np.prod(hw, 1)
        ious = area_i / (area_a + area_b - area_i)
        return ious

    def test():
        img = cv2.imread('./00347.jpg')
        # BUGFIX: np.float was removed in NumPy 1.24 (AttributeError on modern
        # NumPy); np.float64 is the equivalent concrete dtype.
        targets = np.array([
            [252.0, 304.0, 526.0, 1025.0],
            [660.0, 362.0, 976.0, 1063.0],
        ], dtype=np.float64)

        new_img5, new_targets5 = rotation(img, targets, degree=5)
        new_img10, new_targets10 = rotation(img, targets, degree=10)

        height, width = img.shape[:2]
        draw_bboxes(img, targets, color=(0, 0, 255))
        draw_bboxes(new_img5, new_targets5, color=(255, 0, 0))
        draw_bboxes(new_img10, new_targets10, color=(255, 0, 0))

        # ground truth (hand-measured after rotation)
        gt_targets5 = np.array([
            [303.0, 270.0, 572.0, 982.0],
            [740.0, 284.0, 1010.0, 993.0],
        ], dtype=np.float64)
        gt_targets10 = np.array([
            [341.0, 233.0, 623.0, 934.0],
            [812.0, 207.0, 1040.0, 920.0],
        ], dtype=np.float64)

        draw_bboxes(new_img5, gt_targets5, color=(0, 0, 255))
        draw_bboxes(new_img10, gt_targets10, color=(0, 0, 255))

        ious5 = bboxes_iou(new_targets5, gt_targets5).tolist()
        ious10 = bboxes_iou(new_targets10, gt_targets10).tolist()
        print('ious5:', ious5)
        print('ious10:', ious10)

        draw_ious(new_img5, ious5, new_targets5, color=(0, 255, 0))
        draw_ious(new_img10, ious10, new_targets10, color=(0, 255, 0))

        img = cv2.resize(img, (width//2, height//2))
        new_img5 = cv2.resize(new_img5, (width//2, height//2))
        new_img10 = cv2.resize(new_img10, (width//2, height//2))
        cv2.imshow('img', img)
        cv2.imshow('gt_new_img5', new_img5)
        cv2.imshow('gt_new_img10', new_img10)
        cv2.waitKey()

    test()


def view_test():
    """Probe how permute() affects contiguity and what view() returns afterwards.

    Runs three shapes through the same permute(0, 2, 1, 3) + view() pattern
    and prints the contiguity of both results each time.
    """
    base = torch.randn((1, 2, 3, 4), dtype=torch.float)
    swapped = base.permute(0, 2, 1, 3)
    reshaped = swapped.view(1, 3, 2, 4)
    print(swapped.is_contiguous(), reshaped.is_contiguous())

    # Size-1 dims: permuting them around keeps the tensor contiguous.
    base = torch.randn((1, 2, 1, 4), dtype=torch.float)
    swapped = base.permute(0, 2, 1, 3)
    reshaped = swapped.view(1, 2, 1, 4)
    print(swapped.is_contiguous(), reshaped.is_contiguous())

    base = torch.randn((1, 2, 2, 4), dtype=torch.float)
    swapped = base.permute(0, 2, 1, 3)
    reshaped = swapped.view(1, 2, 2, 4)
    # Writing through the permuted tensor; contiguity is unaffected by writes.
    for i in (0, 1):
        for j in (0, 1):
            swapped[0, i, j, 0] = 1
    print(swapped.is_contiguous(), reshaped.is_contiguous())


def os_test():
    """Print the current process id and the calling thread's identifier."""
    import threading
    print('pid {}, tid {}'.format(os.getpid(), threading.get_ident()))


def grid_test(dtype=None):
    """Check that two formulas for grid-cell-center offsets are equivalent.

    For each YOLOX feature-map stride s, builds the cell grid and compares
    ``grid * s + s / 2`` against ``(grid + 0.5) * s`` (algebraically equal),
    printing the summed difference — expected 0 up to floating-point error.

    Args:
        dtype: tensor type passed to ``Tensor.type``; when None (the default,
            preserving the original behavior) ``torch.cuda.HalfTensor`` is
            used. Override (e.g. ``torch.FloatTensor``) to run without CUDA.
    """
    if dtype is None:
        dtype = torch.cuda.HalfTensor

    def cal_xy_shifts(wsize, hsize, stride):
        yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
        # NOTE (original author): a global flag tracking resolution changes
        # would allow caching this grid and skipping the view below.
        grid = torch.stack((xv, yv), 2).view(1, -1, 2).type(dtype).contiguous()  # [1, 1*hsize*wsize, 2]
        xy_shift = grid * stride + (stride / 2)  # cell centers scaled by stride, for uniform xy_shifts usage
        xy_shift1 = (grid + 0.5) * stride  # same quantity, alternative form
        s0 = (xy_shift1 - xy_shift).sum()
        print(wsize, hsize, stride, '-', s0.item())

    cal_xy_shifts(640 // 8, 640 // 8, 8)
    cal_xy_shifts(640 // 16, 640 // 16, 16)
    cal_xy_shifts(640 // 32, 640 // 32, 32)


def tensor_test():
    """Column-sum an all-ones bool mask and an all-ones uint8 mask."""
    bool_mask = torch.ones((30, 400), dtype=torch.bool)
    byte_mask = torch.ones((30, 400), dtype=torch.uint8)
    bool_counts = bool_mask.sum(0)
    byte_counts = byte_mask.sum(0)
    print()


@torch.no_grad()
def matching_matrix_test(count=100, loop_count=5, device='cuda'):
    """Benchmark ``sum(0) > 0`` vs ``any(dim=0)`` on matching matrices.

    Builds `count` random [num_gt, fg_count] matching matrices (one uint8 and
    one bool copy of each), then times four ways of reducing them to a
    per-anchor "has any match" mask. Prints the average per-call time of each
    variant in nanoseconds.

    Args:
        count: number of random matrices to generate.
        loop_count: repetitions of each timing loop.
        device: torch device the tensors live on.
    """
    seed = 12345678
    random.seed(seed)
    torch.manual_seed(seed)
    # Statistics observed on real assignment data:
    # num_gts: min 1, max 99, mean 13.024460138875936, std 10.216212645089183
    # fg_counts: min 41, max 8400, mean 3235.785929273318, std 1594.3575315506896
    # dynamic_ks: min 1, max 9, mean 4.281798025772753, std 2.1530076669472873
    num_gts = torch.normal(13.024460138875936, 10.216212645089183, (count, )).round_().int().clamp_(1, 99).tolist()
    fg_counts = torch.normal(3235.785929273318, 1594.3575315506896, (count, )).round_().int().clamp_(41, 8400).tolist()
    matching_matrix_list = []
    bool_matching_matrix_list = []
    for num_gt, fg_count in zip(num_gts, fg_counts):
        matching_matrix = torch.zeros((num_gt, fg_count), dtype=torch.uint8, device=device)  # [num_gt, fg_count]
        bool_matching_matrix = torch.zeros((num_gt, fg_count), dtype=torch.bool, device=device)  # [num_gt, fg_count]
        # BUGFIX: this local was previously named `count`, shadowing the
        # parameter; `total_count` below then used a leftover 1-9 value,
        # corrupting every printed average.
        num_picks = min(random.randint(1, 9), fg_count)
        indexes = torch.randint(0, fg_count, (num_gt, num_picks), device=device)
        matching_matrix.scatter_(1, indexes, 1)
        matching_matrix_list.append(matching_matrix)
        bool_matching_matrix.scatter_(1, indexes, 1)
        bool_matching_matrix_list.append(bool_matching_matrix)

    time1 = time.time_ns()
    for _ in range(loop_count):
        for matching_matrix in bool_matching_matrix_list:
            mask = matching_matrix.sum(0) > 0
    time2 = time.time_ns()
    for _ in range(loop_count):
        for matching_matrix in bool_matching_matrix_list:
            mask = matching_matrix.any(dim=0)
    time3 = time.time_ns()
    for _ in range(loop_count):
        for matching_matrix in matching_matrix_list:
            mask = matching_matrix.sum(0) > 0
    time4 = time.time_ns()
    for _ in range(loop_count):
        for matching_matrix in matching_matrix_list:
            mask = matching_matrix.any(dim=0)
    time5 = time.time_ns()

    total_count = count * loop_count
    print('AvgTime1 {}ns, AvgTime2 {}ns, AvgTime3 {}ns, AvgTime4 {}ns'.format((time2-time1)/total_count, (time3-time2)/total_count, (time4-time3)/total_count, (time5-time4)/total_count))


def index_test():
    """Explore which torch indexing forms return copies vs. views.

    Writes into the results of several indexing styles and finally prints the
    contiguity of a tensor-index result (a copy) and a strided-slice result
    (a view).
    """
    torch.manual_seed(123)
    cost1 = torch.zeros((4, 5), dtype=torch.int)
    cost2 = torch.zeros((4, 5), dtype=torch.int)
    col_inds = torch.tensor([1, 3], dtype=torch.long)
    col_mask = torch.tensor([False, False, True, False, True], dtype=torch.bool)

    # Integer-tensor indexing and index_select both produce copies: writes
    # into the results leave cost1/cost2 untouched.
    sel1 = cost1[:, col_inds]
    sel2 = cost2.index_select(1, col_inds)
    sel1[0, 0] = 1
    sel1[1, 1] = 1
    sel2[0, 0] = 2
    sel2[1, 1] = 2

    # Boolean-mask indexing copies as well.
    sel3 = cost1[:, col_mask]
    sel3[0, 0] = 3
    sel3[1, 1] = 3

    cost3 = torch.randn((4, 5, 6), dtype=torch.float)
    # Integer-list indexing on any dimension yields a copy.
    pick_dim2 = cost3[:, :, [0, 2]]
    pick_dim2[0, 0, 0] = 1
    pick_dim1 = cost3[:, [0, 2], :]
    pick_dim1[0, 0, 0] = 1
    pick_dim0 = cost3[[0, 2], :, :]
    pick_dim0[0, 0, 0] = 1
    pick_all = cost3[[0, 1, 2], :, :]
    pick_all[0, 0, 0] = 1
    idx = torch.tensor([0, 1], dtype=torch.long)
    by_tensor = cost3[idx, :, :]   # tensor index -> copy (contiguous)
    by_tensor[0, 0, 0] = 1
    by_slice = cost3[:, 0::2, :]   # strided slice -> view (non-contiguous)
    by_slice[0, 0, 0] = 1
    print(by_tensor.is_contiguous(), by_slice.is_contiguous())
    print()


def main():
    """Run the currently selected experiment.

    The other experiments in this file (gather_test, rotate_test, view_test,
    grid_test, matching_matrix_test, ...) are invoked by swapping the call
    below by hand.
    """
    index_test()


if __name__ == "__main__":
    main()
