import torch
from torchvision.ops import box_convert
import torch.nn.functional as F
from tqdm import tqdm
import json
import os
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import math
import torch.distributed as tdist



def collate_fn(batch):
    """Collate COCO-style samples into a batch.

    Each batch item is (image, (boxes_xywh, class_ids), img_id, (w, h)).
    Images are stacked into one tensor; each sample's xywh boxes are
    converted to cxcywh tensors. Samples with no objects keep an empty
    tensor as their box (box_convert cannot handle an empty input).

    Returns:
        (images, anno, img_ids, img_ori_size) where anno is a list of
        {'target_box': Tensor, 'label': Tensor} dicts, one per image.
    """
    images = torch.stack([sample[0] for sample in batch], dim=0)
    img_ids = [sample[2] for sample in batch]
    img_ori_size = [sample[3] for sample in batch]

    anno = []
    for sample in batch:
        boxes_xywh, cls_ids = sample[1][0], sample[1][1]
        if boxes_xywh:
            boxes = box_convert(torch.tensor(boxes_xywh), in_fmt='xywh', out_fmt='cxcywh')
        else:
            # no objects in this image: keep an empty tensor
            boxes = torch.tensor(boxes_xywh)
        anno.append({'target_box': boxes, 'label': torch.tensor(cls_ids)})

    return images, anno, img_ids, img_ori_size

def collate_fn_voc(batch):
    """Collate VOC-style samples into a batch.

    Each batch item is (image, (boxes_xywh, class_ids), img_id, (w, h)).
    Images are stacked into one tensor; each sample's xywh boxes are
    converted to cxcywh tensors.

    Returns:
        (images, anno, img_ids, img_ori_size) where anno is a list of
        {'target_box': Tensor, 'label': Tensor} dicts, one per image.
    """
    images = [item[0] for item in batch]
    images = torch.stack(images, dim = 0)
    labels = [item[1] for item in batch]
    img_ids = [item[2] for item in batch]
    img_ori_size = [item[3] for item in batch]
    anno = []
    for i in labels:
        # Guard against images with no objects: box_convert raises on an
        # empty tensor, so fall back to the empty tensor itself (this
        # mirrors the guard already present in collate_fn).
        box = box_convert(torch.tensor(i[0]), in_fmt='xywh', out_fmt='cxcywh') if i[0] else torch.tensor(i[0])
        label = torch.tensor(i[1])
        anno.append({'target_box':box, 'label':label})

    return images, anno, img_ids, img_ori_size



def deformable_attention_core_func(value, value_spatial_shapes, sampling_locations, attention_weights):
    """Core of multi-scale deformable attention: bilinearly sample each
    feature level at the given locations and combine with the weights.

    Args:
        value (Tensor): [bs, value_length, n_head, c]
        value_spatial_shapes (Tensor|List): [n_levels, 2]
        sampling_locations (Tensor): [bs, query_length, n_head, n_levels, n_points, 2],
            normalized to [0, 1]
        attention_weights (Tensor): [bs, query_length, n_head, n_levels, n_points]

    Returns:
        Tensor: [bs, query_length, n_head * c]
    """
    bs, _, n_head, c = value.shape
    _, len_q, _, n_levels, n_points, _ = sampling_locations.shape

    # Split the flattened value tensor back into its per-level chunks.
    level_sizes = [h * w for h, w in value_spatial_shapes]
    per_level_values = value.split(level_sizes, dim=1)

    # Map normalized [0, 1] locations to grid_sample's [-1, 1] coordinate range.
    grids = 2 * sampling_locations - 1

    sampled = []
    for lvl, (h, w) in enumerate(value_spatial_shapes):
        # (bs, h*w, n_head, c) -> (bs*n_head, c, h, w)
        feat = per_level_values[lvl].flatten(2).permute(0, 2, 1).reshape(bs * n_head, c, h, w)
        # (bs, len_q, n_head, n_points, 2) -> (bs*n_head, len_q, n_points, 2)
        grid = grids[:, :, :, lvl].permute(0, 2, 1, 3, 4).flatten(0, 1)
        # (bs*n_head, c, len_q, n_points)
        sampled.append(F.grid_sample(
            feat, grid, mode='bilinear', padding_mode='zeros', align_corners=False))

    # (bs, len_q, n_head, n_levels, n_points) -> (bs*n_head, 1, len_q, n_levels*n_points)
    weights = attention_weights.permute(0, 2, 1, 3, 4).reshape(
        bs * n_head, 1, len_q, n_levels * n_points)

    # Weighted sum over all levels and points, then fold heads back into channels.
    out = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    return out.reshape(bs, n_head * c, len_q).permute(0, 2, 1)


@torch.no_grad()  # parenthesized form works on all torch versions
def eval(val_dataloader, model, device, num_query, root, epoch,
         ann_file='/mnt/sdb2/ray/mmdetection-main/data/coco/annotations/instances_val2017.json'):
    """Run one COCO evaluation pass and print COCOeval bbox metrics.

    Dumps all predictions to `<root>/prediction<epoch>.json` (rank 0 only
    when running distributed) and evaluates them against the ground truth.

    Args:
        val_dataloader: yields (samples, targets, img_ids, img_ori_size) batches.
        model: detector whose output indexed at [5] holds the final decoder
            layer's {'pred_box', 'pred_logits'}.
        device: device the model runs on.
        num_query: number of queries (predictions kept per image).
        root: output directory for the prediction json (created if missing).
        epoch: current epoch, used for the progress bar and file name.
        ann_file: COCO ground-truth annotation json. Keyword-only in practice;
            defaults to the previously hard-coded path so existing callers
            are unaffected.
    """
    result = []
    for samples, targets, img_ids, img_ori_size in tqdm(val_dataloader, \
                                                        desc=f'\033[1;32mvalid epoch[{epoch}]\033[0m', ncols=100):

        samples = samples.to(device)
        dec_out = model(samples)
        final_dec_out = dec_out[5]  # final decoder layer's predictions
        pred_box = final_dec_out['pred_box']  # (bs, num_query, 4) cxcywh, normalized
        pred_box_xywh = box_convert(pred_box, in_fmt='cxcywh', out_fmt='xywh')
        pred_prob = torch.sigmoid(final_dec_out['pred_logits'])  # (bs, num_query, 80)
        max_values, indices = pred_prob.max(dim=2)  # best score and class per query
        for i in range(max_values.shape[0]):
            w, h = img_ori_size[i]
            # Scale normalized boxes back to the original image resolution.
            pred_box_xywh_ori = pred_box_xywh[i] * torch.tensor([w, h, w, h]).to(device)
            pred_box_list_round = [[round(value, 1) for value in row] for row in pred_box_xywh_ori.tolist()]
            pred_score_list_round = [round(value, 2) for value in max_values[i].tolist()]
            pred_cls_list = indices[i].tolist()
            img_id = img_ids[i]
            # label2coco maps the model's contiguous class index to the
            # sparse COCO category id expected by the annotation file.
            result.extend([{"image_id":img_id, "category_id":label2coco[pred_cls_list[j]], \
                            "bbox":pred_box_list_round[j], "score":pred_score_list_round[j]}
                            for j in range(num_query)])

    if tdist.is_initialized():
        result = merge_results_simple(result)

    if not tdist.is_initialized() or tdist.get_rank() == 0:
        os.makedirs(root, exist_ok=True)
        # os.path.join works whether or not `root` ends with a separator;
        # the old f'{root}prediction…' glued the names together otherwise.
        pred_path = os.path.join(root, f'prediction{epoch}.json')
        with open(pred_path, 'w') as json_file:
            json.dump(result, json_file)
        print(f'\033[1;38;5;31mprediction{epoch}.json completed!\033[0m \033[32m\033[1m\u2713\033[0m')
        coco_true = COCO(annotation_file=ann_file)
        coco_pre = coco_true.loadRes(resFile=result)
        cocoevaluator = COCOeval(cocoGt = coco_true, cocoDt = coco_pre, iouType = "bbox")
        cocoevaluator.evaluate()
        cocoevaluator.accumulate()
        cocoevaluator.summarize()
        print('')

    if tdist.is_initialized():
        tdist.barrier()  # keep all ranks in step after rank-0 evaluation


@torch.no_grad()  # parenthesized form works on all torch versions
def eval_voc(val_dataloader, model, device, num_query, root, epoch,
             ann_file='/mnt/sdb2/ray/rtdetr-implement/voctest_anno.json'):
    """Run one VOC evaluation pass (COCO-format annotations) and print metrics.

    Dumps all predictions to `<root>/prediction<epoch>.json` (rank 0 only
    when running distributed) and evaluates them with COCOeval.

    Args:
        val_dataloader: yields (samples, targets, img_ids, img_ori_size) batches.
        model: detector whose output indexed at [5] holds the final decoder
            layer's {'pred_box', 'pred_logits'}.
        device: device the model runs on.
        num_query: number of queries (predictions kept per image).
        root: output directory for the prediction json (created if missing).
        epoch: current epoch, used for the progress bar and file name.
        ann_file: ground-truth annotation json. Keyword with the previously
            hard-coded path as default, so existing callers are unaffected.
    """
    result = []
    for samples, targets, img_ids, img_ori_size in tqdm(val_dataloader, \
                                                        desc=f'\033[1;32mvalid epoch[{epoch}]\033[0m', ncols=100):

        samples = samples.to(device)
        dec_out = model(samples)
        final_dec_out = dec_out[5]  # final decoder layer's predictions
        pred_box = final_dec_out['pred_box']  # (bs, num_query, 4) cxcywh, normalized
        # NOTE(review): boxes are emitted as xyxy here while COCOeval normally
        # expects xywh — presumably the VOC annotation json uses xyxy; verify.
        pred_box_xyxy = box_convert(pred_box, in_fmt='cxcywh', out_fmt='xyxy')
        pred_prob = torch.sigmoid(final_dec_out['pred_logits'])  # (bs, num_query, 80)
        max_values, indices = pred_prob.max(dim=2)  # best score and class per query
        for i in range(max_values.shape[0]):
            w, h = img_ori_size[i]
            # Scale normalized boxes back to the original image resolution.
            pred_box_ori = pred_box_xyxy[i] * torch.tensor([w, h, w, h]).to(device)
            pred_box_list_round = [[round(value, 1) for value in row] for row in pred_box_ori.tolist()]
            pred_score_list_round = [round(value, 2) for value in max_values[i].tolist()]
            pred_cls_list = indices[i].tolist()
            img_id = img_ids[i]
            result.extend([{"image_id":img_id, "category_id":pred_cls_list[j], \
                            "bbox":pred_box_list_round[j], "score":pred_score_list_round[j]}
                            for j in range(num_query)])

    if tdist.is_initialized():
        result = merge_results_simple(result)

    if not tdist.is_initialized() or tdist.get_rank() == 0:
        os.makedirs(root, exist_ok=True)
        # os.path.join works whether or not `root` ends with a separator;
        # the old f'{root}prediction…' glued the names together otherwise.
        pred_path = os.path.join(root, f'prediction{epoch}.json')
        with open(pred_path, 'w') as json_file:
            json.dump(result, json_file)
        print(f'\033[1;38;5;31mprediction{epoch}.json completed!\033[0m \033[32m\033[1m\u2713\033[0m')
        coco_true = COCO(annotation_file=ann_file)
        coco_pre = coco_true.loadRes(resFile=result)
        cocoevaluator = COCOeval(cocoGt = coco_true, cocoDt = coco_pre, iouType = "bbox")
        cocoevaluator.evaluate()
        cocoevaluator.accumulate()
        cocoevaluator.summarize()
        print('')

    if tdist.is_initialized():
        tdist.barrier()  # keep all ranks in step after rank-0 evaluation



def merge_results_simple(local_results):
    """Gather every rank's result list with all_gather_object and flatten.

    Must only be called when torch.distributed is initialized.

    Args:
        local_results: this process's evaluation results (a list).

    Returns:
        One list containing the results from all processes, in rank order.
    """
    gathered = [None for _ in range(tdist.get_world_size())]
    tdist.all_gather_object(gathered, local_results)
    # Flatten the per-rank lists into a single result list.
    return [entry for per_rank in gathered for entry in per_rank]
            

# COCO category id -> human-readable class name for the 80 detection classes.
# Note the ids are NOT contiguous: COCO reserves gaps (12, 26, 29, 30, ...),
# so a model's contiguous 0..79 label must be remapped (see label2coco below).
mscoco_category2name = {
    1: 'person',
    2: 'bicycle',
    3: 'car',
    4: 'motorcycle',
    5: 'airplane',
    6: 'bus',
    7: 'train',
    8: 'truck',
    9: 'boat',
    10: 'traffic light',
    11: 'fire hydrant',
    13: 'stop sign',
    14: 'parking meter',
    15: 'bench',
    16: 'bird',
    17: 'cat',
    18: 'dog',
    19: 'horse',
    20: 'sheep',
    21: 'cow',
    22: 'elephant',
    23: 'bear',
    24: 'zebra',
    25: 'giraffe',
    27: 'backpack',
    28: 'umbrella',
    31: 'handbag',
    32: 'tie',
    33: 'suitcase',
    34: 'frisbee',
    35: 'skis',
    36: 'snowboard',
    37: 'sports ball',
    38: 'kite',
    39: 'baseball bat',
    40: 'baseball glove',
    41: 'skateboard',
    42: 'surfboard',
    43: 'tennis racket',
    44: 'bottle',
    46: 'wine glass',
    47: 'cup',
    48: 'fork',
    49: 'knife',
    50: 'spoon',
    51: 'bowl',
    52: 'banana',
    53: 'apple',
    54: 'sandwich',
    55: 'orange',
    56: 'broccoli',
    57: 'carrot',
    58: 'hot dog',
    59: 'pizza',
    60: 'donut',
    61: 'cake',
    62: 'chair',
    63: 'couch',
    64: 'potted plant',
    65: 'bed',
    67: 'dining table',
    70: 'toilet',
    72: 'tv',
    73: 'laptop',
    74: 'mouse',
    75: 'remote',
    76: 'keyboard',
    77: 'cell phone',
    78: 'microwave',
    79: 'oven',
    80: 'toaster',
    81: 'sink',
    82: 'refrigerator',
    84: 'book',
    85: 'clock',
    86: 'vase',
    87: 'scissors',
    88: 'teddy bear',
    89: 'hair drier',
    90: 'toothbrush',
    # 91: 'background',
}

# Contiguous model label (0..79) -> sparse COCO category id; used by eval()
# when writing prediction json entries.
label2coco = {i:k for i, k in enumerate(mscoco_category2name.keys())}

def bias_init_with_prob(prior_prob=0.01):
    """Return the bias value that makes a sigmoid output start at `prior_prob`.

    This is the logit of `prior_prob`, commonly used to initialize the bias
    of classification conv/fc layers.
    """
    return float(-math.log((1 - prior_prob) / prior_prob))


def inverse_sigmoid(x: torch.Tensor, eps: float=1e-5) -> torch.Tensor:
    """Inverse of torch.sigmoid (the logit), clamped for numerical stability.

    Inputs are first clamped to [0, 1]; both the numerator and denominator
    are floored at `eps` so neither log(0) nor division by zero can occur.
    """
    x = x.clamp(min=0., max=1.)
    numerator = x.clamp(min=eps)
    denominator = (1 - x).clamp(min=eps)
    return torch.log(numerator / denominator)

if __name__ == '__main__':
    # No standalone entry point: this module only provides helpers
    # (collate functions, attention core, evaluation utilities).
    pass