# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import random
import sys
from typing import Iterable
import tqdm
import torch
import logging
import os.path as osp
import util.misc as utils
# from dataset.coco_eval import CocoEvaluator
# from dataset.panoptic_eval import PanopticEvaluator
import pdb
from util import box_ops
from dataset.tad_eval import TADEvaluator


def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Train the sequence model for one epoch.

    Each target's segments and labels are serialized into a token sequence
    ``(start_bin, end_bin, class_token) * num_segments + EOS``, padded to the
    longest sequence in the batch, and the model is trained with plain
    cross-entropy over the token vocabulary.

    Args:
        model: sequence model, called as ``model(samples, token_seq)``.
        criterion: classification loss over the output vocabulary.
        data_loader: yields ``(samples, targets)`` batches.
        optimizer: optimizer stepped once per batch.
        device: device tensors are moved to.
        epoch: current epoch index (used for logging only).
        max_norm: if > 0, gradient-norm clipping threshold.

    Returns:
        Dict mapping metric name -> global average over the epoch.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    # Token vocabulary layout (size 2003):
    #   0..199   quantized coordinate bins
    #   1500..   class tokens (label + category_start)
    #   2000     EOS; 2002 padding (masked out of the loss)
    bins = 200
    end = 2000
    padding = 2002
    category_start = 1500
    vocab_size = 2003
    # NOTE(review): presumably one copy of the targets per supervised decoder
    # output — confirm against the model definition.
    num_supervised_outputs = 6

    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        # (start end label) per segment, plus a single EOS token.
        max_seq_length = max(len(target['segments']) for target in targets) * 3 + 1
        samples = samples.to(device)
        targets = [{k: v.to(device) if k in ['segments', 'labels'] else v
                    for k, v in t.items()} for t in targets]

        box_labels = []
        for target in targets:
            # Quantize normalized segment boundaries into integer bins.
            box = (target['segments'] * (bins - 1)).int()
            label = target['labels'].unsqueeze(-1) + category_start
            box_label = torch.cat([box, label], dim=-1)
            # Shuffle segment order so the model learns no canonical ordering.
            idx = torch.randperm(box_label.shape[0])
            box_label = box_label[idx]
            box_label = torch.cat([box_label.flatten(), torch.ones(1).to(box_label) * end])
            if max_seq_length > len(box_label):
                pad_seq = torch.ones(max_seq_length - len(box_label)).to(box_label) * padding
                box_label = torch.cat([box_label, pad_seq])
            box_labels.append(box_label.unsqueeze(0))
        box_labels = torch.cat(box_labels, dim=0).flatten(1)

        outputs = model(samples, box_labels)
        outputs = outputs.reshape(-1, vocab_size)
        # Repeat the targets once per supervised output, then exclude padding
        # tokens from the cross-entropy.
        box_labels = box_labels.unsqueeze(0).repeat(num_supervised_outputs, 1, 1).flatten()
        keep = box_labels != padding
        loss = criterion(outputs[keep], box_labels[keep])

        loss_dict = {'at': loss}
        weight_dict = {'at': 1}
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        # Reduce losses over all GPUs for logging purposes only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())

        loss_value = losses_reduced_scaled.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # Gather the stats from all processes.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


def train_one_epochs(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0, lr_scheduler: list = [0]):
    """Train one epoch with noise-augmented sequences (Pix2Seq-style).

    Unlike ``train_one_epoch``, every sample is padded with randomly generated
    "noise" boxes up to a fixed number of slots. The input sequence feeds both
    real and noise boxes to the model; the output sequence supervises real
    tokens normally, while each noise slot is labelled (n/a, noise, EOS) — the
    n/a token shares the padding id so it drops out of the loss.

    Args: same as ``train_one_epoch``, plus:
        lr_scheduler: per-epoch learning rates; param group 0 gets
            ``lr_scheduler[epoch]`` and group 1 one tenth of it.
            (Mutable default is safe here: the list is only read.)

    Returns:
        Dict mapping metric name -> global average over the epoch.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    optimizer.param_groups[0]['lr'] = lr_scheduler[epoch]
    optimizer.param_groups[1]['lr'] = lr_scheduler[epoch] * 0.1

    # Token vocabulary layout (size 303):
    #   0..199   quantized coordinate bins
    #   250..    class tokens (label + category_start)
    #   298 noise, 300 EOS, 301 BOS, 302 n/a-and-padding (masked out of loss)
    bins = 200
    start = 301
    end = 300
    category_start = 250
    no_known = 302  # n/a and padding share the same id to be excluded from the loss
    noise = 298

    for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
        max_box = max(len(target['segments']) for target in targets)
        samples = samples.to(device)
        targets = [{k: v.to(device) if k in ['segments', 'labels'] else v
                    for k, v in t.items()} for t in targets]
        # Every sample is padded with random boxes up to num_box total slots.
        num_box = max(max_box + 2, 100)

        input_seqs = []
        output_seqs = []
        for target in targets:
            # Quantize segment boundaries and offset labels into class tokens.
            box = (target['segments'] * (bins - 1)).int()
            label = target['labels'].unsqueeze(-1) + category_start
            box_label = torch.cat([box, label], dim=-1)
            # Shuffle so the model learns no canonical segment ordering.
            idx = torch.randperm(box_label.shape[0])
            box_label = box_label[idx]

            # Synthesize random (noise) boxes with random class labels for the
            # remaining slots. 21 = 20 THUMOS action classes + 1.
            random_box = torch.rand(num_box - box_label.shape[0], 2).to(target['segments'])
            random_box = (random_box * (bins - 1)).int()
            random_label = torch.randint(0, 21, (num_box - box_label.shape[0], 1)).to(label)
            random_label = random_label + category_start
            random_box_label = torch.cat([random_box, random_label], dim=-1)

            # Input sequence: BOS token, then real boxes followed by noise boxes.
            input_seq = torch.cat([box_label, random_box_label], dim=0)
            input_seq = torch.cat([torch.ones(1).to(box_label) * start, input_seq.flatten()])
            input_seqs.append(input_seq.unsqueeze(0))

            # Output sequence: real tokens, one EOS, then a (n/a, noise, EOS)
            # triple per noise slot; the n/a tokens are masked from the loss.
            output_na = torch.ones(num_box - box_label.shape[0], 1).to(input_seq) * no_known
            output_noise = torch.ones(num_box - box_label.shape[0], 1).to(input_seq) * noise
            output_end = torch.ones(num_box - box_label.shape[0], 1).to(input_seq) * end
            output_seq = torch.cat([output_na, output_noise, output_end], dim=-1)
            output_seq = torch.cat([box_label.flatten(), torch.ones(1).to(box_label) * end, output_seq.flatten()])
            output_seqs.append(output_seq.unsqueeze(0))

        input_seqs = torch.cat(input_seqs, dim=0)
        output_seqs = torch.cat(output_seqs, dim=0)
        box_labels = output_seqs.flatten()

        outputs = model(samples, input_seqs)
        # Supervise only the last decoder output; 303 is the vocabulary size.
        outputs = outputs[-1].reshape(-1, 303)
        # n/a-and-padding tokens do not participate in the loss.
        keep = box_labels != no_known
        loss = criterion(outputs[keep], box_labels[keep])

        loss_dict = {'at': loss}
        weight_dict = {'at': 1}
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        # Reduce losses over all GPUs for logging purposes only.
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())

        loss_value = losses_reduced_scaled.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)

        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()

        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # Gather the stats from all processes.
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}


@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
    """COCO-style evaluation loop: autoregressively decode token sequences
    into boxes/labels/scores and feed them to a COCO evaluator.

    NOTE(review): this function appears to be dead/broken leftover code from
    the original detection repo — the ``CocoEvaluator`` and
    ``PanopticEvaluator`` imports are commented out at the top of this file,
    so calling it raises NameError. The TAD codepath uses ``test`` below.
    """
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    header = 'Test:'

    iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
    # NOTE(review): CocoEvaluator is not imported (see commented import above)
    # — this line raises NameError if the function is ever called.
    coco_evaluator = CocoEvaluator(base_ds, iou_types)
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]

    panoptic_evaluator = None
    if 'panoptic' in postprocessors.keys():
        # NOTE(review): PanopticEvaluator is likewise not imported.
        panoptic_evaluator = PanopticEvaluator(
            data_loader.dataset.ann_file,
            data_loader.dataset.ann_folder,
            output_dir=os.path.join(output_dir, "panoptic_eval"),
        )
    for samples, targets in data_loader:
        # Only the first half of each batch is evaluated — presumably to save
        # memory during autoregressive decoding; confirm.
        batch = len(targets)
        targets = targets[: batch // 2]
        samples.mask = samples.mask[: batch // 2, :, :]
        samples.tensors = samples.tensors[: batch // 2, :, :, :]
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # 2001 is the BOS token of the 2003-token vocabulary used by
        # train_one_epoch; decoding starts from a single BOS per sample.
        seq = torch.ones(len(targets), 1).to(samples.mask) * 2001
        outputs = model(samples, seq)

        batch_index = 0
        results = []
        outputs, values = outputs
        for output in outputs:
            # Drop the BOS token; each detection is a 5-tuple
            # (x1 y1 x2 y2 class_token).
            output = output[1:].reshape(-1, 5)
            # De-quantize coordinates back to [0, 1] (1000 bins here).
            box = output[:, :4].clip(0, 999).float() / (1000 - 1)
            box = box_ops.box_cxcywh_to_xyxy(box)
            # 1500 is the first class token id (category_start).
            label = output[:, 4].unsqueeze(-1) - 1500
            orig_size = targets[batch_index]["orig_size"]
            img_h, img_w = orig_size[0], orig_size[1]
            scale_fct = torch.stack([img_w, img_h, img_w, img_h]).unsqueeze(0)
            box = scale_fct * box
            # Last column of each 5-tuple holds the class-token confidence.
            value = values[batch_index].reshape(-1, 5)[:, -1]
            threshold = 0.3
            select = (value > threshold)
            results.append({'scores': value[select], 'labels': label.squeeze(-1)[select], 'boxes': box[select]})
            batch_index = batch_index + 1
        res = {target['image_id'].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
        if panoptic_evaluator is not None:
            # NOTE(review): target_sizes and orig_target_sizes are undefined in
            # this scope — this branch raises NameError if ever taken.
            res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
            for i, target in enumerate(targets):
                image_id = target["image_id"].item()
                file_name = f"{image_id:012d}.png"
                res_pano[i]["image_id"] = image_id
                res_pano[i]["file_name"] = file_name

            panoptic_evaluator.update(res_pano)

    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()
    if panoptic_evaluator is not None:
        panoptic_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    panoptic_res = None
    if panoptic_evaluator is not None:
        panoptic_res = panoptic_evaluator.summarize()
    return 0, coco_evaluator


@torch.no_grad()
def test(model, criterion, postprocessor, data_loader, base_ds, device, output_dir, cfg, subset='val', epoch=None,
         test_mode=False):
    '''
    Run inference and evaluation. Do not compute loss.

    Each predicted token sequence is decoded into (segment, label, score)
    triples, segments are rescaled by the video duration, low-confidence
    detections are thresholded out, and the rest are fed to a TADEvaluator.

    test_mode: indicates that we are evaluating a specific epoch during
        testing; detections are additionally dumped to disk.
    Returns a flat dict of evaluation stats (per-IoU mAP values etc.).
    '''
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(
        window_size=1, fmt='{value:.2f}'))

    # THUMOS14 uses the standard [0.3..0.7] IoU range; other datasets use
    # [0.50..0.95] in steps of 0.05.
    iou_range = [0.3, 0.4, 0.5, 0.6, 0.7] if cfg.dataset_name == 'thumos14' else [
        num / 100 for num in range(50, 100, 5)]

    action_evaluator = TADEvaluator(cfg.dataset_name, subset, base_ds, nms_mode=[
        'raw'], iou_range=iou_range, epoch=epoch)

    for (samples, targets) in tqdm.tqdm(data_loader, total=len(data_loader)):
        samples = samples.to(device)
        # Seed autoregressive decoding with one BOS token (301) per sample;
        # must match the vocabulary used by train_one_epochs.
        seq = torch.ones(len(targets), 1).to(samples.mask) * 301
        outputs = model(samples, seq)
        video_duration = torch.FloatTensor(
            [t["video_duration"] for t in targets]).to(device)
        results = []
        outputs, values = outputs
        for batch_index, output in enumerate(outputs):
            # Drop the BOS token; each detection is a
            # (start_bin, end_bin, class_token) triple.
            output = output[1:].reshape(-1, 3)
            # De-quantize segment boundaries back to [0, 1] (200 bins).
            box = output[:, :2].clip(0, 199).float() / (200 - 1)
            box = box_ops.box_cxcywh_to_xyxy(box)
            # 250 is the first class token id (category_start).
            label = output[:, 2].unsqueeze(-1) - 250
            # Rescale normalized segments to seconds via the video duration.
            orig_size = video_duration[batch_index]
            scale_fct = torch.stack([orig_size, orig_size]).unsqueeze(0)
            box = scale_fct * box
            # Last column of each triple holds the class-token confidence.
            value = values[batch_index].reshape(-1, 3)[:, -1]
            threshold = 0.0035
            select = (value > threshold)
            results.append({'scores': value[select], 'labels': label.squeeze(-1)[select], 'segments': box[select]})
        res = {target['video_id']: output for target, output in zip(targets, results)}

        if action_evaluator is not None:
            action_evaluator.update(res, assign_cls_labels=cfg.binary)

    logging.getLogger().setLevel(logging.INFO)

    # Accumulate predictions from all videos.
    if action_evaluator is not None:
        action_evaluator.synchronize_between_processes()
        action_evaluator.accumulate(cfg.test_slice_overlap)
        # Dump detections when evaluating a specific epoch.
        if test_mode:
            # NOTE(review): the '{}' placeholder is left unformatted here —
            # presumably dump_detection fills it in; confirm.
            save_path = osp.join('outputs', 'detection_{}.json')
            action_evaluator.dump_detection(save_path)
        action_evaluator.summarize()

    stats = {}

    if action_evaluator is not None:
        # Flatten the nested {nms_mode: {metric: value}} stats dict.
        for k, v in action_evaluator.stats.items():
            for vk, vv in v.items():
                stats[vk + '_' + k] = vv
        mAP_values = ' '.join(f'{k}: {100 * v:.2f}'
                              for k, v in stats.items() if k.startswith('mAP'))
        logging.info(mAP_values)

        stats['stats_summary'] = action_evaluator.stats_summary

    return stats
