# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import time
from os import path as osp

import motmetrics as mm
import numpy as np
import sacred
import torch
import tqdm
import yaml
from torch.utils.data import DataLoader

from trackformer.datasets.tracking import TrackDatasetFactory
from trackformer.models import build_model
from trackformer.models.tracker import Tracker
from trackformer.util.misc import nested_dict_to_namespace
from trackformer.util.track_utils import (evaluate_mot_accums, get_mot_accum,
                                          interpolate_tracks, plot_sequence)

# Use the 'lap' package as the linear-assignment solver for motmetrics.
mm.lap.default_solver = 'lap'

# Sacred experiment: base config plus an optional named 'reid' config that
# can be activated from the command line (`with reid`).
ex = sacred.Experiment('track')
ex.add_config('cfgs/track.yaml')
ex.add_named_config('reid', 'cfgs/track_reid.yaml')


@ex.automain
def main(seed, dataset_name, obj_detect_checkpoint_file, tracker_cfg,
         write_images, output_dir, interpolate, verbose, load_results_dir,
         data_root_dir, generate_attention_maps, frame_range,
         _config, _log, _run, obj_detector_model=None):
    """Track every sequence of a dataset and evaluate the results.

    All parameters are injected by sacred from the experiment config.
    `obj_detector_model` is only provided when this function is called as an
    evaluation hook during training; in that case the already-built detector
    is reused and verbose outputs are omitted.

    Returns:
        A motmetrics summary when run standalone (``obj_detector_model is
        None``), otherwise the list of per-sequence MOT accumulators.
    """
    if write_images:
        assert output_dir is not None

    # obj_detector_model is only provided when run as evaluation during
    # training. in that case we omit verbose outputs.
    if obj_detector_model is None:
        sacred.commands.print_config(_run)

    # set all seeds so repeated runs are comparable
    if seed is not None:
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        np.random.seed(seed)
        torch.backends.cudnn.deterministic = True

    if output_dir is not None:
        if not osp.exists(output_dir):
            os.makedirs(output_dir)
        # Persist the effective config next to the results so the run can be
        # inspected and reproduced later.
        with open(osp.join(output_dir, 'track.yaml'), 'w') as config_file:
            yaml.dump(_config, config_file, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    if obj_detector_model is None:
        # The detector config is expected to live next to the checkpoint.
        obj_detect_config_path = os.path.join(
            os.path.dirname(obj_detect_checkpoint_file),
            'config.yaml')
        # NOTE(review): yaml.unsafe_load can instantiate arbitrary Python
        # objects — only use it on trusted, locally generated config files.
        with open(obj_detect_config_path) as config_file:
            obj_detect_args = nested_dict_to_namespace(
                yaml.unsafe_load(config_file))
        img_transform = obj_detect_args.img_transform
        obj_detector, _, obj_detector_post = build_model(obj_detect_args)

        # Load the checkpoint onto the CPU first; the model is moved to the
        # GPU after the weights have been restored.
        obj_detect_checkpoint = torch.load(
            obj_detect_checkpoint_file, map_location=lambda storage, loc: storage)

        obj_detect_state_dict = obj_detect_checkpoint['model']

        # Strip the 'detr.' prefix so the checkpoint keys match the
        # detector's own state dict, and drop track encoding weights.
        obj_detect_state_dict = {
            k.replace('detr.', ''): v
            for k, v in obj_detect_state_dict.items()
            if 'track_encoding' not in k}

        obj_detector.load_state_dict(obj_detect_state_dict)
        if 'epoch' in obj_detect_checkpoint:
            _log.info(f"INIT object detector [EPOCH: {obj_detect_checkpoint['epoch']}]")

        obj_detector.cuda()
    else:
        # Reuse the detector handed over by the training loop.
        obj_detector = obj_detector_model['model']
        obj_detector_post = obj_detector_model['post']
        img_transform = obj_detector_model['img_transform']

    # Switch the model into tracking mode if it supports one.
    if hasattr(obj_detector, 'tracking'):
        obj_detector.tracking()

    track_logger = None
    if verbose:
        track_logger = _log.info

    tracker = Tracker(
        obj_detector, obj_detector_post, tracker_cfg,
        generate_attention_maps, track_logger, verbose)

    time_total = 0   # accumulated pure tracking runtime over all sequences
    num_frames = 0   # total number of processed frames
    mot_accums = []  # per-sequence MOT accumulators for the final evaluation

    # One dataset may contain multiple independent sequences (e.g. the
    # individual video clips of a MOT challenge split).
    dataset = TrackDatasetFactory(
        dataset_name, root_dir=data_root_dir, img_transform=img_transform)

    for seq in dataset:
        # Start every sequence from a clean tracker state (no active or
        # inactive tracks, no cached features from previous frames).
        tracker.reset()

        _log.info(f"------------------")
        _log.info(f"TRACK SEQ: {seq}")

        # frame_range holds fractional start/end positions in [0, 1] that
        # select a sub-range of the sequence.
        start_frame = int(frame_range['start'] * len(seq))
        end_frame = int(frame_range['end'] * len(seq))

        seq_loader = DataLoader(
            torch.utils.data.Subset(seq, range(start_frame, end_frame)))

        num_frames += len(seq_loader)

        # Previously written results can be loaded instead of re-tracking.
        results = seq.load_results(load_results_dir)

        if not results:
            start = time.time()
            for frame_id, frame_data in enumerate(tqdm.tqdm(seq_loader, file=sys.stdout)):
                with torch.no_grad():
                    tracker.step(frame_data)

            results = tracker.get_results()

            time_total += time.time() - start

            _log.info(f"NUM TRACKS: {len(results)} ReIDs: {tracker.num_reids}")
            _log.info(f"RUNTIME: {time.time() - start :.2f} s")

            if interpolate:
                results = interpolate_tracks(results)

            if output_dir is not None:
                _log.info(f"WRITE RESULTS")
                seq.write_results(results, output_dir)
        else:
            _log.info("LOAD RESULTS")

        if seq.no_gt:
            _log.info("NO GT AVAILBLE")
        else:
            mot_accum = get_mot_accum(results, seq_loader)
            mot_accums.append(mot_accum)

            if verbose:
                # Analyse identity switches: for every SWITCH event, measure
                # how many frames passed since the last correct MATCH of the
                # same ground-truth object id.
                mot_events = mot_accum.mot_events
                reid_events = mot_events[mot_events['Type'] == 'SWITCH']
                match_events = mot_events[mot_events['Type'] == 'MATCH']

                switch_gaps = []
                for index, event in reid_events.iterrows():
                    frame_id, _ = index
                    match_events_oid = match_events[match_events['OId'] == event['OId']]
                    match_events_oid_earlier = match_events_oid[
                        match_events_oid.index.get_level_values('FrameId') < frame_id]

                    if not match_events_oid_earlier.empty:
                        match_events_oid_earlier_frame_ids = \
                            match_events_oid_earlier.index.get_level_values('FrameId')
                        last_occurrence = match_events_oid_earlier_frame_ids.max()
                        switch_gap = frame_id - last_occurrence
                        switch_gaps.append(switch_gap)

                # Histogram the gaps into bins of 10 frames for logging.
                switch_gaps_hist = None
                if switch_gaps:
                    switch_gaps_hist, _ = np.histogram(
                        switch_gaps, bins=list(range(0, max(switch_gaps) + 10, 10)))
                    switch_gaps_hist = switch_gaps_hist.tolist()

                _log.info(f'SWITCH_GAPS_HIST (bin_width=10): {switch_gaps_hist}')

        if output_dir is not None and write_images:
            _log.info("PLOT SEQ")
            plot_sequence(
                results, seq_loader, osp.join(output_dir, dataset_name, str(seq)),
                write_images, generate_attention_maps)

    # Report the aggregated runtime (tracking only, excluding evaluation and
    # image writing) including the effective frame rate in Hz.
    if time_total:
        _log.info(f"RUNTIME ALL SEQS (w/o EVAL or IMG WRITE): "
                  f"{time_total:.2f} s for {num_frames} frames "
                  f"({num_frames / time_total:.2f} Hz)")

    # Standalone runs evaluate all accumulated sequences; when called from
    # training, the raw accumulators are returned to the caller instead.
    if obj_detector_model is None:
        _log.info(f"EVAL:")

        summary, str_summary = evaluate_mot_accums(
            mot_accums,
            [str(s) for s in dataset if not s.no_gt])

        _log.info(f'\n{str_summary}')

        return summary

    return mot_accums
