# ---------------------------------------------------------------------------------------------------
# CLIP-DINOiser
# authors: Monika Wysoczanska, Warsaw University of Technology
# ---------------------------------------------------------------------------------------------------
# modified from TCL (https://github.com/kakaobrain/tcl/) Copyright (c) 2023 Kakao Brain. All Rights Reserved.
# ---------------------------------------------------------------------------------------------------

import os
import argparse

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from hydra import compose, initialize
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, set_random_seed
from mmseg.apis import multi_gpu_test

from helpers.logger import get_logger
from models import build_model
from segmentation.evaluation import build_seg_dataloader, build_seg_dataset, build_seg_inference

import random
from torch.multiprocessing import spawn
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
import time

@torch.no_grad()
def evaluate(cfg, val_loaders, rank):
    """Evaluate the model on every validation dataloader.

    The checkpoint is read from disk on rank 0 only; the weights are then
    broadcast to all other ranks so each process ends up with identical
    parameters before inference.

    Args:
        cfg: Hydra config with ``model`` and ``evaluate`` sections.
        val_loaders: mapping of dataset key -> mmseg dataloader.
        rank: distributed rank of this process (also its CUDA device index).

    Returns:
        dict mapping ``val/<key>_miou`` to the per-dataset mIoU (percent),
        plus ``val/avg_miou`` with their mean.
    """
    logger = get_logger()
    ret = {}
    for key, loader in val_loaders.items():
        logger.info(f"### Validation dataset: {key}")
        CLASSES = loader.dataset.CLASSES
        logger.info(f"Creating model:{cfg.model.type}")

        # Build the model on every rank (the original only set `apply_found`
        # on rank 0, leaving the other ranks' models inconsistently configured).
        model = build_model(cfg.model, class_names=CLASSES)
        model.apply_found = key in ["voc", "coco_object"]

        if rank == 0:
            # NOTE: the checkpoint stores only the projection conv (768 -> 256)
            # matching the CLIP/DINO token feature dims; all other weights come
            # from the pretrained backbones created by build_model above.
            check_path = 'checkpoints/last.pt'
            time_now = time.time()
            # map_location='cpu' prevents deserializing tensors onto whichever
            # GPU the checkpoint happened to be saved from.
            check = torch.load(check_path, map_location='cpu')['model_state_dict']
            # strict=False: load only the keys present in the checkpoint.
            model.load_state_dict(check, strict=False)
            logger.info(f"rank{rank} checkpoint loaded in {time.time() - time_now:.2f}s")

        dist.barrier()  # ensure rank 0 finished loading before broadcasting
        model.to(rank)  # move to this rank's GPU before NCCL collectives

        # Broadcast rank-0 weights AND buffers to all ranks. broadcast() must
        # receive the tensor that is actually kept: calling .contiguous()
        # inline would broadcast into a temporary copy, and non-source ranks
        # would silently discard the received values for non-contiguous tensors.
        for tensor in list(model.parameters()) + list(model.buffers()):
            if tensor.data.is_contiguous():
                dist.broadcast(tensor.data, src=0)
            else:
                tmp = tensor.data.contiguous()
                dist.broadcast(tmp, src=0)
                tensor.data.copy_(tmp)

        model.eval()
        miou, _ = validate_seg(cfg, cfg.evaluate.get(key), loader, model)
        logger.info(f"[{key}] mIoU of {len(loader.dataset)} test images: {miou:.2f}%")
        ret[f"val/{key}_miou"] = miou

    ret["val/avg_miou"] = np.mean([v for k, v in ret.items() if "miou" in k])
    return ret


@torch.no_grad()
def validate_seg(config, seg_config, data_loader, model):
    """Run distributed inference on one segmentation dataset.

    Wraps ``model`` in an mmseg inference head, evaluates it across all
    ranks with ``multi_gpu_test``, computes mIoU on rank 0 and broadcasts
    the metric so every rank returns the same result.

    Returns:
        tuple ``(miou, metric)``: mIoU in percent and the single-element
        list holding the mmseg metric dict.
    """
    logger = get_logger()
    dist.barrier()
    model.eval()

    inference_model = build_seg_inference(
        model,
        data_loader.dataset,
        config,
        seg_config,
    )
    logger.info("模型本地加载成功,下面开始模型并行")

    ddp_wrapper = MMDistributedDataParallel(
        inference_model,
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
    )
    ddp_wrapper.eval()
    logger.info("模型并行成功,下面开始模型测试")

    raw_results = multi_gpu_test(
        model=ddp_wrapper,
        data_loader=data_loader,
        tmpdir=None,
        gpu_collect=True,
        efficient_test=False,
        pre_eval=True,
        format_only=False,
    )

    # Rank 0 computes the metric; every other rank receives it by broadcast.
    if dist.get_rank() == 0:
        metric = [data_loader.dataset.evaluate(raw_results, metric="mIoU", logger=logger)]
    else:
        metric = [None]
    dist.broadcast_object_list(metric)

    miou_pct = metric[0]["mIoU"] * 100
    torch.cuda.empty_cache()
    dist.barrier()
    return miou_pct, metric

def setup(rank, world_size):
    """Initialize the NCCL process group for one spawned worker.

    Args:
        rank: index of this process; also the CUDA device it is pinned to.
        world_size: total number of processes (one per GPU).
    """
    # Guard against double initialization if this is called more than once.
    if dist.is_initialized():
        print(f"Process group already initialized for rank {rank}")
        return

    # Single-node rendezvous: all workers connect to the local master.
    # setdefault keeps any address/port already provided by an external
    # launcher instead of unconditionally clobbering it (the original
    # always overwrote these variables). Pick an unused port if changing it.
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    os.environ.setdefault('MASTER_PORT', '29502')
    # os.environ["NCCL_DEBUG"] = "INFO"
    # os.environ["NCCL_BLOCKING_WAIT"] = "1"
    # os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"

    # NCCL is NVIDIA's GPU-optimized collective backend; rank identifies
    # this process within the group of world_size processes.
    print(f"Initializing process group at rank {rank}...")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    print(f"Process group initialized for rank {rank}.")

    # Pin this process to its own GPU so collectives run on distinct devices.
    torch.cuda.set_device(rank)

def main(rank, cfg, world_size):
    """Entry point of one spawned evaluation worker.

    Args:
        rank: worker index supplied by ``torch.multiprocessing.spawn``.
        cfg: composed Hydra config.
        world_size: number of workers / GPUs.
    """
    setup(rank, world_size)
    dist.barrier()

    # Rank-shifted seeding: reproducible runs, decorrelated workers.
    set_random_seed(cfg.seed, use_rank_shift=True)
    cudnn.benchmark = True

    logger = get_logger(cfg)

    # One dataloader per evaluation task listed in the config.
    val_loaders = {}
    for key in cfg.evaluate.task:
        val_loaders[key] = build_seg_dataloader(build_seg_dataset(cfg.evaluate.get(key)))

    res = evaluate(cfg, val_loaders, rank)
    logger.info(res)
    dist.barrier()
    # Tear down the process group so spawned workers exit cleanly
    # (the original leaked the NCCL group on shutdown).
    dist.destroy_process_group()


def parse_args():
    """Parse the command line: a single positional Hydra config name.

    Returns:
        argparse.Namespace with a ``config`` attribute.
    """
    parser = argparse.ArgumentParser(
        description='mmseg test (and eval) a model')
    parser.add_argument('config', help='config file path')
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # Compose the Hydra config in the parent so every worker receives a copy.
    initialize(config_path="configs", version_base=None)
    cfg = compose(config_name=args.config)

    # Seed every RNG in the parent; workers re-seed with a rank shift.
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)

    world_size = torch.cuda.device_count()
    if world_size == 0:
        # spawn(nprocs=0) would silently do nothing; fail loudly instead.
        raise RuntimeError("No CUDA devices available for distributed evaluation.")

    # One worker per GPU. spawn runs `main` in each process and automatically
    # passes the rank as the first argument, followed by `args`.
    spawn(main, args=(cfg, world_size), nprocs=world_size, join=True)