import argparse
import os
import random
import time
from datetime import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torchvision
import torchvision.transforms as T
from hydra import compose, initialize
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info
from mmseg.apis import multi_gpu_test
from tqdm import tqdm

from helpers.logger import get_logger
from models import build_model
from scheduler import MultiStepLR
from segmentation.evaluation import build_seg_dataloader, build_seg_dataset, build_seg_inference

from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from torch.multiprocessing import spawn
import wandb

def get_model_dict(model):
    """Extract only the trained head parameters as a CPU state dict.

    Only ``obj_proj`` and ``bkg_decoder`` are trained in this script (see the
    optimizer groups in ``do_train_ddp``), so the checkpoint keeps just those
    four tensors instead of the full model.

    Args:
        model: module exposing ``obj_proj`` and ``bkg_decoder`` submodules.

    Returns:
        dict mapping the four parameter names to CPU tensors.
    """
    # Materialize the state dict once; the original rebuilt it for every key.
    state = model.state_dict()
    keys = ('obj_proj.bias', 'obj_proj.weight',
            'bkg_decoder.bias', 'bkg_decoder.weight')
    return {k: state[k].cpu() for k in keys}

def get_criterion(cfg):
    """Build the training loss from the train config.

    Args:
        cfg: mapping with a ``loss`` key (e.g. ``cfg.train``).

    Returns:
        torch.nn.Module: the loss. NOTE: the key ``'CE'`` actually maps to
        binary cross-entropy with logits, not multi-class cross-entropy —
        both training targets here are binary masks/correlations.

    Raises:
        NotImplementedError: if the configured loss is not supported.
    """
    if cfg.get('loss') == 'CE':
        return torch.nn.BCEWithLogitsLoss(reduction='mean')
    # Include the offending value so misconfigurations are easy to diagnose.
    raise NotImplementedError(f"Unsupported loss: {cfg.get('loss')!r}")

@torch.no_grad()
def validate(model, cfg, rank):
    """Evaluate zero-shot segmentation on every task in ``cfg.evaluate.task``.

    Runs ``run_val`` per dataset, broadcasts the rank-0 metrics to all ranks,
    and logs per-task mIoU to wandb and the logger on rank 0.

    NOTE: this function executes collective operations
    (``broadcast_object_list``, ``barrier``) and therefore must be entered by
    ALL ranks of the process group, not just rank 0.

    Args:
        model: the unwrapped model (``ddp_model.module``).
        cfg: experiment config with an ``evaluate`` section.
        rank: this process' distributed rank.
    """
    model.eval()
    # Only the master rank logs; other ranks keep logger=None.
    logger = get_logger(cfg) if rank == 0 else None

    ret = {}
    tasks = cfg.evaluate.task

    for key in tasks:
        loader = build_seg_dataloader(build_seg_dataset(cfg.evaluate.get(key)))
        model.apply_found = False
        # class_names = loader.dataset.CLASSES
        # model.clip_backbone.decode_head.update_vocab(class_names)
        # Datasets with an explicit background class use the FOUND saliency
        # branch during inference.
        if key in ["voc", "coco_object"]:
            model.apply_found = True
        metric = run_val(model, loader, cfg.evaluate.get(key), logger, cfg)
        # run_val only computes the metric on rank 0 ([None] elsewhere);
        # broadcast (default src=0) fills it in on every rank.
        dist.broadcast_object_list(metric)
        torch.cuda.empty_cache()
        dist.barrier()
        if rank == 0:
            ret[f"val/{key}_miou"] = metric[0]["mIoU"] * 100
            wandb.log({f"val/{key}_miou": metric[0]["mIoU"] * 100})
            # logger.info(f"{key} mIoU: {metric[0]['mIoU'] * 100:.2f}")
    if rank == 0:
        logger.info(ret)

def run_val(model, loader, eval_key, logger, cfg):
    """Run multi-GPU mIoU evaluation of *model* on one segmentation dataset.

    Updates the text vocabulary to the dataset's class names, wraps the model
    in an mmseg inference module, and runs ``multi_gpu_test`` across all ranks.

    Args:
        model: the unwrapped model (``ddp_model.module``).
        loader: mmseg-style dataloader whose dataset exposes ``CLASSES``.
        eval_key: per-dataset evaluation config (``cfg.evaluate.<key>``).
        logger: rank-0 logger, or None on other ranks.
        cfg: full experiment config.

    Returns:
        list: ``[metrics_dict]`` on rank 0, ``[None]`` on every other rank —
        a one-element list so the caller can ``broadcast_object_list`` it.
    """
    # Re-embed the text prompts for this dataset's class names.
    model.clip_backbone.decode_head.update_vocab(loader.dataset.CLASSES)

    seg_model = build_seg_inference(
        model,
        loader.dataset,
        cfg,
        eval_key)
    seg_model.cuda()
    model.device = 'cuda'

    # gpu_collect=True gathers predictions via NCCL instead of a tmpdir.
    results = multi_gpu_test(
        model=MMDistributedDataParallel(seg_model, device_ids=[torch.cuda.current_device()]),
        data_loader=loader,
        tmpdir=None,
        gpu_collect=True,
        efficient_test=False,
        pre_eval=True,
        format_only=False,
    )

    # Only rank 0 computes the metric; callers broadcast it afterwards.
    if dist.get_rank() == 0:
        metric = [loader.dataset.evaluate(results, metric="mIoU", logger=logger)]
    else:
        metric = [None]
    return metric

def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single positional ``config`` entry: the
        Hydra config file name to compose.
    """
    parser = argparse.ArgumentParser(
        description='CLIP-DINOiser training procedure')
    parser.add_argument('config', help='config file path')
    return parser.parse_args()

def setup(rank, world_size):
    """Initialize the NCCL process group and bind this process to its GPU.

    Args:
        rank: this process' rank (also used as the local GPU index).
        world_size: total number of processes in the group.
    """
    # Avoid double initialization (e.g. if called twice in the same process).
    if dist.is_initialized():
        print(f"Process group already initialized for rank {rank}")
        return
    # MASTER_ADDR/MASTER_PORT configure the rendezvous for process-group
    # initialization. The master address is the host coordinating init; in
    # this single-node multi-GPU setup every process connects locally.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    # Port the master listens on for inter-process communication.
    # Must be a port that is not already in use.
    os.environ['MASTER_PORT'] = '29501'


    # os.environ["NCCL_DEBUG"] = "INFO"
    # os.environ["NCCL_BLOCKING_WAIT"] = "1"  # enable NCCL blocking-wait mode
    # os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"  # async error handling
    # Initialize the distributed process group using NCCL, NVIDIA's
    # GPU-optimized communication backend. `rank` is this process' index in
    # the group; `world_size` is the total number of processes (GPUs).
    print(f"Initializing process group at rank {rank}...")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    print(f"Process group initialized for rank {rank}.")

    # Pin this process to its own GPU so collective ops use the right device.
    torch.cuda.set_device(rank)

def do_train_ddp(rank, cfg, world_size):
    """Train the CLIP-DINOiser heads on one DDP worker process.

    Spawned once per GPU by ``torch.multiprocessing.spawn``. Trains the
    ``obj_proj`` and ``bkg_decoder`` heads against frozen DINO / FOUND
    teacher targets, periodically validates, then checkpoints and runs a
    final evaluation.

    Args:
        rank: this process' rank / local GPU index.
        cfg: composed Hydra experiment config.
        world_size: total number of processes (GPUs).
    """
    setup(rank, world_size)
    wdb_init(rank, cfg)

    out_path = cfg.get('output', os.getcwd())
    os.makedirs(out_path, exist_ok=True)
    dset_path = cfg.train.get('data')
    train_folder = os.path.join(dset_path, 'train')
    assert os.path.exists(train_folder), 'Empty training dataset path'

    # Only the master rank logs.
    logger = get_logger(cfg) if rank == 0 else None
    if rank == 0:
        logger.info("Running CLIP-DINOiser training")

    im_size = cfg.train.get('im_size', 448)
    num_workers = cfg.train.get('num_workers', 4)
    transforms = [T.ToTensor(), T.Resize(im_size), T.RandomCrop(im_size), T.RandomHorizontalFlip(p=0.5),
                  T.ColorJitter(0.5)]
    train_dataset = torchvision.datasets.ImageFolder(train_folder, transform=T.Compose(transforms))
    # Train on a fixed-size random subset of the image folder.
    # NOTE(review): this choice runs independently in each spawned process;
    # identical subsets across ranks rely on identical numpy seeding — verify.
    subset_indices = np.random.choice(
        list(range(len(train_dataset))),
        cfg.train.ds_size,
        replace=False
    )
    train_subset = torch.utils.data.Subset(train_dataset, subset_indices)

    # DistributedSampler shards the subset across ranks.
    train_sampler = DistributedSampler(
        train_subset,
        shuffle=True,
        num_replicas=world_size,
        rank=rank
    )

    train_loader = torch.utils.data.DataLoader(
        train_subset,
        batch_size=cfg.train.batch_size,
        num_workers=num_workers,
        sampler=train_sampler,
        drop_last=True
    )

    # Placeholder vocabulary; real class names are set per-dataset in run_val.
    classes = ['']

    model = build_model(cfg.model, class_names=classes)
    model.load_teachers()

    model = model.cuda()
    dist.barrier()  # ensure every rank has built the model before DDP wrap
    print(f"Before wrapping with DDP, model is on: {next(model.parameters()).device}")
    model = DDP(model, device_ids=[torch.cuda.current_device()])
    print(f"After wrapping with DDP, model is on: {next(model.parameters()).device}")

    # Per-run checkpoint directory named by timestamp.
    str_date_time = datetime.fromtimestamp(time.time()).strftime("%d%m%Y-%H%M%S")
    ch_path = os.path.join(out_path, str_date_time)
    if rank == 0:
        # makedirs + exist_ok is robust to pre-existing dirs and nested paths.
        os.makedirs(ch_path, exist_ok=True)

    epochs = cfg.train.get("epochs", 100)
    criterion = get_criterion(cfg.train)
    # Two parameter groups: the background decoder gets its own learning rate.
    optimizer = torch.optim.AdamW([{'params': model.module.obj_proj.parameters()},
                                   {'params': model.module.bkg_decoder.parameters(), 'lr': cfg.train.get('found_lr')}],
                                  lr=cfg.train.get('corr_lr'))
    scheduler = MultiStepLR(optimizer, cfg.train.get('milestones'), gamma=cfg.train.get("step_lr_gamma"), warmup=0)

    for epoch in range(epochs):
        train_sampler.set_epoch(epoch)  # reshuffle shards each epoch
        tbar = tqdm(enumerate(train_loader, 0), disable=(rank != 0))
        for i, data in tbar:
            # Only the two trained heads go to train mode; teachers stay frozen.
            model.module.bkg_decoder.train()
            model.module.obj_proj.train()
            inputs = data[0].to(rank)  # data[1] (ImageFolder labels) is unused
            optimizer.zero_grad()
            preds_bkg, pred_corrs, clip_feats = model.module.forward_pass(inputs)
            # Clamp negative predicted correlations to zero.
            pred_corrs[pred_corrs < 0] = 0.

            with torch.no_grad():
                # Teacher targets: binarized FOUND saliency + DINO correlations.
                found_pred = model.module.get_found_preds(inputs, resize=preds_bkg.shape[-2:])
                found_pred = (found_pred > 0.5).float()
                dino_corrs = model.module.get_dino_corrs(inputs).detach()

            dino_loss = criterion(pred_corrs.float().flatten(-2, -1), (dino_corrs.flatten(-2, -1) > 0).float())
            found_loss = criterion(preds_bkg.float().flatten(-2, -1), found_pred.float().flatten(-2, -1))
            loss = dino_loss + found_loss
            loss.backward()
            optimizer.step()

            if rank == 0:
                tbar.set_description(f"epoch{epoch}: processing {i}th batch data | loss:{loss.item()}")
                wandb.log({
                    "epoch": epoch,
                    "dino_loss": dino_loss.item(),
                    "found_loss": found_loss.item(),
                    "loss": loss.item()
                    })

            # LR schedule is stepped per iteration, not per epoch.
            scheduler.step()
        if (epoch+1) % cfg.train.get('val_interval', 1) == 0:
            validate(model.module, cfg, rank)

    # Drop the frozen teachers before checkpointing / final eval: only the
    # obj_proj / bkg_decoder weights are saved (see get_model_dict). Done on
    # EVERY rank so the final validate() sees a consistent model state.
    model.module.found_model = None
    model.module.vit_encoder = None

    # Save checkpoint only on rank 0.
    if rank == 0:
        torch.save({
            'epoch': epoch,
            'model_state_dict': get_model_dict(model.module),
        }, os.path.join(ch_path, 'last.pt'))
        logger.info("Training finished. Running evaluation...")

    # BUGFIX: the final evaluation was previously invoked from rank 0 only
    # and without the `rank` argument. validate() performs collective ops
    # (broadcast_object_list / barrier), so a rank-0-only call deadlocks the
    # other ranks, and the missing argument raises TypeError before that.
    # Every rank must enter it, with the rank passed through.
    validate(model.module, cfg, rank)

    if rank == 0:
        wandb.finish()
    dist.destroy_process_group()

def wdb_init(rank, cfg):
    """Initialize Weights & Biases logging on the master rank only.

    Args:
        rank: this process' distributed rank; non-zero ranks do nothing.
        cfg: experiment config; ``cfg.train.wandb_name`` names the run.
    """
    if rank == 0:
        wandb.init(
            project="ICCV_dinoClip",
            name=cfg.train.get('wandb_name', 'test'),
            config={
                # BUGFIX: log the actual configured batch size instead of a
                # hard-coded 32, so the wandb run config matches training.
                "batch_size": cfg.train.get('batch_size', 32),
                "architecture": "VIT-B/16",
                "dataset": "imagenet_subset_1000",
            },
        )


def main():
    """Entry point: compose the Hydra config, seed RNGs, and launch one DDP
    worker per visible GPU via ``torch.multiprocessing.spawn``."""
    args = parse_args()
    initialize(config_path="configs", version_base=None)
    cfg = compose(config_name=args.config)

    # Seed every RNG source for reproducibility.
    # NOTE(review): with the 'spawn' start method, child processes re-import
    # the module and do not inherit these seeds — confirm this is intended.
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)

    n_gpus = torch.cuda.device_count()
    # spawn passes the process index as the first argument (rank).
    spawn(do_train_ddp, args=(cfg, n_gpus), nprocs=n_gpus, join=True)

if __name__ == "__main__":
    main()