﻿# ---------------------------------------------------------------------------------------------------
# CLIP-DINOiser
# authors: Monika Wysoczanska, Warsaw University of Technology
# ---------------------------------------------------------------------------------------------------
from datasets import transforms

import argparse
import os
import random
import time
from datetime import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torchvision
import torchvision.transforms as T
from hydra import compose, initialize, initialize_config_dir
from torch.utils.data import DataLoader, Subset, RandomSampler, DistributedSampler
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist
from mmseg.apis import multi_gpu_test
from tqdm import tqdm
import torch.nn.functional as F
from pathlib import Path
from helpers.logger import get_logger
from models import build_model
from scheduler import MultiStepLR, CosineAnnealingLR
from segmentation.evaluation import build_seg_dataloader, build_seg_dataset, build_seg_inference
import sys

def setup():
    """Initialize the NCCL process group and pin this process to its GPU.

    Expects ``LOCAL_RANK`` in the environment (set by torchrun /
    torch.distributed.launch). Safe to call more than once: the process
    group is only created if it is not already initialized.
    """
    # Verified to work on the cluster.
    if not dist.is_initialized():
        dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ['LOCAL_RANK']))
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    print(f"Rank: {rank}, World Size: {world_size}")

def get_criterion(loss):
    """Map a loss-name string to a configured loss module.

    NOTE(review): the 'CE' key returns BCEWithLogitsLoss rather than
    CrossEntropyLoss — kept as-is because existing configs rely on
    this mapping.

    Raises:
        NotImplementedError: for any unrecognized loss name.
    """
    factories = {
        'CE': lambda: torch.nn.BCEWithLogitsLoss(reduction='mean'),
        'cosine': lambda: torch.nn.CosineEmbeddingLoss(reduction='mean'),
        'L1': lambda: torch.nn.L1Loss(reduction='mean'),
        'MSE': lambda: torch.nn.MSELoss(reduction='mean'),
        'KL': lambda: torch.nn.KLDivLoss(reduction='batchmean'),
    }
    if loss not in factories:
        raise NotImplementedError
    return factories[loss]()
    
# def caculate_dino_sim_loss(corr_dino, clip_trn_attn_list, criterion):
#     loss_lvl = []
#     assert isinstance(clip_trn_attn_list, list), "clip_trn_attn_list should be a list"
#     assert len(corr_dino.shape) == 4, "corr_dino should be a 4-D tensor"
#     B, token_length, H_t, W_t = corr_dino.shape

#     # 归一化操作: 对corr_dino进行softmax归一化
#     # corr_dino_normalized = F.softmax(corr_dino.flatten(start_dim=2), dim=2).view(B, token_length, H_t, W_t)
#     # 使用 Min-Max 归一化处理 corr_dino
#     corr_dino_normalized = normalize_attention(corr_dino)
#     # 将clip_trn_attn_list中每个level的特征图转化成corr_dino的形状
#     for clip_attn in clip_trn_attn_list:
#         # 取元组第二个元素，是qk计算出的注意力
#         clip_attn = clip_attn[1][:,1:,1:].contiguous().reshape(B, token_length, H_t, W_t) 
#         assert clip_attn.shape == corr_dino.shape, "clip_attn and corr_dino should have the same shape"
#         # 归一化clip_attn
#         # clip_attn_normalized = F.softmax(clip_attn.flatten(start_dim=2), dim=2).view(B, token_length, H_t, W_t)
#         # 使用 Min-Max 归一化处理 clip_attn
#         clip_attn_normalized = normalize_attention(clip_attn, apply_log=True)
#         _loss = criterion(clip_attn_normalized, corr_dino_normalized)
#         loss_lvl.append(_loss)

#     # print(f'\n loss_lvl: {loss_lvl}')
#     loss_mean = torch.mean(torch.stack(loss_lvl))
#     # 返回多个lvl的均值
#     return loss_mean
import math

def _align_teacher_corr(corr_teacher, Hs, Ws):
    """
    corr_teacher: (B, Tt, Ht, Wt), 其中 Tt=Ht*Wt
    对齐到学生网格 (Hs, Ws)，返回 (B, Hs*Ws, Hs, Ws)
    """
    B, Tt, Ht, Wt = corr_teacher.shape
    assert Tt == Ht * Wt, f"Tt={Tt} 应等于 Ht*Wt={Ht*Wt}"

    # 1) 先对齐“键”空间 (Ht,Wt) → (Hs,Ws)
    corr_key = F.interpolate(
        corr_teacher.reshape(B * Tt, 1, Ht, Wt),
        size=(Hs, Ws), mode="bilinear", align_corners=False
    ).view(B, Tt, Hs, Ws)  # (B, Ht*Wt, Hs, Ws)

    # 2) 再对齐“查询”网格 (Ht,Wt) → (Hs,Ws)
    corr5 = corr_key.view(B, Ht, Wt, Hs, Ws)         # (B, Hq, Wq, Hk, Wk)
    corr5 = corr5.permute(0, 3, 4, 1, 2)             # (B, Hk, Wk, Hq, Wq)
    corr_q = F.interpolate(
        corr5.reshape(B * Hs * Ws, 1, Ht, Wt),
        size=(Hs, Ws), mode="bilinear", align_corners=False
    ).view(B, Hs, Ws, Hs, Ws)                        # (B, Hk, Wk, Hq', Wq')

    return corr_q.view(B, Hs * Ws, Hs, Ws)           # (B, T_s, H_s, W_s)


def _compute_teacher_attn_loss(corr_teacher, clip_trn_attn_list, criterion):
    """Mean loss between one teacher correlation map and every student attention level.

    For each entry of ``clip_trn_attn_list`` the qk attention (second tuple
    element, CLS row/column stripped) is reshaped to maps, the teacher
    correlation is resampled to the same grid, both are normalized, and
    ``criterion`` is applied. Returns the mean over levels.
    """
    assert isinstance(clip_trn_attn_list, list), "clip_trn_attn_list should be a list"
    B = corr_teacher.shape[0]
    per_level = []
    for level in clip_trn_attn_list:
        # Second tuple element holds the qk attention; drop the CLS token.
        attn = level[1][:, 1:, 1:]
        b, n_q, n_k = attn.shape
        assert b == B and n_q == n_k, f"attention 形状异常: {attn.shape}"
        side = int(math.isqrt(n_q))
        assert side * side == n_q, f"Ns={n_q} 不是完全平方，检查学生 patch 网格"
        student = normalize_attention(attn.reshape(B, n_q, side, side), apply_log=True)
        teacher = normalize_attention(_align_teacher_corr(corr_teacher, side, side))
        per_level.append(criterion(student, teacher))
    return torch.mean(torch.stack(per_level))

def caculate_dino_sim_loss(teacher_corrs, clip_trn_attn_list, criterion):
    """Sum the attention-alignment loss over all teacher correlation maps.

    Args:
        teacher_corrs: dict mapping teacher name -> (B, Tt, Ht, Wt) volume.
        clip_trn_attn_list: per-level student attention tuples.
        criterion: loss module applied per level.

    Note: the (sic) "caculate" spelling is kept — callers use this name.
    """
    return sum(
        _compute_teacher_attn_loss(corr, clip_trn_attn_list, criterion)
        for corr in teacher_corrs.values()
    )

def normalize_attention(attention, eps=1e-8, apply_log=False):
    """Min-max scale each attention map, then softmax over spatial positions.

    Args:
        attention: (B, T, H, W) attention / correlation maps.
        eps: numerical guard for constant maps (max == min).
        apply_log: if True, return log-probabilities (suitable as the first
            input of KLDivLoss); otherwise probabilities (second input).

    Returns:
        Tensor with the SAME shape as the input, forming a (log-)probability
        distribution over the trailing spatial positions of each map.
    """
    original_shape = attention.shape
    # Per-map min/max over the two trailing spatial dims (keepdim for broadcast).
    min_val = attention.min(dim=-1, keepdim=True)[0].min(dim=-2, keepdim=True)[0]
    max_val = attention.max(dim=-1, keepdim=True)[0].max(dim=-2, keepdim=True)[0]
    attention = (attention - min_val) / (max_val - min_val + eps)

    # Flatten spatial dims so softmax runs over all H*W positions at once.
    flat = attention.flatten(start_dim=2)
    if apply_log:
        normalized = F.log_softmax(flat, dim=-1)
    else:
        normalized = F.softmax(flat, dim=-1)

    # BUGFIX: the original called `view_as(attention)` AFTER `attention` had
    # been rebound to the flattened tensor, so "restore original shape" was a
    # no-op and the result stayed flattened. Restore to the saved shape.
    # (Downstream elementwise / batchmean loss values are unaffected.)
    return normalized.view(original_shape)

def is_main():
    """Return True on rank 0, or whenever torch.distributed is unusable/uninitialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return True
    return dist.get_rank() == 0


def do_train(model, train_cfg, loaders, out_path, logger, val_loader_dict, wandbrun):
    """Distillation training loop for the trainable CLIP backbone.

    Each optimization step combines two terms:
      * a distillation loss between the frozen and trainable CLIP CLS tokens;
      * an attention-alignment ("dino sim") loss between teacher correlation
        maps and the trainable CLIP attention maps.

    Args:
        model: wrapper exposing ``clip_backbone_train``, ``forward_train`` and
            hook management (``remove_hooks`` / ``register_hooks``).
        train_cfg: the cfg.train section (lrs, epochs, loss names, weights...).
        loaders: dict containing at least a 'train' DataLoader.
        out_path: root output directory; checkpoints go to out_path/<name>.
        logger: project logger.
        val_loader_dict: task name -> validation DataLoader.
        wandbrun: wandb run on rank 0, None on other ranks.
    """
    exp_name = train_cfg.get('name')
    ch_path = os.path.join(out_path, exp_name)

    # Only rank 0 creates the checkpoint directory, to avoid races.
    if dist.get_rank() == 0:
        try:
            os.makedirs(ch_path, exist_ok=True)
            print(f"Successfully created directory: {ch_path}")
        except Exception as e:
            print(f"Error creating directory {ch_path}: {e}")

    model.to("cuda")
    epochs = train_cfg.get("epochs", 100)
    criterion_attn = get_criterion(train_cfg.get('loss_attn'))
    criterion_distill = get_criterion(train_cfg.get('loss_dis'))
    # Single parameter group: only the trainable CLIP backbone is optimized.
    optimizer = torch.optim.AdamW(
        [{'params': model.clip_backbone_train.parameters(), 'lr': train_cfg.get('clip_lr')}],
        lr=train_cfg.get('corr_lr'),
        weight_decay=train_cfg.get('weight_decay', 1e-4))
    scheduler = CosineAnnealingLR(optimizer, T_max=train_cfg.get("T_max", 100),
                                  eta_min=train_cfg.get("eta_min"))

    # BUGFIX: load the pretrained checkpoint ONCE, before training. The
    # original code re-loaded it at the top of EVERY epoch, wiping out the
    # progress made in all previous epochs.
    if train_cfg.get('pretrained_path'):
        pretrain_path = train_cfg.get('pretrained_path')
        checkpoint = torch.load(pretrain_path, map_location='cuda')
        # The positional embedding was not restored at save time; drop it so
        # the (strict=False) load does not hit a shape mismatch.
        if "visual.positional_embedding" in checkpoint:
            del checkpoint["visual.positional_embedding"]
        model.clip_backbone_train.backbone.load_state_dict(checkpoint, strict=False)
        print(f'load pretrain model from {pretrain_path} successfully')

    for epoch in range(epochs):
        global_step = 0

        # BUGFIX: re-seed the distributed sampler each epoch so shuffling
        # differs across epochs (otherwise every epoch sees the same order).
        sampler = getattr(loaders['train'], 'sampler', None)
        if isinstance(sampler, DistributedSampler):
            sampler.set_epoch(epoch)

        # Progress bar on the main rank only; other ranks iterate silently.
        if is_main():
            tbar = tqdm(enumerate(loaders['train'], 0),
                        total=len(loaders['train']),
                        dynamic_ncols=True,
                        mininterval=1.0,
                        file=sys.stdout,
                        desc=f"Epoch {epoch}/{epochs-1}",
                        leave=False)
        else:
            tbar = enumerate(loaders['train'], 0)

        # Periodic checkpoint + validation (runs BEFORE this epoch's updates).
        if (epoch + 1) % train_cfg.get('val_interval', 1) == 0:
            if dist.get_rank() == 0 and epoch > 1:
                save_path = os.path.join(ch_path, f'trn_clip_model_epoch{epoch}.pth')
                torch.save(model.clip_backbone_train.backbone.state_dict(), save_path)
                logger.info(f"epoch {epoch} Model saved to {save_path}")

            # Hooks interfere with inference: detach for validation, re-attach after.
            model.remove_hooks()
            logger.info(f'removing all hooks')
            # NOTE(review): `cfg` is read from module scope here (set in
            # __main__); do_train itself only receives cfg.train.
            validate(model, cfg, val_loader_dict, wandbrun)
            model.register_hooks()
            logger.info(f'registering all hooks')

        for i, data in tbar:
            # Training mode must be re-enabled each step: validate() above
            # switches the model to eval().
            model.clip_backbone_train.train()
            inputs = data[0].to("cuda")
            batch_size = inputs.size(0)
            cls_token_freeze, cls_token_trn, teacher_corrs, clip_trn_attn_list = \
                model.forward_train(inputs)

            dino_sim_loss = caculate_dino_sim_loss(
                teacher_corrs, clip_trn_attn_list, criterion_attn)
            # Distillation loss between frozen and trainable CLS tokens.
            distill_loss = criterion_distill(cls_token_freeze, cls_token_trn)
            distill_loss_weight = train_cfg.get('distill_loss_weight')
            loss = (distill_loss_weight * distill_loss
                    + train_cfg.get('dino_sim_loss_weight') * dino_sim_loss)

            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.clip_backbone_train.parameters(),
                                           max_norm=1.0)
            optimizer.step()

            # Count samples (not batches) so the logging cadence is
            # independent of the batch size.
            global_step += batch_size
            if dist.get_rank() == 0 and global_step % 3000 < batch_size:
                tbar.set_description(
                    f"epoch{epoch}: processing {i}th batch data | "
                    f"total_loss:{loss.item():.4f}, "
                    f"distill_loss:{distill_loss.item():.4f}, "
                    f"dino_sim_loss:{dino_sim_loss.item():.4f}"
                )
            if dist.get_rank() == 0 and wandbrun is not None:
                wandbrun.log({
                    "train/epoch": epoch,
                    "train/distill_loss": distill_loss.item(),
                    "train/dino_sim_loss": dino_sim_loss.item(),
                    "distill_loss_weight": distill_loss_weight,
                    "Learning Rate": float(f"{scheduler.get_last_lr()[0]:.6f}")
                })
        scheduler.step()

    # Final checkpoint (uses the last epoch index from the loop).
    save_path = os.path.join(ch_path, f'trn_clip_model_epoch{epoch}.pth')
    torch.save(model.clip_backbone_train.backbone.state_dict(), save_path)
    logger.info(f"epoch {epoch} Model saved to {save_path}")


@torch.no_grad()
def validate(model, cfg, loader_dict, wandbrun):
    """Evaluate every task in cfg.evaluate.task and log mIoU per benchmark."""
    model.eval()
    logger = get_logger()
    results = {}

    for task in cfg.evaluate.task:
        # Dataloaders were built once at startup; just look them up.
        loader = loader_dict[task]
        metric = run_val(model, loader, task, logger, cfg)
        # Rank 0 computed the metric; share it with the other ranks.
        dist.broadcast_object_list(metric)
        miou = metric[0]["mIoU"] * 100
        results[f"val/{task}_miou"] = miou
        if dist.get_rank() == 0:
            wandbrun.log({f"val/{task}_miou": miou})
        torch.cuda.empty_cache()
        dist.barrier()
    logger.info(results)


def get_val_loader_dict(cfg):
    """Build one segmentation dataloader per evaluation task, keyed by task name."""
    return {
        task: build_seg_dataloader(build_seg_dataset(cfg.evaluate.get(task)))
        for task in cfg.evaluate.task
    }

def get_target_dataset_text_embedding_path(cfg, eval_key):
    """Return the first file under cfg['class_names_path'] whose name starts
    with ``eval_key``, or None when no such file exists."""
    embedding_root = Path(cfg.get("class_names_path"))
    for entry in os.listdir(embedding_root):
        if entry.startswith(eval_key):
            return os.path.join(embedding_root, entry)
    return None
        
def run_val(model, loader, key, logger, cfg):
    """Evaluate one segmentation benchmark with multi-GPU inference.

    Args:
        model: training wrapper holding ``clip_backbone_train``.
        loader: segmentation dataloader for this benchmark.
        key: benchmark name (e.g. 'voc', 'ade20k'); selects class list file
            and background-trick thresholds.
        logger: project logger.
        cfg: full hydra config (``cfg.evaluate.<key>`` holds eval settings).

    Returns:
        ``[metrics_dict]`` on rank 0, ``[None]`` on other ranks — a
        one-element list so the caller can pass it to
        ``dist.broadcast_object_list``.
    """
    eval_key = cfg.evaluate.get(key)
    txt_path, area_thd, prob_thd = get_txt_path_from_dataset_name(key) # class list file + background-trick thresholds
    
    # model.clip_backbone_train.decode_head.update_vocab(loader.dataset.CLASSES)
    # txt_path = get_txt_path_from_dataset_name(key)
    # Swap the decode head's vocabulary to this benchmark's class list.
    model.clip_backbone_train.decode_head.update_vocab_from_file(txt_path, area_thd, prob_thd)

    # model.clip_backbone_train.decode_head.update_vocab_from_file(txt_path)
    dist.barrier()
    # Refresh MaskCLIP's classification-head text embeddings.
    if dist.get_rank() == 0:
        logger.info(f"Updating text embeddings for {key}...")
    # target_dataset_text_embedding_path = get_target_dataset_text_embedding_path(cfg, key)
    # model.clip_backbone_train.decode_head.update_text_embeddings(target_dataset_text_embedding_path)
    seg_model = build_seg_inference(
        model,
        loader.dataset,
        cfg,
        eval_key)
    seg_model.cuda()
    model.device = 'cuda'
    # NOTE(review): rebinding `model` shadows the function parameter from here on.
    model = MMDistributedDataParallel(seg_model, device_ids=[torch.cuda.current_device()])
    # model.eval()
    results = multi_gpu_test(
        model=model,
        data_loader=loader,
        tmpdir=None,
        gpu_collect=True,
        efficient_test=False,
        pre_eval=True,
        format_only=False,
    )

    # Only rank 0 computes metrics; other ranks return a broadcast placeholder.
    if dist.get_rank() == 0:
        metric = [loader.dataset.evaluate(results, metric="mIoU", logger=logger)]
    else:
        metric = [None]
    return metric

# 应对背景trick
def get_txt_path_from_dataset_name(dataset_name):
    txt_path = None

    area_thd = None
    prob_thd = 0.0

    txt_root = Path('/space/liangc/code/ovss/clip_dinoiser/pretrained/dataset_name')
    # txt_root = Path('/space0/liangc/code/study/OVSS/clip_dinoiser/pretrained/dataset_name')
    if dataset_name == 'voc':
        txt_path = os.path.join(txt_root, 'cls_voc21.txt')
        area_thd = 0.1
        prob_thd = 0.01
    elif dataset_name == 'coco_object':
        txt_path = os.path.join(txt_root, 'cls_coco_object.txt')
        # prob_thd=0.4
    elif dataset_name == 'context60':
        txt_path = os.path.join(txt_root, 'cls_context60.txt')
        # prob_thd=0.01
    elif dataset_name == 'voc20':
        txt_path = os.path.join(txt_root, 'cls_voc20.txt')
    elif dataset_name == 'coco_stuff':
        txt_path = os.path.join(txt_root, 'cls_coco_stuff.txt')
    elif dataset_name == 'ade20k':
        txt_path = os.path.join(txt_root, 'cls_ade20k.txt')
    elif dataset_name == 'cityscapes':
        txt_path = os.path.join(txt_root, 'cls_city_scapes.txt')
    elif dataset_name == 'context59':
        txt_path = os.path.join(txt_root, 'cls_context59.txt')
    
    return txt_path, area_thd, prob_thd

def main(cfg, wandbrun):
    """Entry point: build dataset, model and teachers, then run training.

    Args:
        cfg: hydra config (expects cfg.output, cfg.train, cfg.model,
            cfg.evaluate).
        wandbrun: wandb run on rank 0, None on other ranks.
    """
    out_path = cfg.get('output')
    if out_path == "":
        out_path = os.getcwd()
    os.makedirs(out_path, exist_ok=True)

    dset_path = cfg.train.get('data')  # ImageFolder-style dataset root
    train_folder = dset_path
    assert os.path.exists(train_folder), 'Empty dataset path'
    logger = get_logger(cfg)
    logger.info(f"Running dinoclip training")

    # Augmentation pipeline for the ImageFolder loader.
    # BUGFIX: named `train_transforms` — the original local `transforms`
    # shadowed the module-level `from datasets import transforms` import.
    im_size = cfg.train.get('im_size', 448)
    num_workers = cfg.train.get('num_workers', 4)
    train_transforms = [T.ToTensor(), T.Resize(im_size), T.RandomCrop(im_size),
                        T.RandomHorizontalFlip(p=0.5), T.ColorJitter(0.5)]
    train_dataset = torchvision.datasets.ImageFolder(
        train_folder, transform=T.Compose(train_transforms))

    val_loader_dict = get_val_loader_dict(cfg)
    logger.info(f"Building val dataloaders successfully...")

    # Optionally restrict training to a random subset (cfg.train.ds_size).
    # Either way the script runs under DDP (see setup()), so always use a
    # DistributedSampler; the dead `if True:` branches were removed.
    if cfg.train.get("ds_size", None) is not None:
        indices = np.random.choice(list(range(len(train_dataset))), cfg.train.ds_size)
        dataset = Subset(train_dataset, indices)
    else:
        dataset = train_dataset
    sampler = DistributedSampler(dataset, shuffle=True)
    train_loader = DataLoader(
        dataset,
        batch_size=cfg.train.batch_size,
        num_workers=num_workers,
        sampler=sampler,
        pin_memory=True,
        drop_last=True  # recommended under DDP so all ranks see equal batches
    )

    classes = ['']  # dummy text query - we do not use text queries for the training
    time_now = time.time()
    model = build_model(cfg.model, class_names=classes)
    print(f"Model built in {time.time() - time_now} seconds")

    # Only a single (primary) teacher model is kept.
    primary_teacher = cfg.train.get('primary_teacher')
    model.load_teachers(primary_teacher)
    rank, world_size = get_dist_info()
    logger.info(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")

    cudnn.benchmark = True
    do_train(model, cfg.train, {"train": train_loader}, out_path=out_path,
             logger=logger, val_loader_dict=val_loader_dict, wandbrun=wandbrun)


def parse_args():
    """Parse the single positional CLI argument: the config file path."""
    parser = argparse.ArgumentParser(
        description='CLIP-DINOiser custom training procedure')
    parser.add_argument('config', help='config file path')
    return parser.parse_args()

def wandb_init(cfg):
    """Initialize a Weights & Biases run on rank 0; return None on other ranks."""
    import wandb
    if dist.get_rank() != 0:
        return None
    # SECURITY: hard-coded API key committed to source — move it to an
    # environment variable / secret store and rotate this key.
    os.environ["WANDB_API_KEY"] = 'ec02ce569fb7eb2ba3b2e827ca71a52c129b35dc'
    run = wandb.init(
        entity="liangchen976",
        project="mm-dinoclip",
        name=f"{cfg.train.get('name')}",
        tags=["demo"],
        config={
            "learning_rate": cfg.train.get('corr_lr'),
            "distill_loss_weight": cfg.train.get('distill_loss_weight'),
            "dino_sim_loss_weight": cfg.train.get('dino_sim_loss_weight'),
            # BUGFIX: "train_block_num" appeared twice in the original dict
            # literal (same value); the duplicate key was removed.
            "train_block_num": cfg.model.get('train_block_num'),
            "train_proj": cfg.model.get('train_proj'),
            "ds_size": cfg.train.get('ds_size'),
            "batch_size": cfg.train.get('batch_size'),
            "loss_attn": cfg.train.get('loss_attn'),
            "loss_dis": cfg.train.get('loss_dis'),
            "proj": cfg.model.get('proj'),
        },
    )
    return run


if __name__ == "__main__":
    mp.set_start_method("fork", force=True)
    setup()
    args = parse_args()
    # initialize(config_path="configs", version_base=None)
    # 获取配置文件所在的目录和文件名
    config_dir = os.path.dirname(args.config)
    config_name = os.path.basename(args.config)
    initialize_config_dir(config_dir=config_dir, version_base=None)
    cfg = compose(config_name=config_name)
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)
    wandbrun = wandb_init(cfg)
    main(cfg, wandbrun)
