﻿# ---------------------------------------------------------------------------------------------------
# CLIP-DINOiser
# authors: Monika Wysoczanska, Warsaw University of Technology
# ---------------------------------------------------------------------------------------------------
from datasets import transforms

import argparse
import os
import random
import time
import math
from datetime import datetime
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
import torchvision
import torchvision.transforms as T
from hydra import compose, initialize, initialize_config_dir
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist
from mmseg.apis import multi_gpu_test
from tqdm import tqdm
import torch.nn.functional as F

from helpers.logger import get_logger
from models import build_model
from scheduler import MultiStepLR
from segmentation.evaluation import build_seg_dataloader, build_seg_dataset, build_seg_inference


def get_criterion(cfg):
    """Build the training loss selected by ``cfg['loss']``.

    Supported names: 'CE' (binary cross-entropy on logits), 'cosine',
    'L1', 'MSE', 'KL'. Any other value raises NotImplementedError.
    """
    loss_name = cfg.get('loss')
    if loss_name == 'CE':
        # NOTE: 'CE' maps to BCE-with-logits, not categorical cross-entropy.
        return torch.nn.BCEWithLogitsLoss(reduction='mean')
    if loss_name == 'cosine':
        return torch.nn.CosineEmbeddingLoss(reduction='mean')
    if loss_name == 'L1':
        return torch.nn.L1Loss(reduction='mean')
    if loss_name == 'MSE':
        return torch.nn.MSELoss(reduction='mean')
    if loss_name == 'KL':
        # 'batchmean' matches the mathematical definition of KL divergence.
        return torch.nn.KLDivLoss(reduction='batchmean')
    raise NotImplementedError
    


def _align_teacher_corr(corr_teacher, Hs, Ws):
    """Resize teacher correlations to match student token grid."""
    B, Tt, Ht, Wt = corr_teacher.shape
    assert Tt == Ht * Wt, f"Teacher tokens {Tt} mismatch spatial size {Ht * Wt}"
    corr_key = F.interpolate(
        corr_teacher.reshape(B * Tt, 1, Ht, Wt),
        size=(Hs, Ws),
        mode='bilinear',
        align_corners=False,
    ).view(B, Tt, Hs, Ws)
    corr5 = corr_key.view(B, Ht, Wt, Hs, Ws).permute(0, 3, 4, 1, 2)
    corr_q = F.interpolate(
        corr5.reshape(B * Hs * Ws, 1, Ht, Wt),
        size=(Hs, Ws),
        mode='bilinear',
        align_corners=False,
    ).view(B, Hs, Ws, Hs, Ws)
    return corr_q.view(B, Hs * Ws, Hs, Ws)

def _compute_teacher_attn_loss(corr_teacher, clip_trn_attn_list, criterion):
    """Average ``criterion`` between teacher correlations and each student attention layer.

    The teacher maps are resampled to each layer's patch grid; both sides are
    turned into spatial distributions (student in log-space) before scoring.
    """
    assert isinstance(clip_trn_attn_list, list), "clip_trn_attn_list should be a list"
    B, Tt, Ht, Wt = corr_teacher.shape
    per_layer_losses = []
    for clip_attn in clip_trn_attn_list:
        # Drop the CLS row and column; keep patch-to-patch attention only.
        attn = clip_attn[1][:, 1:, 1:]
        B2, Ns, Ns2 = attn.shape
        assert B2 == B and Ns == Ns2, f"attention 形状异常: {attn.shape}"
        side = int(math.isqrt(Ns))
        Hs = Ws = side
        assert Hs * Ws == Ns, f"Ns={Ns} 不是完全平方，检查学生 patch 网格"
        attn_maps = attn.reshape(B, Ns, Hs, Ws)
        aligned_corr = _align_teacher_corr(corr_teacher, Hs, Ws)
        teacher_dist = normalize_attention(aligned_corr)
        # log-space student distribution (first argument of KLDivLoss).
        student_log_dist = normalize_attention(attn_maps, apply_log=True)
        per_layer_losses.append(criterion(student_log_dist, teacher_dist))
    return torch.stack(per_layer_losses).mean()

def caculate_dino_sim_loss(teacher_corrs, clip_trn_attn_list, criterion, teacher_key=None):
    """Pick one teacher correlation tensor and score it against the student attentions.

    Args:
        teacher_corrs: a single Tensor, or a dict of named teacher Tensors.
        clip_trn_attn_list: per-layer student attention outputs.
        criterion: loss module applied between distributions.
        teacher_key: optional dict key selecting a specific teacher; falls
            back to the first dict entry when absent.
    """
    if torch.is_tensor(teacher_corrs):
        corr = teacher_corrs
    elif isinstance(teacher_corrs, dict):
        corr = teacher_corrs.get(teacher_key) if teacher_key is not None else None
        if corr is None:
            # Fall back to an arbitrary (first-inserted) teacher.
            corr = next(iter(teacher_corrs.values()))
    else:
        raise TypeError("teacher_corrs must be a Tensor or dict of Tensors")
    assert corr is not None, "teacher correlation tensor is required"
    return _compute_teacher_attn_loss(corr, clip_trn_attn_list, criterion)

def normalize_attention(attention, eps=1e-8, apply_log=False):
    """Turn attention maps into (log-)probability distributions over their spatial dims.

    Each map is min-max rescaled over its trailing (H, W) dims, flattened, run
    through softmax (or log-softmax when ``apply_log`` is True — suitable as
    the first input of ``KLDivLoss``), then reshaped back to the input shape.

    Args:
        attention: tensor of shape (B, tokens, H, W) (any shape with >= 3 dims;
            the dims after the second are flattened for the softmax).
        eps: numerical guard against division by zero for constant maps.
        apply_log: return log-probabilities instead of probabilities.

    Returns:
        Tensor with the same shape as ``attention``.
    """
    original_shape = attention.shape
    # Min-max rescale each map to [0, 1] over its trailing two dims.
    min_val = attention.min(dim=-1, keepdim=True)[0].min(dim=-2, keepdim=True)[0]
    max_val = attention.max(dim=-1, keepdim=True)[0].max(dim=-2, keepdim=True)[0]
    attention = (attention - min_val) / (max_val - min_val + eps)

    # Flatten spatial dims so softmax runs over all positions of one map.
    flat = attention.flatten(start_dim=2)

    if apply_log:
        # Log probability distribution (first input of KLDivLoss).
        normalized_attn = F.log_softmax(flat, dim=-1)
    else:
        # Probability distribution (second input of KLDivLoss).
        normalized_attn = F.softmax(flat, dim=-1)

    # BUG FIX: the original called ``view_as(attention)`` after ``attention``
    # had been rebound to the flattened tensor — a no-op that returned the
    # flattened result despite the "restore original shape" comment. Reshape
    # to the true input shape instead. (Elementwise losses and KLDivLoss with
    # 'batchmean' are numerically unchanged by this reshape.)
    return normalized_attn.view(original_shape)

def do_train(model, train_cfg, loaders, out_path, logger, val_loader_dict, wandbrun):
    """Train the student CLIP backbone with distillation + DINO-similarity losses.

    Args:
        model: wrapper exposing ``clip_backbone_train`` plus the frozen teachers;
            its forward returns (cls_token_freeze, cls_token_trn, teacher_corrs,
            clip_trn_attn_list).
        train_cfg: training sub-config (learning rates, loss weights, epochs,
            milestones, primary teacher key...).
        loaders: dict holding the 'train' DataLoader.
        out_path: output root; a timestamped checkpoint subdir is created inside.
        logger: project logger.
        val_loader_dict: task name -> pre-built validation DataLoader.
        wandbrun: active wandb run used for per-iteration metric logging.
    """
    # Timestamped checkpoint directory, e.g. <out_path>/25122024-153000
    str_date_time = datetime.fromtimestamp(time.time()).strftime("%d%m%Y-%H%M%S")
    ch_path = os.path.join(out_path, str_date_time)
    # exist_ok guards against a rare same-second restart collision.
    os.makedirs(ch_path, exist_ok=True)

    model.to("cuda")
    epochs = train_cfg.get("epochs", 100)
    criterion = get_criterion(train_cfg)
    # Single param group for the trainable CLIP backbone; 'corr_lr' is the
    # optimizer default but is overridden by the explicit 'clip_lr' here.
    optimizer = torch.optim.AdamW(
        [{'params': model.clip_backbone_train.parameters(), 'lr': train_cfg.get('clip_lr')}],
        lr=train_cfg.get('corr_lr'), weight_decay=1e-4)
    scheduler = MultiStepLR(optimizer, train_cfg.get('milestones'),
                            gamma=train_cfg.get("step_lr_gamma"), warmup=0)

    primary_teacher = train_cfg.get('primary_teacher')

    for epoch in range(epochs):
        tbar = tqdm(enumerate(loaders['train'], 0))

        for i, data in tbar:
            # Trainable submodules are chosen at model init, but train mode
            # must still be re-enabled every step (validate() flips to eval).
            model.clip_backbone_train.train()
            inputs = data[0].to("cuda")
            cls_token_freeze, cls_token_trn, teacher_corrs, clip_trn_attn_list = model(inputs)

            # Attention distillation against the (primary) teacher correlations.
            dino_sim_loss = caculate_dino_sim_loss(teacher_corrs, clip_trn_attn_list,
                                                   criterion, primary_teacher)

            # CLS-token distillation: 1 - cosine similarity between the frozen
            # and the trainable CLIP class tokens.
            cos_sim = F.cosine_similarity(F.normalize(cls_token_freeze, dim=-1),
                                          F.normalize(cls_token_trn, dim=-1),
                                          dim=-1)
            distill_loss = 1 - cos_sim.mean()

            loss = (train_cfg.get('distill_loss_weight') * distill_loss
                    + train_cfg.get('dino_sim_loss_weight') * dino_sim_loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            tbar.set_description(
                f"epoch{epoch}: processing {i}th batch data | "
                f"total_loss:{loss.item():.4f}, "
                f"distill_loss:{distill_loss.item():.4f}, "
                f"dino_sim_loss:{dino_sim_loss.item():.4f}"
            )
            scheduler.step()
            wandbrun.log({"train/iter": i,
                          "train/distill_loss": distill_loss.item(),
                          "train/dino_sim_loss": dino_sim_loss.item()})

        # BUG FIX: this periodic-validation block was indented at function
        # level, so it ran exactly once after the whole loop. It now runs
        # every 10th epoch as the ``epoch % 10`` condition intends.
        if epoch % 10 == 0 and epoch > 0:
            model.remove_hooks()
            logger.info('removing all hooks')
            # NOTE(review): ``cfg`` here is the module-level config assigned in
            # the __main__ guard, not a local — confirm this is intentional.
            validate(model.clip_backbone_train, cfg, val_loader_dict, wandbrun)
            model.register_hooks()
            logger.info('registering all hooks')

    # Drop teacher references so the checkpoint only captures the student.
    model.found_model = None
    model.vit_encoder = None
    save_path = os.path.join(ch_path, f'trn_clip_model_epoch{epoch}.pth')
    # Persist only the trained CLIP backbone weights.
    torch.save(model.clip_backbone_train.backbone.state_dict(), save_path)
    logger.info(f"Model saved to {save_path}")


@torch.no_grad()
def validate(model, cfg, loader_dict, wandbrun):
    """Evaluate every task in ``cfg.evaluate.task`` and log mIoU to wandb.

    Args:
        model: student backbone, switched to eval mode here.
        cfg: full experiment config; ``cfg.evaluate`` holds per-task settings.
        loader_dict: task name -> pre-built validation DataLoader.
        wandbrun: active wandb run used for metric logging.
    """
    model.eval()
    logger = get_logger()
    ret = {}
    tasks = cfg.evaluate.task

    for key in tasks:
        # Loaders are built once during initialization and passed in.
        loader = loader_dict[key]
        # model.apply_found = False
        # NOTE(review): ``apply_found`` is set True for voc/coco_object but
        # never reset (the reset above is commented out), so it stays True for
        # any task evaluated afterwards — confirm the task order makes this safe.
        if key in ["voc", "coco_object"]:
            model.apply_found = True
        metric = run_val(model, loader, cfg.evaluate.get(key), logger, cfg)
        # Rank 0 computes the metric; broadcast so all ranks log the same value.
        dist.broadcast_object_list(metric)
        torch.cuda.empty_cache()
        dist.barrier()
        ret[f"val/{key}_miou"] = metric[0]["mIoU"] * 100
        wandbrun.log({f"val/{key}_miou": metric[0]["mIoU"] * 100})
    logger.info(ret)


def get_val_loader_dict(cfg):
    """Build one validation dataloader per evaluation task, keyed by task name."""
    return {
        task: build_seg_dataloader(build_seg_dataset(cfg.evaluate.get(task)))
        for task in cfg.evaluate.task
    }


def run_val(model, loader, eval_key, logger, cfg):
    """Run distributed mIoU evaluation on one segmentation dataset.

    Returns a single-element list (``[metric_dict]`` on rank 0, ``[None]``
    elsewhere) so the caller can pass it to ``dist.broadcast_object_list``.
    """
    # Refresh the text vocabulary to the current dataset's class names.
    model.decode_head.update_vocab(loader.dataset.CLASSES)
    seg_model = build_seg_inference(
        model,
        loader.dataset,
        cfg,
        eval_key)
    seg_model.cuda()
    model.device = 'cuda'
    # NOTE(review): ``model`` is rebound to the DDP-wrapped seg_model here,
    # shadowing the argument — the original model is not used again below.
    model = MMDistributedDataParallel(seg_model, device_ids=[torch.cuda.current_device()])
    # model.eval()
    results = multi_gpu_test(
        model=model,
        data_loader=loader,
        tmpdir=None,
        gpu_collect=True,
        efficient_test=False,
        pre_eval=True,
        format_only=False,
    )

    # Only rank 0 computes the metric; other ranks return a placeholder.
    if dist.get_rank() == 0:
        metric = [loader.dataset.evaluate(results, metric="mIoU", logger=logger)]
    else:
        metric = [None]
    return metric


def main(cfg, wandbrun):
    """Entry point: build data, model and distributed state, then run training."""
    out_path = cfg.get('output')
    if out_path == "":
        out_path = os.getcwd()
    os.makedirs(out_path, exist_ok=True)
    dset_path = cfg.train.get('data')  # Imagenet root
    train_folder = os.path.join(dset_path, 'train')
    assert os.path.exists(train_folder), 'Empty dataset path'
    logger = get_logger(cfg)
    logger.info("Running dinoclip training")

    # Set up the ImageFolder loader.
    im_size = cfg.train.get('im_size', 448)
    num_workers = cfg.train.get('num_workers', 4)
    # BUG FIX: this local list was named ``transforms``, shadowing the
    # ``datasets.transforms`` module imported at the top of the file.
    train_transforms = [T.ToTensor(), T.Resize(im_size), T.RandomCrop(im_size),
                        T.RandomHorizontalFlip(p=0.5), T.ColorJitter(0.5)]
    train_dataset = torchvision.datasets.ImageFolder(
        train_folder, transform=T.Compose(train_transforms))

    val_loader_dict = get_val_loader_dict(cfg)
    logger.info("Building val dataloaders successfully...")
    if cfg.train.get("ds_size", None) is not None:
        # Train on a random subset of ``ds_size`` images.
        indices = np.random.choice(list(range(len(train_dataset))), cfg.train.ds_size)
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=cfg.train.batch_size,
            num_workers=num_workers,
            sampler=torch.utils.data.SubsetRandomSampler(indices))
    else:
        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=cfg.train.batch_size,
            num_workers=num_workers,
            shuffle=True)

    classes = ['']  # dummy text query - we do not use text queries for the training
    model = build_model(cfg.model, class_names=classes)
    model.load_teachers()
    mp.set_start_method("fork", force=True)
    init_dist("pytorch")
    rank, world_size = get_dist_info()
    logger.info(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")

    cudnn.benchmark = True
    do_train(model, cfg.train, {"train": train_loader}, out_path=out_path,
             logger=logger, val_loader_dict=val_loader_dict, wandbrun=wandbrun)


def parse_args():
    """Parse command-line arguments: a single positional config file path."""
    parser = argparse.ArgumentParser(
        description='CLIP-DINOiser custom training procedure')
    parser.add_argument('config', help='config file path')
    return parser.parse_args()

def wandb_init(cfg):
    """Start a wandb run that records the key training hyper-parameters.

    Args:
        cfg: full experiment config; train/model sub-configs feed the run config.

    Returns:
        The active ``wandb`` run object.
    """
    import wandb
    run = wandb.init(
        entity="liangchen976",
        project="mm-dinoclip",
        name=f"{cfg.train.get('name')}",
        tags=["demo"],  # experiment tag
        config={
            "learning_rate": cfg.train.get('corr_lr'),
            "distill_loss_weight": cfg.train.get('distill_loss_weight'),
            "dino_sim_loss_weight": cfg.train.get('dino_sim_loss_weight'),
            # BUG FIX: "train_block_num" appeared twice in this dict literal
            # (the later duplicate silently overwrote the first with the same
            # value); the duplicate has been removed.
            "train_block_num": cfg.model.get('train_block_num'),
            "train_proj": cfg.model.get('train_proj'),
            "ds_size": cfg.train.get('ds_size'),
            "batch_size": cfg.train.get('batch_size'),
            "loss": cfg.train.get('loss'),
        },
    )
    return run


if __name__ == "__main__":
    args = parse_args()
    # Resolve the Hydra config directory and file name from the CLI path.
    config_dir = os.path.dirname(args.config)
    config_name = os.path.basename(args.config)
    initialize_config_dir(config_dir=config_dir, version_base=None)
    cfg = compose(config_name=config_name)
    # Seed every RNG source for reproducibility.
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)
    random.seed(cfg.seed)
    wandbrun = wandb_init(cfg)
    main(cfg, wandbrun)
