# --------------------------------------------------------
# Modified By $@#Anonymous#@$
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------

import os
from math import inf
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
from sklearn.manifold import TSNE
import torch
import torch.distributed as dist
from timm.utils import ModelEma as ModelEma
import wandb
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix

def load_checkpoint_ema(config, model, optimizer, lr_scheduler, loss_scaler, logger, model_ema: "ModelEma" = None):
    """Resume training state from the checkpoint at ``config.MODEL.RESUME``.

    Loads model weights (and EMA weights when *model_ema* is given) with
    ``strict=False``. Unless in eval mode, also restores optimizer, scheduler,
    loss-scaler state and the start epoch.

    Returns:
        tuple[float, float]: (max_accuracy, max_accuracy_ema) recorded in the
        checkpoint, or (0.0, 0.0) when absent or when resuming in eval mode.
    """
    # BUG FIX: log message said "Resuming form" (typo for "from").
    logger.info(f"==============> Resuming from {config.MODEL.RESUME}....................")
    if config.MODEL.RESUME.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(
            config.MODEL.RESUME, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')

    if 'model' in checkpoint:
        msg = model.load_state_dict(checkpoint['model'], strict=False)
        logger.info(f"resuming model: {msg}")
    else:
        logger.warning(f"No 'model' found in {config.MODEL.RESUME}! ")

    if model_ema is not None:
        if 'model_ema' in checkpoint:
            msg = model_ema.ema.load_state_dict(checkpoint['model_ema'], strict=False)
            logger.info(f"resuming model_ema: {msg}")
        else:
            logger.warning(f"No 'model_ema' found in {config.MODEL.RESUME}! ")

    max_accuracy = 0.0
    max_accuracy_ema = 0.0
    # Optimizer/scheduler/epoch are only restored for a genuine training resume.
    if not config.EVAL_MODE and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        config.defrost()
        config.TRAIN.START_EPOCH = checkpoint['epoch'] + 1
        config.freeze()
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
        if 'max_accuracy' in checkpoint:
            max_accuracy = checkpoint['max_accuracy']
        if 'max_accuracy_ema' in checkpoint:
            max_accuracy_ema = checkpoint['max_accuracy_ema']

    # Free the (potentially large) checkpoint before training continues.
    del checkpoint
    torch.cuda.empty_cache()
    return max_accuracy, max_accuracy_ema

def load_pretrained_ema(config, model, logger, model_ema: "ModelEma" = None, load_ema_separately=False):
    """Load pretrained weights from ``config.MODEL.PRETRAINED`` for fine-tuning.

    Shape-mismatched tensors and the classification head (task-specific) are
    dropped before loading with ``strict=False``. When *model_ema* is given,
    it is initialized from the 'model_ema' entry if *load_ema_separately* is
    True, otherwise from the same filtered 'model' weights.
    """
    logger.info(f"==============> Loading weight {config.MODEL.PRETRAINED} for fine-tuning......")
    checkpoint = torch.load(config.MODEL.PRETRAINED, map_location='cpu')

    # BUG FIX: the original indexed checkpoint['model'] unconditionally before its
    # own "No 'model' found" fallback, so a checkpoint without that key raised
    # KeyError instead of logging the warning.
    if 'model' in checkpoint:
        state = checkpoint['model']
        model_dict = model.state_dict()
        # Drop pretrained tensors whose shapes don't match the current model.
        for k in list(state.keys()):
            if k in model_dict and model_dict[k].shape != state[k].shape:
                del state[k]
        # Never carry over the classification head. pop() tolerates missing keys,
        # unlike the original `del` which assumed bias exists whenever weight does.
        state.pop('head.weight', None)
        state.pop('head.bias', None)

        msg = model.load_state_dict(state, strict=False)
        logger.warning(msg)
        logger.info(f"=> loaded 'model' successfully from '{config.MODEL.PRETRAINED}'")
    else:
        logger.warning(f"No 'model' found in {config.MODEL.PRETRAINED}! ")

    if model_ema is not None:
        key = "model_ema" if load_ema_separately else "model"
        if key in checkpoint:
            # When key == 'model', this reuses the filtered state mutated above.
            msg = model_ema.ema.load_state_dict(checkpoint[key], strict=False)
            logger.warning(msg)
            logger.info(f"=> loaded '{key}' successfully from '{config.MODEL.PRETRAINED}' for model_ema")
        else:
            logger.warning(f"No '{key}' found in {config.MODEL.PRETRAINED}! ")

    # Free the (potentially large) checkpoint eagerly.
    del checkpoint
    torch.cuda.empty_cache()


def save_checkpoint_ema(config, epoch, model, max_accuracy, optimizer, lr_scheduler, loss_scaler, logger, model_ema: "ModelEma" = None, max_accuracy_ema=None):
    """Write a training checkpoint to ``config.OUTPUT`` as ``ckpt_epoch_{epoch}.pth``.

    Saves model/optimizer/scheduler/scaler state plus bookkeeping values; EMA
    weights and their best accuracy are included when *model_ema* is given.
    """
    save_state = {'model': model.state_dict(),
                  'optimizer': optimizer.state_dict(),
                  'lr_scheduler': lr_scheduler.state_dict(),
                  'max_accuracy': max_accuracy,
                  'scaler': loss_scaler.state_dict(),
                  'epoch': epoch,
                  'config': config}

    if model_ema is not None:
        # BUG FIX: the key was misspelled 'max_accuray_ema', so load_checkpoint_ema
        # (which reads 'max_accuracy_ema') could never restore the EMA best accuracy.
        save_state.update({'model_ema': model_ema.ema.state_dict(),
            'max_accuracy_ema': max_accuracy_ema})

    save_path = os.path.join(config.OUTPUT, f'ckpt_epoch_{epoch}.pth')
    logger.info(f"{save_path} saving......")
    torch.save(save_state, save_path)
    logger.info(f"{save_path} saved !!!")


def get_grad_norm(parameters, norm_type=2):
    """Return the total gradient norm of *parameters*.

    Parameters without a gradient are skipped; a lone tensor may be passed
    instead of an iterable. The result is a plain Python float.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    norm_type = float(norm_type)
    grads = [p.grad.data for p in parameters if p.grad is not None]
    accumulated = sum(g.norm(norm_type).item() ** norm_type for g in grads)
    return accumulated ** (1.0 / norm_type)


def auto_resume_helper(output_dir):
    """Return the most recently modified ``.pth`` checkpoint in *output_dir*.

    Returns None when the directory contains no checkpoint files.
    """
    candidates = [name for name in os.listdir(output_dir) if name.endswith('pth')]
    print(f"All checkpoints founded in {output_dir}: {candidates}")
    if not candidates:
        return None
    # Pick by modification time so interrupted runs resume from the newest file.
    latest = max((os.path.join(output_dir, name) for name in candidates),
                 key=os.path.getmtime)
    print(f"The latest checkpoint founded: {latest}")
    return latest


def reduce_tensor(tensor):
    """Return *tensor* averaged across all distributed ranks.

    A clone is SUM all-reduced and divided by the world size, leaving the
    input untouched. Requires torch.distributed to be initialized.
    """
    world_size = dist.get_world_size()
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    averaged /= world_size
    return averaged


def ampscaler_get_grad_norm(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Return the total gradient norm of *parameters* as a tensor.

    Parameters without gradients are ignored; an empty input yields
    ``torch.tensor(0.)``. ``norm_type == inf`` gives the max-abs norm.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    if not grads:
        return torch.tensor(0.)
    device = grads[0].device
    norm_type = float(norm_type)
    if norm_type == inf:
        return max(g.abs().max().to(device) for g in grads)
    # Stack the per-parameter norms, then take the norm of that vector.
    per_param = torch.stack([torch.norm(g, norm_type).to(device) for g in grads])
    return torch.norm(per_param, norm_type)


class NativeScalerWithGradNormCount:
    state_dict_key = "amp_scaler"

    def __init__(self):
        self._scaler = torch.amp.GradScaler('cuda')

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if clip_grad is not None:
                assert parameters is not None
                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            else:
                self._scaler.unscale_(optimizer)
                norm = ampscaler_get_grad_norm(parameters)
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)

def DA_accuracy(model, loader):
    """Compute classification accuracy of *model* over *loader* on GPU.

    Args:
        model: network mapping a float batch to logits of shape [B, C].
        loader: iterable yielding (inputs, labels) pairs.

    Returns:
        float: fraction of correctly classified samples. The model is put
        in eval mode for the pass and restored to train mode afterwards.
    """
    correct = 0
    total = 0

    model.eval()
    with torch.no_grad():
        for data in loader:
            x = data[0].cuda().float()
            y = data[1].cuda().long()
            p = model(x)

            if p.size(1) == 1:
                # Single-logit binary head: predict positive when logit > 0.
                # BUG FIX: was `p.gt(0).ep(y)` — `.ep` is not a Tensor method and
                # raised AttributeError; `.eq` (element-wise equality) is intended.
                correct += (p.gt(0).eq(y).float()).sum().item()
            else:
                correct += (p.argmax(1).eq(y).float()).sum().item()
            total += len(x)
    model.train()
    return correct / total




def visda_acc(predict, all_label):
    """Per-class accuracy summary in the VisDA style.

    Returns:
        tuple: (mean per-class accuracy in percent, space-joined string of
        each class's accuracy rounded to two decimals).
    """
    cm = confusion_matrix(all_label, predict)
    # Diagonal over row sums = recall (accuracy) of each class, in percent.
    per_class = cm.diagonal() / cm.sum(axis=1) * 100
    formatted = ' '.join(str(np.round(value, 2)) for value in per_class)
    return per_class.mean(), formatted



def tsne_visualization(source_feats, target_feats, save_dir='../autodl-tmp/tsne_plots/', mode=''):
    """Save a 2-D t-SNE scatter plot of source vs. target domain features.

    Args:
        source_feats (Tensor): source-domain features, shape [N, D]
        target_feats (Tensor): target-domain features, shape [M, D]
        save_dir (str): directory the PNG is written to (created if missing)
        mode (str): suffix for the image file name (e.g. the epoch number)
    """
    # BUG FIX: was `print("save_dir" + save_dir)`, which fused the label into the path.
    print(f"save_dir: {save_dir}")
    os.makedirs(save_dir, exist_ok=True)

    print(f'Source samples: {len(source_feats)}, Target samples: {len(target_feats)}')
    assert source_feats.dim() == 2 and target_feats.dim() == 2, "输入特征必须是2D张量"

    # Perplexity must stay below the sample count; clamp it to [5, 30].
    n_samples = source_feats.shape[0] + target_feats.shape[0]
    perplexity = max(5, min(30, n_samples // 3))

    combined_feats = torch.cat((source_feats, target_feats), dim=0)
    # NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn 1.5 — confirm
    # the pinned sklearn version still accepts it.
    tsne_results = TSNE(
        perplexity=perplexity,
        n_components=2,
        init='pca',
        n_iter=3000
    ).fit_transform(combined_feats.cpu().numpy())

    # Min-max normalize each embedding dimension to [0, 1].
    # BUG FIX: guard against division by zero when a dimension is constant,
    # and compute min(0) once instead of twice.
    lo = tsne_results.min(0)
    span = tsne_results.max(0) - lo
    span[span == 0] = 1.0
    tsne_results = (tsne_results - lo) / span

    # Domain indicator: 0 = source, 1 = target.
    source_labels = np.zeros(len(source_feats))
    target_labels = np.ones(len(target_feats))
    domain_labels = np.concatenate([source_labels, target_labels])

    plt.figure(figsize=(10, 8))
    plt.scatter(
        tsne_results[domain_labels == 0, 0],
        tsne_results[domain_labels == 0, 1],
        c='blue', s=10, alpha=0.6, label='Source'
    )
    plt.scatter(
        tsne_results[domain_labels == 1, 0],
        tsne_results[domain_labels == 1, 1],
        c='red', s=10, alpha=0.6, label='Target'
    )

    plt.title(f't-SNE Visualization ({mode})', fontsize=14)
    plt.xlabel('t-SNE Dimension 1', fontsize=12)
    plt.ylabel('t-SNE Dimension 2', fontsize=12)
    plt.legend(fontsize=12)
    plt.grid(alpha=0.3)

    # save the plot
    save_path = os.path.join(save_dir, f'tsne_{mode}.png')
    plt.savefig(save_path, bbox_inches='tight', dpi=300)
    plt.close()
    print(f't-SNE plot saved to: {save_path}')


def tsne_visualization_(source_feats, source_labels, target_feats, target_labels, 
                      save_dir='../autodl-tmp/tsne_plots/', mode='classes'):
    """Save a t-SNE plot colored by class, with marker shape distinguishing domains.

    Source-domain points are drawn as circles and target-domain points as
    triangles; every class gets its own color from the 'tab20' palette.

    NOTE(review): the original docstring advertised three modes
    ('domains' / 'classes' / 'both'), but only the combined class+domain
    rendering below is implemented — *mode* only affects the plot title and
    the output file name. Confirm whether the other modes were intended.

    Args:
        source_feats (Tensor): source-domain features [N, D]
        source_labels (Tensor): source-domain class labels [N]
        target_feats (Tensor): target-domain features [M, D]
        target_labels (Tensor): target-domain class labels [M]
        save_dir (str): directory the PNG is written to (created if missing)
        mode (str): suffix used in the plot title and the file name
    """
    os.makedirs(save_dir, exist_ok=True)
    
    # Concatenate features and labels; domain flag: 0 = source, 1 = target.
    combined_feats = torch.cat([source_feats, target_feats])
    domains = torch.cat([torch.zeros(len(source_feats)), torch.ones(len(target_feats))])
    all_labels = torch.cat([source_labels, target_labels])

    # Run t-SNE dimensionality reduction down to 2-D.
    # NOTE(review): perplexity=30 requires more than 30 total samples — confirm
    # callers never pass fewer; `n_iter` was renamed `max_iter` in sklearn 1.5.
    tsne = TSNE(n_components=2, perplexity=30, n_iter=3000)
    tsne_results = tsne.fit_transform(combined_feats.cpu().numpy())
    
    # Create the figure.
    plt.figure(figsize=(15, 10))
    
    # One color per class from the 'tab20' colormap.
    # NOTE(review): plt.cm.get_cmap is deprecated since matplotlib 3.7 — verify version.
    unique_classes = torch.unique(all_labels).cpu().numpy()
    color_palette = plt.cm.get_cmap('tab20', len(unique_classes))
    
    # Color by class; distinguish domains by marker shape.
    for idx, cls in enumerate(unique_classes):
        # Source-domain samples of this class
        src_mask = (all_labels.cpu() == cls) & (domains.cpu() == 0)
        plt.scatter(tsne_results[src_mask, 0], tsne_results[src_mask, 1],
                   c=np.array(color_palette(idx))[np.newaxis],  # keep the RGBA color 2-D
                   marker='o',  # circles for the source domain
                   s=40, 
                   alpha=0.7,
                   edgecolors='w',
                   linewidths=0.5,
                   label=f'Class {cls} (Source)')
        
        # Target-domain samples of this class
        tgt_mask = (all_labels.cpu() == cls) & (domains.cpu() == 1)
        plt.scatter(tsne_results[tgt_mask, 0], tsne_results[tgt_mask, 1],
                   c=np.array(color_palette(idx))[np.newaxis],
                   marker='^',  # triangles for the target domain
                   s=40,
                   alpha=0.7,
                   edgecolors='w',
                   linewidths=0.5,
                   label=f'Class {cls} (Target)')

    # Build a custom legend: domain markers first, then one entry per class.
    legend_elements = [
        Line2D([0], [0], marker='o', color='w', label='Source Domain',
              markerfacecolor='gray', markersize=12),
        Line2D([0], [0], marker='^', color='w', label='Target Domain',
              markerfacecolor='gray', markersize=12)
    ] + [
        Line2D([0], [0], marker='o', color='w', label=f'Class {cls}',
              markerfacecolor=color_palette(i), markersize=12)
        for i, cls in enumerate(unique_classes)
    ]

    plt.legend(handles=legend_elements, 
              bbox_to_anchor=(1.05, 1), 
              loc='upper left',
              borderaxespad=0.,
              frameon=False)

    plt.title(f't-SNE Visualization ({mode})')
    plt.xlabel('Dimension 1')
    plt.ylabel('Dimension 2')
    plt.grid(alpha=0.2)
    
    # Save the figure to disk.
    save_path = os.path.join(save_dir, f'tsne_{mode}.png')
    plt.savefig(save_path, bbox_inches='tight', dpi=300)
    plt.close()
    print(f'可视化结果已保存至：{save_path}')