import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

# from .triplet_loss import TripletLoss, CrossEntropyLabelSmooth
# from .metric_learning import ContrastiveLoss
# from .metric_learning import ContrastiveLoss, SupConLoss

from layers.xent_loss import DistillKL   # , CAMKD, mse_loss_function  # , kd_loss_function, feature_loss_function
# from layers.triplet_loss import TripletLoss
from layers.focal_loss import FocalLoss

# from torch.utils.tensorboard import SummaryWriter

def make_hs_loss():    # modified by gu
    """Build the hierarchical self-distillation loss function.

    Returns a closure ``loss_func(scores, target)`` where ``scores[0]`` is
    the output of the deepest (teacher) head and ``scores[1:]`` are the
    shallower (student) heads.  The total loss is

        ID_LOSS_WEIGHT * sum(focal(student, target))
        + KL_LOSS_WEIGHT * sum(KL(student, teacher))

    Every ``update_iter_interval`` calls, ``KL_LOSS_WEIGHT`` is adapted
    from the relative standard deviations of the two loss histories, then
    clamped to a minimum, and the histories are reset.  State is stored as
    attributes on ``make_hs_loss`` so external code can inspect/tune it.
    """
    make_hs_loss.update_iter_interval = 500
    make_hs_loss.kl_loss_history = []
    make_hs_loss.focal_loss_history = []
    make_hs_loss.ID_LOSS_WEIGHT = 0.5    # cfg.MODEL.ID_LOSS_WEIGHT
    make_hs_loss.KL_LOSS_WEIGHT = 1.0    # cfg.MODEL.TRIPLET_LOSS_WEIGHT

    MIN_KL_LOSS_WEIGHT = 0.1       # floor for the adaptive KL weight
    MAX_ADJUSTMENT_FACTOR = 0.1    # max per-update fraction of the current weight

    focal_loss_func = FocalLoss(gamma=2)
    kl_loss_func = DistillKL()

    def loss_func(scores, target):
        """Compute the combined focal + self-distillation loss.

        Args:
            scores: sequence of logits; scores[0] is the teacher head,
                scores[1:] the student heads (originally 4 of them).
            target: ground-truth labels for the focal loss.

        Returns:
            Scalar loss tensor: weighted focal + weighted KL terms.
        """
        _kl_loss = 0
        _focal_loss = 0
        for student_score in scores[1:]:
            dk_ts = kl_loss_func(student_score, scores[0], is_ca=True)
            # per-sample KL averaged over the batch
            _kl_loss += dk_ts.sum() / (1.0 * dk_ts.shape[0])
            _focal_loss += focal_loss_func(student_score, target)

        make_hs_loss.kl_loss_history.append(_kl_loss.item())
        make_hs_loss.focal_loss_history.append(_focal_loss.item())

        # Histories are non-empty here (just appended), so only the
        # periodic-update check is needed.
        if len(make_hs_loss.focal_loss_history) % make_hs_loss.update_iter_interval == 0:
            focal_std = np.array(make_hs_loss.focal_loss_history).std()
            kl_std = np.array(make_hs_loss.kl_loss_history).std()

            # If the KL loss fluctuates more than the focal loss, decay the
            # KL weight toward a value proportional to focal_std / kl_std.
            if kl_std > focal_std:
                # kl_std > focal_std >= 0 guarantees kl_std > 0 here.
                new_weight = 1 - (kl_std - focal_std) / kl_std
                # Cap each adjustment so the weight changes gradually.
                max_adjustment = make_hs_loss.KL_LOSS_WEIGHT * MAX_ADJUSTMENT_FACTOR
                adjustment = min(new_weight * max_adjustment, max_adjustment)
                make_hs_loss.KL_LOSS_WEIGHT = make_hs_loss.KL_LOSS_WEIGHT * 0.9 + adjustment

            # Enforce the minimum weight and reset the histories.
            make_hs_loss.KL_LOSS_WEIGHT = max(make_hs_loss.KL_LOSS_WEIGHT, MIN_KL_LOSS_WEIGHT)
            make_hs_loss.kl_loss_history = []
            make_hs_loss.focal_loss_history = []
            print(f"update weighted loss ID_LOSS_WEIGHT={round(make_hs_loss.ID_LOSS_WEIGHT,3)},KL_LOSS_WEIGHT={make_hs_loss.KL_LOSS_WEIGHT}")

        return make_hs_loss.ID_LOSS_WEIGHT * _focal_loss + make_hs_loss.KL_LOSS_WEIGHT * _kl_loss
    return loss_func