import torch
import torch.nn as nn
import numpy as np
import os
from PIL import Image
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
from torch.nn import Parameter
import math
import pytest
import torch.nn.functional as F
# Image extensions accepted by wash_data; matching via str.endswith is
# case-sensitive, so upper-case suffixes (e.g. ".JPG") are skipped.
IMAGESUFFIX=('.png', '.jpg', '.jpeg','.bmp')


def weights_init_kaiming(m):
    """Kaiming-normal initialisation, dispatched on the module's class name.

    Linear layers use fan_out, Conv layers fan_in; affine BatchNorm layers
    get unit weight and zero bias. Intended for ``model.apply(...)``.
    """
    name = type(m).__name__
    if 'Linear' in name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in name:
        # non-affine BatchNorm has no learnable weight/bias to initialise
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)


def weights_init_classifier(m):
    """Initialise classifier Linear layers: N(0, 0.001) weight, zero bias.

    Intended for ``model.apply(...)``; non-Linear modules are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUG FIX: `if m.bias:` evaluates bool() on a tensor, which raises
        # RuntimeError for any bias with more than one element. Test for
        # presence, not truthiness.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)


def process_image(image_path):
    """Check that *image_path* is a decodable image; delete it if not.

    Returns True when the file opens and converts to RGB, False when it is
    corrupt (in which case the path is printed and the file removed).
    """
    try:
        # context manager closes the file handle (the original leaked it,
        # which also blocks deletion on Windows)
        with Image.open(image_path) as image:
            image.convert('RGB')
        return True
    except Exception:
        print(image_path)
        # was os.system(f'rm {path}'): non-portable and shell-injection-prone
        # for paths containing spaces or metacharacters
        try:
            os.remove(image_path)
        except OSError:
            pass  # best-effort cleanup, as in the original
        return False

def wash_data(base_dir):
    """Validate every image under *base_dir* in parallel.

    Walks the tree, collects files whose suffix is in IMAGESUFFIX, and runs
    process_image over them with a thread pool (decoding is I/O-bound, so
    threads overlap well). Returns the list of per-file boolean results.
    """
    image_files = [
        os.path.join(root, fname)
        for root, _dirs, files in os.walk(base_dir)
        for fname in files
        if fname.endswith(IMAGESUFFIX)
    ]

    with ThreadPoolExecutor() as pool:
        progress = tqdm(pool.map(process_image, image_files),
                        total=len(image_files), desc="处理图片进度", unit="张")
        return list(progress)

class SupConLoss(nn.Module):
    """Supervised contrastive loss between two feature sets (text vs. image).

    Positive pairs are rows whose labels match; the loss is the mean negative
    log-likelihood of the positives under a row-wise softmax over similarities.

    Args:
        device: device the positive-pair mask is placed on.
        temperature: softmax temperature. Default 1.0 preserves the original
            hard-coded behaviour.
    """

    def __init__(self, device, temperature=1.0):
        super(SupConLoss, self).__init__()
        self.device = device
        self.temperature = temperature

    def forward(self, text_features, image_features, t_label, i_targets):
        """Compute the loss.

        Args:
            text_features: (B, D) features. NOTE(review): assumed row-normalised
                so that the matmul yields cosine similarities — confirm at caller.
            image_features: (N, D) features.
            t_label: (B,) labels for text_features.
            i_targets: (N,) labels for image_features.
        """
        batch_size = text_features.shape[0]
        batch_size_N = image_features.shape[0]
        # 1 where text label == image label (the positive pairs)
        mask = torch.eq(t_label.unsqueeze(1).expand(batch_size, batch_size_N),
                        i_targets.unsqueeze(0).expand(batch_size, batch_size_N)).float().to(self.device)

        logits = torch.div(torch.matmul(text_features, image_features.T), self.temperature)
        # subtract the per-row max (detached) for numerical stability
        logits_max, _ = torch.max(logits, dim=1, keepdim=True)
        logits = logits - logits_max.detach()
        exp_logits = torch.exp(logits)
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # BUG FIX: rows with no positive pair divided by zero and poisoned the
        # mean with NaN; clamp makes such rows contribute 0 instead.
        pos_per_row = mask.sum(1)
        mean_log_prob_pos = (mask * log_prob).sum(1) / pos_per_row.clamp(min=1e-8)
        loss = - mean_log_prob_pos.mean()

        return loss


class CenterLoss(nn.Module):
    """Center loss.

    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
        use_gpu (bool): place the learnable centers on CUDA.
        use_amp (bool): store the centers in half precision.
    """

    def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True, use_amp=False):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.use_gpu = use_gpu

        centers = torch.randn(self.num_classes, self.feat_dim)
        if self.use_gpu:
            centers = centers.cuda()
        if use_amp:
            # BUG FIX: Tensor.half() is NOT in-place; the original discarded
            # its result, so use_amp was a silent no-op.
            centers = centers.half()
        self.centers = nn.Parameter(centers)

    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, feat_dim).
            labels: ground truth labels with shape (batch_size,).
        """
        assert x.size(0) == labels.size(0), "features.size(0) is not equal to labels.size(0)"
        batch_size = x.size(0)
        # squared Euclidean distances: ||x||^2 + ||c||^2 - 2 x.c
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
        # BUG FIX: the legacy positional addmm_(beta, alpha, ...) signature is
        # removed in current PyTorch; use keyword beta/alpha instead.
        distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)

        classes = torch.arange(self.num_classes).long()
        if self.use_gpu:
            classes = classes.cuda()
        labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels.eq(classes.expand(batch_size, self.num_classes))

        # Each row of mask has exactly one True (the sample's own class), so
        # boolean indexing yields one distance per sample in row order —
        # equivalent to the original Python loop, without the per-row overhead.
        dist = distmat[mask].clamp(min=1e-12, max=1e+12)  # for numerical stability
        loss = dist.mean()
        return loss




class ArcFace(nn.Module):
    """Additive angular margin product (ArcFace, Deng et al., CVPR 2019).

    Produces scaled logits where the target-class cosine is replaced by
    cos(theta + m), for use with cross-entropy.

    Args:
        in_features: feature dimension of the input.
        out_features: number of classes.
        s: logit scale.
        m: angular margin (radians).
        bias: whether to allocate a bias parameter (unused in forward).
    """

    def __init__(self, in_features, out_features, s=30.0, m=0.50, bias=False):
        super(ArcFace, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.s = s
        self.m = m
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)

        # threshold/fallback used to keep theta + m monotonic past pi
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m

        self.weight = Parameter(torch.empty(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.empty(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, input, label):
        """Return (batch, out_features) scaled margin logits for *label*."""
        cosine = F.linear(F.normalize(input), F.normalize(self.weight))
        sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
        # cos(theta + m) via the angle-addition formula
        phi = cosine * self.cos_m - sine * self.sin_m
        phi = torch.where(cosine > self.th, phi, cosine - self.mm)
        # --------------------------- convert label to one-hot ---------------------------
        # BUG FIX: device was hard-coded to 'cuda', which crashed CPU runs;
        # zeros_like follows the device and dtype of the logits.
        one_hot = torch.zeros_like(cosine)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # target class gets the margin logit, all others keep plain cosine
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output *= self.s

        return output

class CircleLoss(nn.Module):
    """Circle loss with a learnable class-weight matrix (Sun et al., CVPR 2020).

    Args:
        in_features: feature dimension of the input.
        num_classes: number of identity classes.
        s: logit scale.
        m: relaxation margin.
    """

    def __init__(self, in_features, num_classes, s=256, m=0.25):
        super(CircleLoss, self).__init__()
        self.weight = Parameter(torch.empty(num_classes, in_features))
        self.s = s
        self.m = m
        self._num_classes = num_classes
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    # BUG FIX: overriding __call__ instead of forward bypasses nn.Module's
    # hook machinery (forward hooks, tracing, etc.). Defining forward keeps
    # `loss(feat, targets)` working for all existing callers.
    def forward(self, bn_feat, targets):
        """Return the scalar circle loss for a (batch, in_features) input."""
        # cosine similarity between normalised features and class weights
        sim_mat = F.linear(F.normalize(bn_feat), F.normalize(self.weight))
        # adaptive weights (detached so they act as constants in the gradient)
        alpha_p = torch.clamp_min(-sim_mat.detach() + 1 + self.m, min=0.)
        alpha_n = torch.clamp_min(sim_mat.detach() + self.m, min=0.)
        delta_p = 1 - self.m
        delta_n = self.m

        s_p = self.s * alpha_p * (sim_mat - delta_p)
        s_n = self.s * alpha_n * (sim_mat - delta_n)

        targets = F.one_hot(targets, num_classes=self._num_classes)

        pred_class_logits = -(targets * s_p + (1.0 - targets) * s_n)

        return pred_class_logits.mean()




# Forward-pass test for CircleLoss across batch sizes, feature dims, class counts.
@pytest.mark.parametrize("batch_size", [1, 4])
@pytest.mark.parametrize("in_features", [10, 20])
@pytest.mark.parametrize("num_classes", [5, 8])
def test_circle_loss_forward(batch_size, in_features, num_classes):
    circle_loss = CircleLoss(in_features, num_classes)
    bn_feat = torch.randn(batch_size, in_features)
    targets = torch.randint(0, num_classes, (batch_size,))

    output = circle_loss(bn_feat, targets)

    assert isinstance(output, torch.Tensor)
    # BUG FIX: the loss returns a 0-dim scalar (mean), not a
    # (batch_size, num_classes) matrix — the old shape assertion always failed.
    assert output.dim() == 0
    assert torch.isfinite(output)




# Forward-pass test for CircleLoss across batch sizes, feature dims, class counts.
# NOTE(review): this duplicates the identically named test above; the later
# definition shadows the earlier one under pytest — consider deleting one copy.
@pytest.mark.parametrize("batch_size", [1, 4])
@pytest.mark.parametrize("in_features", [10, 20])
@pytest.mark.parametrize("num_classes", [5, 8])
def test_circle_loss_forward(batch_size, in_features, num_classes):
    circle_loss = CircleLoss(in_features, num_classes)
    bn_feat = torch.randn(batch_size, in_features)
    targets = torch.randint(0, num_classes, (batch_size,))

    output = circle_loss(bn_feat, targets)

    assert isinstance(output, torch.Tensor)
    # BUG FIX: the loss returns a 0-dim scalar (mean), not a
    # (batch_size, num_classes) matrix — the old shape assertion always failed.
    assert output.dim() == 0
    assert torch.isfinite(output)
if __name__ == '__main__':
    # Smoke test: CircleLoss on a ReID-sized random batch (36 x 512, 751 ids).
    criterion = CircleLoss(in_features=512, num_classes=751)
    feats = torch.randn(36, 512)
    pids = torch.Tensor([147, 359, 255, 649, 323, 680, 215, 135, 289, 480, 558, 338, 663, 245,
        117, 111, 177, 122,  12, 594, 150, 162, 696, 134, 113, 363, 297, 716,
        571, 588, 637, 290, 573, 240, 395, 241]).long()
    smoke_loss = criterion(feats, pids).mean()

    print(smoke_loss)

    # Equivalent smoke runs for CenterLoss and wash_data were left commented
    # out in the original; re-add them here if needed:
    # center_loss = CenterLoss(num_classes=751, feat_dim=512, use_gpu=True)
    # print(center_loss(feats.cuda(), pids.cuda()))
    # wash_data('/mnt/c/Users/luotianhang/work_item/data_item/reid/msmt17/bounding_box_train/')