# -*- coding: utf-8 -*-
"""
author:LTH
data:
"""
import torch.nn as nn


def center_loss(feature, label, lambdas):
    """Center loss against freshly drawn random class centers.

    NOTE(review): the centers are re-initialized from randn on every call,
    so this penalizes distance to arbitrary points rather than learned
    centers; for trainable/fixed centers see the CenterLoss modules below.

    :param feature: (N, D) float tensor of embeddings.
    :param label: (N,) tensor of class indices (float or integer values).
    :param lambdas: scalar weight applied to the loss.
    :return: scalar tensor: lambdas/2 * mean over samples of the squared
             distance to the sample's class center, each term divided by
             that class's count in the batch.
    """
    num_classes = int(max(label).item() + 1)

    # Create the centers on the same device as the features instead of
    # hard-coding .cuda(), so the function also runs on CPU-only machines.
    center = torch.randn(num_classes, feature.shape[1],
                         device=feature.device, requires_grad=True)

    # One center row per sample: (N, D).
    center_exp = center.index_select(dim=0, index=label.long())

    # Per-class sample counts in this batch, gathered per sample: (N,).
    # histc requires a floating-point input, so cast defensively.
    count = torch.histc(label.float(), bins=num_classes,
                        min=0, max=num_classes - 1)
    count_exp = count.index_select(dim=0, index=label.long())

    # Squared distance to the center, normalized by class frequency.
    loss = lambdas / 2 * torch.mean(
        torch.sum(torch.pow(feature - center_exp, 2), dim=1) / count_exp)
    return loss

import numpy as np
class CenterLoss(nn.Module):
    """Center loss with fixed (non-trainable) centers.

    The centers are ten points evenly spaced on a circle of radius 5 —
    suitable for visualizing 2-D embeddings of a 10-class problem.  They
    are registered as a frozen Parameter so ``.cuda()``/``.to()`` moves
    them together with the module.

    NOTE(review): ``num_classes`` and ``feature_shape`` are accepted for
    interface compatibility, but the hard-coded circle ignores
    ``feature_shape`` and always yields 10 two-dimensional centers —
    confirm this matches the caller's model.
    """

    def __init__(self, num_classes, feature_shape):
        super(CenterLoss, self).__init__()
        self.num_classes = num_classes
        # Ten 2-D centers on a circle of radius 5, one every 36 degrees.
        angles = np.pi * np.arange(0, 360, 36) / 180.0
        centers = np.stack([5 * np.sin(angles), 5 * np.cos(angles)], axis=1)
        # Cast to float32: torch.from_numpy on this array yields float64,
        # which would silently promote float32 features during subtraction.
        self.center = nn.Parameter(torch.from_numpy(centers).float(),
                                   requires_grad=False)

    def forward(self, feature, label):
        """Return 0.5 * sum of squared distances to each sample's center.

        :param feature: (N, 2) float tensor of embeddings.
        :param label: (N,) tensor of class indices.
        :return: scalar loss tensor.
        """
        center_exp = self.center.index_select(dim=0, index=label.long())
        # (The original also computed per-class batch counts via histc here
        # but never used them; that dead computation has been removed.)
        loss = torch.sum(torch.pow(feature - center_exp, 2))
        return 0.5 * loss

    def cuda(self, device_id=None):
        # Record GPU placement and move every tensor of the module
        # (including the frozen centers) onto the device.
        self.use_cuda = True
        return self._apply(lambda t: t.cuda(device_id))


import torch
from torch.autograd import Variable


class CenterLoss2(torch.nn.Module):
    """Center loss with trainable centers.

    :param num_classes: number of classes (rows of the center matrix).
    :param feat_dim: dimensionality of the feature embeddings.
    :param loss_weight: scalar multiplier applied to the final loss.
    """

    def __init__(self, num_classes, feat_dim, loss_weight=1.0):
        super(CenterLoss2, self).__init__()
        self.num_classes = num_classes
        self.feat_dim = feat_dim
        self.loss_weight = loss_weight
        # Trainable class centers, shape (num_classes, feat_dim).
        self.centers = torch.nn.Parameter(torch.randn(num_classes, feat_dim))
        # Kept for interface compatibility with the custom .cuda() below.
        self.use_cuda = False

    def forward(self, y, feat):
        """Compute the weighted center loss.

        :param y: (N,) tensor of class labels.
        :param feat: (N, feat_dim) feature tensor; extra singleton
                     dimensions are flattened away.
        :return: scalar loss tensor.
        :raises ValueError: if the flattened feature dim != feat_dim.
        """
        # Per-class counts in the batch.  histc is computed on CPU and
        # moved to y's device, replacing the deprecated Variable/use_cuda
        # branching of the original.
        # NOTE(review): the original adds 1 to every bin — presumably a
        # divide-by-zero guard — which biases the normalization; preserved
        # here for backward compatibility.
        hist = torch.histc(y.detach().float().cpu(), bins=self.num_classes,
                           min=0, max=self.num_classes) + 1
        hist = hist.to(y.device)
        centers_count = hist.index_select(0, y.long())  # count per sample

        batch_size = feat.size(0)
        # Flatten trailing dims to (N, feat_dim).  The original used
        # .view(N, 1, 1, -1).squeeze(), which dropped the batch dimension
        # when N == 1 and then crashed on the dim check below.
        feat = feat.view(batch_size, -1)
        if feat.size(1) != self.feat_dim:
            raise ValueError("Center's dim: {0} should be equal to input feature's dim: {1}".format(self.feat_dim,
                                                                                                    feat.size(1)))
        centers_pred = self.centers.index_select(0, y.long())
        diff = feat - centers_pred
        # Per-sample squared distance, normalized by (count + 1), summed.
        loss = self.loss_weight * 1 / 2.0 * (diff.pow(2).sum(1) / centers_count).sum()
        return loss

    def cuda(self, device_id=None):
        # Record GPU placement and move every tensor of the module.
        self.use_cuda = True
        return self._apply(lambda t: t.cuda(device_id))


if __name__ == '__main__':
    # Smoke test: run center_loss on a toy 5-sample, 2-class batch.
    # Pick the GPU only when one is available instead of unconditionally
    # calling .cuda(), so the script also starts on CPU-only machines.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    data = torch.tensor([[3, 4], [5, 6], [7, 8], [9, 8], [6, 5]],
                        dtype=torch.float32, device=device)
    label = torch.tensor([0, 0, 1, 0, 1], dtype=torch.float32, device=device)
    loss = center_loss(data, label, 2)
    print(loss)
