import random

import numpy as np
import torch.cuda
from numpy.random import shuffle
from torch import nn

from common_models.MLP import MLP
from common_utils.utils import l2_normalize
import torch.nn.functional as F

# Module-wide compute device: use the default CUDA device when one is
# available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


class ALEModel(nn.Module):
    """Attribute Label Embedding model.

    Maps image features to per-attribute scores and trains them to align
    with the ground-truth attribute vectors via a cosine-embedding loss.
    """

    def __init__(self, args, feat_dim, attr_dim):
        """
        Args:
            args: config namespace; must provide ``dropout`` and ``norm``
                for the label-embedding MLP.
            feat_dim: dimensionality of the input image features.
            attr_dim: number of attributes (output size of the classifier).
        """
        super(ALEModel, self).__init__()
        # Input dim = image-feature dim, output dim = number of attributes.
        self.attr_classifier = MLP(feat_dim, attr_dim, relu=False).to(device)
        # Label-embedding head; not used in forward() but kept for callers
        # elsewhere that may read it.
        self.label_emb = MLP(feat_dim, 512, relu=True, dropout=args.dropout, norm=args.norm).to(device)
        # Parameter-free loss module; build once instead of per forward call.
        self.loss_fn = nn.CosineEmbeddingLoss(margin=0.2)

    def forward(self, imgs, attrs=None, labels=None, sig=None):
        """Predict attribute scores and compute the cosine-embedding loss.

        Args:
            imgs: (batch, feat_dim) image features.
            attrs: (batch, attr_dim) ground-truth attribute vectors.
                Required for the loss despite the ``None`` default.
            labels: unused; retained for interface compatibility.
            sig: unused; retained for interface compatibility (previously
                fed a dead max/min candidate computation, now removed).

        Returns:
            Tuple ``(loss, attr_preds)``: scalar loss tensor and the
            (batch, attr_dim) attribute score tensor.
        """
        # Attribute scores predicted from the image features.
        attr_preds = self.attr_classifier(imgs)
        # Target +1 for every row: predictions should point in the same
        # direction as the ground-truth attributes. Created on the input's
        # device so the module works regardless of the global default.
        target = torch.ones(imgs.shape[0], device=imgs.device)
        loss = self.loss_fn(attr_preds, attrs, target=target)
        # Default reduction is already 'mean', so .mean() is a no-op on the
        # scalar; kept so a future non-scalar reduction still returns a scalar.
        return loss.mean(), attr_preds
