import numpy as np
import random
import torch
import torch.nn.functional as F


def attention_crop(attention_maps, input_image, k=4):
    """Crop the top-k attended regions from each image in a batch.

    For every image, the k attention parts with the highest (normalized)
    average activation are selected. For each selected part, a bounding box
    is taken around the pixels whose activation exceeds a random fraction
    (0.4-0.6) of that part's peak, and the crop is resized back to the full
    input resolution.

    Args:
        attention_maps: tensor of shape (B, num_parts, h, w).
        input_image: tensor of shape (B, C, H, W).
        k: number of parts to crop per image (default 4, preserving the
            original hard-coded behavior). Requires num_parts >= k.

    Returns:
        ret_imgs: tensor of shape (B, k, C, H, W) holding the resized crops.
        topks: tensor of shape (B, k) with the selected parts' weights.
    """
    B, C, H, W = input_image.shape
    batch_size = attention_maps.shape[0]
    # Upsample attention to the input resolution so box coordinates are
    # expressed in input-image pixels. detach(): cropping is not
    # differentiated through the attention maps.
    attention_maps = F.interpolate(attention_maps.detach(), size=(H, W), mode='bilinear')
    # Mean activation per part -> sqrt -> normalize to a probability-like
    # weight per part (epsilon avoids division by zero downstream).
    part_weights = F.avg_pool2d(attention_maps, (H, W)).reshape(batch_size, -1)
    part_weights = torch.add(torch.sqrt(part_weights), 1e-12)
    part_weights = torch.div(part_weights, torch.sum(part_weights, dim=1).unsqueeze(1)).cpu()

    ret_imgs = []
    topks = []
    for i in range(batch_size):
        attention_map = attention_maps[i]
        part_weight = part_weights[i]
        topk, inds = part_weight.topk(k)

        attens = torch.index_select(attention_map, 0, inds)
        regions = []
        for att in attens:
            # Random threshold makes the crop size stochastic (augmentation).
            threshold = random.uniform(0.4, 0.6)
            itemindex = torch.where(att >= att.max() * threshold)
            height_min = itemindex[0].min()
            # +1 makes the slice inclusive of the max index. Without it the
            # max-activation row/column is dropped, and a single-pixel
            # activation region produces an EMPTY crop, which makes
            # F.interpolate raise on a zero-sized tensor (bug fix).
            height_max = itemindex[0].max() + 1
            width_min = itemindex[1].min()
            width_max = itemindex[1].max() + 1
            out_img = input_image[i][:, height_min:height_max, width_min:width_max].unsqueeze(0)
            out_img = F.interpolate(out_img, size=(H, W), mode='bilinear', align_corners=True)
            regions.append(out_img.squeeze(0))
        ret_imgs.append(torch.stack(regions))
        topks.append(topk)
    return torch.stack(ret_imgs), torch.stack(topks)

def calculate_pooling_center_loss(features, centers, label, alfa=0.95):
    """Center loss between pooled features and their class centers.

    Args:
        features: tensor (B, ...); flattened to (B, D) internally.
        centers: tensor (num_classes, D) of running class centers.
        label: long tensor (B,) of class indices, one per sample.
        alfa: EMA momentum for the center update (default 0.95).

    Returns:
        center_loss: scalar tensor, mean squared L2 distance between each
            feature vector and its (L2-normalized) class center.
        diff: tensor (B, D), the (1 - alfa)-scaled update direction for each
            sample's center. Features are detached here so the center update
            does not backpropagate into the network.
    """
    features = features.reshape(features.shape[0], -1)
    # Gather each sample's class center and L2-normalize it before
    # comparison. NOTE(review): features themselves are not normalized here;
    # presumably the caller provides comparably-scaled features — confirm.
    centers_batch = centers[label]
    centers_batch = torch.nn.functional.normalize(centers_batch, dim=-1)
    diff = (1 - alfa) * (features.detach() - centers_batch)
    distance = torch.sum(torch.pow(features - centers_batch, 2), dim=-1)
    center_loss = torch.mean(distance)
    return center_loss, diff

if __name__ == '__main__':
    # Smoke test: crop top-4 parts from a tiny random batch and show shapes.
    demo_images = torch.rand(3, 3, 4, 4)
    demo_attention = torch.rand(3, 5, 4, 4)
    crops, weights = attention_crop(demo_attention, demo_images)
    print(crops.shape, weights.shape)
