import os
import pprint

import cv2
import torch
import torch.nn.functional as F
import numpy as np
from scipy.spatial.distance import cosine


def ensure_path(path):
    """Create *path* (including missing parent directories) if it does not exist.

    Fix: the original `os.mkdir` failed when intermediate directories were
    missing and was racy between the `exists` check and the creation;
    `makedirs(..., exist_ok=True)` handles both.
    """
    os.makedirs(path, exist_ok=True)


class Averager():
    """Incremental (running) mean of a stream of values."""

    def __init__(self):
        # n = how many values were folded in so far, v = their current mean
        self.n = 0
        self.v = 0

    def add(self, x):
        """Fold one more value into the running mean."""
        running_total = self.v * self.n + x
        self.n += 1
        self.v = running_total / self.n

    def item(self):
        """Return the current mean (0 if nothing has been added)."""
        return self.v


def count_acc(result):
    """
    Episode classification accuracy from a logit matrix.

    :param result: tensor of shape [way * n_query, way]; rows are ordered so
                   that row i belongs to class i % way (label pattern
                   0..way-1 repeated n_query times)
    :return: mean accuracy as a Python float

    Fix: the label tensor is now created on the same device as *result*; the
    original always built it on CPU, so comparing against a CUDA `pred`
    raised a device-mismatch error.
    """
    way = result.shape[1]
    n_query = result.shape[0] // way
    label = torch.arange(way, device=result.device).repeat(n_query)
    pred = torch.argmax(result, dim=1)
    # float cast keeps the mean exact regardless of device
    return (pred == label).float().mean().item()


# Shared PrettyPrinter instance used by the pprint() helper below.
_utils_pp = pprint.PrettyPrinter()


# NOTE(review): this helper deliberately shadows the stdlib `pprint` module
# imported at the top of the file — after this definition, `pprint(x)` in this
# module calls the helper, not the module.
def pprint(x):
    """Pretty-print *x* to stdout via the shared PrettyPrinter."""
    _utils_pp.pprint(x)


def compute_confidence_interval(data):
    """
    Compute 95% confidence interval
    :param data: An array of mean accuracy (or mAP) across a number of sampled episodes.
    :return: the 95% confidence interval for this data.
    """
    values = 1.0 * np.array(data)
    mean = np.mean(values)
    # 1.96 is the z-score for a two-sided 95% interval
    half_width = 1.96 * (np.std(values) / np.sqrt(len(values)))
    return mean, half_width


def calculate_imp(Attentions):
    """
    Attention-rollout importance over a stack of transformer attention maps.

    :param Attentions: list of attention tensors, each of shape
                       (B, heads, L, L); the first row/column is treated as
                       the CLS token and dropped
    :return: tensor of shape (B, L-1): the layer-by-layer product of the
             head-averaged attention (with the residual connection mixed in),
             averaged over target rows

    Fix: the original force-moved the result with `.cuda()`, crashing on
    CPU-only machines; the result now stays on the device the inputs live on
    (a no-op change when the inputs are already on GPU).
    """
    with torch.no_grad():
        device = Attentions[0].device
        # identity start: before any layer, each patch attends only to itself
        result = torch.eye(Attentions[0].size(-1) - 1, device=device).unsqueeze(0)  # (1, L-1, L-1)
        for attn in Attentions:
            attn = attn.mean(dim=1)[:, 1:, 1:]  # average heads, drop CLS row/col
            identity = torch.eye(attn.size(-1), device=attn.device).unsqueeze(0)
            mixed = (attn + identity) / 2.  # blend in the residual connection
            mixed = mixed / mixed.sum(dim=-1, keepdim=True)  # renormalize rows
            result = mixed @ result
    return result.mean(1)


def get_patches_imp(net):
    """Compute rollout importance for the attention maps cached on *net*.

    The first 12 cached maps are treated as the support set, the remainder as
    the query set. The cache is cleared afterwards.

    :param net: model object exposing an `attentions` list of tensors
    :return: (support importance, query importance) from `calculate_imp`
    """
    cached = net.attentions
    support_attn, query_attn = cached[:12], cached[12:]
    net.attentions = []
    return calculate_imp(support_attn), calculate_imp(query_attn)


def calculate_keyPatches_index(model, n_shot=1):
    """
    Rank the patches of support and query samples by attention importance.

    The first 12 attention maps cached on *model* are the support set, the
    rest the query set. Importance per patch is the elementwise product of
    head-averaged attention across layers, averaged over rows, with the CLS
    column dropped and the remainder normalized to sum to 1 per sample.
    The cache (`model.attentions`) is cleared before returning.

    :param model: object exposing an `attentions` list of (way, heads, d, d) tensors
    :param n_shot: unused, kept for interface compatibility
    :return: (score_support, support_indices, score_query, query_indices);
             indices are per-sample patch indices sorted by descending score

    Fixes:
    - descending sort is now per row (`np.argsort(...)[:, ::-1]`); the
      original `[::-1]` reversed the *sample* axis, returning ascending-sorted
      indices attached to the wrong samples;
    - tensors are moved to CPU before `.numpy()` so CUDA maps work.
    """
    attentions = model.attentions
    # average over attention heads
    head_avg = [attn.mean(dim=1) for attn in attentions]
    # split into support and query halves
    support = head_avg[:12]
    query = head_avg[12:]
    support = np.stack([t.detach().cpu().numpy() for t in support], axis=0)  # (L, way, d, d)
    support = support.transpose((1, 0, 2, 3))  # (way, L, d, d)
    query = np.stack([t.detach().cpu().numpy() for t in query], axis=0)
    query = query.transpose((1, 0, 2, 3))

    def _importance_scores(att):
        # product over layers, then mean over target rows
        imp = np.prod(att, axis=1).mean(axis=1)
        imp = imp[:, 1:]  # drop the CLS token column
        return imp / np.sum(imp, axis=1, keepdims=True)

    score_support = _importance_scores(support)
    score_query = _importance_scores(query)
    # per-sample patch indices, highest score first
    support_indices = np.argsort(score_support, axis=1)[:, ::-1]
    query_indices = np.argsort(score_query, axis=1)[:, ::-1]

    # clear the attention cache
    model.attentions = []
    return score_support, support_indices, score_query, query_indices


# Map a flat patch index to the pixel location of its square in the image.
def idx_map_loc(idx):
    """Return the closed outline of patch *idx* on a 14x14 grid of 16px patches.

    :param idx: flat patch index (row-major over a 14-patch-wide grid)
    :return: (xs, ys) — five corner coordinates tracing the patch square,
             closed back to the starting corner
    """
    side = 16
    left = (idx % 14) * side
    top = (idx // 14) * side
    right = left + side
    bottom = top + side
    return [left, right, right, left, left], [top, top, bottom, bottom, top]


"""
    计算key patch的个数
"""


def calculate_keyPatches_num(score, indices):
    nums = []
    for row, ids in zip(score, indices):
        att_score = 0.0
        n = 0
        for idx in ids:
            att_score += row[idx]
            n += 1
            if att_score >= 0.95:
                nums.append(n)
                break
    return nums


def emd_inference_opencv(cost_matrix, weight1, weight2):
    """
    :param cost_matrix: shape (196,196)
    :param weight1:  shape(196,)
    :param weight2:  shape(196,)
    :return:  emd_distance of two samples
    """
    cost_np = cost_matrix.detach().cpu().numpy().astype(np.float32)

    # clamp negatives and add a small floor so every bin carries mass
    w1 = F.relu(weight1) + 1e-5
    w2 = F.relu(weight2) + 1e-5

    # rescale each weight vector so its total mass equals its length,
    # then shape it as the (N, 1) column OpenCV expects
    scale1 = w1.shape[0] / w1.sum().item()
    scale2 = w2.shape[0] / w2.sum().item()
    w1 = (w1 * scale1).view(-1, 1).detach().cpu().numpy()
    w2 = (w2 * scale2).view(-1, 1).detach().cpu().numpy()

    cost, _, flow = cv2.EMD(w1, w2, cv2.DIST_USER, cost_np)
    return cost, flow


def emd_inference_opencv_test(distance_matrix, weight1, weight2):
    """
    :param distance_matrix:shape of way*(shot+n_query),patch_num,patch_num
                            for 5-way 1-shot setting: shape: (375,196,196)
    :param weight1:proto‘s feature weight shape : (375,196)
    :param weight2:query samples' feature weight:(375,196)
    :return emd_distance(list) len:375
             flow transfer_matrix shape:(375,196,196)
    """
    costs = []
    flows = []

    # solve one EMD problem per (prototype, query) pairing
    for idx in range(distance_matrix.shape[0]):
        cost, flow = emd_inference_opencv(distance_matrix[idx], weight1[idx], weight2[idx])
        costs.append(cost)
        flows.append(torch.from_numpy(flow))

    emd_distance = torch.Tensor(costs).cuda().double()
    flow_stack = torch.stack(flows, dim=0).cuda().double()

    return emd_distance, flow_stack


def get_similarity_map(proto, query, way):
    """
    Build pairwise patch cosine-similarity maps between every prototype and
    every query sample.

    :param way: number of classes; must equal proto.shape[0]
    :param proto: support prototypes, shape (way, P, D)
    :param query: query features, shape (num_query, P, D)
    :return: double tensor of shape (num_query, way, P, P); entry
             [q, w, m, n] is the cosine similarity between patch m of
             proto[w] and patch n of query[q]

    Fixes:
    - the original mirrored similarity[m, n] into similarity[n, m], but
      cos(proto[m], query[n]) != cos(proto[n], query[m]) in general, so half
      of every map was wrong;
    - the patch count is now taken from the input instead of hard-coded 196;
    - the O(P^2) per-pair scipy loop is replaced by one batched matmul
      (zero-norm patches yield NaN, as the scipy cosine did).
    """
    proto_n = proto.detach().cpu().double()
    proto_n = proto_n / proto_n.norm(dim=-1, keepdim=True)
    query_n = query.detach().cpu().double()
    query_n = query_n / query_n.norm(dim=-1, keepdim=True)
    # sims[w, q, m, n] = <proto_n[w, m], query_n[q, n]>
    similarity_matrix = torch.einsum('wmd,qnd->wqmn', proto_n, query_n)
    # reorder to (num_query, way, P, P) to match the downstream EMD step
    return similarity_matrix.permute(1, 0, 2, 3)


def get_emd_distance(similarity_map, weight_1, weight_2, solver='opencv'):
    """
    Re-weight each patch-similarity map by its EMD optimal-transport flow and
    reduce it to one score per (query, prototype) pair.

    :param similarity_map: tensor [num_query, way, P, P]; modified in place
    :param weight_1: prototype patch weights [way, P]
    :param weight_2: query-sample patch weights [num_query, P]
    :param solver: EMD solver; only 'opencv' is handled (anything else
                   falls through and returns None, as before)
    :return: scores of shape [num_query, way]
    """
    n_proto = similarity_map.shape[1]   # way
    n_query = similarity_map.shape[0]   # number of query samples
    if solver == 'opencv':  # use openCV solver
        for q in range(n_query):
            for p in range(n_proto):
                # EMD cost is a distance, so feed (1 - similarity)
                _, flow = emd_inference_opencv(
                    1 - similarity_map[q, p, :, :], weight_1[p, :], weight_2[q, :])
                similarity_map[q, p, :, :] = (
                    similarity_map[q, p, :, :].cuda()) * torch.from_numpy(flow).cuda()

        # collapse both patch dimensions into a scalar score per pair
        return similarity_map.sum(-1).sum(-1)


def sum_min(similarity_map, way=5, n_query=15):
    """
    Accuracy from raw similarity maps: each query sample is assigned the class
    whose map scores highest under a row-max-then-sum reduction.

    :param similarity_map: tensor [way, way * n_query, P, P]
    :param way: number of classes (previously hard-coded to 5)
    :param n_query: query samples per class (previously hard-coded to 15)
    :return: mean accuracy as a Python float

    Fixes: the episode shape is now parameterized (defaults preserve the old
    5-way 15-query behavior), and the label tensor is created on the
    prediction's device, so the function no longer requires CUDA.
    """
    scores = similarity_map.max(dim=2)[0].sum(-1)  # [way, way * n_query]
    pred = scores.argmax(dim=0)
    label = torch.arange(way, device=pred.device).repeat(n_query)
    return (pred == label).float().mean().item()