import torch
from torch import nn
from network.CT_max_distance import get_rt, cube_max_distance
from utils.cal_voxel_mse import cal_voxel_mse_loss
import numpy as np
from utils.label_transform import label2real
from scipy.stats import norm as gauss_norm


def norm(datas, min_v=None, max_v=None):
    """Min-max normalize *datas* into [0, 1], rounded to 5 decimal places.

    :param datas: numpy array of values to normalize
    :param min_v: lower bound of the range; defaults to ``np.min(datas)``
    :param max_v: upper bound of the range; defaults to ``np.max(datas)``
    :return: normalized array, ``round((datas - min_v) / (max_v - min_v), 5)``
    """
    lo = np.min(datas) if min_v is None else min_v
    hi = np.max(datas) if max_v is None else max_v
    return np.round((datas - lo) / (hi - lo), 5)


def max_distance(rx, tx, pre_rx, pre_tx, Batch_size, is_label_transform=True,
                 rot_cen=None, d_s2c=None):
    """
    For a batch of ground-truth / predicted pose pairs, compute the maximum
    vertex distance of each pair and return the batch mean.

    :param rx: ground-truth rotation pose parameters (torch tensor or numpy array)
    :param tx: ground-truth translation pose parameters (torch tensor or numpy array)
    :param pre_rx: predicted rotation pose parameters
    :param pre_tx: predicted translation pose parameters
    :param Batch_size: number of poses in the batch
    :param is_label_transform: whether to convert between label space and real coordinates
    :param rot_cen: position of the rotation center inside the voxel volume
    :param d_s2c: distance from the voxel rotation center to the X-ray source
    :return: mean of the per-pose maximum vertex distances
    """
    def _as_batch(arr):
        # Accept either a torch tensor (detach and move to host first) or a
        # numpy array. Always reshape to (Batch_size, 3): the original code
        # only reshaped the numpy branch, so a flattened tensor would break
        # the column indexing below; reshape is a no-op for correct shapes.
        # Converting each argument independently also handles mixed
        # tensor/array inputs correctly.
        try:
            arr = arr.detach().cpu().numpy()
        except AttributeError:
            pass
        return np.asarray(arr).reshape(Batch_size, 3)

    truth_rx, truth_tx = _as_batch(rx), _as_batch(tx)
    pred_rx, pred_tx = _as_batch(pre_rx), _as_batch(pre_tx)

    if is_label_transform:
        # Ground-truth values in real coordinates
        tru_alpha, tru_beta, tru_theta, tru_tx, tru_ty, tru_tz = label2real(truth_rx, truth_tx, Batch_size)
        # Predicted values in real coordinates
        pre_alpha, pre_beta, pre_theta, pre_tx, pre_ty, pre_tz = label2real(pred_rx, pred_tx, Batch_size)
    else:
        tru_alpha, tru_beta, tru_theta, tru_tx, tru_ty, tru_tz = \
            truth_rx[:, 0], truth_rx[:, 1], truth_rx[:, 2], truth_tx[:, 0], truth_tx[:, 1], truth_tx[:, 2]
        pre_alpha, pre_beta, pre_theta, pre_tx, pre_ty, pre_tz = \
            pred_rx[:, 0], pred_rx[:, 1], pred_rx[:, 2], pred_tx[:, 0], pred_tx[:, 1], pred_tx[:, 2]

    all_dis = 0
    for i in range(Batch_size):
        rt1 = get_rt(tru_alpha[i], tru_beta[i], tru_theta[i], tru_tx[i], tru_ty[i], tru_tz[i],
                     rot_cen=rot_cen, d_s2c=d_s2c)
        rt2 = get_rt(pre_alpha[i], pre_beta[i], pre_theta[i], pre_tx[i], pre_ty[i], pre_tz[i],
                     rot_cen=rot_cen, d_s2c=d_s2c)
        # Maximum distance between corresponding cube vertices of the two poses
        all_dis += cube_max_distance(rt1, rt2)
    return all_dis / Batch_size


def vm_sample(rx, tx, pre_rx, pre_tx, Batch_size,
              voxel_size=None, interval_num=None, is_label_transform=True,
              rot_cen=None, d_s2c=None):
    """
    Sample voxel points at the given interval and compute the mean voxel-MSE
    (vm) between each ground-truth / predicted pose pair in the batch.

    :param rx: ground-truth rotation pose parameters (torch tensor or numpy array)
    :param tx: ground-truth translation pose parameters (torch tensor or numpy array)
    :param pre_rx: predicted rotation pose parameters
    :param pre_tx: predicted translation pose parameters
    :param Batch_size: number of poses in the batch
    :param voxel_size: size of the CT voxel volume
    :param interval_num: sampling stride (in points) along each dimension
    :param is_label_transform: whether to convert between label space and real coordinates
    :param rot_cen: position of the rotation center inside the voxel volume
    :param d_s2c: distance from the voxel rotation center to the X-ray source
    :return: mean voxel-MSE over the batch, as a numpy array
    """
    def _as_batch(arr):
        # Accept either a torch tensor (detach and move to host first) or a
        # numpy array. Always reshape to (Batch_size, 3): the original code
        # only reshaped the numpy branch, so a flattened tensor would break
        # the column indexing below; reshape is a no-op for correct shapes.
        # Converting each argument independently also handles mixed
        # tensor/array inputs correctly.
        try:
            arr = arr.detach().cpu().numpy()
        except AttributeError:
            pass
        return np.asarray(arr).reshape(Batch_size, 3)

    truth_rx, truth_tx = _as_batch(rx), _as_batch(tx)
    pred_rx, pred_tx = _as_batch(pre_rx), _as_batch(pre_tx)

    if is_label_transform:
        # Ground-truth values in real coordinates
        tru_alpha, tru_beta, tru_theta, tru_tx, tru_ty, tru_tz = label2real(truth_rx, truth_tx, Batch_size)
        # Predicted values in real coordinates
        pre_alpha, pre_beta, pre_theta, pre_tx, pre_ty, pre_tz = label2real(pred_rx, pred_tx, Batch_size)
    else:
        tru_alpha, tru_beta, tru_theta, tru_tx, tru_ty, tru_tz = \
            truth_rx[:, 0], truth_rx[:, 1], truth_rx[:, 2], truth_tx[:, 0], truth_tx[:, 1], truth_tx[:, 2]
        pre_alpha, pre_beta, pre_theta, pre_tx, pre_ty, pre_tz = \
            pred_rx[:, 0], pred_rx[:, 1], pred_rx[:, 2], pred_tx[:, 0], pred_tx[:, 1], pred_tx[:, 2]

    all_vm = 0
    for i in range(Batch_size):
        rt1 = get_rt(tru_alpha[i], tru_beta[i], tru_theta[i], tru_tx[i], tru_ty[i], tru_tz[i],
                     rot_cen=rot_cen, d_s2c=d_s2c)
        rt2 = get_rt(pre_alpha[i], pre_beta[i], pre_theta[i], pre_tx[i], pre_ty[i], pre_tz[i],
                     rot_cen=rot_cen, d_s2c=d_s2c)
        all_vm += cal_voxel_mse_loss(voxel_size, interval_num, rt1, rt2)
    return np.array(all_vm / Batch_size)


# 计算图片相似度
def caculate_NCC(image1, image2):
    """Normalized cross-correlation between two images; 1 means identical
    (up to affine intensity change), -1 means perfectly anti-correlated."""
    dev1 = image1 - np.mean(image1)
    dev2 = image2 - np.mean(image2)
    return np.mean(dev1 * dev2) / (np.std(image1) * np.std(image2))


# 正则化项
def l1_regularization(model, l1_alpha):
    """L1 penalty on the affine scale weights of every BatchNorm2d layer.

    :param model: nn.Module whose submodules are scanned recursively
    :param l1_alpha: scalar multiplier applied to the accumulated penalty
    :return: ``l1_alpha * sum(|gamma|)`` over all BatchNorm2d layers
        (0 when the model has none)
    """
    # isinstance instead of an exact type() check: idiomatic, and it also
    # matches BatchNorm2d subclasses, which the original silently skipped.
    penalty = sum(torch.abs(module.weight).sum()
                  for module in model.modules()
                  if isinstance(module, nn.BatchNorm2d))
    return l1_alpha * penalty


def l2_regularization(model, l2_alpha):
    """L2 (weight-decay style) penalty on the kernels of every Conv2d layer.

    :param model: nn.Module whose submodules are scanned recursively
    :param l2_alpha: scalar multiplier applied to the accumulated penalty
    :return: ``l2_alpha * sum(w ** 2) / 2`` over all Conv2d kernel weights
        (0 when the model has none)
    """
    # isinstance instead of an exact type() check: idiomatic, and it also
    # matches Conv2d subclasses, which the original silently skipped.
    penalty = sum((module.weight ** 2).sum() / 2.0
                  for module in model.modules()
                  if isinstance(module, nn.Conv2d))
    return l2_alpha * penalty
