import copy
import numpy as np

import torch
import torch.nn as nn

import math


def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias tensors in place.

    Applies ``weight_init(weight, gain=gain)`` to the module's weight and,
    when the module has a bias, ``bias_init(bias)`` to it.  Returns the same
    module so the call can be chained inside ``nn.Sequential``-style setup.
    """
    weight_init(module.weight.data, gain=gain)
    bias = module.bias
    if bias is not None:
        bias_init(bias.data)
    return module


def get_clones(module, N):
    """Return an ``nn.ModuleList`` holding N independent deep copies of *module*.

    Each copy has its own parameter storage, so mutating one clone does not
    affect the others (or the original).
    """
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))


def check(input):
    """Convert a numpy array to a torch tensor; pass anything else through.

    Uses ``isinstance`` rather than an exact ``type(...) ==`` comparison so
    ndarray subclasses are converted as well.  ``torch.from_numpy`` shares
    memory with the source array (no copy).
    """
    if isinstance(input, np.ndarray):
        return torch.from_numpy(input)
    return input


def get_gard_norm(it):
    """Return the global L2 norm of the gradients of the parameters in *it*.

    Computes ``sqrt(sum_i ||grad_i||^2)`` over all parameters whose ``.grad``
    is set; parameters with no gradient are skipped.

    Accumulates plain Python floats (via ``.item()``) instead of 0-dim
    tensors, avoiding needless tensor arithmetic before the final
    ``math.sqrt`` — the return value is a float either way.
    """
    sum_grad = 0.0
    for x in it:
        if x.grad is None:
            continue
        sum_grad += x.grad.norm().item() ** 2
    return math.sqrt(sum_grad)


def huber_loss(e, d):
    """Elementwise Huber loss of error *e* with threshold *d*.

    Quadratic ``e**2 / 2`` for ``|e| <= d`` and linear ``d * (|e| - d/2)``
    beyond the threshold.

    Bug fix: the linear-branch mask was ``(e > d)``, which is zero for
    ``e < -d`` — large *negative* errors produced zero loss.  The mask must
    use the absolute error so both tails get the linear term.
    """
    a = (abs(e) <= d).float()
    b = (abs(e) > d).float()  # was (e > d): dropped the e < -d tail
    return a * e ** 2 / 2 + b * d * (abs(e) - d / 2)


def mse_loss(e):
    """Elementwise half mean-squared-error loss: ``0.5 * e**2``."""
    return 0.5 * e ** 2

