import math
import torch
import numpy as np

def kl_gaussian(mu, log_var):
    """Per-sample KL divergence KL(N(mu, exp(log_var)) || N(0, I)).

    Args:
        mu: tensor of shape (batch, dim) — posterior means.
        log_var: tensor of shape (batch, dim) — posterior log-variances.

    Returns:
        Tensor of shape (batch,) with one KL value per sample
        (summed over the latent dimensions).
    """
    var = log_var.exp()
    per_dim = 1.0 + log_var - mu * mu - var
    return -0.5 * per_dim.sum(dim=1)

def gaussian_sample(mu, log_var):
    """Draw a reparameterized sample from N(mu, exp(log_var)).

    Uses the reparameterization trick: mu + sigma * eps with eps ~ N(0, I),
    so gradients flow through mu and log_var.

    Args:
        mu: mean tensor of any shape.
        log_var: log-variance tensor, same shape as mu.

    Returns:
        Tensor of the same shape, device, and dtype as mu.
    """
    # randn_like matches mu's device AND dtype in one call, so this works on
    # CPU, CUDA, and any other accelerator (the old manual .cuda() transfer
    # only handled the CUDA case and allocated on CPU first).
    gaussian_noise = torch.randn_like(mu)
    # exp(log_var * 0.5) == sigma (standard deviation).
    return mu + torch.exp(log_var * 0.5) * gaussian_noise

def normal_distribution(x, mean, sigma):
    """Gaussian probability density N(mean, sigma^2) evaluated at x.

    Args:
        x: tensor of evaluation points.
        mean: distribution mean (scalar or broadcastable tensor).
        sigma: standard deviation (scalar or broadcastable tensor).

    Returns:
        Tensor of densities, same shape as the broadcast of the inputs.
    """
    z = (x - mean) / sigma
    coeff = 1.0 / (np.sqrt(2 * np.pi) * sigma)
    return coeff * torch.exp(-0.5 * z * z)

def mixed_2_gaussian_distribution(x, mean1, sigma1, mean2, sigma2):
    """Density of an equal-weight (0.5/0.5) mixture of two Gaussians at x.

    Args:
        x: tensor of evaluation points.
        mean1, sigma1: parameters of the first component.
        mean2, sigma2: parameters of the second component.

    Returns:
        Tensor of mixture densities, same shape as x.
    """
    first = normal_distribution(x, mean1, sigma1)
    second = normal_distribution(x, mean2, sigma2)
    return 0.5 * (first + second)

def get_poisson_distribution(lam, k):
    """Poisson pmf P(X = k) for rate parameter `lam`.

    Computed in log space as exp(k*log(lam) - lam - lgamma(k+1)) so that
    large counts do not overflow: the old `math.pow(lam, k) / factorial(k)`
    form raised OverflowError for k greater than ~170 even though the pmf
    value itself is perfectly representable.

    Args:
        lam: Poisson rate (must be >= 0; math.log raises ValueError for
            negative rates instead of silently returning garbage).
        k: count at which to evaluate the pmf.

    Returns:
        float probability; 0.0 for k < 0 (outside the support).
    """
    if k < 0:
        # Outside the Poisson support.
        return 0.0
    if lam == 0:
        # Degenerate distribution: all mass at k == 0.
        return 1.0 if k == 0 else 0.0
    # lgamma(k + 1) == log(k!), but stays finite for arbitrarily large k.
    return math.exp(k * math.log(lam) - lam - math.lgamma(k + 1))

def get_poisson_normalizer(lam, lower=1, upper=100):
    """Total Poisson probability mass over the integer range [lower, upper].

    Useful as the normalizing constant for a Poisson truncated to that range.

    Args:
        lam: Poisson rate parameter.
        lower: smallest count included (default 1 — note k = 0 is excluded).
        upper: largest count included (inclusive, default 100).

    Returns:
        np.float64 sum of the pmf over the range.
    """
    support = range(lower, upper + 1)
    masses = [get_poisson_distribution(lam, k) for k in support]
    return np.sum(masses)

def get_normalized_poisson(lam, lower, upper):
    """Poisson pmf truncated to [lower, upper] and renormalized to sum to 1.

    Args:
        lam: Poisson rate parameter.
        lower: smallest count included.
        upper: largest count included (inclusive).

    Returns:
        list of np.float64 probabilities, one per k in [lower, upper],
        summing to 1 (up to floating-point error).
    """
    raw = [get_poisson_distribution(lam, k) for k in range(lower, upper + 1)]
    total = sum(raw)
    # Dividing the ndarray keeps the np.float64 element type in the result.
    return list(np.array(raw) / total)