from numpy.lib import math
import torch
import numpy as np

import torch.nn as nn

from function import get_normalized_poisson, kl_gaussian, mixed_2_gaussian_distribution

def traj_break(trajs):
    """Split a flat trajectory buffer into per-episode segments.

    Cuts `trajs["observations"]` and `trajs["actions"]` at every index
    where `trajs["terminals"]` is truthy; the terminal step is included
    in its segment. Any trailing steps after the final terminal are
    dropped (no segment is emitted for an unfinished episode).

    Returns:
        (obs_segs, act_segs): two parallel lists of array slices.
    """
    obs_segs = []
    act_segs = []
    start = 0
    n_steps = len(trajs["observations"])
    for idx in range(n_steps):
        if trajs["terminals"][idx]:
            obs_segs.append(trajs["observations"][start:idx + 1])
            act_segs.append(trajs["actions"][start:idx + 1])
            start = idx + 1
    return obs_segs, act_segs

def seg_zero_pad(segs, pad_len=100):
    """Prepend `pad_len` rows of zeros to every segment.

    Mutates `segs` in place: each entry is replaced by a new array with a
    zero block (same trailing shape) stacked on top. The same list object
    is also returned for convenience.
    """
    for idx, seg in enumerate(segs):
        zeros = np.zeros((pad_len,) + seg.shape[1:])
        segs[idx] = np.vstack((zeros, seg))
    return segs

def seg_zero_append(segs):
    """Zero-pad every segment at the end so all share the maximum length.

    Mutates `segs` in place (each entry replaced by a padded copy) and
    returns the same list. Raises ValueError on an empty list, as the
    original did via `max()` on an empty sequence.
    """
    target_len = max(len(seg) for seg in segs)
    for idx, seg in enumerate(segs):
        tail = np.zeros((target_len - len(seg),) + seg.shape[1:])
        segs[idx] = np.vstack((seg, tail))
    return segs

def get_array(seg_list, pad_len=100):
    """Front-pad, length-align, and stack all segments into one ndarray.

    Note: mutates `seg_list` in place via the two padding helpers.
    """
    front_padded = seg_zero_pad(seg_list, pad_len)
    aligned = seg_zero_append(front_padded)
    return np.array(aligned)

def get_tensor_f(narr_segs):
    """Convert a numpy array to a float32 torch tensor."""
    tensor = torch.from_numpy(narr_segs)
    return tensor.float()

def get_tensor(narr_segs):
    """Wrap a numpy array as a torch tensor, preserving its dtype (zero-copy)."""
    wrapped = torch.from_numpy(narr_segs)
    return wrapped

def get_dataSet(obs_segs, act_segs, device=torch.device("cpu")):
    """Build a TensorDataset of float32 (observation, action) pairs on `device`."""
    obs_tensor = torch.from_numpy(obs_segs).float().to(device)
    act_tensor = torch.from_numpy(act_segs).float().to(device)
    return torch.utils.data.TensorDataset(obs_tensor, act_tensor)

def get_dataLoader(dataset, batchSize=5, shuffle=True):
    """Wrap a dataset in a DataLoader with the given batch size and shuffling."""
    return torch.utils.data.DataLoader(dataset, batch_size=batchSize, shuffle=shuffle)

def get_lstm_initial_state(batch_size, hidden_dim, device):
    """Return zeroed (hidden, cell) LSTM states of shape (batch_size, hidden_dim)."""
    state_shape = (batch_size, hidden_dim)
    hidden = torch.zeros(state_shape, device=device)
    cell = torch.zeros(state_shape, device=device)
    return hidden, cell

def get_reconstruct_mse(pred_actions, actions):
    """Mean-squared reconstruction error.

    `pred_actions` is a list of per-step prediction tensors; they are
    concatenated along dim 1 before comparison against `actions`.
    Uses the default 'mean' reduction, identical to nn.MSELoss().
    """
    stacked_preds = torch.cat(pred_actions, dim=1)
    return nn.functional.mse_loss(stacked_preds, actions)

def get_latent_coding_kl(latent_codings, split_size):
    """Average Gaussian KL over all latent codings in a trajectory.

    Each coding tensor carries mu and log_var side by side on dim 1 and is
    split at `split_size` before the KL term (from `kl_gaussian`) is
    computed; the per-step KL is batch-averaged, then averaged over steps.
    """
    total_kl = 0.0
    for coding in latent_codings:
        mu, log_var = torch.split(coding, split_size, dim=1)
        total_kl = total_kl + kl_gaussian(mu, log_var).mean(0)
    return total_kl / len(latent_codings)

def get_mask_01_loss(batch_step_mask, mean1, sigma1, mean2, sigma2):
    """Batch-mean negative log-likelihood of a step mask under a
    two-component Gaussian mixture (via `mixed_2_gaussian_distribution`)."""
    likelihood = mixed_2_gaussian_distribution(batch_step_mask, mean1, sigma1, mean2, sigma2)
    return (-torch.log(likelihood)).mean(0)

def get_batch_mask_01_loss(batch_masks, mean1, sigma1, mean2, sigma2):
    """Average the per-step two-Gaussian mixture NLL over all step masks."""
    step_losses = [
        get_mask_01_loss(step_mask, mean1, sigma1, mean2, sigma2)
        for step_mask in batch_masks
    ]
    return sum(step_losses) / len(batch_masks)

def get_segment_mask_loss(batch_masks, lam=80, lower=1, upper=100, epsilon=0.001):
    """Segment-length regularizer for a sequence of continuation masks.

    Each entry of `batch_masks` is a per-batch "segment continues here"
    probability. For every real step, the implied distribution over
    segment lengths l in [lower, upper] is compared (via KL) against a
    Poisson(lam) normalized over that range by `get_normalized_poisson`
    (assumed to return `upper - lower + 1` probabilities — TODO confirm).

    The history is front-padded with `upper` virtual steps: `upper - 1`
    all-ones masks plus one `epsilon`-valued mask marking a segment
    boundary, so the earliest real steps have a well-defined length
    distribution. The per-step KL is detached, so gradients flow only
    through the `(1 - mask)` boundary factor, matching the original
    implementation.

    Raises IndexError on an empty `batch_masks`, as before.
    """
    template = batch_masks[0]
    # ones_like matches the device AND dtype of the real masks; the original
    # torch.ones(shape) was CPU/float32-only and broke on GPU inputs.
    padded_masks = [torch.ones_like(template) for _ in range(upper - 1)]
    padded_masks.append(torch.ones_like(template) * epsilon)
    padded_masks.extend(batch_masks)

    normalized_poisson = get_normalized_poisson(lam, lower, upper)
    # np.log instead of the numpy.lib math shim (removed in numpy >= 1.25).
    log_normalized_poisson = [float(np.log(p)) for p in normalized_poisson]

    steps_kl = []
    for step in range(upper, len(padded_masks)):
        # log q(l): a segment of length l ends at `step` = continue for the
        # previous l-1 steps, then a boundary (1 - mask) at step - l.
        l_log_probs = []
        for l in range(lower, upper + 1):
            log_prob = torch.log(1 - padded_masks[step - l])
            for j in range(step - l + 1, step):
                log_prob = log_prob + torch.log(padded_masks[j])
            l_log_probs.append(log_prob)
        # KL(p_poisson || q) summed over candidate lengths. Detached: the KL
        # acts as a per-step weight, not a gradient path (original behavior).
        step_kl = sum(
            p * (log_p - log_q)
            for p, log_p, log_q in zip(
                normalized_poisson, log_normalized_poisson, l_log_probs
            )
        )
        steps_kl.append(step_kl.detach())

    # Weight each step's KL by the probability that a segment ends there,
    # then average over steps and over the batch.
    loss_sum = sum(
        (1 - padded_masks[upper + offset]) * kl
        for offset, kl in enumerate(steps_kl)
    )
    loss_sum = loss_sum / len(steps_kl)
    return loss_sum.mean(0)