import math
from numpy.lib.twodim_base import mask_indices
import torch

import numpy as np
import torch.nn as nn
from torch.nn.modules import loss

from function import *

def traj_breaks(trajs):
    """Split a concatenated D4RL dataset into per-trajectory segments.

    D4RL's get_dataset() returns many trajectories concatenated back to
    back; this splits them wherever the ``terminals`` flag is set.

    Args:
        trajs: dict with "observations", "actions" and "terminals" arrays.

    Returns:
        (obs_segs, act_segs): parallel lists of per-trajectory slices.

    NOTE(review): a trailing segment with no terminal flag is dropped here,
    unlike traj_breaks_timeout — presumably intentional; confirm with callers.
    """
    obs_segs, act_segs = [], []
    n = len(trajs["observations"])
    start = 0
    for idx in range(n):
        if trajs["terminals"][idx]:
            obs_segs.append(trajs["observations"][start:idx + 1])
            act_segs.append(trajs["actions"][start:idx + 1])
            start = idx + 1
    return obs_segs, act_segs

def segs_dim_expand(segs):
    """Prepend a singleton batch dimension to every segment.

    Args:
        segs: list of np.array trajectory segments.

    Returns:
        New list where each array has shape (1, *original_shape).
    """
    return [seg[np.newaxis, ...] for seg in segs]

def seg_to_tensor(seg, device):
    """Convert one np.array trajectory into a tensor placed on *device*."""
    tensor = torch.from_numpy(seg)
    return tensor.to(device)

def traj_breaks_timeout(dataset):
    """Split a concatenated D4RL dataset into segments at ``timeouts`` flags.

    Unlike traj_breaks, a trailing segment that never hit a timeout is kept.

    Args:
        dataset: dict with "observations", "actions" and "timeouts" arrays.

    Returns:
        (obs_segs, act_segs): parallel lists of per-trajectory slices.
    """
    obs_segs, act_segs = [], []
    n = len(dataset['observations'])
    start = 0
    for idx in range(n):
        if dataset['timeouts'][idx]:
            obs_segs.append(dataset['observations'][start:idx + 1])
            act_segs.append(dataset["actions"][start:idx + 1])
            start = idx + 1
    # Keep whatever is left after the last timeout as a final segment.
    if start < n:
        obs_segs.append(dataset["observations"][start:])
        act_segs.append(dataset["actions"][start:])
    return obs_segs, act_segs

def seg_zero_pad(segs, pad_len=0):
    """Prepend pad_len all-zero rows to every segment, in place.

    Args:
        segs: list of np.array of shape [N, observation_dim].
        pad_len: number of zero rows to prepend to each segment.

    Returns:
        The same list object, each element replaced by its padded version.
    """
    if pad_len == 0:
        return segs
    for idx, seg in enumerate(segs):
        pad_shape = (pad_len,) + seg.shape[1:]
        segs[idx] = np.vstack((np.zeros(pad_shape), seg))
    return segs

def seg_zero_append(segs):
    """Zero-pad every segment at the end up to the longest segment, in place.

    Args:
        segs: list of np.array of shape [N, observation_dim].

    Returns:
        The same list object, with every element grown to the maximum length.
    """
    max_len = max(len(seg) for seg in segs)
    for idx, seg in enumerate(segs):
        tail_shape = (max_len - seg.shape[0],) + seg.shape[1:]
        segs[idx] = np.vstack((seg, np.zeros(tail_shape)))
    return segs

def get_array(segs, pad_len=0):
    """Pad the observation/action segments and stack them into one np.array.

    Prepends pad_len zero rows to each segment, right-pads all segments to a
    common length, then stacks into a [num_segs, max_len, dim] array.
    """
    padded = seg_zero_pad(segs, pad_len)
    aligned = seg_zero_append(padded)
    return np.array(aligned)

def get_tensor_f(narr_segs):
    """Turn the padded segment array into a float32 tensor."""
    tensor = torch.from_numpy(narr_segs)
    return tensor.float()

def get_tensor(narr_segs):
    """Turn the padded segment array into a tensor without a dtype cast.

    NOTE(review): torch.from_numpy keeps the input's dtype — the result is
    double only when the np.array itself is float64 (numpy's default).
    """
    return torch.from_numpy(narr_segs)

def get_dataSet(obs_segs, act_segs, device=torch.device("cpu")):
    """Build a TensorDataset of (observation, action) pairs on *device*.

    Args:
        obs_segs: np.array of padded observation segments.
        act_segs: np.array of padded action segments.
        device: target device for both tensors.
    """
    obs_t = torch.from_numpy(obs_segs).float().to(device)
    act_t = torch.from_numpy(act_segs).float().to(device)
    return torch.utils.data.TensorDataset(obs_t, act_t)

def get_dataLoader(dataset, batchSize=5, shuffle=True):
    """Wrap a Dataset in a DataLoader with the given batch size."""
    return torch.utils.data.DataLoader(dataset, batch_size=batchSize, shuffle=shuffle)

def get_lstm_initial_state(batch_size, hidden_dim, device):
    """Return zeroed LSTM (hidden, cell) states on *device*.

    Args:
        batch_size: number of trajectories in the batch.
        hidden_dim: dimensionality of the LSTM hidden state.
        device: torch.device to allocate the states on.

    Returns:
        Tuple of two [batch_size, hidden_dim] zero tensors.
    """
    shape = (batch_size, hidden_dim)
    h0 = torch.zeros(*shape, device=device)
    c0 = torch.zeros(*shape, device=device)
    return h0, c0

def get_reconstruct_mse(pred_actions, actions):
    """MSE between predicted and ground-truth actions.

    Args:
        pred_actions: list of [BatchSize, d_i] tensors, concatenated along
            dim 1 to form the full predicted action tensor.
        actions: [BatchSize, D] tensor of ground-truth actions.

    Returns:
        Scalar MSE loss tensor (mean reduction).
    """
    predictions = torch.cat(pred_actions, dim=1)
    return nn.functional.mse_loss(predictions, actions)

def get_latent_coding_kl(latent_codings, split_size):
    """Average KL between the latent codes and a standard normal.

    Args:
        latent_codings: list of [BatchSize, 2*split_size] tensors; the first
            split_size columns are the mean, the rest the log-variance.
        split_size: column index at which mean and log-variance are split.

    Returns:
        KL divergence averaged over the batch and over timesteps.
    """
    total = 0.0
    for coding in latent_codings:
        mu, log_var = torch.split(coding, split_size, dim=1)
        total = total + kl_gaussian(mu, log_var).mean(0)
    return total / len(latent_codings)

def get_mask_01_loss(batch_step_mask, mean1, sigma1, mean2, sigma2):
    """Per-step negative log-likelihood of mask values under a two-Gaussian
    mixture, averaged over the batch.

    Args:
        batch_step_mask: [BatchSize, 1] tensor of mask values for one step.
        mean1, sigma1, mean2, sigma2: mixture component parameters.
    """
    density = mixed_2_gaussian_distribution(batch_step_mask, mean1, sigma1, mean2, sigma2)
    return (-torch.log(density)).mean(0)

def get_batch_mask_01_loss(batch_masks, mean1, sigma1, mean2, sigma2):
    """Mean of the per-step mask 0/1 losses over all steps in the batch.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors, one per step.
        mean1, sigma1, mean2, sigma2: mixture component parameters.
    """
    step_losses = [get_mask_01_loss(m, mean1, sigma1, mean2, sigma2) for m in batch_masks]
    return sum(step_losses) / len(batch_masks)

def get_segment_mask_loss(batch_masks, lam=80, lower=1, upper=100, epsilon=0.001):
    """Segment-collapse loss from the per-step segmentation masks.

    For every timestep, computes the KL divergence between the segment-length
    distribution implied by the masks (conditional on a segment ending there)
    and a normalized Poisson(lam) truncated to [lower, upper], then weights
    that (detached) KL by the probability 1-mask that a segment ends at the
    step, and averages over steps and the batch.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors.
        lam: rate of the target Poisson length distribution.
        lower, upper: truncation bounds for segment lengths.
        epsilon: mask value of the virtual boundary prepended before step 0.

    Returns:
        Scalar loss tensor.
    """
    device = batch_masks[0].device
    shape = batch_masks[0].shape
    # Prepend `upper` virtual steps: (upper-1) "continue" masks followed by
    # one near-zero mask acting as the boundary before the trajectory start.
    history = [torch.ones(shape).to(device) for _ in range(upper - 1)]
    history.append(torch.ones(shape).to(device) * epsilon)
    history.extend(batch_masks)
    target = get_normalized_poisson(lam, lower, upper)
    log_target = [math.log(p) for p in target]
    per_step_kl = []
    for t in range(upper, len(history)):
        # Log-probability of each candidate segment length ending at step t.
        length_log_probs = []
        for seg_len in range(lower, upper + 1):
            lp = torch.log(1 - history[t - seg_len])
            for k in range(t - seg_len + 1, t):
                lp += torch.log(history[k])
            length_log_probs.append(lp)
        kl = 0.0
        for idx, lp in enumerate(length_log_probs):
            kl += target[idx] * (log_target[idx] - lp)
        # Detach: the KL acts as a fixed weight, gradients flow only through
        # the (1 - mask) factor below.
        per_step_kl.append(kl.detach())
    weighted = [(1 - history[upper + t]) * per_step_kl[t] for t in range(len(per_step_kl))]
    total = sum(weighted) / len(per_step_kl)
    return total.mean(0)

def get_segment_mask_stretched_loss(batch_masks, lam=80, lower=1, upper=100, epsilon=0.001):
    """Segment-collapse loss like get_segment_mask_loss, with each step's KL
    additionally scaled by a stretched sigmoid of the step's position.

    BUGFIX: the original inner loops reused the outer index ``i``; by the
    time ``stretched_sigmoid(i - upper, ...)`` was evaluated, ``i`` had been
    clobbered by ``for i in range(len(kl_items))`` to the constant
    ``upper - lower``, so every step got the same sigmoid scale. The inner
    loops now use their own index names so the sigmoid sees the true
    time-step offset within the trajectory.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors.
        lam: rate of the target Poisson length distribution.
        lower, upper: truncation bounds for segment lengths.
        epsilon: mask value of the virtual boundary prepended before step 0.

    Returns:
        Scalar loss tensor.
    """
    mask_shape = batch_masks[0].shape
    device = batch_masks[0].device
    # Prepend `upper` virtual steps: (upper-1) "continue" masks and one
    # near-zero boundary mask.
    padded_masks = [torch.ones(mask_shape).to(device) for _ in range(upper - 1)]
    padded_masks.append(torch.ones(mask_shape).to(device) * epsilon)
    padded_masks.extend(batch_masks)
    steps_kl = []
    normalized_poisson = get_normalized_poisson(lam, lower, upper)
    log_normalized_poisson = [math.log(item) for item in normalized_poisson]
    for i in range(upper, len(padded_masks)):
        # Log-probability of each candidate segment length ending at step i.
        l_log_probs = []
        for l in range(lower, upper + 1):
            log_prob = torch.log(1 - padded_masks[i - l])
            for j in range(i - l + 1, i):
                log_prob += torch.log(padded_masks[j])
            l_log_probs.append(log_prob)
        step_kl = 0.0
        for idx in range(len(l_log_probs)):
            step_kl += normalized_poisson[idx] * (log_normalized_poisson[idx] - l_log_probs[idx])
        # Scale by a stretched sigmoid of the time-step offset (the fix:
        # `i` here is the outer step index, no longer shadowed).
        step_kl *= stretched_sigmoid(i - upper, 99.5, 5)
        steps_kl.append(step_kl.detach())
    steps_loss = [(1 - padded_masks[upper + k]) * steps_kl[k] for k in range(len(steps_kl))]
    loss_sum = sum(steps_loss) / len(steps_kl)
    return loss_sum.mean(0)

def get_mask_simple_scatter(masks, lower_bound=0.25, upper_bound=0.75):
    """Fractions of mask values below, between, and above the two bounds.

    Args:
        masks: list of [BatchSize, 1] mask tensors.
        lower_bound, upper_bound: thresholds separating the three bins.

    Returns:
        (low_frac, mid_frac, high_frac) tuple of floats (assumes every step
        tensor has the same batch size as masks[0] — same as the total count).
    """
    total = len(masks) * masks[0].shape[0]
    values = [m[r][0].item() for m in masks for r in range(m.shape[0])]
    low = float(sum(1 for v in values if v < lower_bound))
    high = float(sum(1 for v in values if v > upper_bound))
    mid = float(len(values)) - low - high
    return low / total, mid / total, high / total

def get_mask_middle_scatter(masks):
    """Fractions of mask values at or below 0.5 versus above 0.5.

    Args:
        masks: list of [BatchSize, 1] mask tensors.

    Returns:
        (lower_frac, upper_frac) tuple of floats.
    """
    total = len(masks) * masks[0].shape[0]
    flat = [m[r][0].item() for m in masks for r in range(m.shape[0])]
    upper_cnt = float(sum(1 for v in flat if v > 0.5))
    lower_cnt = float(len(flat)) - upper_cnt
    return lower_cnt / total, upper_cnt / total

def get_segLen_prob_loss(batch_masks, upper=100, prob_thresh=0.3, weight=3.0, k=5.0, epsilon=0.01):
    """Loss term based on the collapse probability of segment lengths.

    For each step, accumulates the probability that the current segment has
    length <= upper, pushes it through a (detached) stretched sigmoid around
    prob_thresh, and uses it to weight (1 - mask) — penalizing new segment
    boundaries where short segments are already likely.

    BUGFIX: ``np.exp`` was applied to a torch tensor; on tensors with
    ``requires_grad=True`` (the normal training case) this raises at runtime
    and it needlessly leaves the torch computation path. Replaced with
    ``torch.exp``.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors.
        upper: maximum segment length considered.
        prob_thresh: sigmoid threshold on the cumulative probability.
        weight: scalar multiplier on the final loss.
        k: sigmoid steepness.
        epsilon: padding offset for the virtual prefix masks.

    Returns:
        Scalar loss tensor.
    """
    mask_device = batch_masks[0].device
    # Prepend `upper` virtual steps: (upper-1) near-one masks and one
    # near-zero boundary mask.
    mask_shape = batch_masks[0].shape
    padded_masks = [torch.ones(mask_shape).to(mask_device) * (1 - epsilon) for _ in range(upper - 1)]
    padded_masks.append(torch.ones(mask_shape).to(mask_device) * epsilon)
    padded_masks.extend(batch_masks)
    prob_losses = []
    for i in range(upper, len(padded_masks)):
        # Probability of each candidate segment length (1..upper) ending at i.
        probs = []
        for l in range(1, upper + 1):
            log_prob = 0.0
            for j in range(i - l + 1, i):
                log_prob += torch.log(padded_masks[j])
            log_prob += torch.log(1 - padded_masks[i - l])
            probs.append(torch.exp(log_prob))  # was np.exp — see BUGFIX note
        cum_prob = sum(probs)
        # Detached: the sigmoid acts as a fixed per-step weight.
        cum_prob_sigmoid = stretched_sigmoid_torch(cum_prob, prob_thresh, k).detach()
        step_prob_loss = cum_prob_sigmoid * (1 - padded_masks[i])
        prob_losses.append(step_prob_loss)
    loss_term = (weight * sum(prob_losses) / len(prob_losses)).mean(0)
    return loss_term

def get_action_log_prob(action_mu, action_logvar, actions):
    """Element-wise Gaussian log-likelihood of actions for one time step.

    Args:
        action_mu: [BatchSize, action_dim] predicted mean.
        action_logvar: [BatchSize, action_dim] predicted log-variance.
        actions: [BatchSize, action_dim] observed actions.

    Returns:
        [BatchSize, action_dim] tensor of per-element log-probabilities.

    Computes log N(a; mu, exp(logvar)) directly in log space:
        -0.5 * (log(2*pi) + logvar + (a - mu)^2 / exp(logvar))
    The original took torch.log of the assembled density, which underflows
    to -inf for unlikely actions; this form is mathematically identical but
    numerically stable.
    """
    log_2pi = math.log(2 * math.pi)
    return -0.5 * (log_2pi + action_logvar + torch.pow(actions - action_mu, 2) / torch.exp(action_logvar))

def step_log_prob_2_log_prob_loss(steps_log_prob):
    """Average negative log-likelihood loss over time steps.

    Args:
        steps_log_prob: per-step log-probability tensors — either a list of
            tensors or a single tensor whose first dimension is time.

    Returns:
        Scalar tensor: mean of the time-averaged negative log-probs.

    BUGFIX: the original ``sum(-1*steps_log_prob)`` only worked when
    steps_log_prob was a tensor. For a Python list, ``-1 * list`` is
    sequence repetition with a negative count — an EMPTY list — so the sum
    silently became 0 and torch.mean then failed on a plain float. Negating
    each element inside the sum gives the identical result for tensors and
    the correct result for lists.
    """
    step_num = len(steps_log_prob)
    neg_log_prob_sum = sum(-step for step in steps_log_prob)
    neg_log_prob_sum /= step_num
    return torch.mean(neg_log_prob_sum)

def get_mask_local_mean_loss(batch_masks, local_size, epsilon=0.001, mask_mean=90):
    """MSE between each sliding-window mask sum and the target level mask_mean.

    The mask sequence is left-padded with (local_size - 1) near-one masks
    plus one near-zero boundary mask, so every real step sees a full window.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors.
        local_size: length of the sliding window.
        epsilon: padding offset (pads are 1-epsilon; the last pad is epsilon).
        mask_mean: target value for each window's mask sum.

    Returns:
        Scalar tensor: mean of the per-window MSE losses.
    """
    device = batch_masks[0].device
    shape = batch_masks[0].shape
    window = [(torch.ones(shape) * (1 - epsilon)).to(device) for _ in range(local_size - 1)]
    window.append((torch.ones(shape) * epsilon).to(device))
    window.extend(batch_masks)
    criterion = nn.MSELoss()
    losses = []
    for end in range(local_size, len(window)):
        window_sum = sum(window[end - local_size + 1:end + 1])
        target = torch.ones_like(window_sum) * mask_mean
        losses.append(criterion(window_sum, target))
    return sum(losses) / len(losses)

def get_mask_local_mean_loss_with_detach(batch_masks, local_size, epsilon=0.001, mask_mean=90):
    """Sliding-window mask-sum MSE loss, with the window history detached.

    Same as get_mask_local_mean_loss, except that within each window only the
    last (current) mask keeps gradients; the sum of the earlier masks is
    detached, so each step is only trained through its own mask value.

    Args:
        batch_masks: list of [BatchSize, 1] mask tensors.
        local_size: length of the sliding window.
        epsilon: padding offset (pads are 1-epsilon; the last pad is epsilon).
        mask_mean: target value for each window's mask sum.

    Returns:
        Scalar tensor: mean of the per-window MSE losses.
    """
    device = batch_masks[0].device
    shape = batch_masks[0].shape
    window = [(torch.ones(shape) * (1 - epsilon)).to(device) for _ in range(local_size - 1)]
    window.append((torch.ones(shape) * epsilon).to(device))
    window.extend(batch_masks)
    criterion = nn.MSELoss()
    losses = []
    for end in range(local_size, len(window)):
        # Gradient flows only through the newest mask in the window.
        history_sum = sum(window[end - local_size + 1:end]).detach()
        window_sum = history_sum + window[end]
        target = torch.ones_like(window_sum) * mask_mean
        losses.append(criterion(window_sum, target))
    return sum(losses) / len(losses)

