import collections
import random

import numpy as np


class Trajectory:
    """Record of one complete episode rollout.

    Holds the visited states plus the per-step actions, rewards and done
    flags; `states` always has one more entry than the other lists.
    """

    def __init__(self, init_state):
        # An empty trajectory contains only the initial state; transitions
        # are appended one at a time through store_step().
        self.length = 0
        self.states = [init_state]
        self.actions = []
        self.rewards = []
        self.dones = []

    def store_step(self, action, state, reward, done):
        """Append one transition: action taken, resulting state, reward, done flag."""
        for seq, item in ((self.actions, action),
                          (self.states, state),
                          (self.rewards, reward),
                          (self.dones, done)):
            seq.append(item)
        self.length += 1

class ReplayBuffer_Trajectory:
    """Experience replay buffer that stores whole trajectories.

    Keeping complete trajectories (instead of individual transitions)
    makes it possible to relabel goals with Hindsight Experience Replay
    (HER) at sampling time.
    """

    def __init__(self, capacity):
        # deque with maxlen drops the oldest trajectory automatically once full
        self.buffer = collections.deque(maxlen=capacity)

    def add_trajectory(self, trajectory: "Trajectory"):
        """Store one finished trajectory."""
        self.buffer.append(trajectory)

    def size(self):
        """Number of trajectories currently stored."""
        return len(self.buffer)

    def sample(self, batch_size, use_her, dis_threshold=0.15, her_ratio=0.8):
        """Sample `batch_size` transitions, optionally relabelled with HER.

        :param batch_size: number of transitions to sample
        :param use_her: whether to apply HER goal relabelling
        :param dis_threshold: distance tolerance for considering the goal reached
        :param her_ratio: fraction of samples whose goal is replaced by HER
        :return: dict with keys 'states', 'actions', 'next_states',
                 'rewards', 'dones', each a numpy array
        """
        batch = dict(states=[], actions=[], next_states=[], rewards=[], dones=[])
        for _ in range(batch_size):
            # Pick one stored trajectory at random...
            traj = random.sample(self.buffer, 1)[0]
            # ...then a random timestep within it: (st, at, st+1, rt+1, dt+1, g)
            step_state = np.random.randint(traj.length)
            state = traj.states[step_state]
            next_state = traj.states[step_state + 1]
            action = traj.actions[step_state]
            reward = traj.rewards[step_state]
            done = traj.dones[step_state]

            # Apply HER relabelling to a her_ratio fraction of the samples
            if use_her and np.random.uniform() <= her_ratio:
                # HER "future" strategy: sample a strictly later timestep
                # and use the position visited there as the new goal.
                step_goal = np.random.randint(step_state + 1, traj.length + 1)
                # [:2] because the environment state is laid out as
                # (x, y, goal_x, goal_y): first two dims are the position,
                # last two the goal (world_env convention — TODO confirm).
                goal = traj.states[step_goal][:2]
                # Recompute reward/done against the relabelled goal:
                # did the transition's next state land within dis_threshold?
                dis = np.sqrt(np.sum(np.square(next_state[:2] - goal)))
                # Use floats in both branches (original mixed -1.0 and int 0)
                reward = -1.0 if dis > dis_threshold else 0.0
                done = dis <= dis_threshold
                # Rebuild (position, goal) vectors with the new goal
                state = np.hstack((state[:2], goal))
                next_state = np.hstack((next_state[:2], goal))

            batch['states'].append(state)
            batch['next_states'].append(next_state)
            batch['actions'].append(action)
            batch['rewards'].append(reward)
            batch['dones'].append(done)

        # Convert every field; the original converted only three of the
        # five, leaving rewards/dones as Python lists (inconsistent).
        batch['states'] = np.array(batch['states'])
        batch['next_states'] = np.array(batch['next_states'])
        batch['actions'] = np.array(batch['actions'])
        batch['rewards'] = np.array(batch['rewards'])
        batch['dones'] = np.array(batch['dones'])
        return batch
