import random
import numpy as np

class ReplayBUffer:
    """Fixed-capacity circular replay memory of (state, action, reward, next_state, done) tuples."""

    def __init__(self, args):
        # Seed the stdlib RNG used by sample() for reproducibility.
        random.seed(args.seed)
        self.size = args.replay_buffer_size
        self.batch_size = args.batch_size
        self.buffer = []
        self.current_idx = 0  # next slot to (over)write

    def remember(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once at capacity."""
        # Grow the list until it reaches capacity, then recycle slots.
        if len(self.buffer) < self.size:
            self.buffer.append(None)
        transition = (state, action, reward, next_state, done)
        self.buffer[self.current_idx] = transition
        self.current_idx = (self.current_idx + 1) % self.size

    def sample(self, batch_size):
        """Uniformly sample a batch and return stacked per-field arrays."""
        picked = random.sample(self.buffer, batch_size)
        # Transpose the batch of tuples into one array per field.
        state, action, reward, next_state, done = (np.stack(col) for col in zip(*picked))
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)


class NStepReplayBUffer:
    """Circular replay buffer that samples contiguous n-step transition windows.

    Each sampled item is the first (state, action) of a window of
    `args.n_step` consecutive transitions, together with the discounted
    n-step return, the look-ahead next state, and the look-ahead done flag
    (the window is truncated at the first terminal transition).
    """

    def __init__(self, args):
        # Seed the stdlib RNG used by sample() for reproducibility.
        random.seed(args.seed)
        self.args = args
        self.size = args.replay_buffer_size
        self.batch_size = args.batch_size
        self.buffer = []
        self.current_idx = 0  # next slot to (over)write

    def remember(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once at capacity."""
        if len(self.buffer) < self.size:
            self.buffer.append(None)
        self.buffer[self.current_idx] = (state, action, reward, next_state, done)
        self.current_idx = (self.current_idx + 1) % self.size

    def sample(self, batch_size):
        """Sample `batch_size` n-step windows; return per-field lists.

        Returns (states, actions, rewards, next_states, dones) where
        rewards holds the gamma-discounted n-step return of each window.
        Raises ValueError if fewer than n_step transitions are stored.
        """
        n_step = self.args.n_step
        if len(self.buffer) < n_step:
            raise ValueError(
                "need at least %d transitions, have %d" % (n_step, len(self.buffer))
            )
        states, actions, rewards, next_states, dones = [], [], [], [], []
        full = len(self.buffer) >= self.size
        # A valid (seam-free) window exists on either side of the write head
        # whenever one of the two segments is at least n_step long.
        seam_avoidable = (
            self.current_idx >= n_step or (self.size - self.current_idx) >= n_step
        )
        for _ in range(batch_size):
            while True:
                finish = random.randint(n_step, len(self.buffer))
                begin = finish - n_step
                # BUGFIX: once the buffer is full and has wrapped, a window
                # straddling current_idx would join the newest and the oldest
                # transitions into one fake n-step sequence. Resample when a
                # seam-free window exists.
                if not (full and seam_avoidable and begin < self.current_idx < finish):
                    break
            data = self.buffer[begin:finish]
            state = data[0][0]
            action = data[0][1]
            sum_reward = 0  # discounted n-step return
            for j in range(n_step):
                sum_reward += (self.args.gamma ** j) * data[j][2]
                states_look_ahead = data[j][3]
                done_look_ahead = bool(data[j][4])
                if done_look_ahead:
                    # Episode ended inside the window: truncate the return here.
                    break

            states.append(state)
            actions.append(action)
            rewards.append(sum_reward)
            next_states.append(states_look_ahead)
            dones.append(done_look_ahead)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class SumTree(object):
    """Binary sum tree over transition priorities.

    The tree array stores internal nodes first and the `capacity` leaf
    priorities last; each internal node holds the sum of its children, so
    the root is the total priority mass used for proportional sampling.
    """

    def __init__(self, replay_buffer_size):
        self.capacity = replay_buffer_size           # number of leaves == replay buffer size
        self.tree = np.zeros(2 * self.capacity - 1)  # internal nodes + leaf priorities
        self.data = []                               # transitions, parallel to the leaves
        self.current_idx = 0                         # next data slot to (over)write

    def add(self, p, transition):
        """Store a transition with priority `p`, recycling the oldest slot."""
        leaf = self.current_idx + self.capacity - 1
        if len(self.data) < self.capacity:
            self.data.append(None)
        self.data[self.current_idx] = transition
        self.update(leaf, p)  # refresh the leaf and all ancestors

        self.current_idx = (self.current_idx + 1) % self.capacity

    def update(self, tree_idx, p):
        """Set leaf `tree_idx` to priority `p` and propagate the delta upward."""
        delta = p - self.tree[tree_idx]
        self.tree[tree_idx] = p
        # Iterative parent walk up to the root.
        while tree_idx:
            tree_idx = (tree_idx - 1) // 2
            self.tree[tree_idx] += delta

    def get_leaf(self, v):
        """Find the leaf whose cumulative priority interval contains `v`.

        Returns (leaf_idx, leaf_priority, data_idx).
        """
        node = 0
        while True:
            left = 2 * node + 1
            right = left + 1
            if left >= len(self.tree):
                # No children: `node` is a leaf.
                break
            # Descend left when v falls in the left mass, or when the right
            # subtree is empty (keeps the search on populated leaves).
            if v <= self.tree[left] or self.tree[right] == 0:
                node = left
            else:
                v -= self.tree[left]
                node = right
        data_idx = node - self.capacity + 1
        return node, self.tree[node], data_idx

    def total_p(self):
        """Total priority mass (the root of the tree)."""
        return self.tree[0]


class PrioritizedReplayBuffer():  # stored as ( s, a, r, s_ ) in SumTree
    """
    Proportional prioritized experience replay with n-step returns.

    This Memory class is modified based on the original code from:
    https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py
    """
    def __init__(self, args):
        random.seed(args.seed)
        # NOTE(review): sample() draws with np.random.uniform, which this seed
        # does not control — seed numpy as well at the call site if exact
        # reproducibility is needed.
        self.args = args
        self.buffer = SumTree(args.replay_buffer_size)
        self.size = args.replay_buffer_size
        self.batch_size = args.batch_size
        self.epsilon = 0.01  # small amount to avoid zero priority
        self.alpha = 0.6  # [0~1] convert the importance of TD error to priority
        self.beta = 0.4  # importance-sampling, from initial value increasing to 1
        self.beta_increment_per_sampling = 0.001
        self.abs_err_upper = 1.  # clipped abs error

    def remember(self, state, action, reward, next_state, done):
        """Store a transition with the current maximum leaf priority."""
        max_p = np.max(self.buffer.tree[-self.buffer.capacity:])
        if max_p == 0:
            # Empty tree: fall back to the priority ceiling.
            max_p = self.abs_err_upper
        transition = (state, action, reward, next_state, done)
        self.buffer.add(max_p, transition)   # set the max p for new p

    def sample(self, batch_size):
        """Sample `batch_size` n-step windows proportionally to priority.

        Returns (states, actions, rewards, next_states, dones, batch_idx,
        IS_weights): rewards are gamma-discounted n-step returns, batch_idx
        are SumTree leaf indices for update_priorities, IS_weights are
        max-normalized importance-sampling weights.
        """
        batch_idx, IS_weights = [], []
        states, actions, rewards, next_states, dones = [], [], [], [], []
        pri_seg = self.buffer.total_p() / batch_size       # priority segment
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])  # max = 1
        # Minimum sampling probability, used to max-normalize the IS weights.
        min_prob = np.min(self.buffer.tree[-self.buffer.capacity:]) / self.buffer.total_p()
        if min_prob == 0:
            # Happens while unfilled leaves still hold zero priority.
            min_prob = 0.00001

        for i in range(batch_size):
            # Stratified sampling: one draw per equal-mass priority segment.
            a, b = pri_seg * i, pri_seg * (i + 1)
            v = np.random.uniform(a, b)
            tree_idx, p, data_idx = self.buffer.get_leaf(v)
            prob = p / self.buffer.total_p()
            IS_weights.append(np.power(prob/min_prob, -self.beta))
            batch_idx.append(tree_idx)

            if data_idx + self.args.n_step > len(self.buffer.data):
                # Clamp so the n-step window does not run past the stored data.
                end = len(self.buffer.data)
            else:
                end = data_idx + self.args.n_step
            # NOTE(review): once the buffer wraps, a window can still cross the
            # write head and mix old/new transitions — confirm acceptable.
            sum_reward = 0  # discounted n-step return
            data = self.buffer.data[data_idx:end]
            state = data[0][0]
            action = data[0][1]
            for j in range(end-data_idx):
                sum_reward += (self.args.gamma**j) * data[j][2]
                states_look_ahead = data[j][3]
                done_look_ahead = bool(data[j][4])
                if done_look_ahead:
                    # Episode ended inside the window: truncate the return here.
                    break

            states.append(state)
            actions.append(action)
            rewards.append(sum_reward)
            next_states.append(states_look_ahead)
            dones.append(done_look_ahead)
        return states, actions, rewards, next_states, dones, batch_idx, IS_weights

    def update_priorities(self, batch_idx, abs_td_errors):
        """Refresh leaf priorities from new |TD errors| after a learning step.

        Priority is (min(|delta| + epsilon, abs_err_upper)) ** alpha.
        The caller's array is left untouched.
        """
        # BUGFIX: the original `abs_td_errors += self.epsilon` mutated the
        # caller's numpy array in place; compute on a copy instead.
        shifted = np.asarray(abs_td_errors) + self.epsilon  # avoid zero priority
        clipped_errors = np.minimum(shifted, self.abs_err_upper)  # keep p <= 1
        ps = np.power(clipped_errors, self.alpha)  # p ** alpha
        for i, p in zip(batch_idx, ps):
            self.buffer.update(i, p)

    def __len__(self):
        return len(self.buffer.data)