from utils_sumtree import SumTree
from utils_memory import ReplayMemory
import numpy as np
import torch

from utils_types import (
    TensorStack5,
    TorchDevice,
)


class PrioritizedMemory(object):  # stored as ( s, a, r, s_ ) in SumTree
    """Prioritized experience replay (PER) buffer.

    Transitions are held in a ReplayMemory while their sampling priorities
    live in a parallel SumTree, so transitions are drawn with probability
    proportional to priority = (|TD error| + epsilon) ** alpha.
    Importance-sampling weights correct the induced bias; their exponent
    beta anneals from its initial value toward 1 over repeated sampling.
    """

    epsilon = 1.5e-4  # small additive constant so no transition gets zero priority
    alpha = 0.99  # [0~1] how strongly TD-error magnitude shapes the priority
    beta = 0.4  # importance-sampling exponent, annealed from this value up to 1
    beta_increment_per_sampling = 0.001  # beta step applied on every sample() call
    abs_err_upper = 1.  # absolute TD errors are clipped to this ceiling

    def __init__(self,
                 channels: int,
                 capacity: int,
                 device: TorchDevice,
                 full_sink: bool = True,
                 ):
        """Create a PER buffer holding up to ``capacity`` transitions on ``device``."""
        self.device = device
        self.tree = SumTree(capacity, device)
        self.memory = ReplayMemory(channels, capacity, device, full_sink)

    def push(self,
             folded_state: TensorStack5,
             action: int,
             reward: int,
             done: bool,
             ):
        """Store one transition, assigning it the current maximum leaf priority.

        Giving new experiences the max priority guarantees each is sampled
        at least once before batch_update() refines its priority.
        """
        transition = self.memory.push(folded_state, action, reward, done)
        max_p = torch.max(self.tree.tree[-self.tree.capacity:])
        if max_p == 0:
            # Tree is still empty: seed with the clipping ceiling instead.
            max_p = self.abs_err_upper
        self.tree.add(max_p, transition)  # set the max of p for new p

    def sample(self, n):
        """Draw ``n`` transitions via stratified priority sampling.

        The total priority mass is split into ``n`` equal segments and one
        leaf is drawn uniformly from each, giving a low-variance sample.

        Returns:
            (tree_indices, batch, is_weights) — ``tree_indices`` are fed
            back to batch_update() together with the new TD errors;
            ``is_weights`` has shape (n, 1), normalized so max weight == 1.
        """
        b_idx = torch.empty((n,), dtype=torch.int32).to(self.device)
        b_memory = torch.empty((n,), dtype=torch.long).to(self.device)
        ISWeights = torch.empty((n, 1)).to(self.device)
        pri_seg = self.tree.total_p / n  # one equal-mass segment per sample
        self.beta = min(1., self.beta + self.beta_increment_per_sampling)  # max=1

        min_prob = torch.min(
            self.tree.tree[-self.tree.capacity:]) / self.tree.total_p  # for later calculation ISweight
        if min_prob <= 0:
            # While the buffer is not yet full the unfilled leaves hold
            # priority 0, which would make prob / min_prob produce inf/nan
            # IS weights. Floor with the priority epsilon instead.
            min_prob = self.epsilon / self.tree.total_p
        for i in range(n):
            a, b = pri_seg * i, pri_seg * (i + 1)
            v = np.random.uniform(a, b)
            idx, p, data = self.tree.get_leaf(v)
            prob = p / self.tree.total_p
            # w_i = (P(i) / min_j P(j)) ** -beta  (max-normalized IS weight)
            ISWeights[i, 0] = torch.pow(prob / min_prob, -self.beta)
            b_idx[i], b_memory[i] = idx, data

        return b_idx, self.memory.sample(b_memory), ISWeights

    def batch_update(self, tree_idx, abs_errors):
        """Refresh leaf priorities from freshly computed absolute TD errors.

        Note: the caller's ``abs_errors`` tensor is NOT mutated (the
        previous in-place ``+=`` leaked the epsilon offset back to the
        caller); computation happens on a detached copy.
        """
        abs_errors = abs_errors.detach() + self.epsilon  # avoid zero priority
        clipped_errors = torch.clamp(abs_errors, max=self.abs_err_upper)
        ps = torch.pow(clipped_errors, self.alpha)
        for ti, p in zip(tree_idx, ps):
            self.tree.update(ti, p)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
