import torch


class ReplayBuffer():
    """Fixed-capacity ring buffer of transitions stored as pre-allocated tensors.

    Writes advance a pointer and wrap around, overwriting the oldest
    entries once ``max_size`` transitions have been stored.
    """

    def __init__(self, state_dim, action_dim, max_size, dvc):
        """
        :param state_dim: dimensionality of one state vector
        :param action_dim: dimensionality of one action vector
        :param max_size: maximum number of transitions kept
        :param dvc: torch device on which the storage tensors live
        """
        self.max_size = max_size  # ring-buffer capacity
        self.dvc = dvc
        self.ptr = 0   # next write position
        self.size = 0  # number of valid transitions currently stored

        # Pre-allocated storage, one row per transition.
        self.s = torch.zeros((max_size, state_dim), dtype=torch.float, device=self.dvc)
        self.a = torch.zeros((max_size, action_dim), dtype=torch.float, device=self.dvc)
        self.r = torch.zeros((max_size, 1), dtype=torch.float, device=self.dvc)
        self.s_next = torch.zeros((max_size, state_dim), dtype=torch.float, device=self.dvc)
        self.dw = torch.zeros((max_size, 1), dtype=torch.bool, device=self.dvc)

    def add_one_step(self, s, a, r, s_next, dw):
        """Store a single transition.

        ``s``, ``a``, ``r`` and ``s_next`` are numpy arrays; ``dw`` is a
        bool-like done/win flag.
        """
        self.s[self.ptr] = torch.from_numpy(s).to(self.dvc)
        self.a[self.ptr] = torch.from_numpy(a).to(self.dvc)
        self.r[self.ptr] = torch.from_numpy(r).to(self.dvc)
        self.s_next[self.ptr] = torch.from_numpy(s_next).to(self.dvc)
        self.dw[self.ptr] = torch.tensor(dw, dtype=torch.bool).to(self.dvc)

        self.ptr = (self.ptr + 1) % self.max_size  # wrap around when full
        self.size = min(self.size + 1, self.max_size)

    def add_one_Gstep(self, states, actions, rewards, states_next, done_or_wins):
        """Store one step from a batch of parallel environments.

        :param states: (env_num, state_dim) numpy array
        :param actions: (env_num, action_dim) numpy array
        :param rewards: (env_num,) numpy array
        :param states_next: (env_num, state_dim) numpy array
        :param done_or_wins: (env_num,) numpy bool array
        """
        env_num = states.shape[0]

        # Destination rows computed modulo max_size: a batch that crosses the
        # end of the buffer wraps to the front automatically. This replaces
        # the previous triplicated head/tail slicing logic with a single
        # fancy-index write per tensor (equivalent for env_num <= max_size,
        # where the row indices are guaranteed distinct).
        idx = (self.ptr + torch.arange(env_num, device=self.dvc)) % self.max_size

        self.s[idx] = torch.from_numpy(states).to(self.dvc)
        self.a[idx] = torch.from_numpy(actions).to(self.dvc)
        self.r[idx] = torch.from_numpy(rewards).unsqueeze(1).to(self.dvc)
        self.s_next[idx] = torch.from_numpy(states_next).to(self.dvc)
        self.dw[idx] = torch.from_numpy(done_or_wins).unsqueeze(1).to(self.dvc)

        self.ptr = (self.ptr + env_num) % self.max_size
        self.size = min(self.size + env_num, self.max_size)

    def sample(self, batch_size):
        """Uniformly sample ``batch_size`` transitions (with replacement).

        Returns the tuple ``(s, a, r, s_next, dw)`` of stacked rows.
        NOTE(review): raises a RuntimeError (from ``torch.randint``) when the
        buffer is empty — callers must only sample after at least one add.
        """
        ind = torch.randint(0, self.size, device=self.dvc, size=(batch_size,))
        return self.s[ind], self.a[ind], self.r[ind], self.s_next[ind], self.dw[ind]
