import numpy as np
import random
import torch

class Memory:
    """Circular replay buffer of multi-user transitions.

    Stores up to ``memory_capacity`` transitions, overwriting the oldest
    once full, and samples contiguous episodic sequences of length
    ``max_seq`` for sequence-based training.
    """

    def __init__(self, memory_capacity, user_num, state_dim):
        # Per-slot layout: one row per user for states/actions, a single
        # scalar reward per transition.
        self.state = np.zeros((memory_capacity, user_num, state_dim))
        self.action = np.zeros((memory_capacity, user_num))
        # NOTE(review): "reword" is a long-standing typo for "reward";
        # the name is kept for backward compatibility with any external
        # code that reads this attribute directly.
        self.reword = np.zeros(memory_capacity)
        self.next_state = np.zeros((memory_capacity, user_num, state_dim))
        self.memory_counter = 0          # total transitions ever appended
        self.memory_capacity = memory_capacity
        self.max_seq = 200               # length of each sampled sequence

    def appendMemory(self, state, action, reward, next_state):
        """Store one transition, overwriting the oldest slot when full.

        Args:
            state: array-like of shape (user_num, state_dim).
            action: array-like of shape (user_num,).
            reward: scalar reward for this transition.
            next_state: array-like of shape (user_num, state_dim).
        """
        index = self.memory_counter % self.memory_capacity
        self.state[index, :] = np.asarray(state)
        self.action[index] = np.asarray(action)
        self.reword[index] = np.asarray(reward)
        self.next_state[index, :] = np.asarray(next_state)
        self.memory_counter += 1

    def sample(self, batch_size):
        """Sample ``batch_size`` contiguous sequences of ``max_seq`` steps.

        Each sequence is a random window ``[finish - max_seq, finish)`` of
        the stored transitions.  Fixes two defects of the previous version:
        the loop now actually accumulates ``batch_size`` sequences instead
        of returning only the last one, and the sampling range is clamped
        to the filled portion of the circular buffer so slices can never
        run past the end of the arrays.

        NOTE(review): a window may straddle the current write head once the
        buffer has wrapped, in which case the sequence is not temporally
        contiguous — confirm whether that matters for the training loop.

        Args:
            batch_size: number of sequences to draw.

        Returns:
            Tuple ``(states, actions, rewards, next_states)`` with shapes
            ``(batch_size, max_seq, user_num, state_dim)``,
            ``(batch_size, max_seq, user_num)``,
            ``(batch_size, max_seq)`` and
            ``(batch_size, max_seq, user_num, state_dim)``.

        Raises:
            ValueError: if fewer than ``max_seq + 1`` transitions are stored.
        """
        # Only the filled portion of the circular buffer is valid to read.
        filled = min(self.memory_counter, self.memory_capacity)
        if filled <= self.max_seq:
            raise ValueError(
                f"need at least {self.max_seq + 1} stored transitions "
                f"to sample, have {filled}"
            )

        states, actions, rewards, next_states = [], [], [], []
        for _ in range(batch_size):
            finish = random.randint(self.max_seq, filled - 1)
            begin = finish - self.max_seq
            states.append(torch.FloatTensor(self.state[begin:finish]))
            actions.append(torch.LongTensor(self.action[begin:finish]))
            rewards.append(torch.FloatTensor(self.reword[begin:finish]))
            next_states.append(torch.FloatTensor(self.next_state[begin:finish]))

        return (
            torch.stack(states),
            torch.stack(actions),
            torch.stack(rewards),
            torch.stack(next_states),
        )

