import os
import sys
import time

import gym
import numpy as np
import torch
from torch import nn


def tensorize(array_list):
    """Convert each array-like in *array_list* into a float32 torch tensor."""
    tensors = []
    for item in array_list:
        as_np = np.array(item, np.float32)
        tensors.append(torch.as_tensor(as_np, dtype=torch.float32))
    return tensors


def np_init(buffer_size, shape=None, num=1):
    """Allocate zero-filled float32 numpy buffers.

    Returns:
        - a (buffer_size, shape) array when *shape* is given,
        - a list of *num* 1-D (buffer_size,) arrays when num > 1,
        - otherwise a single 1-D (buffer_size,) array.
    """
    # Explicit None check: a truthiness test would silently ignore shape=0
    # and fall through to the 1-D branch.
    if shape is not None:
        return np.zeros((buffer_size, shape), np.float32)
    if num > 1:
        return [np.zeros(buffer_size, np.float32) for _ in range(num)]
    return np.zeros(buffer_size, np.float32)


class Logger:
    """Tee writer: everything written goes to both the terminal and a log file.

    Intended to replace ``sys.stdout`` so normal ``print`` output is mirrored
    into ``log/<MMDDHHMM>-t.txt`` (or *filename* when given).
    """

    def __init__(self, filename=None):
        # makedirs(exist_ok=True) creates the directory atomically; the old
        # listdir()/mkdir() pair raced if two processes started together.
        os.makedirs("log", exist_ok=True)
        if not filename:
            filename = f"log/{time.strftime('%m%d%H%M', time.localtime(time.time()))}-t.txt"
        self.ter, self.log = sys.stdout, open(filename, "a", encoding="utf8")

    def write(self, message):
        """Write *message* to both the terminal and the log file."""
        self.ter.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both targets; a no-op here would lose buffered log data on exit."""
        self.ter.flush()
        self.log.flush()


class Agent:
    """Base RL agent: records state/action-space metadata and offers
    network-building and optimization helpers."""

    def __init__(self, state_space, action_space):
        self.state_dim = state_space.shape[0]
        # Defaults until the action-space kind is identified below.
        self.space, self.action_dim, self.action_limit = "None", None, None
        if isinstance(action_space, gym.spaces.Discrete):
            self.space, self.action_dim = "discrete", action_space.n
        if isinstance(action_space, gym.spaces.Box):
            self.space, self.action_dim = "box", action_space.shape[0]
            self.action_limit = action_space.high[0]

    def build_network(self, layer, activation, output_activation):
        """Build an MLP from the sizes in *layer*; hidden layers use
        *activation*, the final activation is *output_activation*."""
        modules = []
        for in_dim, out_dim in zip(layer[:-1], layer[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            modules.append(activation())
        # The trailing hidden activation is swapped for the output activation.
        modules[-1] = output_activation()
        return nn.Sequential(*modules)

    def optimize(self, loss, optimizer):
        """Perform one gradient step: zero grads, backprop *loss*, update."""
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


class Buffer:
    """Fixed-capacity circular replay buffer for (s, a, r, s', done) transitions."""

    def __init__(self, buffer_size, state_shape, action_shape):
        self.states = np_init(buffer_size, state_shape[0])
        self.actions = np_init(buffer_size, action_shape[0])
        self.next_states = np_init(buffer_size, state_shape[0])
        self.rewards, self.dones = np_init(buffer_size), np.zeros(buffer_size, np.int16)
        # size: capacity; index_step: total stores ever made; step: next write slot.
        self.size, self.index_step, self.step = buffer_size, 0, 0

    def store(self, state, action, reward, next_state, done):
        """Write one transition at the current slot, overwriting the oldest
        entry once the buffer wraps around."""
        self.states[self.step] = state
        self.actions[self.step] = action
        self.rewards[self.step] = reward
        self.next_states[self.step] = next_state
        self.dones[self.step] = done

        self.step = (self.step + 1) % self.size  # circular write pointer
        self.index_step += 1

    def sample_batch(self, batch_size=100):
        """Sample *batch_size* transitions uniformly at random (with replacement)
        from the filled portion, returned as float32 tensors in the order
        (states, actions, rewards, next_states, dones).

        The batch size was previously hard-coded to 100; it is now a
        backward-compatible parameter with the same default.
        """
        index = np.random.randint(0, min(self.index_step, self.size), size=batch_size)
        array_list = [self.states, self.actions, self.rewards, self.next_states, self.dones]
        return [torch.as_tensor(array[index], dtype=torch.float32) for array in array_list]
