import numpy as np
from my_envs.env import BaseEnv


class EnvCore:
    """Thin adapter exposing a multi-agent ``BaseEnv`` through a simple
    reset/step interface.

    Decodes one-hot action vectors coming from the policy into the
    discrete (speed, heading) commands the wrapped environment expects,
    and reshapes rewards/done flags into the per-agent list format the
    training loop consumes.
    """

    def __init__(self):
        # Dimensions are taken from the wrapped environment rather than
        # hard-coded, so this adapter follows whatever BaseEnv defines.
        self.env = BaseEnv()
        self.agent_num = self.env.agent_num    # number of agents
        self.obs_dim = self.env.obs_dim        # per-agent observation dimension
        self.action_dim = self.env.action_dim  # per-agent action-vector dimension

    def reset(self):
        """Reset the wrapped environment.

        Returns a list of length ``agent_num``; each element is one
        agent's observation of shape ``(obs_dim,)``.
        """
        return self.env.reset()

    def step(self, actions):
        """Advance the environment by one step.

        Parameters
        ----------
        actions : list of array-like
            One entry per agent; each entry is a one-hot style vector of
            length ``action_dim``. Its first half selects a speed change
            and its second half a heading change (for ``action_dim == 18``:
            indices 0-8 are speed, 9-17 are angle, each half mapped onto
            the centered range -4..4).

        Returns
        -------
        tuple
            ``(obs, shaped_rewards, dones)`` where ``shaped_rewards`` is
            a list of single-element lists (one per agent) and ``dones``
            repeats the environment's done flag once per agent.
        """
        # NOTE(review): the original condition here was
        # `if j < self.action_dim:`, which is always true for indices of a
        # length-`action_dim` vector, so the angle branch (indices 9-17
        # per the original comments) was unreachable dead code. Splitting
        # at action_dim // 2 restores the documented speed/angle decoding;
        # this assumes the vector is two equal one-hot halves — confirm
        # against BaseEnv.
        half = self.action_dim // 2
        env_actions = []
        # actions arrive as list<np.array>; rebuild as list<list> so the
        # result supports JSON-style operations downstream.
        for i in range(self.agent_num):
            action = actions[i]
            changing = []
            for j in range(len(action)):
                if action[j] == 1.0:
                    if j < half:
                        # First half encodes speed, centered so the middle
                        # index maps to 0 (e.g. 0-8 -> -4..4).
                        changing.append(j - half // 2)
                    else:
                        # Second half encodes heading with the same
                        # centering (e.g. 9-17 -> -4..4).
                        changing.append(j - half - half // 2)
            env_actions.append(changing)

        obs, rewards, done = self.env.step(env_actions)

        # Repackage per-agent: each reward is wrapped in a single-element
        # list, and the environment's single done flag is replicated for
        # every agent.
        shaped_rewards = [[rewards[i]] for i in range(self.agent_num)]
        dones = [done] * self.agent_num
        return obs, shaped_rewards, dones