import copy
import random
import numpy as np


class Environment(object):
    """A simple grid-world environment for tabular reinforcement learning.

    The grid is a list of lists of single characters. The agent moves on
    roads, dies (episode restarts) in traps, and finishes on the target.
    Action ids: 0 = up, 1 = down, 2 = left, 3 = right.
    """

    def __init__(self, init_env, agent='A', road='O', trap='X', target='V'):
        """Store the grid and configuration.

        Args:
            init_env: list of lists of single-char cells (the initial grid).
            agent/road/trap/target: the characters marking each element.
        """
        # Keep a pristine copy for reset(); ALSO deep-copy the working grid.
        # (Bug fix: the original aliased init_env directly, so the caller's
        # list was mutated by the environment until the first reset().)
        self.init_env = copy.deepcopy(init_env)
        self.env = copy.deepcopy(init_env)
        self.height = len(self.env)      # grid height (number of rows)
        self.weight = len(self.env[0])   # grid width (name kept for compat)
        self.observation_dimension = self.height * self.weight  # cell count
        # Current observation (flattened cell index of the agent).
        self.obs = None
        # Characters that draw each element on the grid.
        self.agent = agent
        self.road = road
        self.trap = trap
        self.target = target
        # Reward of the last transition, and the per-cell reward values.
        self.reward = None
        self.road_reward = -1
        self.trap_reward = -100
        self.target_reward = 1
        # True once the agent has reached the target.
        self.done = False
        # Extra info returned by step() (currently always None).
        self.info = None

    def render(self):
        """Print the current grid, framed by lines of '*'."""
        print('*' * (self.weight + 1))
        for row in self.env:
            print(''.join(row))
        print('*' * (self.weight + 1))

    def init_render(self):
        """Print the initial grid, framed by lines of '#'."""
        print('#' * (self.weight + 1))
        for row in self.init_env:
            print(''.join(row))
        print('#' * (self.weight + 1))

    def reset(self):
        """Restore the initial grid and return the initial observation."""
        self.env = copy.deepcopy(self.init_env)
        self.done = False
        self.obs = self.get_obs()
        return self.obs

    def get_pos(self):
        """Return the agent's (x, y) position, origin at the top-left.

        Returns (0, 0) when the agent is not on the grid, matching the
        original fallback. Scans stop at the first match (there is a
        single agent), instead of always walking the whole grid.
        """
        for y in range(self.height):
            for x in range(self.weight):
                if self.env[y][x] == self.agent:
                    return x, y
        return 0, 0

    def set_pos(self, x, y):
        """Move the agent onto cell (x, y) and set self.reward accordingly.

        Road/target: the old position is cleared and the agent is drawn at
        (x, y). Trap: the whole grid restarts from the initial layout.
        """
        cell = self.env[y][x]
        if cell == self.road:
            self.reward = self.road_reward
            # Clear the old position, then draw the agent at the new one.
            agent_x, agent_y = self.get_pos()
            self.env[agent_y][agent_x] = self.road
            self.env[y][x] = self.agent
        elif cell == self.trap:
            self.reward = self.trap_reward
            # Falling into a trap restarts the episode from the initial grid.
            self.env = copy.deepcopy(self.init_env)
        elif cell == self.target:
            self.reward = self.target_reward
            agent_x, agent_y = self.get_pos()
            self.env[agent_y][agent_x] = self.road
            self.env[y][x] = self.agent
            # Reaching the target finishes the episode.
            self.done = True
        elif cell == self.agent:
            # Bumped into a border (stayed in place): treated as a road step.
            self.reward = self.road_reward
        else:
            print("error!")

    def get_obs(self):
        """Return the observation: the agent's flattened cell index."""
        agent_x, agent_y = self.get_pos()
        return agent_y * self.weight + agent_x

    def step(self, action):
        """Apply one action and return (obs, reward, done, info)."""
        agent_x, agent_y = self.get_pos()
        # (dx, dy) for each action id: 0=up, 1=down, 2=left, 3=right.
        deltas = {0: (0, -1), 1: (0, 1), 2: (-1, 0), 3: (1, 0)}
        if action in deltas:
            dx, dy = deltas[action]
            # Clamp to the grid: hitting a border leaves the agent in place,
            # which set_pos() rewards like a road step.
            new_x = min(max(agent_x + dx, 0), self.weight - 1)
            new_y = min(max(agent_y + dy, 0), self.height - 1)
            self.set_pos(new_x, new_y)
        else:
            # Unknown action: warn and leave the grid untouched, as before.
            print("We Do Not Have This Action !!!")

        # Refresh the observation after the (possible) move.
        self.obs = self.get_obs()
        # No extra diagnostic info yet.
        self.info = None

        return self.obs, self.reward, self.done, self.info


if __name__ == "__main__":

    init_env = [
        '      ',
        '      ',
        'AXXXXV'
    ]
    # Convert the strings into a mutable grid (list of lists of chars).
    init_env_mat = [list(line) for line in init_env]

    env = Environment(init_env_mat, agent='A', road=' ', trap='X', target='V')
    print(env.get_pos())
    env.render()

    actions = [0, 1, 2, 3]  # ['up', 'down', 'left', 'right']

    obs = env.reset()  # reset the environment
    print(obs)

    for i in range(10):
        # Bug fix: the original loop stepped with actions[0] (always 'up')
        # instead of the freshly sampled action.
        action = random.choice(actions)
        print("obs: ", env.get_obs())
        next_obs, reward, done, _ = env.step(action)
        print(reward)
        print(done)
        env.render()
        env.init_render()
        print("next_obs: ", next_obs)

    print("reset:::::::::::::::::")

    obs = env.reset()  # reset the environment
    print(obs)
    env.render()

    for i in range(10):
        action = random.choice(actions)
        print("obs: ", env.get_obs())
        next_obs, reward, done, _ = env.step(action)
        print(reward)
        print(done)
        env.render()
        print("next_obs: ", next_obs)
