# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-6-4 上午9:57
# @file  : gridworld.py

"""
usage:
"""
import torch
import argparse
import torch.nn as nn
import random
from gridpanel import *
import tqdm


def parse_arg():
    """Parse command-line options for the grid-world experiment.

    Returns an argparse.Namespace with grid size, reward, sampling and
    training hyper-parameters, plus the --test / --gpu switches.
    """
    parser = argparse.ArgumentParser()
    # (flag, default) pairs for all plain integer options, in CLI-help order.
    int_options = [
        ("--width", 4),
        ("--height", 4),
        ("--transition_reward", -1),
        ("--sample_num", 100),
        ("--train_step", 1000),
        ("--max_step", 100),
    ]
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--gpu", type=int, default=0)   # GPU ordinal, default 0
    return parser.parse_args()


class Actor(nn.Module):
    """Policy network: maps a grid coordinate (x, y) to a probability
    distribution over the 4 moves [left, down, right, top].

    Moves that would leave the width x height grid are masked out before
    the softmax so they get (numerically) zero probability.
    """

    def __init__(self, width, height, gpu):
        super(Actor, self).__init__()
        self.width = width
        self.height = height
        self.gpu = gpu  # device the input tensor is moved to (index or str)

        self.linear1 = nn.Linear(2, 128)
        self.relu1 = nn.LeakyReLU()
        self.linear2 = nn.Linear(128, 128)
        self.relu2 = nn.LeakyReLU()
        self.linear3 = nn.Linear(128, 4)
        # Explicit dim: nn.Softmax() with an implicit dim is deprecated and
        # warns at runtime; dim=-1 is identical for this 1-D logit vector.
        self.softmax = nn.Softmax(dim=-1)
        self.main = nn.Sequential(self.linear1, self.relu1,
                                  self.linear2, self.relu2,
                                  self.linear3)

    def forward(self, x, y):
        """For the given coordinate, return move probabilities (shape (4,)).

        Off-grid moves are masked; the caller samples a direction from the
        returned distribution.
        """
        # `coord` instead of `input` to avoid shadowing the builtin.
        coord = torch.tensor([x, y], dtype=torch.float32).to(self.gpu)
        logits = self.main(coord)   # 4 logits: left, down, right, top

        # Mask off-grid moves through .data so the masking constants stay
        # out of the autograd graph (matches the original training setup).
        if x == 0:
            logits.data[0] = -1e16          # cannot move left
        if y == 0:
            logits.data[3] = -1e16          # cannot move top (y -= 1)
        if x == self.width - 1:
            logits.data[2] = -1e16          # cannot move right
        if y == self.height - 1:
            logits.data[1] = -1e16          # cannot move down (y += 1)

        action = self.softmax(logits)
        # Re-normalize defensively in case masking underflowed the softmax;
        # the epsilon guards against a degenerate all-zero distribution.
        action_data_sum = action.data.sum()
        action.data = action.data / (action_data_sum if action_data_sum > 0.
                                     else (action_data_sum + 1e-14))
        return action


class Environment:
    """Black-box transition function; it does not need to be differentiable.

    Actions are encoded 0..3 as left / down / right / top moves on the grid.
    """

    # action index -> (dx, dy) displacement
    _MOVES = {0: (-1, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1)}

    def __init__(self, reward_fn):
        # reward_fn: zero-argument callable returning the per-step reward
        self.reward_fn = reward_fn

    def interaction(self, x, y, action):
        """Apply `action` at (x, y); return (new_x, new_y, reward)."""
        try:
            dx, dy = self._MOVES[action]
        except KeyError:
            raise NotImplementedError
        return x + dx, y + dy, self.reward_fn()


class ActionSampler(object):
    """Samples a discrete action index from a probability tensor.

    Inverse-CDF sampling: draw u ~ U[0, 1) and return the first index whose
    cumulative probability reaches u.
    """

    def __init__(self):
        pass

    def sample(self, action):
        """Draw an index from `action` (a 1-D probability tensor).

        Returns:
            (index, one_hot): the sampled index and a float one-hot tensor
            of the same shape as `action`.
        """
        random_dart = random.random()
        probs = action.cpu().tolist()
        one_hot_action = torch.zeros_like(action, requires_grad=False)

        # Running cumulative sum: O(n) instead of re-summing the prefix each
        # iteration, and no dependency on numpy (previously pulled in only
        # through the `gridpanel` star import).
        cumulative = 0.0
        for index, p in enumerate(probs):
            cumulative += p
            if cumulative >= random_dart:
                one_hot_action[index] = 1.0
                return index, one_hot_action

        # Float rounding can leave the cumulative sum a hair below the dart
        # even for a valid distribution; fall back to the last entry with
        # positive mass instead of crashing mid-training.
        for index in range(len(probs) - 1, -1, -1):
            if probs[index] > 0.0:
                one_hot_action[index] = 1.0
                return index, one_hot_action

        raise RuntimeError("cannot sample from %r (dart=%r)" % (probs, random_dart))


class TrajectorySampler(object):
    """Rolls out episodes in the grid world with the current policy."""

    def __init__(self, width, height, actor, environ, action_sampler, max_step):
        self.width = width
        self.height = height
        self.actor = actor
        self.environ = environ
        self.action_sampler = action_sampler
        # hard step cap so oscillating policies (e.g. right/left forever)
        # still terminate
        self.max_step = max_step

    def sample_one_action(self, start_coord):
        """Take one policy step from `start_coord`.

        Returns (new_coord, step_reward, reached_goal, picked) where
        `picked` is the probability vector zeroed everywhere except the
        sampled action (keeps that probability's gradient alive).
        """
        x0, y0 = start_coord[0], start_coord[1]
        # 1. sample a move from the policy's distribution
        probs = self.actor(x0, y0)
        move_idx, move_one_hot = self.action_sampler.sample(probs)
        # 2. ask the environment for the reward and the new state
        x1, y1, step_reward = self.environ.interaction(x0, y0, move_idx)
        picked = probs * move_one_hot
        reached_goal = (x1 == self.width - 1) and (y1 == self.height - 1)
        return (x1, y1), step_reward, reached_goal, picked

    def sample_one_trajectory(self):
        """Roll out one episode from (0, 0).

        Returns total_reward * sum(log pi(a_t)) — the REINFORCE score for
        this single trajectory.
        """
        coord = [0, 0]
        done = False
        total_reward = 0.
        log_prob_sum = 0.
        steps_taken = 0

        while not done:
            coord, step_reward, done, picked = self.sample_one_action(coord)
            total_reward += step_reward
            log_prob_sum += torch.log(picked.sum())
            steps_taken += 1
            if steps_taken > self.max_step:
                break

        return log_prob_sum * total_reward


class Trainer(object):
    """Runs REINFORCE updates on the actor and visualizes the greedy policy."""

    def __init__(self, trajectory_sampler, sample_num, width, height):
        self.trajectory_sampler = trajectory_sampler
        self.sample_num = sample_num   # trajectories per gradient estimate
        self.optimizer = torch.optim.Adam(trajectory_sampler.actor.parameters(),
                                          lr=2e-5)
        # maximize expected reward == minimize its negation
        self.loss_fn = lambda x: -x
        self.width = width
        self.height = height

    def train(self, train_step):
        """Run `train_step` gradient updates with a progress bar."""
        for step in tqdm.tqdm(list(range(train_step))):
            self.train_one_step(step)

    def train_one_step(self, step):
        """One Monte-Carlo policy-gradient update.

        Every sampled trajectory keeps its computation graph alive until
        the single backward() call below.
        """
        total = 0.
        for _ in range(self.sample_num):
            total = total + self.trajectory_sampler.sample_one_trajectory()

        loss = self.loss_fn(total / self.sample_num)

        if step % 10 == 0:
            print(loss.item())

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def test(self):
        """Greedily follow the learned policy from (0, 0) and draw it."""
        with torch.no_grad():
            add_panel(self.width, self.height)
            actor = self.trajectory_sampler.actor
            environ = self.trajectory_sampler.environ
            x, y = 0, 0

            while True:
                # TODO cap the step count here too, so an untrained policy
                # that never reaches the goal cannot loop forever
                probs = actor(x, y)
                move = np.argmax(probs.cpu()).item()
                print(direction_list[move])
                add_arrow(x, y, move, height=self.height)
                x, y, _ = environ.interaction(x, y, move)
                if x == (self.width - 1) and y == (self.height - 1):
                    break

            plt.show()


if __name__ == '__main__':

    config = parse_arg()

    def reward_fn():
        # every transition costs -1, pushing the agent toward short paths
        return -1

    # Wire up the actors of the experiment:
    # policy network, environment, samplers, trainer.
    actor = Actor(config.width, config.height, config.gpu).to(config.gpu)
    environ = Environment(reward_fn)
    action_sampler = ActionSampler()
    trajectory_sample = TrajectorySampler(config.width, config.height, actor,
                                          environ, action_sampler,
                                          config.max_step)
    trainer = Trainer(trajectory_sample, config.sample_num,
                      config.width, config.height)

    actor_filename = "actor_model.t7"

    if not config.test:
        # train from scratch, then persist the learned weights
        trainer.train(config.train_step)
        print("save actor to {}".format(actor_filename))
        torch.save(actor.state_dict(), actor_filename)
    else:
        # evaluation only: restore previously trained weights
        actor.load_state_dict(torch.load(actor_filename))

    # TODO the start / goal cells are mixed up here
    trainer.test()
