#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   QLearning.py    
@Contact :   ruaqy@qq.com
@License :   (C)Copyright 2022-~, GPL 2.0

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/7/3 13:15   rqy        1.0         None
"""
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt


# the problem of finding road in cliff
class FindRoadInCliff:
    def __init__(self, reward_map, target_position, gamma, epsilon, learning_rate):
        self.__map = reward_map
        self.__target_position = target_position

        self.__size = reward_map.shape
        self.__gamma = gamma
        self.__epsilon = epsilon
        self.__lr = learning_rate

        self.q_table = {}

    # Decide whether agent is on the destination or not.
    def agent_done(self, agent):
        if agent.position == self.__target_position:
            return True
        else:
            return False

    def agent_move(self, agent):
        # choose action and calculate next position
        if np.random.random() > self.__epsilon:  # using epsilon policy
            action = np.argmax(self.get_qtable(agent.position))
        else:
            action = np.random.randint(0, 4)  # random choice
        if action == 0:
            next_position = agent.left()
        elif action == 1:
            next_position = agent.right()
        elif action == 2:
            next_position = agent.up()
        else:
            next_position = agent.down()

        reward = self.get_reward(next_position)
        if self.movable(next_position):  # next position is available
            reward = self.get_reward(next_position)
        else:  # If the next position is unavailable, stay and get reward -1
            next_position = agent.position

        # update Qtable
        q_table_k = self.get_qtable(tuple(agent.position))  # get Qtable now
        q_predict = q_table_k[action]  # q value now
        q_target = reward + self.__gamma * np.max(self.get_qtable(next_position))
        q_table_k[action] += self.__lr * (q_target - q_predict)
        self.q_table[tuple(agent.position)] = q_table_k

        # move agent
        agent.position = next_position

    def get_qtable(self, position):
        return self.q_table.get(tuple(position), np.zeros(4))

    def movable(self, position):
        if position[0] < 0 or position[0] > self.__size[0] - 1:
            return False
        if position[1] < 0 or position[1] > self.__size[1] - 1:
            return False
        return True

    def get_reward(self, position):
        if self.movable(position):
            return self.__map[position[0]][position[1]]
        else:
            return -1


class Agent:
    """A grid agent that remembers its spawn cell and proposes neighbor cells."""

    def __init__(self, init_position):
        # Keep the spawn cell so episodes can be restarted via clean().
        self.__init_position = init_position
        self.position = init_position

    def clean(self):
        """Reset the agent back to its starting cell for a new episode."""
        self.position = self.__init_position

    def left(self):
        """Neighbor cell with the first coordinate decreased by one."""
        row, col = self.position
        return row - 1, col

    def right(self):
        """Neighbor cell with the first coordinate increased by one."""
        row, col = self.position
        return row + 1, col

    def up(self):
        """Neighbor cell with the second coordinate decreased by one."""
        row, col = self.position
        return row, col - 1

    def down(self):
        """Neighbor cell with the second coordinate increased by one."""
        row, col = self.position
        return row, col + 1


def main():
    """Train a Q-learning agent on the cliff grid and visualize every episode."""
    # Hyper-parameters: discount factor, exploration rate, learning rate.
    # NOTE(review): epsilon = 0.0 makes the policy purely greedy; the agent
    # only "explores" because unseen state-action values default to zero.
    gamma, epsilon, learning_rate = 0.9, 0.0, 0.2
    T, MAX_STEP = 100, 100  # number of episodes, step cap per episode

    # Reward map: -1 for free cells, -100 for cliff cells, 100 for the goal.
    reward_map = np.array([[-1,   -1,   -1, -100,   -1,   -1,   -1, -100, -1,   -1, -1, -100],
                           [-1, -100,   -1, -100,   -1, -100,   -1, -100, -1, -100, -1, -100],
                           [-1, -100,   -1,   -1,   -1, -100,   -1,   -1, -1, -100, -1,   -1],
                           [-1, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 100]])
    target_position = (3, 11)
    initial_position = (3, 0)

    find_road = FindRoadInCliff(reward_map, target_position, gamma, epsilon, learning_rate)
    agent = Agent(initial_position)

    # Resume from a previously saved Q-table when one exists.
    # NOTE(review): pickle.load can execute arbitrary code from the file;
    # only load qtable.pkl from a trusted location.
    if os.path.exists('qtable.pkl'):
        with open('qtable.pkl', 'rb') as f:
            find_road.q_table = pickle.load(f)

    # Steps used by each episode, for the summary plot at the end.
    longest_step = np.zeros(T)
    fig = plt.figure()

    for k in range(T):
        agent.clean()  # reset the agent to its start cell
        step = 0  # keeps `step` defined even if the inner loop never runs
        for step in range(MAX_STEP):
            if find_road.agent_done(agent):
                break  # agent reached the destination

            find_road.agent_move(agent)  # one epsilon-greedy learning step

            # Redraw the grid with the agent's current position marked.
            fig.clf()
            ax = fig.add_axes([0.05, 0.1, 0.9, 0.8])
            ax.matshow(reward_map)
            ax.scatter(agent.position[1], agent.position[0], s=500, c='r')
            ax.set_title('iter:{},step:{}'.format(k, step))
            plt.pause(0.1)
        longest_step[k] = step

    # Dump the learned action values for inspection.
    for k, v in find_road.q_table.items():
        print(k, v)

    # Persist the Q-table so later runs can resume training.
    with open('qtable.pkl', 'wb') as f:
        pickle.dump(find_road.q_table, f)

    plt.figure()
    plt.plot(longest_step)
    plt.title('Step Usage in Each Iter')

    plt.show()


# Run the training demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
