﻿import numpy as np
import copy

from matplotlib import pyplot as plt

# State index -> [row, col] on a 3x4 grid. Cell [1, 1] is missing (a wall),
# so row 1 contains only three states (4, 5, 6). States 6 and 10 are the
# terminal cells (see Env.end_states / Env.make_r below).
# NOTE(review): this module-level name shadows the builtin `map`; renaming it
# would touch every function in this file, so it is documented instead.
map = \
{
    0: [0, 0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [1, 0], 5: [1, 2],
    6: [1, 3], 7: [2, 0], 8: [2, 1], 9: [2, 2], 10: [2, 3]
}

def get_next_state(x, delta_x, delta_y, map_size=11):
    """Return the candidate successor of state ``x`` and a validity flag.

    Args:
        x: current state index (0..map_size-1).
        delta_x: index offset applied to ``x`` (wraps modulo ``map_size``).
        delta_y: offset applied to the second grid coordinate (always 0 at
            the call sites in this file).
        map_size: number of states in the grid table.

    Returns:
        ``(next_state, 1)`` if the shifted coordinates match the successor's
        entry in the global ``map`` table, else ``(next_state, 0)``.
    """
    next_state = (x + delta_x + map_size) % map_size
    # BUGFIX: the original did `tmp = map[x]`, aliasing the list stored in the
    # module-level `map`; the in-place `+=` below then corrupted the grid
    # table on every call. Work on a copy instead.
    tmp = list(map[x])
    tmp[0] += delta_x
    tmp[1] += delta_y
    # NOTE(review): delta_x is an index offset (±1, ±3, ±4) yet is added to
    # the row coordinate, so vertical moves rarely validate — confirm whether
    # the intended check should derive row/col deltas from the move direction.
    if tmp == map[next_state]:
        return next_state, 1
    return next_state, 0

def config(x, a):
    """Build the transition-probability row for state ``x`` under action ``a``.

    Actions: 0 = forward, 1 = left, 2 = right, 3 = backward. The chosen
    direction gets probability 0.8, its opposite 0, and the two remaining
    directions 0.1 each. Probability mass belonging to moves that
    ``get_next_state`` reports as invalid is returned to staying in ``x``.

    Returns:
        dict mapping every state index (0..10) to its transition probability;
        the values sum to 1.
    """
    # Stride corrections: the wall at [1, 1] shortens the index distance
    # between vertically adjacent cells for these state ranges.
    step = back = 0
    if 2 <= x < 5:
        step += 1
    if 5 <= x < 8:
        back += 1

    res = {i: 0 for i in range(11)}

    forward = left = right = backward = 0.1
    if a == 0:
        forward = 0.8
        backward = 0
    elif a == 1:
        left = 0.8
        right = 0
    elif a == 2:
        right = 0.8
        left = 0
    else:
        forward = 0
        backward = 0.8

    # BUGFIX: the original passed `back_offset=back`, a keyword that
    # get_next_state does not accept, so every call raised TypeError. The
    # stride adjustment is already folded into the index deltas below.
    # Also removed an unused `map_copy = copy.deepcopy(map)` left over from
    # working around get_next_state's (now fixed upstream) table mutation.
    next_state, prob = get_next_state(x, 4 - step, 0)
    res[next_state] = forward if prob else 0

    next_state, prob = get_next_state(x, -1, 0)
    res[next_state] = left if prob else 0

    next_state, prob = get_next_state(x, 1, 0)
    res[next_state] = right if prob else 0

    next_state, prob = get_next_state(x, -4 + back, 0)
    res[next_state] = backward if prob else 0

    # Any mass not assigned to a valid move keeps the agent in place.
    total_prob = sum(res.values())
    res[x] = 1 - total_prob

    return res

def normal(P):
    """Normalize the absolute values of ``P`` into a probability vector."""
    magnitudes = np.abs(P)
    return magnitudes / np.sum(magnitudes)


def X2RowCol(x):
    """Return the ``[row, col]`` grid coordinates of state index ``x``.

    Returns a fresh list (a copy) so callers cannot accidentally mutate the
    shared module-level ``map`` table — the original returned the internal
    list itself.
    """
    return list(map[x])

class Env:
    """The 3x4 grid-world MDP: 11 states, 4 actions, per-state rewards."""

    def __init__(self):
        self.N = 11                    # number of states
        self.A = np.arange(4)          # action indices
        self.X = np.arange(self.N)     # state indices
        self.matrix_p = None           # (state, action, next-state) tensor
        self.R = None                  # per-state reward list
        self.make_p()
        self.make_r()
        self.gamma = 1                 # no discounting
        self.start_state = 0
        self.end_states = [6, 10]      # terminal states (-1 and +1 reward)

    def action(self, x, a):
        """Sample a successor of state ``x`` under action ``a``."""
        return np.random.choice(self.N, p=self.matrix_p[x, a, :])

    def make_p(self):
        """Fill the transition tensor from config()'s per-row distributions."""
        self.matrix_p = np.zeros((self.N, self.A.size, self.N))
        for state in range(self.N):
            for act in range(self.A.size):
                for successor, prob in config(state, act).items():
                    self.matrix_p[state, act, successor] = prob

    def make_r(self):
        """Rewards: -0.04 living cost everywhere, -1 at state 6, +1 at state 10."""
        rewards = [-0.04] * 11
        rewards[6] = -1
        rewards[10] = 1
        self.R = rewards


# 被动学习-时序差分方法TD Learning
class TD:
    """Passive TD(0) learner: evaluates a fixed policy by sampling episodes."""

    def __init__(self, E):
        self.E = E
        self.Alpha = 0.5   # learning rate
        # Fixed policy being evaluated: one action index per state.
        self.Pi = [0, 1, 1, 3, 0, 0, 0, 2, 2, 2, 0]
        # Value estimates; terminals pinned at -1 (state 6) and +1 (state 10).
        self.U = [0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1]

    def train(self):
        """Run one episode from a random non-terminal state, updating U online."""
        state = np.random.choice([0, 1, 2, 3, 4, 5, 7, 8, 9])
        while state not in self.E.end_states:
            successor = self.E.action(state, self.Pi[state])
            reward = self.E.R[state]
            td_target = reward + self.E.gamma * self.U[successor]
            self.U[state] += self.Alpha * (td_target - self.U[state])
            state = successor

    def draw_res(self):
        """Plot the learned per-state value estimates."""
        plt.plot(self.U)
        plt.xlabel('State')
        plt.ylabel('Value')
        plt.title('Value Function')
        plt.show()


# 主动学习-Q Learning
class QLearning():
    """Active Q-learning agent with a |Q|-proportional behavior policy."""

    def __init__(self, E):
        self.E = E
        self.Alpha = 0.5                  # learning rate
        self.Q = np.ones((11, 4)) / 4     # uniform 0.25 initial action values
        self.Q[10, :] = 1                 # terminal +1 state
        self.Q[6, :] = -1                 # terminal -1 state
        self.Pi = [0, 1, 1, 3, 0, 0, 0, 2, 2, 2, 0]

    def train(self):
        """Run one episode, updating Q toward the max-over-actions TD target."""
        state = np.random.choice([0, 1, 2, 3, 4, 5, 7, 8, 9])
        while state not in self.E.end_states:
            # NOTE(review): the behavior policy samples proportionally to |Q|,
            # so strongly negative actions are as likely as positive ones.
            probs = normal(self.Q[state])
            act = np.random.choice(4, p=probs)
            successor = self.E.action(state, act)
            reward = self.E.R[state]
            target = reward + self.E.gamma * np.max(self.Q[successor])
            self.Q[state, act] += self.Alpha * (target - self.Q[state, act])
            state = successor

    def GetPi(self):
        """Derive and store the greedy policy: argmax over actions per state."""
        self.Pi = np.argmax(self.Q, axis=1)
        return self.Pi


# 价值函数的线性逼近
class FTD():
    """Gradient Monte-Carlo value estimation with a linear approximator.

    The value of state ``x`` is modelled as ``w . [1, row, col]``; after each
    complete episode the weights are nudged toward the discounted return
    observed from the episode's start state.
    """

    def __init__(self, E):
        self.w = np.array([0.5, 0.5, 0.5])   # weights for features [1, row, col]
        self.E = E
        self.alpha = 0.001                   # learning rate
        # Fixed policy being evaluated: one action index per state.
        self.pi = [0, 1, 1, 3, 0, 0, 0, 2, 2, 2, 0]
        self.sum_u = [0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 1]

    def U(self, x):
        """Approximate value of state ``x``; terminals return their fixed reward."""
        if x == 10:
            return 1
        if x == 6:
            return -1
        (row, col) = X2RowCol(x)
        return np.dot(np.array([1, row, col]), self.w)

    def dU(self, x):
        """Gradient of U(x) w.r.t. ``w``: the feature vector [1, row, col]."""
        (row, col) = X2RowCol(x)
        return np.array([1, row, col])

    def train(self):
        """Roll out one episode and update ``w`` toward the observed return.

        BUGFIX: removed a dead ``path`` list that was declared but never
        appended to, and the unconditional ``print(path)`` that spammed an
        empty list to the console after every episode.
        """
        x0 = np.random.choice([0, 1, 2, 3, 4, 5, 7, 8, 9])
        a0 = self.pi[x0]
        rsum = self.E.R[x0]     # discounted return accumulator
        x = x0
        a = a0
        gamma = self.E.gamma

        while x not in self.E.end_states:
            x = self.E.action(x, a)
            rsum += gamma * self.E.R[x]
            a = self.pi[x]
            gamma *= self.E.gamma

        # Gradient-MC step: move w toward the sampled return from x0.
        self.w = self.w + self.alpha * (rsum - self.U(x0)) * self.dU(x0)

    def draw_res(self):
        """Plot the stored per-state values."""
        plt.plot(self.sum_u)
        plt.xlabel('State')
        plt.ylabel('Value')
        plt.title('Value Function')
        plt.show()


# if __name__ == '__main__':
#     e = Env()
#     cnt=int(input("输入训练次数:"))
#
#     # flag = cnt = 1
#     # while flag == 1:
#     #     print("输入T是被动学习价值函数，输入F是线性逼近价值函数")
#     #     select = input("请输入：")
#     #     cnt = int(input("训练次数："))
#     #
#     #     if select == "t":
#     #         td = TD(E=e)
#     #         for i in range(cnt):
#     #             td.train()
#     #         print("被动学习价值函数:")
#     #         for i in range(11):
#     #             print(td.U[i])
#     #
#     #         td.draw_res(cnt)
#     #
#     #         flag = 0
#     #     elif select == "f":
#     #         ftd = FTD(e)
#     #         print("线性逼近价值函数:")
#     #         for i in range(cnt):
#     #             ftd.train()
#     #         print("U:")
#     #         for i in range(11):
#     #             ftd.SumU[i]=ftd.U(i)
#     #             print(ftd.SumU[i])
#     #         ftd.draw_res(cnt)
#     #         flag = 0
#
#     ql = QLearning(E=e)
#     for i in range(cnt):
#         ql.train()
#     print("行为价值函数Q:\n", ql.Q)
#
#     print("按照行为价值函数每一个状态取最高的价值得到的策略:\n", ql.GetPi())