import numpy
import numpy as np
from scipy import linalg


class Env():
    """The 4x3 grid world from Russell & Norvig (AIMA): 11 reachable cells,
    an obstacle at grid cell (1, 1), and two absorbing terminal states.

    States are numbered row-major from the bottom-left, skipping the obstacle:

        row 2:  7  8  9 10
        row 1:  4  #  5  6      (# = obstacle; 6 is the -1 terminal)
        row 0:  0  1  2  3      (10 is the +1 terminal)

    Actions: 0=Right, 1=Down, 2=Left, 3=Up. The intended move succeeds with
    probability 0.8; each of the two perpendicular moves occurs with
    probability 0.1. A move blocked by a wall or the obstacle leaves the
    agent in place. Terminal states absorb (all-zero transition rows).
    """

    def __init__(self):
        self.N = 11                    # number of states
        self.A = np.arange(4)          # actions {Right, Down, Left, Up}
        self.X = np.arange(self.N)     # state indices
        self.makeP()                   # build transition tensor P[s, a, s']
        self.makeR()                   # build reward vector R[s]
        self.Gamma = 1                 # discount factor
        self.StartState = 0
        self.EndStates = [6, 10]

    def makeP(self):
        """Build the (N, A, N) transition tensor self.P.

        For each non-terminal state, probability mass is pushed toward each
        of the four grid directions: 0.8 from the action pointing that way
        and 0.1 from each perpendicular action. Blocked moves stay put.

        Bug fix vs. the original: moving Down from state 5 (grid (1, 2)) now
        lands on state 2 (grid (0, 2), i.e. x-3 because the obstacle index is
        skipped) instead of incorrectly landing on state 1 (grid (0, 1)).
        """
        P = np.zeros((self.N, len(self.A), self.N))

        def pos(x):
            # State index -> (row, col); grid cell (1, 1) is skipped, so
            # indices above the obstacle are shifted down by one.
            g = x if x <= 4 else x + 1
            return g // 4, g % 4

        index = {pos(x): x for x in range(self.N)}  # (row, col) -> state
        # direction id -> (drow, dcol), using the same ids as the actions
        moves = {0: (0, 1), 1: (-1, 0), 2: (0, -1), 3: (1, 0)}
        # direction id -> {action: probability mass sent in that direction}
        weights = {0: {0: 0.8, 1: 0.1, 3: 0.1},   # toward Right
                   1: {1: 0.8, 0: 0.1, 2: 0.1},   # toward Down
                   2: {2: 0.8, 1: 0.1, 3: 0.1},   # toward Left
                   3: {3: 0.8, 0: 0.1, 2: 0.1}}   # toward Up
        for x in range(self.N):
            if x in (6, 10):   # terminals absorb: leave their rows all zero
                continue
            row, col = pos(x)
            for d, (dr, dc) in moves.items():
                # Walls and the obstacle are simply absent from `index`,
                # so a blocked move keeps the agent at x.
                dest = index.get((row + dr, col + dc), x)
                for a, w in weights[d].items():
                    P[x][a][dest] += w
        self.P = P

    def makeR(self):
        """Build the reward vector self.R: -0.04 living cost everywhere,
        -1 at the bad terminal (6) and +1 at the goal terminal (10)."""
        R = np.full(self.N, -0.04)
        R[6] = -1
        R[10] = 1
        self.R = R
def ValueIter(E):
    """Value iteration on environment E.

    Repeats the Bellman backup  U <- R + gamma * max_a (P U)  until the
    largest per-state change drops below 1e-4, then extracts the greedy
    policy. Returns (U, Pai).
    """
    current = np.zeros(E.N)
    updated = np.zeros(E.N)
    delta = 1
    while delta > 0.0001:
        current = np.copy(updated)
        # np.dot(E.P, U) has shape (N, A); the max over actions is the backup
        updated = E.R + E.Gamma * np.dot(E.P, current).max(axis=1)
        delta = np.abs(current - updated).max()
    greedy = np.dot(E.P, current).argmax(axis=1)
    print("Value", current, greedy)
    return current, greedy
def Eval(E, Pi):
    """Exact policy evaluation: solve (gamma * P_pi - I) U = -R.

    Pi is an array of action indices, one per state; the utilities of the
    fixed policy are obtained directly as the solution of a linear system.
    """
    n = E.N
    coeff = np.empty((n, n))
    for s in range(n):
        coeff[s, :] = E.Gamma * E.P[s, Pi[s], :]   # row s of gamma * P_pi
    coeff -= np.identity(n)
    return linalg.solve(coeff, -E.R)
def PolicyIter(E):
    """Policy iteration on environment E.

    Starting from the all-zeros (all-Right) policy, alternate exact policy
    evaluation with greedy improvement until no state changes action
    (a 1e-5 tolerance avoids oscillating on ties). Returns (U, Pai).
    """
    policy = np.zeros(E.N, int)
    improved = True
    while improved:
        U = Eval(E, policy)          # exact utilities of the current policy
        improved = False
        for s in E.X:
            q = np.dot(E.P[s], U)    # action values Q(s, .) under U
            if q.max() > q[policy[s]] + 1E-5:
                policy[s] = q.argmax()
                improved = True
    print("Policy", U, policy)
    return U, policy


# Demo: build the grid world, dump every non-zero transition probability,
# then solve it with both value iteration and policy iteration.
E = Env()
print(E.P[0][0][0])
# np.ndenumerate walks the (state, action, state') tensor in the same
# C order as the original triple loop
for (i, j, k), prob in np.ndenumerate(E.P):
    if prob != 0:
        print(i, '--', j, '--', k, "概率是:", prob)
ValueIter(E)
PolicyIter(E)

