import random
import copy
import numpy as np
from numpy import linalg


class Env:
    """4x3 grid-world MDP (the classic AIMA 'robot' world).

    12 cells laid out row-major with 4 columns; cell 5 is a wall, leaving
    N = 11 reachable states. The intended move succeeds with probability
    0.8 and slips to each perpendicular direction with probability 0.1;
    bumping into a wall or the grid edge leaves the state unchanged.
    States 6 and 10 are terminal (rewards -1 and +1 respectively).
    """

    def __init__(self, name):
        self.Name = name
        self.N = 11                  # number of reachable states
        self.A = np.arange(4)        # action indices (Right, Down, Left, Up)
        self.X = np.arange(self.N)   # state indices
        self.makeP()                 # transition tensor P[x, a, x']
        self.makeR()                 # reward vector R[x]
        self.Gamma = 1               # discount factor (episodic: terminals absorb)
        self.StartState = 0
        self.EndState = [6, 10]      # terminal states (the -1 and +1 cells)

    def action(self, x, a):
        """Sample and return the successor state of taking action a in state x."""
        x_ = np.random.choice(self.N, p=self.P[x, a, :])
        return x_

    def makeP(self):
        """Build the (N, 4, N) transition tensor self.P.

        Also builds self.X2RowCol (state -> (row, col)) and its inverse
        self.RowCol2X, skipping the wall cell. Terminal states get all-zero
        rows so the Bellman backup contributes no continuation value there.
        """
        blocks = [5]  # grid cell occupied by the wall

        # Map state index x -> (row, col), skipping the wall cell.
        X2RowCol = {}
        x = 0
        for i in range(12):
            if i not in blocks:
                X2RowCol[x] = divmod(i, 4)
                x += 1
        self.X2RowCol = X2RowCol

        # Inverse map (row, col) -> x.
        RowCol2X = {rc: s for s, rc in X2RowCol.items()}
        self.RowCol2X = RowCol2X

        def neighbour(row, col):
            # Reachable 4-neighbours of (row, col), as state indices.
            deltas = ((0, -1), (0, 1), (-1, 0), (1, 0))
            return [RowCol2X[(row + dr, col + dc)]
                    for dr, dc in deltas
                    if (row + dr, col + dc) in RowCol2X]

        def rel_pos(x1, x2):
            # Action index that moves from x1 toward the adjacent state x2.
            # NOTE(review): the labels imply rows are numbered bottom-up
            # (larger row index = "Up") -- confirm against the grid drawing.
            (row1, col1) = self.X2RowCol[x1]
            (row2, col2) = self.X2RowCol[x2]
            if row1 < row2:
                return 3  # Up
            elif row1 > row2:
                return 1  # Down
            elif col1 < col2:
                return 0  # Right
            else:
                return 2  # Left

        P = np.zeros((11, 4, 11))

        # Intended move lands with 0.8; each perpendicular slip with 0.1.
        for x in self.X:
            (row, col) = self.X2RowCol[x]
            for x_ in neighbour(row, col):
                d = rel_pos(x, x_)
                P[x, d, x_] = 0.8
                P[x, (d + 1) % 4, x_] = 0.1
                P[x, (d + 3) % 4, x_] = 0.1

        # Remaining probability mass means hitting a wall/edge: stay put.
        for x in self.X:
            for d in range(4):
                P[x, d, x] = 1 - sum(P[x, d, :])

        # BUG FIX: the absorbing (terminal) states are 6 and 10 -- matching
        # EndState and the +/-1 rewards in makeR -- not the start state 0.
        # The original zeroed P[0], which froze the start state and let the
        # agent walk out of the -1 terminal.
        for terminal in (6, 10):
            P[terminal, :, :] = 0

        self.P = P

    def makeR(self):
        """Reward vector: -0.02 living cost everywhere, -1 and +1 at terminals."""
        self.R = np.ones(11) * (-0.02)
        self.R[6] = -1
        self.R[10] = 1

def ValueIter(E, tol=1e-4):
    """Compute optimal utilities and a greedy policy by value iteration.

    Args:
        E: environment exposing N (state count), R (reward vector, shape (N,)),
           Gamma (discount factor) and P (transition tensor, shape (N, A, N)).
        tol: convergence threshold on the max utility change per sweep
             (default 1e-4, matching the original hard-coded value).

    Returns:
        (U, Pai): converged utility vector and the greedy policy
        (one action index per state) extracted from it.
    """
    U_ = np.zeros(E.N)
    delta = np.inf
    iteration = 0
    while delta > tol:
        iteration += 1
        U = U_
        # Bellman optimality backup:
        # U_(x) = R(x) + gamma * max_a sum_x' P(x, a, x') U(x')
        U_ = E.R + E.Gamma * np.max(np.dot(E.P, U), axis=1)
        delta = np.max(np.abs(U - U_))
        print(f"Value Iteration - Iteration {iteration}: delta = {delta}")
    # FIX: extract the policy from the *converged* iterate U_ (the original
    # used the second-to-last iterate U) and return U_ for consistency.
    Pai = np.argmax(np.dot(E.P, U_), axis=1)
    return U_, Pai


def Eval(E, Pai):
    """Exact policy evaluation: solve the linear Bellman system for Pai.

    Solves (Gamma * P_pi - I) U = -R for U, where P_pi is the (N, N)
    transition matrix induced by taking action Pai[x] in every state x.

    Returns the utility vector U of the given policy.
    """
    # Select each state's row for its policy action in one fancy-indexing step.
    P_pi = E.P[np.arange(E.N), np.asarray(Pai), :]
    coeff = E.Gamma * P_pi - np.identity(E.N)
    return linalg.solve(coeff, -E.R)


def PolicyIter(E):
    """Policy iteration: alternate exact evaluation and greedy improvement.

    Starts from the all-zeros policy and repeats until no state's action
    changes. Returns (U, Pai): utilities of the final policy and the policy
    itself (one action index per state).
    """
    Pai = np.zeros(E.N, dtype=int)
    change = True
    iteration = 0
    while change:
        iteration += 1
        U = Eval(E, Pai)  # exact utilities of the current policy
        change = False
        for x in E.X:
            q = np.dot(E.P[x, :, :], U)  # action values at state x
            best = np.argmax(q)
            # Switch only on a strict improvement (1E-5 slack avoids
            # oscillating between tied actions).
            if q[best] > q[Pai[x]] + 1E-5:
                Pai[x] = best
                change = True
        print(f"Policy Iteration - Iteration {iteration}: Policy changed = {change}")
    return U, Pai


if __name__ == '__main__':
    env = Env("Robot")

    # Solve the same MDP with both algorithms and report each result.
    utilities, policy = ValueIter(env)
    print("Value Iteration Results:")
    print("U:", utilities)
    print("Policy:", policy)

    utilities, policy = PolicyIter(env)
    print("\nPolicy Iteration Results:")
    print("U:", utilities)
    print("Policy:", policy)