# -*- coding: utf-8 -*-
"""
Implements the gridworld MDP.
Matthew Alger, 2015
matthew.alger@anu.edu.au
"""

import numpy as np
import random
import numpy.random as rn
from collections import deque


class DaPingTai(object):
    """
    Gridworld MDP with linear reward features over a coarse 8x8 abstraction
    of the full grid (each coarse cell covers appro x appro raw cells).
    """

    def __init__(self, grid_size, wind, discount):
        """
        grid_size: Grid side length (the grid is grid_size x grid_size). int.
        wind: Chance of taking a random action instead of the greedy one. float.
        discount: MDP discount. float.
        -> Gridworld
        """

        # Actions as (dx, dy) offsets: down, right, up, left.
        self.actions = ((1, 0), (0, 1), (-1, 0), (0, -1))
        self.n_actions = len(self.actions)
        self.n_states = grid_size ** 2
        self.grid_size = grid_size
        self.wind = wind
        self.discount = discount
        # Size of the coarse feature space.
        # NOTE(review): 8 * 8 assumes grid_size == 128 with appro == 16
        # (grid_size // appro == 8) — confirm before using other grid sizes.
        self.feature_size = 8 * 8
        # Random sparse ground-truth rewards, one per coarse cell.
        self.ground_r = np.array([self.setReward(s) for s in range(self.feature_size)])
        self.true_r = self.set_true_r(self.ground_r)
        self.gamma = 0.99  # TD discount used by value_iteration (distinct from self.discount)
        self.alpha = 0.9   # TD learning rate
        self.negativeScore = 0
        self.appro = 16    # coarsening factor: raw coords // appro -> coarse coords
        self.weights = None  # linear Q-function weights, lazily initialised
        self.Q = {}

    def feature_vector(self, i, feature_map="ident"):
        """
        Get the one-hot feature vector associated with a state integer.
        i: Raw state int (0 .. n_states - 1).
        feature_map: Which feature map to use (default ident). Only the
            identity map is implemented; the argument is currently unused.
        -> Feature vector of length feature_size.
        """

        f = np.zeros(self.feature_size)
        f[self.stateInt_to_appoximateStateInt(i)] = 1
        return f

    def int_to_point(self, i):
        """
        Convert a state int into the corresponding coordinate.
        i: State int.
        -> (x, y) int tuple.
        """

        return (i // self.grid_size, i % self.grid_size)

    def point_to_int(self, p):
        """
        Convert a coordinate into the corresponding state int.
        p: (x, y) tuple.
        -> State int.
        """

        return p[0] * self.grid_size + p[1]

    def getLegalAction(self, state):
        """
        List the action indices that do not walk off the grid from `state`.
        state: Raw state int.
        -> list of action indices into self.actions.
        """
        sx, sy = self.int_to_point(state)
        blocked = np.zeros((4,))
        if sx == 0:
            blocked[2] = 1  # cannot move up
        if sx == self.grid_size - 1:
            blocked[0] = 1  # cannot move down
        if sy == 0:
            blocked[3] = 1  # cannot move left
        if sy == self.grid_size - 1:
            blocked[1] = 1  # cannot move right

        return list(np.where(blocked == 0)[0])

    def getAction(self, action):
        """Return the (dx, dy) offset for an action index."""
        return self.actions[action]

    def setReward(self, state_int):
        """
        Draw a random reward for one coarse cell: with probability ~0.1 a
        uniform value in [0, 1), otherwise 0.
        """
        p = np.random.uniform()
        if p > 0.9:
            return np.random.uniform()
        else:
            return 0

    def set_true_r(self, ground_r):
        """
        Normalise the ground rewards so they sum to 1.
        Returns the (unnormalised) copy when the rewards are all zero,
        instead of dividing by zero (the original produced NaNs there).
        """
        ground_r = np.copy(ground_r)
        total = np.sum(ground_r)  # renamed: `sum` shadowed the builtin
        if total == 0:
            return ground_r
        return ground_r / total

    def setStartState(self, ground_r):
        """
        Pick a random zero-reward, non-goal state as the starting state.
        NOTE(review): getPostiveRewardState is not defined anywhere in this
        class (its definition was commented out), so calling this raises
        AttributeError — confirm the intended goal-state source before use.
        """
        ground_r_copy = np.copy(ground_r)
        goalState = self.getPostiveRewardState()
        ground_r_copy[goalState] = 1
        points = np.array(range(self.n_states))

        return np.random.choice(points[ground_r_copy == 0])

    def getStartState(self):
        """
        Return the starting state.
        NOTE(review): self.startState is never assigned in __init__ (the
        assignment is commented out), so this raises AttributeError unless
        it is set externally first.
        """

        return self.startState

    def getCurrentState(self):
        # NOTE(review): self.currentState is only set via setCurrentState;
        # __init__ no longer initialises it.
        return self.currentState

    def setCurrentState(self, currentState):
        self.currentState = currentState

    def convertToMatrix(self, ground_r):
        """
        Reshape a flat reward array into a grid_size x grid_size matrix,
        with the upper-left entry being the first state.
        NOTE(review): requires len(ground_r) == grid_size ** 2;
        self.ground_r has feature_size entries, which only matches when
        grid_size == 8 — verify the caller's array.
        """
        return np.reshape(ground_r, (self.grid_size, self.grid_size))

    def increaseNegativeScore(self, score):
        self.negativeScore = self.negativeScore + score

    def getNegativeScore(self):
        return self.negativeScore

    def setNegativeScore(self, num):
        self.negativeScore = num

    def stateInt_to_appoximateStateInt(self, state):
        """
        Map a raw state int to its coarse cell index.
        state: Raw state int.
        -> Coarse state int in [0, feature_size).
        """
        sx, sy = self.int_to_point(state)
        appro_sx = sx // self.appro
        appro_sy = sy // self.appro
        # 8 coarse columns per coarse row; assumes grid_size // appro == 8.
        # (Removed a Python-2-only `print` statement that was a SyntaxError
        # under Python 3.)
        return appro_sx * 8 + appro_sy

    def move_to_nextstate(self, state, action):
        """
        Apply `action` to `state`, clamping at the grid border.
        state: Raw state int.
        action: Action index into self.actions.
        -> Raw state int of the successor state.
        """
        sx, sy = self.int_to_point(state)
        dx, dy = self.actions[action]
        # Clamp to [0, grid_size - 1]; the original clamped to grid_size,
        # one past the last valid coordinate, producing off-grid states.
        next_sx = min(max(sx + dx, 0), self.grid_size - 1)
        next_sy = min(max(sy + dy, 0), self.grid_size - 1)
        # Use point_to_int instead of the hard-coded 128-wide row stride.
        return self.point_to_int((next_sx, next_sy))

    def get_action(self, state):
        """
        Greedy action with respect to the current Q estimate.
        state: Raw state int.
        -> Action index into self.actions (0 if no legal action exists).
        """
        actions = self.getLegalActions(state)
        if not actions.size:
            return 0
        # Return the action id itself, not its position within the legal
        # subset (the original argmax index was wrong at the borders,
        # where the legal subset has fewer than 4 entries).
        q_values = [self.getQValue(state, action) for action in actions]
        return int(actions[int(np.argmax(q_values))])

    def getLegalActions(self, state):
        """Legal actions for `state` as a numpy array of action indices."""
        return np.array(self.getLegalAction(state))

    def _windy_action(self, state):
        """
        Greedy action, replaced by a uniformly random one with probability
        self.wind. The original mixed both conventions (`< wind` greedy in
        two places, `> wind` greedy in a third); this matches the documented
        meaning of wind as "chance of moving randomly".
        """
        if np.random.uniform() < self.wind:
            return np.random.randint(self.n_actions)
        return self.get_action(state)

    def value_iteration(self, w, iterNUM):
        """
        Run iterNUM steps of linear TD Q-learning under reward weights w.
        w: Reward weight vector over the coarse features.
        iterNUM: Number of TD updates to perform.
        """
        state = np.random.randint(self.n_states)
        ac = self._windy_action(state)
        for _ in range(iterNUM):
            next_state = self.move_to_nextstate(state, ac)

            # feature_vector coarsens its argument itself; the original
            # passed an already-coarsened state, approximating twice.
            feature = self.feature_vector(next_state)
            reward = w.T.dot(feature)
            correction = (reward + self.gamma * self.getValue(next_state)
                          - self.getQValue(state, ac))
            if self.weights is None:
                self.weights = self.alpha * correction * feature
            else:
                self.weights += self.alpha * correction * feature

            state = next_state
            ac = self._windy_action(state)

    def getFeatures(self, state, action):
        """
        Feature column vector of the successor of (state, action).
        -> np.ndarray of shape (feature_size, 1).
        """
        new_state = self.move_to_nextstate(state, action)

        return np.reshape(self.feature_vector(new_state), (self.feature_size, 1))

    def getValue(self, state):
        """
        State value: max over Q-values of the legal actions (0.0 if none).
        """
        actions = self.getLegalActions(state)
        if not actions.size:
            return 0.0
        return max([self.getQValue(state, action) for action in actions])

    def getQValue(self, state, action):
        """
        Linear Q-value: weights . features(state, action); 0.0 before the
        weights have been initialised.
        """
        if self.weights is None:
            return 0.0
        features = self.getFeatures(state, action)
        return self.weights.T.dot(features)

    def generate_trajectories(self, w, n_trajectories, trajectory_length):
        """
        Train the Q-function under reward weights w, then roll out
        trajectories and return the empirical feature expectation.
        w: Reward weight vector over the coarse features.
        n_trajectories: Number of rollouts.
        trajectory_length: Steps per rollout.
        -> np.ndarray of length feature_size (averaged feature counts).
        """
        itnum = 100
        for _ in range(itnum):
            # // keeps the count an int under Python 3 (the original `/`
            # produced a float, which range() rejects).
            self.value_iteration(w, iterNUM=self.grid_size * self.grid_size // 2)

        # Accumulate a running sum; the original did
        # `features = features.append(feature)`, which rebinds to None and
        # crashes on the second step.
        feature_sum = np.zeros(self.feature_size)

        for _ in range(n_trajectories):
            state = np.random.randint(self.n_states)
            ac = self._windy_action(state)
            for _ in range(trajectory_length):
                next_state = self.move_to_nextstate(state, ac)
                # feature_vector coarsens internally; don't pre-coarsen.
                feature_sum += self.feature_vector(next_state)
                state = next_state
                ac = self._windy_action(state)

        return feature_sum / (n_trajectories * trajectory_length)

