{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from copy import deepcopy"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Dynamic programming\n",
    "\n",
    "Pitfalls:\n",
    "- Brute-force algorithm\n",
    "- requires looping through all possible states and actions\n",
    "- not practical for large state or action spaces."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Grid layouts: 'x' marks an impassable wall, a nonzero number marks a\n",
    "# terminal state with that reward, and 0 marks an ordinary walkable square.\n",
    "LARGE = [\n",
    "  [0  ,0   ,0  ,0  ,0  ,0  ,1  ],\n",
    "  [0  ,'x' ,'x',0  ,'x',0  ,-1 ],\n",
    "  [0  ,0   ,'x',0  ,'x',0  ,1  ],\n",
    "  [0  ,-1  ,0  ,0  ,'x',0  ,'x'],\n",
    "  [5  ,-1  ,0  ,0  ,0  ,0  ,0  ],\n",
    "]\n",
    "\n",
    "DEFAULT = [\n",
    "  [0  ,0   ,0  ,0  ,1  ],\n",
    "  [0  ,'x' ,'x',0  ,-1 ],\n",
    "  [0  ,0   ,0  ,0  ,0  ],\n",
    "]\n",
    "\n",
    "SMALL = [\n",
    "  [0  ,0   ,0  ,1  ],\n",
    "  [0  ,'x' ,0  ,-1 ],\n",
    "  [0  ,0   ,0  ,0  ],\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('UP', -0.1), ('RIGHT', -0.1), ('DOWN', -0.1), ('LEFT', -0.1)]"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "class Gridworld:\n",
    "    def __init__(self, architecture, walking_penality=-0.1):\n",
    "        self.grid = deepcopy(architecture)\n",
    "        self.available_states = []\n",
    "        self.terminal_states = []\n",
    "        self.available_moves = {}\n",
    "        \n",
    "        self.height = len(self.grid)\n",
    "        self.width = len(self.grid[0])\n",
    "        self.move_prob = 0.8\n",
    "\n",
    "        for y in range(self.height):\n",
    "            for x in range(self.width):\n",
    "\n",
    "                # Check if state is a valid position to be in\n",
    "                if (self.grid[y][x] != 'x'):\n",
    "\n",
    "                    # Check if state is a terminal position\n",
    "                    if (self.grid[y][x] > 0 or self.grid[y][x] < 0):\n",
    "                        self.terminal_states.append((y,x))\n",
    "                        \n",
    "                    if (self.grid[y][x] == 0):\n",
    "                        self.available_states.append((y,x))\n",
    "                        # set move penalty\n",
    "                        self.grid[y][x] = -0.1\n",
    "                \n",
    "                    \n",
    "                    \n",
    "        for y, x in self.available_states:\n",
    "            self.available_moves[(y, x)] = self.get_valid_moves((y, x))\n",
    "\n",
    "    def get_valid_moves(self, state):\n",
    "        y = state[0]\n",
    "        x = state[1]\n",
    "        valid_moves = []\n",
    "        \n",
    "        # Action Up\n",
    "        new_loc = (y - 1 if y - 1 > 0 else 0, x)\n",
    "        if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "            valid_moves.append((\"UP\", self.grid[new_loc[0]][new_loc[1]]))\n",
    "                               \n",
    "        # Action Right\n",
    "        new_loc = (y, x + 1 if x + 1 < self.width else x)\n",
    "        if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "            valid_moves.append((\"RIGHT\", self.grid[new_loc[0]][new_loc[1]]))\n",
    "\n",
    "        # Action Down\n",
    "        new_loc = (y + 1 if y + 1 < self.height else y, x)\n",
    "        if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "            valid_moves.append((\"DOWN\", self.grid[new_loc[0]][new_loc[1]]))\n",
    "\n",
    "                            \n",
    "        # Action Left\n",
    "        new_loc = (y, x - 1 if x - 1 > 0 else 0)\n",
    "        if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "            valid_moves.append((\"LEFT\", self.grid[new_loc[0]][new_loc[1]]))\n",
    "        \n",
    "        return valid_moves\n",
    "\n",
    "    def reward_of_action(self, action, state):\n",
    "        y = state[0]\n",
    "        x = state[1]\n",
    "        \n",
    "        # returns (s', R(s'))\n",
    "        if action == \"UP\":\n",
    "            new_loc = (y - 1 if y - 1 > 0 else 0, x)\n",
    "            if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "                return new_loc , self.grid[new_loc[0]][new_loc[1]]\n",
    "            else:\n",
    "                return (y, x) , self.grid[y][x]\n",
    "\n",
    "        elif action == \"RIGHT\":\n",
    "            new_loc = (y, x + 1 if x + 1 < self.width else x)\n",
    "            if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "                return new_loc , self.grid[new_loc[0]][new_loc[1]]\n",
    "            else:\n",
    "                return (y, x) , self.grid[y][x]\n",
    "        \n",
    "\n",
    "        elif action == \"DOWN\":\n",
    "            new_loc = (y + 1 if y + 1 < self.height else y, x)\n",
    "            if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "                return new_loc , self.grid[new_loc[0]][new_loc[1]]\n",
    "            else:\n",
    "                return (y, x) , self.grid[y][x]\n",
    "\n",
    "        elif action == \"LEFT\":\n",
    "            new_loc = (y, x - 1 if x - 1 > 0 else 0)\n",
    "            if (self.grid[new_loc[0]][new_loc[1]] != 'x'):\n",
    "                return new_loc , self.grid[new_loc[0]][new_loc[1]]\n",
    "            else:\n",
    "                return (y, x) , self.grid[y][x]\n",
    "    \n",
    "    def transition_probabilities(self, action, state):\n",
    "        y = state[0]\n",
    "        x = state[1]\n",
    "        # returns list of (probability, reward, next_state)\n",
    "        probs = []\n",
    "    \n",
    "        next_state, next_reward = self.reward_of_action(action, (y, x))\n",
    "        probs.append((self.move_prob, next_reward, next_state))\n",
    "        \n",
    "        \n",
    "        disobey_probs = 1 - self.move_prob\n",
    "        \n",
    "        \n",
    "        if action == \"UP\" or action == \"DOWN\":\n",
    "            next_state, next_reward = self.reward_of_action(\"LEFT\", (y, x))\n",
    "            probs.append((disobey_probs / 2, next_reward, next_state))\n",
    "            next_state, next_reward = self.reward_of_action(\"RIGHT\", (y, x))\n",
    "            probs.append((disobey_probs / 2, next_reward, next_state))\n",
    "        \n",
    "        if action == \"LEFT\" or action == \"RIGHT\":\n",
    "            next_state, next_reward = self.reward_of_action(\"UP\", (y, x))\n",
    "            probs.append((disobey_probs / 2, next_reward, next_state))\n",
    "            \n",
    "            next_state, next_reward = self.reward_of_action(\"DOWN\", (y, x))\n",
    "            probs.append((disobey_probs / 2, next_reward, next_state))\n",
    "        \n",
    "        return probs\n",
    "\n",
    "# Smoke test: build the default grid and inspect moves from the top-left corner.\n",
    "g = Gridworld(DEFAULT)\n",
    "g.get_valid_moves((0,0))\n",
    "# g.reward_of_action(\"UP\", (0 ,2))\n",
    "# reward_of_action"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The Bellman equation is defined as follows:\n",
    "\n",
    "\\begin{equation*}\n",
    "V(s) = \\max_a \\left( R(s, a) + \\gamma \\sum_{s'} P(s, a, s')V(s')\\right)\n",
    "\\end{equation*}\n",
    "\n",
    "- V = Value\n",
    "- S = current state\n",
    "- a = Action\n",
    "- R = Reward\n",
    "- gamma = discount factor\n",
    "- s' = prime means next state\n",
    "\n",
    "The environment is stochastic — the action we choose is actually executed only 80% of the time...\n",
    "\n",
    "The value of a square is its immediate reward plus gamma (the discount factor) times the expected value of the next state, taken over all transition probabilities.\n",
    "\n",
    "### Value Iteration Algorithm\n",
    "\n",
    "- Initialise a table V of value estimates for each square with all zeros\n",
    "- Loop over every possible state s\n",
    "    - From state s loop over every possible action a\n",
    "        - Get a list of all (probability, reward, s') transition tuples from state s, action a\n",
    "        - expected_reward = sum of all possible rewards multiplied by their probabilities\n",
    "        - expected_value = look up V[s'] for each possible s', multiply by its probability, sum\n",
    "        - action_value = expected_reward + gamma * expected_value\n",
    "    - set V[s] to the best action_value found\n",
    "    \n",
    "https://github.com/colinskow/move37/blob/master/dynamic_programming/grid_world.py\n",
    "http://localhost:8888/notebooks/Gridworld-with-Q-Learning-Reinforcement-Learning-/Gridworld.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "def best_action_value(grid, V, state):\n",
    "    \n",
    "    v_max = float('-inf')\n",
    "    a_max = None\n",
    "\n",
    "    for action, next_state in grid.get_valid_moves(state):\n",
    "        transition_probabilities = grid.transition_probabilities(action, state)\n",
    "        expected_v = 0\n",
    "        expected_r = 0\n",
    "        v = 0\n",
    "        \n",
    "        # Calculate the summed expected value and reward\n",
    "        for (prob, r, next_state) in transition_probabilities:\n",
    "            expected_v += prob * V[next_state[0], next_state[1]]\n",
    "            expected_r += prob * r\n",
    "        \n",
    "        # Add gamma as discount factor (we care less about future reward than reward now.)\n",
    "        v = expected_r + 0.9 * expected_v\n",
    "\n",
    "        if v > v_max:\n",
    "            v_max = v\n",
    "            a_max = action\n",
    "    \n",
    "    return a_max, v_max"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ 0.20197319,  0.45434235,  0.67095015,  0.90658975,  0.        ],\n",
       "       [-0.04462392,  0.        ,  0.        ,  0.50668304,  0.        ],\n",
       "       [-0.27999523, -0.12936727,  0.07084305,  0.25499121, -0.02825949]])"
      ]
     },
     "execution_count": 74,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def calculate_values(grid, policy):\n",
    "    \"\"\"Run value iteration until convergence and return the value table.\n",
    "\n",
    "    `policy` is the initial (height x width) value array; despite the\n",
    "    name it holds state values, not actions.  Terminal and wall cells\n",
    "    keep value 0 because only `grid.available_states` are backed up.\n",
    "    \"\"\"\n",
    "    max_change = float('inf')\n",
    "\n",
    "    while max_change > 0.0001:\n",
    "        # Bug fix: size the table from the `grid` argument, not the\n",
    "        # global `g` that happened to exist in the notebook.\n",
    "        new_values = np.zeros((grid.height, grid.width))\n",
    "        for state in grid.available_states:\n",
    "            _a_max, v_max = best_action_value(grid, policy, state)\n",
    "            new_values[state[0], state[1]] = v_max\n",
    "\n",
    "        # Bug fix: convergence must track the largest *absolute* change;\n",
    "        # without np.abs a uniformly increasing sweep could stop early.\n",
    "        max_change = np.max(np.abs(policy - new_values))\n",
    "        policy = new_values\n",
    "\n",
    "    return policy\n",
    "\n",
    "g = Gridworld(DEFAULT)\n",
    "policy = np.zeros((g.height, g.width))\n",
    "calculate_values(g, policy)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- https://itnext.io/reinforcement-learning-with-q-tables-5f11168862c8\n",
    "- https://towardsdatascience.com/advanced-dqns-playing-pac-man-with-deep-reinforcement-learning-3ffbd99e0814\n",
    "- https://towardsdatascience.com/advanced-dqns-playing-pac-man-with-deep-reinforcement-learning-3ffbd99e0814\n",
    "- https://medium.com/free-code-camp/how-to-apply-reinforcement-learning-to-real-life-planning-problems-90f8fa3dc0c5\n",
    "- https://towardsdatascience.com/self-learning-ai-agents-part-ii-deep-q-learning-b5ac60c3f47\n",
    "- https://medium.com/@zsalloum/monte-carlo-in-reinforcement-learning-the-easy-way-564c53010511\n",
    "\n",
    "Good Blog Example...\n",
    "https://towardsdatascience.com/reinforcement-learning-rl-101-with-python-e1aa0d37d43b"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
