{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# number of non-terminal states\n",
    "N_STATES = 19\n",
    "\n",
    "# all non-terminal states, labelled 1..19 (0 and N_STATES + 1 are terminal)\n",
    "STATES = np.arange(1, N_STATES + 1)\n",
    "\n",
    "# every episode starts from the middle state\n",
    "START_STATE = 10\n",
    "\n",
    "# two terminal states\n",
    "# an action leading to the left terminal state has reward -1\n",
    "# an action leading to the right terminal state has reward 1\n",
    "END_STATES = [0, N_STATES + 1]\n",
    "\n",
    "# true state values, computed analytically from the Bellman equation;\n",
    "# the terminal entries (index 0 and N_STATES + 1) are fixed at 0\n",
    "TRUE_VALUE = np.arange(-20, 22, 2) / 20.0\n",
    "TRUE_VALUE[0] = TRUE_VALUE[N_STATES + 1] = 0.0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# base class for lambda-based algorithms in this chapter\n",
    "# In this example, we use the simplest linear feature function, state aggregation.\n",
    "# And we use exact 19 groups, so the weights for each group is exact the value for that state\n",
    "class ValueFunction:\n",
    "    # @rate: lambda, as it's a keyword in python, so I call it rate\n",
    "    # @stepSize: alpha, step size for update\n",
    "    def __init__(self, rate, step_size):\n",
    "        self.rate = rate\n",
    "        self.step_size = step_size\n",
    "        self.weights = np.zeros(N_STATES + 2)\n",
    "\n",
    "    # the state value is just the weight\n",
    "    def value(self, state):\n",
    "        return self.weights[state]\n",
    "\n",
    "    # feed the algorithm with new observation\n",
    "    # derived class should override this function\n",
    "    def learn(self, state, reward):\n",
    "        return\n",
    "\n",
    "    # initialize some variables at the beginning of each episode\n",
    "    # must be called at the very beginning of each episode\n",
    "    # derived class should override this function\n",
    "    def new_episode(self):\n",
    "        return\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Off-line lambda-return algorithm:\n",
    "# record the whole trajectory of an episode and, once the episode has\n",
    "# ended, move every visited state's value toward its lambda-return.\n",
    "class OffLineLambdaReturn(ValueFunction):\n",
    "    # @rate: lambda; @step_size: alpha, step size for the update\n",
    "    def __init__(self, rate, step_size):\n",
    "        ValueFunction.__init__(self, rate, step_size)\n",
    "        # To accelerate learning, set a truncate value for power of lambda:\n",
    "        # n-step returns whose lambda weight falls below it are discarded\n",
    "        self.rate_truncate = 1e-3\n",
    "    def new_episode(self):\n",
    "        # initialize the trajectory\n",
    "        self.trajectory = [START_STATE]\n",
    "        # only need to track the last reward in one episode, as all others are 0\n",
    "        self.reward = 0.0\n",
    "    \n",
    "    def learn(self, state, reward):\n",
    "        # add the new state to the trajectory\n",
    "        self.trajectory.append(state)\n",
    "        if state in END_STATES:\n",
    "            # start off-line learning once the episode ends\n",
    "            self.reward = reward\n",
    "            # T is the time step at which the episode terminated\n",
    "            self.T = len(self.trajectory) - 1\n",
    "            self.off_line_learn()\n",
    "\n",
    "    # get the n-step return G_{t:t+n} from the given time\n",
    "    def n_step_return_from_time(self, n, time):\n",
    "        # gamma is always 1 and rewards are zero except for the last reward,\n",
    "        # so the formula simplifies to: bootstrap from the value of the state\n",
    "        # n steps ahead, plus the terminal reward if reached within n steps\n",
    "        end_time = min(time + n, self.T)\n",
    "        returns = self.value(self.trajectory[end_time])\n",
    "        if end_time == self.T:\n",
    "            returns += self.reward\n",
    "        return returns\n",
    "\n",
    "    # get the lambda-return from the given time:\n",
    "    # G_t^lambda = (1 - lambda) * sum_{n>=1} lambda^(n-1) * G_{t:t+n}\n",
    "    #              + lambda^(T-t-1) * G_t\n",
    "    def lambda_return_from_time(self, time):\n",
    "        returns = 0.0\n",
    "        lambda_power = 1\n",
    "        for n in range(1, self.T - time):\n",
    "            returns += lambda_power * self.n_step_return_from_time(n, time)\n",
    "            lambda_power *= self.rate\n",
    "            if lambda_power < self.rate_truncate:\n",
    "                # If the power of lambda has been too small, discard all the following sequences\n",
    "                break\n",
    "        returns *= 1 - self.rate\n",
    "        # tail term lambda^(T-t-1) * G_t; the full return G_t equals the\n",
    "        # terminal reward here (gamma = 1, intermediate rewards are all 0)\n",
    "        if lambda_power >= self.rate_truncate:\n",
    "            returns += lambda_power * self.reward\n",
    "        return returns\n",
    "\n",
    "    # perform off-line learning at the end of an episode\n",
    "    def off_line_learn(self):\n",
    "        for time in range(self.T):\n",
    "            # update for each state in the trajectory\n",
    "            state = self.trajectory[time]\n",
    "            # semi-gradient update toward the lambda-return target\n",
    "            delta = self.lambda_return_from_time(time) - self.value(state)\n",
    "            delta *= self.step_size\n",
    "            self.weights[state] += delta\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TD(lambda) with accumulating eligibility traces\n",
    "class TemporalDifferenceLambda(ValueFunction):\n",
    "    def __init__(self, rate, step_size):\n",
    "        ValueFunction.__init__(self, rate, step_size)\n",
    "        self.new_episode()\n",
    "\n",
    "    def new_episode(self):\n",
    "        \"\"\"Reset the trace and put the agent back at the start state.\"\"\"\n",
    "        # accumulating trace, one entry per state (terminals included)\n",
    "        self.eligibility = np.zeros(N_STATES + 2)\n",
    "        self.last_state = START_STATE\n",
    "\n",
    "    def learn(self, state, reward):\n",
    "        \"\"\"Apply one TD(lambda) backup for the transition last_state -> state.\"\"\"\n",
    "        # decay all traces by gamma * lambda (gamma is 1 here), then bump\n",
    "        # the trace of the state we just left (accumulating trace)\n",
    "        self.eligibility *= self.rate\n",
    "        self.eligibility[self.last_state] += 1\n",
    "        # one-step TD error with gamma = 1\n",
    "        td_error = reward + self.value(state) - self.value(self.last_state)\n",
    "        self.weights += self.step_size * td_error * self.eligibility\n",
    "        self.last_state = state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# simulate one episode of the 19-state random walk, feeding every\n",
    "# transition to @value_function as it happens\n",
    "def random_walk(value_function):\n",
    "    value_function.new_episode()\n",
    "    state = START_STATE\n",
    "    while state not in END_STATES:\n",
    "        # move left or right with equal probability\n",
    "        next_state = state + np.random.choice([-1, 1])\n",
    "        # the reward is nonzero only when a terminal state is reached:\n",
    "        # -1 on the left end, +1 on the right end\n",
    "        if next_state in END_STATES:\n",
    "            reward = 1 if next_state == N_STATES + 1 else -1\n",
    "        else:\n",
    "            reward = 0\n",
    "        value_function.learn(next_state, reward)\n",
    "        state = next_state"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# general plot framework\n",
    "# @value_function_generator: callable building a fresh value function\n",
    "#                             from (lambda, alpha)\n",
    "# @runs: number of independent runs to average over\n",
    "# @lambdas: the lambda values to compare\n",
    "# @alphas: one sequence of step sizes per lambda\n",
    "def parameter_sweep(value_function_generator, runs, lambdas, alphas):\n",
    "    # play 10 episodes for each parameter setting in every run\n",
    "    episodes = 10\n",
    "    # accumulated RMS error for every (lambda, alpha) pair\n",
    "    errors = [np.zeros(len(alpha_row)) for alpha_row in alphas]\n",
    "    for run in tqdm(range(runs)):\n",
    "        for lam_idx, rate in enumerate(lambdas):\n",
    "            for alpha_idx, alpha in enumerate(alphas[lam_idx]):\n",
    "                value_function = value_function_generator(rate, alpha)\n",
    "                for episode in range(episodes):\n",
    "                    random_walk(value_function)\n",
    "                    estimates = np.asarray([value_function.value(state) for state in STATES])\n",
    "                    # RMS error over the non-terminal states after this episode\n",
    "                    errors[lam_idx][alpha_idx] += np.sqrt(np.mean((estimates - TRUE_VALUE[1: -1]) ** 2))\n",
    "\n",
    "    # average over runs and episodes\n",
    "    for error in errors:\n",
    "        error /= episodes * runs\n",
    "\n",
    "    for i in range(len(lambdas)):\n",
    "        plt.plot(alphas[i], errors[i], label='lambda = ' + str(lambdas[i]))\n",
    "    plt.xlabel('alpha')\n",
    "    plt.ylabel('RMS error')\n",
    "    plt.legend()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Figure 12.3: Off-line lambda-return algorithm on the 19-state random walk\n",
    "def figure_12_3():\n",
    "    lambdas = [0.0, 0.4, 0.8, 0.9, 0.95, 0.975, 0.99, 1]\n",
    "    # larger lambdas require smaller step sizes to remain stable\n",
    "    alphas = [np.arange(0, 1.1, 0.1) for _ in range(5)]\n",
    "    alphas += [np.arange(0, 0.55, 0.05),\n",
    "               np.arange(0, 0.22, 0.02),\n",
    "               np.arange(0, 0.11, 0.01)]\n",
    "    parameter_sweep(OffLineLambdaReturn, 50, lambdas, alphas)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█▍        | 7/50 [00:27<02:48,  3.92s/it]"
     ]
    }
   ],
   "source": [
    "figure_12_3()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# True online TD(lambda) algorithm (Sutton & Barto, 2nd ed., true online TD(lambda)).\n",
    "# Uses dutch-style eligibility traces; with linear features it reproduces the\n",
    "# on-line lambda-return algorithm exactly while staying fully incremental.\n",
    "# The original cell was an empty stub inheriting the no-op learn/new_episode,\n",
    "# so it silently learned nothing; this implements the standard update.\n",
    "class TrueOnlineTemporalDifferenceLambda(ValueFunction):\n",
    "    # @rate: lambda; @step_size: alpha, step size for the update\n",
    "    def __init__(self, rate, step_size):\n",
    "        ValueFunction.__init__(self, rate, step_size)\n",
    "        self.new_episode()\n",
    "\n",
    "    def new_episode(self):\n",
    "        # dutch-style eligibility trace, one entry per state\n",
    "        self.eligibility = np.zeros(N_STATES + 2)\n",
    "        # start from the middle state\n",
    "        self.last_state = START_STATE\n",
    "        # V_old of the true online update: the value of the previous state\n",
    "        # as it was before the most recent weight update\n",
    "        self.old_state_value = 0.0\n",
    "\n",
    "    def learn(self, state, reward):\n",
    "        # state values under the current weights\n",
    "        last_state_value = self.value(self.last_state)\n",
    "        state_value = self.value(state)\n",
    "        # one-step TD error, gamma = 1\n",
    "        delta = reward + state_value - last_state_value\n",
    "        # dutch trace update: decay by lambda (gamma = 1), then add the\n",
    "        # state-aggregation feature of last_state with the dutch correction\n",
    "        dutch = 1 - self.step_size * self.rate * self.eligibility[self.last_state]\n",
    "        self.eligibility *= self.rate\n",
    "        self.eligibility[self.last_state] += dutch\n",
    "        # true online weight update\n",
    "        self.weights += self.step_size * (delta + last_state_value - self.old_state_value) * self.eligibility\n",
    "        self.weights[self.last_state] -= self.step_size * (last_state_value - self.old_state_value)\n",
    "        self.old_state_value = state_value\n",
    "        self.last_state = state\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
