{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "181b66ca",
   "metadata": {},
   "source": [
    "# 蛇棋环境搭建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1590539c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import gym\n",
    "from gym.spaces import Discrete\n",
    "\n",
    "from contextlib import contextmanager\n",
    "import time\n",
    "\n",
    "@contextmanager\n",
    "def timer(name):\n",
    "    \"\"\"Context manager that prints the wall-clock seconds spent inside the block,\n",
    "    prefixed with `name`.\"\"\"\n",
    "    start = time.time()\n",
    "    yield\n",
    "    end = time.time()\n",
    "    print('{} COST:{}'.format(name, end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3626c7e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SnakeEnv(gym.Env):\n",
    "    \"\"\"Snakes-and-ladders board of 100 squares modelled as a gym Env.\n",
    "\n",
    "    Ladders are stored bidirectionally: landing on either end of a pair\n",
    "    teleports the player to the other end. Reaching square 100 exactly\n",
    "    ends the episode with reward 100; every other step costs -1.\n",
    "    \"\"\"\n",
    "    SIZE=100\n",
    "    \n",
    "    def __init__(self, ladder_num, dices):\n",
    "        \"\"\"ladder_num: number of ladder pairs to place on squares 1..99.\n",
    "        dices: list where entry d means a uniform roll in 1..d; each die\n",
    "        is one discrete action.\"\"\"\n",
    "        self.ladder_num = ladder_num\n",
    "        self.dices = dices\n",
    "        self.observation_space = Discrete(self.SIZE+1)\n",
    "        self.action_space = Discrete(len(dices))\n",
    "        \n",
    "        if ladder_num == 0:\n",
    "            self.ladders = {0:0}\n",
    "        else:\n",
    "            # Draw 2*ladder_num distinct squares in 1..99, then pair them up;\n",
    "            # each pair is also stored reversed so the mapping is symmetric.\n",
    "            ladders = set(np.random.randint(1, self.SIZE, size=self.ladder_num*2))\n",
    "            while len(ladders) < self.ladder_num*2:\n",
    "                ladders.add(np.random.randint(1, self.SIZE))\n",
    "\n",
    "            ladders = list(ladders)\n",
    "            ladders = np.array(ladders)\n",
    "            np.random.shuffle(ladders)\n",
    "            ladders = ladders.reshape((self.ladder_num,2))\n",
    "\n",
    "            re_ladders = list()\n",
    "            for i in ladders:\n",
    "                re_ladders.append([i[1],i[0]])\n",
    "\n",
    "            re_ladders = np.array(re_ladders)\n",
    "            # dict() turns an n x 2 array into a mapping first-column -> second-column.\n",
    "            self.ladders = dict(np.append(re_ladders, ladders, axis=0))\n",
    "        print(f'ladders info:{self.ladders} dice ranges:{self.dices}')\n",
    "        self.pos = 1\n",
    "        \n",
    "    def reset(self):\n",
    "        \"\"\"Restart the episode at square 1 and return the initial state.\"\"\"\n",
    "        self.pos = 1\n",
    "        return self.pos\n",
    "    \n",
    "    def step(self, a):\n",
    "        \"\"\"Roll die `a`, move, bounce back if the move overshoots 100,\n",
    "        then apply a ladder if the landing square has one.\n",
    "\n",
    "        Returns (state, reward, done, info); done is 1 only on square 100.\"\"\"\n",
    "        step = np.random.randint(1, self.dices[a]+1)\n",
    "        self.pos += step\n",
    "        if self.pos == 100:\n",
    "            return 100, 100, 1, {}\n",
    "        elif self.pos > 100:\n",
    "            # Overshoot bounces back from 100, e.g. 103 -> 97.\n",
    "            self.pos = 200 - self.pos\n",
    "            \n",
    "        if self.pos in self.ladders:\n",
    "            self.pos = self.ladders[self.pos]\n",
    "        return self.pos, -1, 0, {}\n",
    "    \n",
    "    def reward(self, s):\n",
    "        \"\"\"Model-based reward for landing on state s (used by planning agents).\"\"\"\n",
    "        if s == 100:\n",
    "            return 100\n",
    "        else:\n",
    "            return -1\n",
    "    \n",
    "    # No rendering for this environment.\n",
    "    def render(self):\n",
    "        pass"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "54d43bb5",
   "metadata": {},
   "source": [
    "# 智能体构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fe563c2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class TableAgent(object):\n",
    "    \"\"\"Tabular planning agent: holds the full MDP model (transition tensor,\n",
    "    per-state rewards) plus a deterministic policy and its value tables.\"\"\"\n",
    "    def __init__(self, env):\n",
    "        self.s_len = env.observation_space.n\n",
    "        self.a_len = env.action_space.n\n",
    "        \n",
    "        # Reward of landing on each state s, length s_len.\n",
    "        self.r = [env.reward(s) for s in range(0, self.s_len)]\n",
    "        # Deterministic policy: pi[s] is the action index chosen in state s.\n",
    "        self.pi = np.zeros(self.s_len, dtype=int)\n",
    "        # Transition probabilities P(s'|s,a), shape A x S x S.\n",
    "        self.p = np.zeros([self.a_len, self.s_len, self.s_len], dtype=float)\n",
    "        \n",
    "        # Vectorized ladder lookup so it can be applied to an array of squares at once.\n",
    "        ladder_move = np.vectorize(lambda x: env.ladders[x] if x in env.ladders else x)\n",
    "        \n",
    "        # Model-based initialisation of p[A,S,S] from the known board layout.\n",
    "        for i, dice in enumerate(env.dices):\n",
    "            prob = 1.0 / dice\n",
    "            for src in range(1, 100):\n",
    "                # arange(dice) yields 0..dice-1, so +1 gives the rolls 1..dice.\n",
    "                step = np.arange(dice) + 1\n",
    "                step += src\n",
    "                step = np.piecewise(step, [step>100, step<=100], [lambda x: 200-x, lambda x: x])\n",
    "                step = ladder_move(step)\n",
    "                for dst in step:\n",
    "                    # From pos=src, rolling die i can reach dst; accumulating with +=\n",
    "                    # is correct because several rolls can land on the same dst\n",
    "                    # (via the bounce-back rule or a shared ladder destination).\n",
    "                    self.p[i, src, dst] += prob\n",
    "        \n",
    "        # src only runs up to 99, so make state 100 explicitly absorbing.\n",
    "        self.p[:, 100, 100] = 1\n",
    "        self.value_pi = np.zeros((self.s_len))\n",
    "        self.value_q = np.zeros((self.s_len, self.a_len))\n",
    "        self.gamma = 0.8\n",
    "        \n",
    "        \n",
    "    def play(self, state):\n",
    "        \"\"\"Return the policy's action for the given state.\"\"\"\n",
    "        return self.pi[state]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "53ff3205",
   "metadata": {},
   "source": [
    "# 策略评估（reward计算）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "99d983da",
   "metadata": {},
   "outputs": [],
   "source": [
    "def eval_game(env, agent):\n",
    "    state = env.reset()\n",
    "    total_reward = 0\n",
    "    state_action = []\n",
    "    \n",
    "    while True:\n",
    "        act = agent.play(state)\n",
    "        state_action.append((state,act))\n",
    "        state, reward, done, _ = env.step(act)\n",
    "        total_reward += reward\n",
    "        if done:\n",
    "            break\n",
    "    \n",
    "    return total_reward, state_action"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c739c9c1",
   "metadata": {},
   "source": [
    "# 算法"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4eff7d1",
   "metadata": {},
   "source": [
    "## 1. 策略迭代"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "03f3cb77",
   "metadata": {},
   "outputs": [],
   "source": [
    "class PolicyIteration(object):\n",
    "    \n",
    "    dice = [3,6]\n",
    "    \n",
    "    def policy_evaluation(self, agent, max_iter=-1):\n",
    "        iteration = 0\n",
    "        while True:\n",
    "            iteration += 1\n",
    "            new_value_pi = agent.value_pi.copy()\n",
    "            # 遍历所有的state 1~100  (s.len=101)\n",
    "            for i in range(1, agent.s_len):\n",
    "                ac = agent.pi[i]\n",
    "                \n",
    "                for j in range(0, agent.a_len):\n",
    "                    # 选择确定性策略的action\n",
    "                    if ac != j:\n",
    "                        continue\n",
    "                    transition = agent.p[ac, i, :]\n",
    "                    value_sa = np.dot(transition, agent.r + agent.gamma * agent.value_pi)\n",
    "                    # 放在j循环外部会报错:UnboundLocalError 因为跳过action无法算value_sa的值。\n",
    "                    # 未求得value_sa就用该值就会报错(UnboundLocalError)\n",
    "                    new_value_pi[i] = value_sa\n",
    "            \n",
    "            diff = np.sqrt(np.sum(np.power(agent.value_pi - new_value_pi, 2)))\n",
    "            # 判断是否收敛\n",
    "            if diff < 1e-6:\n",
    "                print('policy evaluation proceed {} iters.'.format(iteration))\n",
    "                break\n",
    "            else:\n",
    "                agent.value_pi = new_value_pi\n",
    "            if iteration == max_iter:\n",
    "                print('policy evaluation proceed {} iters.'.format(iteration))\n",
    "                break\n",
    "    \n",
    "    \n",
    "    def policy_improvement(self, agent):\n",
    "        new_policy = np.zeros_like(agent.pi)\n",
    "        for i in range(1, agent.s_len):\n",
    "            for j in range(0, agent.a_len):\n",
    "                transition = agent.p[j, i, :]\n",
    "                agent.value_q[i,j] = np.dot(transition, agent.r + agent.gamma * agent.value_pi)\n",
    "            # update policy\n",
    "            max_act = np.argmax(agent.value_q[i,:])\n",
    "            # 选择使value_q最大的action\n",
    "            new_policy[i] = max_act\n",
    "        \n",
    "        # 如果没有更新（新策略和旧策略一致），返回False；如果不相等就赋值新策略/优化策略后，返回True\n",
    "        if np.all(np.equal(new_policy, agent.pi)):\n",
    "            return False\n",
    "        else:\n",
    "            agent.pi = new_policy\n",
    "            return True\n",
    "    \n",
    "    \n",
    "    def policy_iteration(self, agent, max_iter=-1):\n",
    "        iteration = 0\n",
    "        with timer('Timer PolicyIter'):\n",
    "            while True:\n",
    "                iteration += 1\n",
    "                with timer('Timer PolicyEval'):\n",
    "                    # 通过迭代求得agent.value_pi 准确估计值函数\n",
    "                    self.policy_evaluation(agent, max_iter)\n",
    "                with timer('Timer PolicyImprove'):\n",
    "                    # 获得最优策略\n",
    "                    ret = self.policy_improvement(agent)\n",
    "                if not ret:\n",
    "                    break\n",
    "        print('Iter {} rounds converge'.format(iteration))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "605d0dab",
   "metadata": {},
   "outputs": [],
   "source": [
    "def policy_iteration_demo(env):\n",
    "    \"\"\"Train a TableAgent on env with policy iteration, then print the\n",
    "    learned policy and the outcome of one evaluation episode.\"\"\"\n",
    "    table_agent = TableAgent(env)\n",
    "    algo = PolicyIteration()\n",
    "    algo.policy_iteration(table_agent)\n",
    "    print('agent.pi={}'.format(table_agent.pi))\n",
    "    reward_sum, trajectory = eval_game(env, table_agent)\n",
    "    print('total_reward={0}, state_action={1}'.format(reward_sum, trajectory))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9366d9d2",
   "metadata": {},
   "source": [
    "## 2. 价值迭代"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "04bf6ce5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def value_iteration(agent, max_iter=-1):\n",
    "    \"\"\"Value iteration: Bellman-optimality backups, then greedy policy extraction.\n",
    "\n",
    "    Iterates until the L2 change of the value function falls below 1e-6,\n",
    "    or for at most max_iter sweeps (max_iter=-1 disables the cap), then\n",
    "    writes the greedy policy into agent.pi.\"\"\"\n",
    "    iteration = 0\n",
    "    with timer('Timer ValueIter'):\n",
    "        while True:\n",
    "            iteration += 1\n",
    "            new_value_pi = agent.value_pi.copy()\n",
    "            for i in range(1, agent.s_len):\n",
    "                # V(s) = max_a sum_s' P(s'|s,a) * (r(s') + gamma * V(s'))\n",
    "                value_sas = []\n",
    "                for j in range(0, agent.a_len):\n",
    "                    value_sa = np.dot(agent.p[j,i,:], agent.r + agent.gamma * agent.value_pi)\n",
    "                    value_sas.append(value_sa)\n",
    "                new_value_pi[i] = max(value_sas)\n",
    "\n",
    "            diff = np.sqrt(np.sum(np.power(agent.value_pi - new_value_pi, 2)))\n",
    "            if diff < 1e-6:\n",
    "                break\n",
    "            else:\n",
    "                agent.value_pi = new_value_pi\n",
    "            if iteration == max_iter:\n",
    "                break\n",
    "        print('Iter {} rounds converge'.format(iteration))\n",
    "        # Extract the greedy policy from the converged value function.\n",
    "        for i in range(1, agent.s_len):\n",
    "            for j in range(0, agent.a_len):\n",
    "                agent.value_q[i,j] = np.dot(agent.p[j,i,:], agent.r + agent.gamma * agent.value_pi)\n",
    "            agent.pi[i] = np.argmax(agent.value_q[i,:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "0c34641f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compare policy iteration against value iteration on the same board.\n",
    "def policy_vs_value_demo(env):\n",
    "    \"\"\"Run both planning algorithms on env and print each resulting policy\n",
    "    together with one evaluation episode.\"\"\"\n",
    "    policy_agent = TableAgent(env)\n",
    "    value_agent = TableAgent(env)\n",
    "    \n",
    "    algo = PolicyIteration()\n",
    "    algo.policy_iteration(policy_agent)\n",
    "    print('agent.pi={}'.format(policy_agent.pi))\n",
    "    reward_sum, trajectory = eval_game(env, policy_agent)\n",
    "    print('total_reward={0}, state_action={1}'.format(reward_sum, trajectory))\n",
    "    \n",
    "    value_iteration(value_agent)\n",
    "    print('agent.pi={}'.format(value_agent.pi))\n",
    "    reward_sum, trajectory = eval_game(env, value_agent)\n",
    "    print('total_reward={0}, state_action={1}'.format(reward_sum, trajectory))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c472bba8",
   "metadata": {},
   "source": [
    "## 3. 泛化迭代"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ea257984",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generalized policy iteration versus the two pure baselines.\n",
    "def generalized_policy_compare(env):\n",
    "    \"\"\"Run the two baseline algorithms, then a mixed scheme (a few cheap\n",
    "    value-iteration sweeps followed by one policy-iteration round).\"\"\"\n",
    "    policy_vs_value_demo(env)\n",
    "    \n",
    "    gener_agent = TableAgent(env)\n",
    "    \n",
    "    with timer('Timer GeneralizedIter'):\n",
    "        # Warm-start the value function, then finish with policy iteration.\n",
    "        value_iteration(gener_agent, 10)\n",
    "        algo = PolicyIteration()\n",
    "        algo.policy_iteration(gener_agent, 1)\n",
    "    print('agent.pi={}'.format(gener_agent.pi))\n",
    "    reward_sum, trajectory = eval_game(env, gener_agent)\n",
    "    print('total_reward={0}, state_action={1}'.format(reward_sum, trajectory))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "f2b09cce",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ladders info:{27: 4, 23: 9, 22: 91, 80: 63, 55: 15, 20: 24, 58: 33, 50: 16, 89: 41, 13: 48, 4: 27, 9: 23, 91: 22, 63: 80, 15: 55, 24: 20, 33: 58, 16: 50, 41: 89, 48: 13} dice ranges:[3, 6]\n",
      "policy evaluation proceed 94 iters.\n",
      "Timer PolicyEval COST:0.1179966926574707\n",
      "Timer PolicyImprove COST:0.0030281543731689453\n",
      "policy evaluation proceed 1 iters.\n",
      "Timer PolicyEval COST:0.0\n",
      "Timer PolicyImprove COST:0.003000497817993164\n",
      "Timer PolicyIter COST:0.12502598762512207\n",
      "Iter 2 rounds converge\n",
      "agent.pi=[0 1 1 1 1 1 0 0 0 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 0 0 0 1 1 1 1 1 1 0 0 0 1 1 0 0 0 0 0 0 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0]\n",
      "total_reward=85, state_action=[(1, 1), (7, 0), (23, 0), (20, 0), (21, 0), (9, 1), (11, 1), (50, 1), (15, 1), (17, 1), (24, 1), (30, 1), (36, 1), (89, 1), (90, 1), (96, 1)]\n",
      "Iter 94 rounds converge\n",
      "Timer ValueIter COST:0.2518918514251709\n",
      "agent.pi=[0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 0 0 0 1 1 1 1 1 1 0 0 0 1 1 1 1 1 0 0 1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n",
      " 0 1 1 1 1 1 1 1 1 0 1 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0]\n",
      "total_reward=58, state_action=[(1, 1), (3, 1), (8, 1), (10, 0), (48, 0), (51, 1), (52, 1), (15, 1), (24, 1), (26, 1), (28, 1), (29, 1), (58, 1), (59, 1), (62, 0), (65, 1), (69, 1), (73, 1), (78, 1), (79, 1), (81, 1), (84, 1), (86, 1), (90, 1), (22, 0), (20, 0), (9, 1), (55, 0), (33, 1), (35, 1), (40, 0), (43, 1), (45, 1), (13, 1), (18, 1), (21, 0), (20, 0), (21, 0), (20, 0), (91, 1), (94, 1), (99, 0), (99, 0)]\n",
      "Iter 10 rounds converge\n",
      "Timer ValueIter COST:0.025992631912231445\n",
      "policy evaluation proceed 1 iters.\n",
      "Timer PolicyEval COST:0.0010008811950683594\n",
      "Timer PolicyImprove COST:0.0029985904693603516\n",
      "policy evaluation proceed 1 iters.\n",
      "Timer PolicyEval COST:0.0010044574737548828\n",
      "Timer PolicyImprove COST:0.0029969215393066406\n",
      "Timer PolicyIter COST:0.008000850677490234\n",
      "Iter 2 rounds converge\n",
      "Timer GeneralizedIter COST:0.03399348258972168\n",
      "agent.pi=[0 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 0 0 0 1 1 1 1 1 1 0 0 0 1 1 0 0 1 0 0 1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0]\n",
      "total_reward=86, state_action=[(1, 1), (27, 1), (30, 1), (34, 1), (38, 0), (89, 1), (95, 1), (96, 1), (99, 0), (99, 0), (99, 0), (98, 0), (99, 0), (99, 0), (98, 0)]\n"
     ]
    }
   ],
   "source": [
    "if __name__ == '__main__':\n",
    "    # Policy iteration demos (the two envs differ in ladder count).\n",
    "#     env1 = SnakeEnv(0,[3,6])\n",
    "    env2 = SnakeEnv(10,[3,6])\n",
    "#     policy_iteration_demo(env1)\n",
    "#     policy_iteration_demo(env2)\n",
    "    \n",
    "    # Policy iteration vs value iteration comparison.\n",
    "#     policy_vs_value_demo(env2)\n",
    "    \n",
    "    # Generalized iteration comparison (also runs both baselines).\n",
    "    generalized_policy_compare(env2)\n",
    "    "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
