{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 253,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import cv2\n",
    "from easydict import EasyDict as edict\n",
    "# tqdm_notebook is deprecated (tqdm will remove it in 5.0); import the\n",
    "# replacement under the old name so the rest of the notebook is unchanged.\n",
    "from tqdm.notebook import tqdm as tqdm_notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 254,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the racetrack grid from the spreadsheet (no header row).\n",
    "# Cell values appear to be: -1 = wall, 0 = track, 1 = start line,\n",
    "# 2 = finish line -- inferred from drawPictures; verify against trace11.xls.\n",
    "training_data = pd.read_excel(\"trace11.xls\", header=None)\n",
    "track1 = np.array(training_data, dtype=int)\n",
    "# Shared mutable state: the 32x17 track, the current episode trajectory,\n",
    "# and the track geometry (start columns 3-8, finish rows 0-4 in column 16).\n",
    "data = edict()\n",
    "data.track = track1\n",
    "# NOTE(review): Environment.reset() re-creates this dict with an extra\n",
    "# 'Probs' key that nothing else reads -- confirm the two can be unified.\n",
    "data.episode = edict({'S':[], 'A':[], 'R':[]})\n",
    "data.trackInformation = edict({'startLine': np.arange(3, 9, dtype=int), 'finishLine': np.arange(0, 5, dtype=int), 'shape': (32, 17)})\n",
    "        \n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 255,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Environment():\n",
    "    \"\"\"Racetrack environment on the 32x17 grid stored in data.track.\n",
    "\n",
    "    A state is a length-4 int array [row, col, rowSpeed, colSpeed]; the\n",
    "    car moves up (row decreases) and right (col increases). An action is\n",
    "    a (dRowSpeed, dColSpeed) pair and every transition earns reward -1.\n",
    "    The trajectory is accumulated in data.episode (lists S, A, R).\n",
    "    \"\"\"\n",
    "    def __init__(self, data):\n",
    "        self.data = data\n",
    "\n",
    "    def randomSelect(self, npArr):\n",
    "        # Uniformly pick one element of a 1-D array (used for the start column).\n",
    "        return np.random.choice(npArr)\n",
    "\n",
    "    def reset(self):\n",
    "        # Discard the recorded trajectory before the next episode.\n",
    "        # NOTE(review): the extra 'Probs' list is never read anywhere -- confirm.\n",
    "        self.data.episode = edict({'S':[], 'A':[], 'R':[], 'Probs':[]})\n",
    "\n",
    "    def getNewState(self, state, action):\n",
    "        # One kinematic step: position moves by the current speed, then the\n",
    "        # speed is changed by the chosen acceleration.\n",
    "        newState = state.copy()\n",
    "        newState[0] = state[0] - state[2]\n",
    "        newState[1] = state[1] + state[3]\n",
    "        newState[2] = state[2] + action[0]\n",
    "        newState[3] = state[3] + action[1]\n",
    "        return newState\n",
    "    \n",
    "    def judgeFinishLine(self, state, action):\n",
    "        # True when the move crosses the finish line: the swept column range\n",
    "        # reaches column 16 while the swept row range touches a finish row.\n",
    "        # Only the bounding box of the move is checked, not the exact path.\n",
    "        newState = self.getNewState(state, action)\n",
    "        col = list(np.arange(state[1], newState[1] + 1))\n",
    "        row = list(np.arange(newState[0], state[0] + 1))\n",
    "        finishCol = 16\n",
    "        if finishCol in col:\n",
    "            for i in self.data.trackInformation.finishLine:\n",
    "                if i in row:\n",
    "                    return True\n",
    "        return False\n",
    "    \n",
    "    def judgeRemake(self, state, action):\n",
    "        # True when the move leaves the grid or lands on a wall cell (-1);\n",
    "        # only the destination cell is checked, not the cells passed through.\n",
    "        newState = self.getNewState(state, action)\n",
    "        if newState[0] < 0 or newState[0] > 31 or newState[1] < 0 or newState[1] > 16:\n",
    "            return True\n",
    "        if self.data.track[newState[0]][newState[1]] == -1:\n",
    "            return True\n",
    "        \n",
    "        return False\n",
    "\n",
    "    def start(self):\n",
    "        # Place the car on the bottom row (31) at a random start-line column\n",
    "        # with zero speed, and record that state in the trajectory.\n",
    "        startState = np.zeros(shape = 4, dtype=int)\n",
    "        startState[0:2] = (31, self.randomSelect(self.data.trackInformation.startLine))\n",
    "        self.data.episode.S.append(startState)\n",
    "        return startState\n",
    "    \n",
    "    def step(self, state, action):\n",
    "        # Advance one step, always logging (A, R). S is appended by start()\n",
    "        # on a crash restart and below on a normal move, which keeps the\n",
    "        # S/A/R lists index-aligned for offPolicyMC.\n",
    "        if self.judgeFinishLine(state, action):\n",
    "            # Terminal sentinel; deliberately not appended to S.\n",
    "            newState = np.array([-1, -1, 0, 0])\n",
    "            self.data.episode.A.append(action)\n",
    "            self.data.episode.R.append(-1)\n",
    "        elif self.judgeRemake(state, action):\n",
    "            # Crash: restart from the start line (start() appends to S).\n",
    "            newState = self.start()\n",
    "            self.data.episode.A.append(action)\n",
    "            self.data.episode.R.append(-1)\n",
    "        else:\n",
    "            newState = self.getNewState(state, action)\n",
    "            self.data.episode.S.append(newState)\n",
    "            self.data.episode.A.append(action)\n",
    "            self.data.episode.R.append(-1)\n",
    "        return newState\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 256,
   "metadata": {},
   "outputs": [],
   "source": [
    "class episodeGenerator():\n",
    "    def __init__(self):\n",
    "        pass\n",
    "\n",
    "    def actionMapTo1D(self, action2D):\n",
    "        container = [(0, 0), (0, 1), (0, -1), (1, 0), (1, 1), (1, -1), (-1, 0), (-1, 1), (-1, -1)]\n",
    "        for i in range(9):\n",
    "            if action2D == container[i]:\n",
    "                return i\n",
    "\n",
    "    def actionMapTo2D(self, action1D):\n",
    "        container = [(0, 0), (0, 1), (0, -1), (1, 0), (1, 1), (1, -1), (-1, 0), (-1, 1), (-1, -1)]\n",
    "        return container[int(action1D)]\n",
    "    \n",
    "    def chooseAction(self, state):\n",
    "        choices = []\n",
    "        for i in range(-1, 2):\n",
    "            for j in range(-1, 2):\n",
    "                if state[2]+i < 5 and state[2]+i >= 0 and state[3]+j < 5 and state[3]+j >= 0:\n",
    "                    choices.append((i, j))\n",
    "                    # if i == 1 and j == 1:\n",
    "                    #     choices.append((i, j))\n",
    "                    # if i == 1 and j == 0:\n",
    "                    #     choices.append((i, j))\n",
    "        length = len(choices)\n",
    "        # print(choices[np.random.choice(np.arange(0, length))])\n",
    "        return choices[np.random.choice(np.arange(0, length))]\n",
    "\n",
    "\n",
    "    def generateEpisode(self, env):\n",
    "        state = env.start()\n",
    "        endState = np.array([-1, -1, 0, 0])\n",
    "        while (state == endState).all() == False:\n",
    "            action = self.chooseAction(state)\n",
    "            state = env.step(state, action)\n",
    "            # print(state)\n",
    "        return env"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 257,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def getStateActionProb():\n",
    "    stateActionProbMatrix = np.zeros(shape = (5, 5), dtype=float)\n",
    "    for p in range(5):\n",
    "        for q in range(5):\n",
    "            choices = []\n",
    "            for i in range(-1, 2):\n",
    "                for j in range(-1, 2):\n",
    "                    if p+i < 5 and p+i >= 0 and q+j < 5 and q+j >= 0:\n",
    "                        choices.append((i, j))\n",
    "            stateActionProbMatrix[p][q] = 1.0 / len(choices)\n",
    "    return stateActionProbMatrix\n",
    "\n",
    "stateActionProbMatrix = getStateActionProb()\n",
    "\n",
    "def offPolicyMC(n_episode, gamma = 1):\n",
    "    \"\"\"Off-policy Monte Carlo control with weighted importance sampling.\n",
    "\n",
    "    Behaviour policy: uniform random (per-state probabilities taken from\n",
    "    the module-level stateActionProbMatrix). Target policy: greedy w.r.t.\n",
    "    Q. Saves Q, C and the greedy policy to .npy files and returns the\n",
    "    policy array indexed [row][col][rowSpeed][colSpeed] -> action index.\n",
    "    \"\"\"\n",
    "    # Q starts very low so any visited state-action pair immediately\n",
    "    # dominates unvisited ones when taking the greedy action.\n",
    "    Q = np.zeros(shape = (32, 17, 5, 5, 9))\n",
    "    Q.fill(-1e300)\n",
    "    C = np.zeros(shape = (32, 17, 5, 5, 9))  # cumulative importance weights\n",
    "    policy = np.zeros(shape = (32, 17, 5, 5))\n",
    "    generator = episodeGenerator()\n",
    "    env = Environment(data)\n",
    "    for episode in tqdm_notebook(range(n_episode)):\n",
    "        generator.generateEpisode(env)\n",
    "        G = 0.0  # return accumulated backwards through the episode\n",
    "        W = 1.0  # importance-sampling weight\n",
    "        times = len(env.data.episode.S)\n",
    "        # Walk the trajectory backwards, as in the standard algorithm.\n",
    "        for i in range(times-1, -1, -1):\n",
    "            G = gamma * G + env.data.episode.R[i]\n",
    "            state = env.data.episode.S[i]\n",
    "            action = env.data.episode.A[i]\n",
    "            action1D = generator.actionMapTo1D(action)\n",
    "            bestAction = action1D\n",
    "            # Weighted incremental update: Q += (W / C) * (G - Q).\n",
    "            C[state[0]][state[1]][state[2]][state[3]][action1D] += W\n",
    "            Q[state[0]][state[1]][state[2]][state[3]][action1D] = Q[state[0]][state[1]][state[2]][state[3]][action1D] + \\\n",
    "                W / C[state[0]][state[1]][state[2]][state[3]][action1D] * (G - Q[state[0]][state[1]][state[2]][state[3]][action1D])\n",
    "            # Greedy improvement; starting from action1D with a strict '>'\n",
    "            # deliberately breaks ties in favour of the action just taken.\n",
    "            for actionNumber in range(9):\n",
    "                if Q[state[0]][state[1]][state[2]][state[3]][actionNumber] > Q[state[0]][state[1]][state[2]][state[3]][bestAction]:\n",
    "                    bestAction = actionNumber\n",
    "            policy[state[0]][state[1]][state[2]][state[3]] = bestAction\n",
    "            # If the behaviour action disagrees with the greedy action, the\n",
    "            # importance weight of all earlier steps becomes zero: stop.\n",
    "            if bestAction != action1D:\n",
    "                break\n",
    "            # Target policy is deterministic, so W *= 1 / b(a|s).\n",
    "            W = W / stateActionProbMatrix[state[2]][state[3]]\n",
    "        env.reset()\n",
    "    print(np.min(Q))\n",
    "    np.save(\"Q.npy\", Q)\n",
    "    np.save(\"C.npy\", C)\n",
    "    np.save(\"policy.npy\", policy)\n",
    "    return policy\n",
    "\n",
    "\n",
    "\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 258,
   "metadata": {},
   "outputs": [],
   "source": [
    "def drawPictures(data, state, order, cellSize = 20):\n",
    "    \"\"\"Render the track grid with the car at *state* and save the frame\n",
    "    as ./images/track1/step<order>.jpg.\n",
    "\n",
    "    cellSize: pixel edge length of one grid cell (was hard-coded as 20).\n",
    "    \"\"\"\n",
    "    # Blank white canvas, one cellSize x cellSize square per track cell.\n",
    "    img = np.zeros((cellSize * 32, cellSize * 17, 3), np.uint8)\n",
    "    img.fill(255)\n",
    "    # Colours are BGR (OpenCV convention).\n",
    "    wallColor = (0, 0, 0)       # black\n",
    "    trackColor = (0, 255, 0)    # green\n",
    "    startColor = (255, 0, 0)    # blue\n",
    "    endColor = (0, 0, 255)      # red\n",
    "    stateColor = (128, 0, 128)  # purple: current car position\n",
    "\n",
    "    def cellRect(i, j, color, thickness = 1):\n",
    "        # Draw the square for grid cell (row i, column j).\n",
    "        cv2.rectangle(img, (cellSize * j, cellSize * i), (cellSize * (j + 1), cellSize * (i + 1)), color, thickness=thickness)\n",
    "\n",
    "    # Cell encoding in data.track: -1 wall, 0 track, 1 start, 2 finish.\n",
    "    colorOf = {-1: wallColor, 0: trackColor, 1: startColor, 2: endColor}\n",
    "    for i in range(32):\n",
    "        for j in range(17):\n",
    "            value = data.track[i][j]\n",
    "            if value in colorOf:\n",
    "                cellRect(i, j, colorOf[value])\n",
    "\n",
    "    # Filled square marking the car itself.\n",
    "    cellRect(state[0], state[1], stateColor, thickness = -1)\n",
    "\n",
    "    cv2.imwrite(\"./images/track1/step\" + str(order) + \".jpg\", img)\n",
    "\n",
    "def getVideo():\n",
    "    \"\"\"Stitch ./images/track1/step<i>.jpg frames into output.mp4.\"\"\"\n",
    "    import cv2\n",
    "    import os\n",
    "\n",
    "    # The first frame fixes the video dimensions.\n",
    "    img = cv2.imread('./images/track1/step0.jpg')\n",
    "    fps = 2  # frames per second of the output video\n",
    "    imgInfo = img.shape\n",
    "\n",
    "    # (width, height) as expected by VideoWriter.\n",
    "    size = (imgInfo[1], imgInfo[0])\n",
    "    # 'mp4v' matches the .mp4 container; the previous 'MPEG' fourcc did not.\n",
    "    fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n",
    "\n",
    "    videoWrite = cv2.VideoWriter('output.mp4', fourcc, fps, size)\n",
    "\n",
    "    # Count only the step frames so unrelated files in the directory do\n",
    "    # not inflate the frame count (which previously produced None frames).\n",
    "    files = os.listdir('./images/track1')\n",
    "    out_num = sum(1 for f in files if f.startswith('step') and f.endswith('.jpg'))\n",
    "    for i in range(0, out_num):\n",
    "        fileName = './images/track1/step' + str(i) + '.jpg'\n",
    "        img = cv2.imread(fileName)\n",
    "        if img is None:  # skip a missing or unreadable frame defensively\n",
    "            continue\n",
    "        videoWrite.write(img)\n",
    "\n",
    "    videoWrite.release()\n",
    "    print('finish')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 259,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getBestTrace(policy):\n",
    "    \"\"\"Follow the greedy policy from a random start state and return the\n",
    "    list of visited states, ending just before the finish line is crossed.\n",
    "\n",
    "    On a crash the car restarts from the start line, exactly as during\n",
    "    training. NOTE(review): a policy containing a cycle would make this\n",
    "    loop forever -- confirm training always converges before visualising.\n",
    "    \"\"\"\n",
    "    generator = episodeGenerator()\n",
    "    env = Environment(data)\n",
    "    state = env.start()\n",
    "    bestTrace = [state]\n",
    "    while True:\n",
    "        # policy stores the action as a float index; actionMapTo2D casts it.\n",
    "        action1D = policy[state[0]][state[1]][state[2]][state[3]]\n",
    "        action2D = generator.actionMapTo2D(action1D)\n",
    "\n",
    "        if env.judgeFinishLine(state, action2D):\n",
    "            break\n",
    "        elif env.judgeRemake(state, action2D):\n",
    "            state = env.start()\n",
    "        else:\n",
    "            state = env.getNewState(state, action2D)\n",
    "        bestTrace.append(state)\n",
    "    return bestTrace\n",
    "\n",
    "def visualizer(policy):\n",
    "    \"\"\"Render every state of the greedy trajectory to an image frame,\n",
    "    then assemble the frames into a video.\"\"\"\n",
    "    bestTrace = getBestTrace(policy)\n",
    "    for order, traceState in enumerate(bestTrace):\n",
    "        drawPictures(data, traceState, order)\n",
    "    getVideo()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 260,
   "metadata": {},
   "outputs": [],
   "source": [
    "def runner(n_episode):\n",
    "    \"\"\"Train for n_episode episodes, then visualise the learned policy.\"\"\"\n",
    "    visualizer(offPolicyMC(n_episode))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 261,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\inspur\\AppData\\Local\\Programs\\Python\\Python37\\lib\\site-packages\\ipykernel_launcher.py:22: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "382b5d63de6f4a24addd3ef7c2a8f439",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/10000 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-1e+300\n",
      "[31  4  0  1]\n",
      "[31  5  1  2]\n",
      "[30  7  2  1]\n",
      "[28  8  3  0]\n",
      "[25  8  4  0]\n",
      "[21  8  4  0]\n",
      "[17  8  3  0]\n",
      "[14  8  2  0]\n",
      "[12  8  3  0]\n",
      "[9 8 4 1]\n",
      "[5 9 3 2]\n",
      "[ 2 11  2  2]\n",
      "[ 0 13  2  3]\n",
      "finish\n"
     ]
    }
   ],
   "source": [
    "# Train for 10,000 episodes, then render the greedy trajectory video.\n",
    "runner(10000)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "494899efd6527d56ea7f55c588d0081523a17dc3a9ff1107f3394ad815ff2527"
  },
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
