{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From D:\\Miniconda\\Miniconda1\\lib\\site-packages\\tensorflow\\python\\util\\tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
      "Instructions for updating:\n",
      "Use `tf.global_variables_initializer` instead.\n",
      "5000\n",
      "Loss: 47922.0\n",
      "Epsilon: 0.995062354401\n",
      "10000\n",
      "Loss: 13964.7\n",
      "Epsilon: 0.990149335412\n",
      "15000\n",
      "Loss: 7638.14\n",
      "Epsilon: 0.985260820207\n",
      "20000\n",
      "Loss: 8168.88\n",
      "Epsilon: 0.980396686574\n",
      "25000\n",
      "Loss: 6702.18\n",
      "Epsilon: 0.975556812908\n",
      "30000\n",
      "Loss: 4179.47\n",
      "Epsilon: 0.970741078213\n",
      "35000\n",
      "Loss: 5849.48\n",
      "Epsilon: 0.965949362095\n",
      "40000\n",
      "Loss: 3407.37\n",
      "Epsilon: 0.961181544761\n",
      "45000\n",
      "Loss: 4370.87\n",
      "Epsilon: 0.956437507015\n",
      "50000\n",
      "Loss: 4264.85\n",
      "Epsilon: 0.951717130256\n",
      "55000\n",
      "Loss: 5200.89\n",
      "Epsilon: 0.947020296474\n",
      "60000\n",
      "Loss: 3527.57\n",
      "Epsilon: 0.942346888248\n",
      "65000\n",
      "Loss: 5732.39\n",
      "Epsilon: 0.937696788744\n",
      "70000\n",
      "Loss: 7409.68\n",
      "Epsilon: 0.933069881707\n",
      "75000\n",
      "Loss: 3915.83\n",
      "Epsilon: 0.928466051465\n",
      "80000\n",
      "Loss: 4021.38\n",
      "Epsilon: 0.923885182923\n",
      "85000\n",
      "Loss: 5353.25\n",
      "Epsilon: 0.919327161557\n",
      "90000\n",
      "Loss: 4880.43\n",
      "Epsilon: 0.914791873419\n",
      "95000\n",
      "Loss: 5085.54\n",
      "Epsilon: 0.910279205124\n",
      "100000\n",
      "Loss: 4462.06\n",
      "Epsilon: 0.905789043856\n",
      "105000\n",
      "Loss: 3690.49\n",
      "Epsilon: 0.90132127736\n",
      "110000\n",
      "Loss: 5435.46\n",
      "Epsilon: 0.896875793944\n",
      "115000\n",
      "Loss: 5201.61\n",
      "Epsilon: 0.892452482468\n",
      "120000\n",
      "Loss: 5298.3\n",
      "Epsilon: 0.88805123235\n",
      "125000\n",
      "Loss: 3290.43\n",
      "Epsilon: 0.883671933559\n",
      "130000\n",
      "Loss: 5532.19\n",
      "Epsilon: 0.879314476611\n",
      "135000\n",
      "Loss: 4838.29\n",
      "Epsilon: 0.874978752571\n",
      "140000\n",
      "Loss: 3775.01\n",
      "Epsilon: 0.870664653045\n",
      "145000\n",
      "Loss: 3067.91\n",
      "Epsilon: 0.86637207018\n",
      "150000\n",
      "Loss: 4634.25\n",
      "Epsilon: 0.862100896661\n",
      "155000\n",
      "Loss: 4542.0\n",
      "Epsilon: 0.857851025709\n",
      "160000\n",
      "Loss: 3508.54\n",
      "Epsilon: 0.853622351077\n",
      "165000\n",
      "Loss: 5093.68\n",
      "Epsilon: 0.849414767047\n",
      "170000\n",
      "Loss: 3668.23\n",
      "Epsilon: 0.84522816843\n",
      "175000\n",
      "Loss: 4272.97\n",
      "Epsilon: 0.841062450562\n",
      "180000\n",
      "Loss: 5547.84\n",
      "Epsilon: 0.836917509297\n",
      "185000\n",
      "Loss: 5112.96\n",
      "Epsilon: 0.832793241014\n",
      "190000\n",
      "Loss: 3943.81\n",
      "Epsilon: 0.828689542604\n",
      "195000\n",
      "Loss: 4218.71\n",
      "Epsilon: 0.824606311475\n",
      "200000\n",
      "Loss: 4690.49\n",
      "Epsilon: 0.820543445547\n",
      "205000\n",
      "Loss: 4159.99\n",
      "Epsilon: 0.816500843247\n",
      "210000\n",
      "Loss: 3734.61\n",
      "Epsilon: 0.81247840351\n",
      "215000\n",
      "Loss: 5300.91\n",
      "Epsilon: 0.808476025776\n",
      "220000\n",
      "Loss: 3720.41\n",
      "Epsilon: 0.804493609983\n",
      "225000\n",
      "Loss: 3469.93\n",
      "Epsilon: 0.800531056572\n",
      "230000\n",
      "Loss: 3283.38\n",
      "Epsilon: 0.796588266478\n",
      "235000\n",
      "Loss: 3259.63\n",
      "Epsilon: 0.792665141132\n",
      "240000\n",
      "Loss: 3861.97\n",
      "Epsilon: 0.788761582456\n",
      "245000\n",
      "Loss: 4518.34\n",
      "Epsilon: 0.784877492859\n",
      "250000\n",
      "Loss: 3992.25\n",
      "Epsilon: 0.781012775241\n",
      "255000\n",
      "Loss: 3288.77\n",
      "Epsilon: 0.777167332981\n",
      "260000\n",
      "Loss: 3529.91\n",
      "Epsilon: 0.773341069946\n",
      "265000\n",
      "Loss: 4022.24\n",
      "Epsilon: 0.769533890476\n",
      "270000\n",
      "Loss: 3517.79\n",
      "Epsilon: 0.765745699393\n",
      "275000\n",
      "Loss: 3427.47\n",
      "Epsilon: 0.761976401993\n",
      "280000\n",
      "Loss: 3445.51\n",
      "Epsilon: 0.758225904041\n",
      "285000\n",
      "Loss: 4300.09\n",
      "Epsilon: 0.754494111776\n",
      "290000\n",
      "Loss: 3693.91\n",
      "Epsilon: 0.750780931903\n",
      "295000\n",
      "Loss: 3198.23\n",
      "Epsilon: 0.747086271591\n",
      "300000\n",
      "Loss: 3801.26\n",
      "Epsilon: 0.743410038475\n",
      "305000\n",
      "Loss: 2834.89\n",
      "Epsilon: 0.739752140648\n",
      "310000\n",
      "Loss: 3179.86\n",
      "Epsilon: 0.736112486662\n",
      "315000\n",
      "Loss: 3083.76\n",
      "Epsilon: 0.732490985526\n",
      "320000\n",
      "Loss: 2943.67\n",
      "Epsilon: 0.728887546703\n",
      "325000\n",
      "Loss: 3768.58\n",
      "Epsilon: 0.725302080106\n",
      "330000\n",
      "Loss: 3442.64\n",
      "Epsilon: 0.721734496098\n",
      "335000\n",
      "Loss: 3465.64\n",
      "Epsilon: 0.718184705489\n",
      "340000\n",
      "Loss: 2951.88\n",
      "Epsilon: 0.714652619535\n"
     ]
    }
   ],
   "source": [
    "%matplotlib qt5\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "from collections import deque\n",
    "import random\n",
    "import matplotlib.pyplot as plt\n",
    "class Q_Network:\n",
    "    \"\"\"Deep Q-Network agent for an N_Mesh x N_Mesh grid world with obstacles.\n",
    "\n",
    "    States are one-hot encoded grid cells; the four actions are\n",
    "    0=left, 1=right, 2=down, 3=up.  The agent trains with an\n",
    "    epsilon-greedy policy plus experience replay, then plays greedily.\n",
    "    \"\"\"\n",
    "    N_Mesh=15\n",
    "    State_Number=N_Mesh*N_Mesh  # one state per grid cell\n",
    "    Action_Number=4  # left / right / down / up\n",
    "    Action_List=np.identity(Action_Number)  # one-hot action encodings\n",
    "    State_List=np.identity(State_Number)  # one-hot state encodings\n",
    "    Epsilon=0\n",
    "    Epsilon_Begin=1\n",
    "    Epsilon_Final=0.01\n",
    "    decay_rate=0.000001  # exponential epsilon-decay rate\n",
    "    Step=0\n",
    "    Batch_Number=30  # replay mini-batch size\n",
    "    #Explore = 100000.\n",
    "    Observe=1000  # number of observation steps before the training begins\n",
    "    Store_Memory = deque()  # shadowed by a per-instance copy in __init__\n",
    "    Cost_History=[]  # shadowed by a per-instance copy in __init__\n",
    "    Memory_Size = 5000\n",
    "    # Blocked cells.  NOTE(review): Graph() treats these as 0-based indices\n",
    "    # while some Next_State checks look 1-based -- verify the intended base.\n",
    "    Obstacle=[3,5,16,21,23,27,29,31,33,34,56,57,66,67,78,81,86,87,88,89,103,107,110,118,121,132,133,134,135,146,149,153\n",
    "              ,154,155,156,157,176,180]\n",
    "    Play_Done=None\n",
    "    def __init__(self,Learning_Rate=0.001,Gamma=0.9,Memory_Size=5000):\n",
    "        \"\"\"Build the network and start a TensorFlow session.\n",
    "\n",
    "        Learning_Rate: Adam learning rate.\n",
    "        Gamma: discount factor for future rewards.\n",
    "        Memory_Size: maximum number of transitions kept for replay.\n",
    "        \"\"\"\n",
    "        self.Learning_Rate=Learning_Rate\n",
    "        self.Gamma=Gamma\n",
    "        self.Memory_Size=Memory_Size\n",
    "        # Fix: give every instance its own replay memory and loss history;\n",
    "        # the class-level attributes would be shared between all instances.\n",
    "        self.Store_Memory=deque()\n",
    "        self.Cost_History=[]\n",
    "        self.New_Network()\n",
    "        self.session=tf.InteractiveSession()\n",
    "        # Fix: initialize_all_variables() is deprecated (see the TF warning in\n",
    "        # the cell output); global_variables_initializer() is the replacement.\n",
    "        self.session.run(tf.global_variables_initializer())\n",
    "    #***************************************************************\n",
    "    def New_Network(self):\n",
    "        \"\"\"Build the 3-layer Q-value network, its loss and its optimizer.\"\"\"\n",
    "        self.State_Input=tf.placeholder(tf.float32,[None,self.State_Number])  # one-hot states, Batch x State_Number\n",
    "        self.Action_Input=tf.placeholder(tf.float32,[None,self.Action_Number])  # one-hot actions, Batch x Action_Number\n",
    "        self.Q_Target_OI=tf.placeholder(dtype=tf.float32,shape=[None])  # one target Q value per sample\n",
    "        #Layer1\n",
    "        Layer1_Number=200\n",
    "        w1=tf.Variable(tf.random_normal([self.State_Number,Layer1_Number]))\n",
    "        b1=tf.Variable(tf.zeros([1,Layer1_Number])+0.1)\n",
    "        l1=tf.nn.relu(tf.matmul(self.State_Input,w1)+b1)  # b1 is broadcast over the batch\n",
    "        #Layer2\n",
    "        Layer2_Number=40\n",
    "        w2=tf.Variable(tf.random_normal([Layer1_Number,Layer2_Number]))\n",
    "        b2=tf.Variable(tf.zeros([1,Layer2_Number]))\n",
    "        l2=tf.nn.relu(tf.matmul(l1,w2)+b2)\n",
    "        #Layer3 -- linear output, one Q value per action\n",
    "        Layer3_Number=self.Action_Number\n",
    "        w3=tf.Variable(tf.random_normal([Layer2_Number,Layer3_Number]))\n",
    "        b3=tf.Variable(tf.zeros([1,Layer3_Number]))\n",
    "        self.l3=tf.matmul(l2,w3)+b3  # no activation: l3 is the Batch x Action_Number Q matrix\n",
    "\n",
    "        # Q value of the action actually taken in each sample.\n",
    "        self.Q_Value=tf.reduce_sum(tf.multiply(self.l3,self.Action_Input),reduction_indices=1)\n",
    "        self.Loss=tf.reduce_mean(tf.square(self.Q_Value-self.Q_Target_OI))\n",
    "        self.Optimizer=tf.train.AdamOptimizer(self.Learning_Rate).minimize(self.Loss)\n",
    "        # Greedy action, e.g. output [19,20,297,30] -> argmax is 2 -> choose Action 2.\n",
    "        self.Predict=tf.argmax(self.l3,1)\n",
    "    #***************************************************************\n",
    "    def Select_Action(self,State_Index):\n",
    "        \"\"\"Return an epsilon-greedy action index for the given 0-based state.\"\"\"\n",
    "        Current_State=self.State_List[State_Index:State_Index+1]  # e.g. index 2 -> [[0,0,1,0,...]]\n",
    "        if np.random.uniform()<self.Epsilon:\n",
    "            Choose_Action_Index=np.random.randint(0,self.Action_Number)\n",
    "        else:\n",
    "            Action_QValue_Output=self.session.run(self.l3,feed_dict={self.State_Input:Current_State})\n",
    "            Choose_Action_Index=np.argmax(Action_QValue_Output)\n",
    "        # Anneal epsilon exponentially from Epsilon_Begin towards Epsilon_Final.\n",
    "        self.Epsilon=self.Epsilon_Final+(self.Epsilon_Begin-self.Epsilon_Final)*np.exp(-self.decay_rate*self.Step)\n",
    "        return Choose_Action_Index\n",
    "    #***************************************************************\n",
    "    def Next_State(self,Action_Index,State_Index):\n",
    "        \"\"\"Apply an action; return (next 0-based state, reward, done).\n",
    "\n",
    "        Hitting a wall or an obstacle gives reward -500; reaching the\n",
    "        goal cell ends the episode with reward 100.\n",
    "        \"\"\"\n",
    "        State=State_Index+1  # work in 1-based cell numbers\n",
    "        done=False\n",
    "        if(Action_Index==0):  # move left\n",
    "            if(State%self.N_Mesh==1):  # leftmost column: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            elif((State+1) in self.Obstacle):  # blocked by an obstacle: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            else:\n",
    "                State=State-1\n",
    "                R=0\n",
    "        elif(Action_Index==1):  # move right\n",
    "            if(State==self.State_Number-1):  # step onto the goal cell\n",
    "                State=State+1\n",
    "                done=True\n",
    "                R=100\n",
    "            elif(State%self.N_Mesh==0):  # rightmost column: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            elif((State+1) in self.Obstacle):  # blocked by an obstacle: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            else:\n",
    "                State=State+1\n",
    "                R=0.2\n",
    "        elif(Action_Index==2):  # move down\n",
    "            if(State==self.State_Number-self.N_Mesh):  # step onto the goal cell\n",
    "                State=State+self.N_Mesh\n",
    "                done=True\n",
    "                R=100\n",
    "            elif(self.State_Number+1-self.N_Mesh<=State<=self.State_Number-1):  # bottom row: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            elif((State+self.N_Mesh) in self.Obstacle):\n",
    "                # NOTE(review): unlike the left/right cases the agent moves INTO\n",
    "                # the obstacle here -- confirm whether it should stay instead.\n",
    "                State=State+self.N_Mesh\n",
    "                R=-500\n",
    "            else:\n",
    "                State=State+self.N_Mesh\n",
    "                R=0.2\n",
    "        else:  # move up\n",
    "            if(0<=State<=self.N_Mesh):  # top row: stay\n",
    "                State=State\n",
    "                R=-500\n",
    "            elif((State-self.N_Mesh) in self.Obstacle):\n",
    "                # NOTE(review): moves into the obstacle; see the down case above.\n",
    "                State=State-self.N_Mesh\n",
    "                R=-500\n",
    "            else:\n",
    "                R=0\n",
    "                State=State-self.N_Mesh\n",
    "        return State-1,R,done\n",
    "\n",
    "    def Save_Memory(self,CURRENT_STATET,CHOOSE_ACTION,NEXT_STATE,REWARD,DONE):\n",
    "        \"\"\"Store one one-hot-encoded transition, evicting the oldest past Memory_Size.\"\"\"\n",
    "        Memory_Current_State=self.State_List[CURRENT_STATET:CURRENT_STATET+1]\n",
    "        Memory_Choose_Action=self.Action_List[CHOOSE_ACTION:CHOOSE_ACTION+1]\n",
    "        Memory_Next_State=self.State_List[NEXT_STATE:NEXT_STATE+1]\n",
    "        self.Store_Memory.append((Memory_Current_State,Memory_Choose_Action,Memory_Next_State,REWARD,DONE))\n",
    "        if len(self.Store_Memory)>self.Memory_Size:\n",
    "            self.Store_Memory.popleft()\n",
    "\n",
    "    #***************************************************************\n",
    "    def Experience_Replay(self):\n",
    "        \"\"\"Sample a mini-batch from memory and run one gradient step.\"\"\"\n",
    "        Batch=self.Batch_Number\n",
    "        MiniBatch=random.sample(self.Store_Memory,Batch)\n",
    "        # Stack the sampled transitions into batch matrices (replaces the\n",
    "        # original incremental None/np.vstack accumulation; the unused\n",
    "        # Batch_Done matrix is no longer built).\n",
    "        Batch_Current_State=np.vstack([Sample[0] for Sample in MiniBatch])\n",
    "        Batch_Execute_Action=np.vstack([Sample[1] for Sample in MiniBatch])\n",
    "        Batch_Next_State=np.vstack([Sample[2] for Sample in MiniBatch])\n",
    "        Batch_Reward=np.vstack([Sample[3] for Sample in MiniBatch])\n",
    "        # Q values of every action in the next states.\n",
    "        Q_Next=self.session.run(self.l3,feed_dict={self.State_Input:Batch_Next_State})\n",
    "        Q_Target=[]\n",
    "        for i in range(Batch):\n",
    "            Each_Reward=Batch_Reward[i][0]  # 2D array because of the vstack\n",
    "            # Bellman target for this sample.\n",
    "            # NOTE(review): terminal transitions still bootstrap from the next\n",
    "            # state because the stored done flag is never consulted here.\n",
    "            Each_QValue=Each_Reward+self.Gamma*np.max(Q_Next[i])\n",
    "            if Each_Reward<0:\n",
    "                # Penalised moves use the raw reward as the target.\n",
    "                Q_Target.append(Each_Reward)\n",
    "            else:\n",
    "                Q_Target.append(Each_QValue)\n",
    "        # One optimizer step; only the loss value is kept (the Optimizer op\n",
    "        # returns None, so the old name 'Rew' for it was misleading).\n",
    "        _,Cost,_=self.session.run([self.Q_Value,self.Loss,self.Optimizer],feed_dict={self.State_Input:Batch_Current_State,\n",
    "                                                                                    self.Action_Input:Batch_Execute_Action,\n",
    "                                                                                    self.Q_Target_OI: Q_Target})\n",
    "        self.Cost_History.append(Cost)\n",
    "        if self.Step%5000==0:\n",
    "            print(self.Step)  \n",
    "            print(\"Loss:\", Cost)  \n",
    "            print(\"Epsilon:\", self.Epsilon)   \n",
    "    #**************************************************************\n",
    "    def Train(self):\n",
    "        \"\"\"Run epsilon-greedy episodes until Step exceeds 2,000,000.\n",
    "\n",
    "        Replay training starts once Step passes the Observe threshold.\n",
    "        \"\"\"\n",
    "        Train_Current_State=np.random.randint(0,self.State_Number-1)\n",
    "        self.Epsilon = self.Epsilon_Begin\n",
    "        while True:\n",
    "            Train_Action=self.Select_Action(Train_Current_State)\n",
    "            Train_Next_State,Train_Reward,Train_Done=self.Next_State(Train_Action,Train_Current_State)\n",
    "            self.Save_Memory(Train_Current_State,Train_Action,Train_Next_State,Train_Reward,Train_Done)\n",
    "            if self.Step>self.Observe:\n",
    "                self.Experience_Replay()\n",
    "            if self.Step>2000000:\n",
    "                break\n",
    "            if Train_Done:\n",
    "                # Episode finished: restart from a random state.\n",
    "                Train_Current_State=np.random.randint(0,self.State_Number-1)\n",
    "            else:\n",
    "                Train_Current_State=Train_Next_State\n",
    "                self.Step+=1\n",
    "    #***************************************************************\n",
    "    def Play(self):\n",
    "        \"\"\"Train, then follow the greedy policy from state 0, printing each step.\n",
    "\n",
    "        Gives up after 30 steps; on reaching the goal the path is drawn.\n",
    "        \"\"\"\n",
    "        self.Train()\n",
    "        Start_Room_Index=0\n",
    "        Play_Current_State_Index=0\n",
    "        Play_Step=0\n",
    "        print(\"****************** Agent is in the State\",Start_Room_Index,\"**********************\")\n",
    "        while(Play_Current_State_Index!=self.State_Number-1):\n",
    "            Play_Current_State=self.State_List[Play_Current_State_Index:Play_Current_State_Index+1]\n",
    "            Play_Action=(self.session.run(self.Predict,feed_dict={self.State_Input:Play_Current_State}))\n",
    "            if Play_Action==0:\n",
    "                Play_Current_State_Index=Play_Current_State_Index-1\n",
    "            elif Play_Action==1:\n",
    "                Play_Current_State_Index=Play_Current_State_Index+1\n",
    "            elif Play_Action==2:\n",
    "                Play_Current_State_Index=Play_Current_State_Index+self.N_Mesh\n",
    "            else:\n",
    "                Play_Current_State_Index=Play_Current_State_Index-self.N_Mesh\n",
    "            Play_Step+=1\n",
    "            print(\"Step is \",Play_Step,\"****************** Agent is in the State\", Play_Current_State_Index,\"**********************\")\n",
    "            if(Play_Step>30):\n",
    "                break\n",
    "        if Play_Current_State_Index==self.State_Number-1:\n",
    "            self.Graph()\n",
    "    #***************************************************************        \n",
    "    def Caculate_coordinate(self,State):\n",
    "        \"\"\"Map a 1-based cell number to the (x, y) centre of its grid square.\"\"\"\n",
    "        X_Axis=0.5+(State-1)%self.N_Mesh\n",
    "        Y_Axis=0.5+((State-1)//self.N_Mesh)\n",
    "        return X_Axis,Y_Axis \n",
    "    #***************************************************************  \n",
    "    def Graph(self): \n",
    "        \"\"\"Plot the grid, the obstacles and the greedy path from state 0.\"\"\"\n",
    "        Graph_Step=0\n",
    "        fig=plt.figure()\n",
    "        ax=fig.gca()\n",
    "        ax.set(xlim=[0, self.N_Mesh], ylim=[0, self.N_Mesh])\n",
    "        ax.set_xticks(np.arange(0,(self.N_Mesh+1)))\n",
    "        ax.set_yticks(np.arange(0,(self.N_Mesh+1)))\n",
    "        plt.grid()\n",
    "        # Draw the obstacles once (the original redrew them on every path step).\n",
    "        for J in np.arange(len(self.Obstacle)):\n",
    "            plt.scatter((self.Caculate_coordinate(self.Obstacle[J]+1))[0],(self.Caculate_coordinate(self.Obstacle[J]+1))[1],marker=\"x\")\n",
    "        Graph_Start_Index=0\n",
    "        Graph_Current_Index=Graph_Start_Index\n",
    "        while(Graph_Current_Index!=self.State_Number-1):\n",
    "            Graph_Current_State=self.State_List[Graph_Current_Index:Graph_Current_Index+1]\n",
    "            Graph_Action=(self.session.run(self.Predict,feed_dict={self.State_Input:Graph_Current_State}))\n",
    "            if(Graph_Action==0):\n",
    "                Graph_Next_Index=Graph_Current_Index-1\n",
    "            elif(Graph_Action==1):\n",
    "                Graph_Next_Index=Graph_Current_Index+1\n",
    "            elif(Graph_Action==2):\n",
    "                Graph_Next_Index=Graph_Current_Index+self.N_Mesh\n",
    "            else:\n",
    "                Graph_Next_Index=Graph_Current_Index-self.N_Mesh\n",
    "            Graph_Step+=1\n",
    "            X_State,Y_State=self.Caculate_coordinate(Graph_Current_Index+1)\n",
    "            X_Next_State,Y_Next_State=self.Caculate_coordinate(Graph_Next_Index+1)\n",
    "            # Graph_Step is always >= 1 here (incremented above), so the\n",
    "            # original dead 'if Graph_Step==0' branch has been removed.\n",
    "            plt.scatter(X_State,Y_State)\n",
    "            plt.scatter(X_Next_State,Y_Next_State)\n",
    "            plt.plot([X_State,X_Next_State],[Y_State,Y_Next_State])\n",
    "            Graph_Current_Index=Graph_Next_Index\n",
    "        # Fix: plt.show() previously sat at class-body level and executed once\n",
    "        # at class definition time; show the finished figure here instead.\n",
    "        plt.show()\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Build the agent, train it, then run (and plot) a greedy rollout.\n",
    "    q_network = Q_Network()\n",
    "    q_network.Play()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
