{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "from ple import PLE\n",
    "from ple.games.flappybird import FlappyBird\n",
    "import torch.nn as nn\n",
    "from collections import deque\n",
    "import torch.nn.functional as F\n",
    "from torchvision import transforms"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Initialization of all hyperparameters used throughout training.\n",
    "\"\"\"\n",
    "\n",
    "# Replay Memory Capacity\n",
    "CapacityReplayMemory = 100000\n",
    "# epsilon for the epsilon-greedy exploration policy\n",
    "EPSILON = 0.1\n",
    "# minibatch size sampled from the replay memory\n",
    "BATCH_SIZE = 32\n",
    "# discount factor for future rewards\n",
    "GAMMA = 0.99\n",
    "# number of actions in Flappy Bird (flap / do nothing)\n",
    "N_ACTION = 2\n",
    "# number of training episodes\n",
    "N_EPISODES = 1000\n",
    "# maximum number of environment steps per episode\n",
    "T = 1000\n",
    "USE_CUDA = torch.cuda.is_available()\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DQN(nn.Module):\n",
    "    \"\"\"\n",
    "    DQN: the Q-network.\n",
    "\n",
    "    Input is the 8-dimensional game-state vector of Flappy Bird;\n",
    "    output is one raw Q-value per action (2 actions). No conv layers\n",
    "    are needed because the observation is a state vector, not pixels.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_channels = 8, n_actions = 2):\n",
    "        super(DQN, self).__init__()\n",
    "        # Simple 4-layer MLP: 8 -> 32 -> 128 -> 256 -> n_actions.\n",
    "        self.fc1 = nn.Linear(in_channels, 32)\n",
    "        self.fc2 = nn.Linear(32, 128)\n",
    "        self.fc3 = nn.Linear(128, 256)\n",
    "        self.fc4 = nn.Linear(256, n_actions)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Raw Q-values are returned (no softmax): DQN regresses\n",
    "        # action values directly.\n",
    "        x = F.relu(self.fc1(x))\n",
    "        x = F.relu(self.fc2(x))\n",
    "        x = F.relu(self.fc3(x))\n",
    "        return self.fc4(x)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "ReplayMemory = deque(maxlen=CapacityReplayMemory)\n",
    "def sampleFromReplayMemory(batch_size):\n",
    "    \"\"\"Draw a random minibatch of transitions from the replay memory.\n",
    "\n",
    "    Returns five numpy arrays (states, actions, rewards, next states,\n",
    "    done flags), each holding at most batch_size entries; fewer when\n",
    "    the memory itself holds fewer transitions.\n",
    "    \"\"\"\n",
    "    chosen = np.random.permutation(len(ReplayMemory))[:batch_size]\n",
    "    columns = [[], [], [], [], []]\n",
    "    for i in chosen:\n",
    "        transition = ReplayMemory[i]\n",
    "        for column, field in zip(columns, transition):\n",
    "            column.append(field)\n",
    "    return [np.array(column) for column in columns]\n",
    "\n",
    "\n",
    "\n",
    "def epsilon_greedy(q_values, epsilon, n_action):\n",
    "    \"\"\"Select an action index with the epsilon-greedy policy.\n",
    "\n",
    "    With probability 1 - epsilon take the greedy action (argmax of\n",
    "    q_values); otherwise take a uniformly random action. Always\n",
    "    returns a plain int (the original returned a 0-d numpy array in\n",
    "    the greedy branch and a Python int in the random branch).\n",
    "    \"\"\"\n",
    "    if np.random.uniform(0, 1) < 1 - epsilon:\n",
    "        return int(q_values.detach().cpu().argmax().item())\n",
    "    return int(np.random.randint(0, n_action))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global container recording the total reward of every training episode\n",
    "Reward_Recorder = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_observation(env):\n",
    "    \"\"\"Return the current game state as a flat numpy array.\n",
    "\n",
    "    PLE exposes the Flappy Bird state as a dict of 8 scalar features;\n",
    "    only the values are kept, in the dict's iteration order.\n",
    "    \"\"\"\n",
    "    state = env.getGameState()\n",
    "    return np.array(list(state.values()))\n",
    "\n",
    "\n",
    "def train_DQN(\n",
    "    env:PLE,\n",
    "    target_q_net:DQN,\n",
    "    q_net:DQN,\n",
    "    batch_size,\n",
    "    gamma,\n",
    "    replayMemory:deque,\n",
    "    n_episodes,\n",
    "    epsilon,\n",
    "    n_action,\n",
    "    optimizer,\n",
    "    use_cuda = True,\n",
    "    T = 1000,\n",
    "    renew_steps = 1000,\n",
    "    ):\n",
    "    \"\"\"Train q_net with the DQN algorithm on the PLE environment.\n",
    "\n",
    "    Each environment step appends one transition to replayMemory,\n",
    "    samples a minibatch, and performs one gradient step on the\n",
    "    Bellman error. Every renew_steps global steps the target network\n",
    "    is refreshed from q_net via a checkpoint file on disk; progress\n",
    "    is printed every 10 episodes, and a model checkpoint plus the\n",
    "    reward history is saved every 100 episodes.\n",
    "    \"\"\"\n",
    "    \n",
    "    best_reward = 0\n",
    "    # transform = transforms.Compose([\n",
    "\t#     transforms.ToPILImage(),\n",
    "    #     transforms.Grayscale(),\n",
    "\t#     transforms.Resize((110, 84)),\n",
    "\t#     transforms.RandomCrop((84, 84)),\n",
    "\t#     transforms.ToTensor()\n",
    "    # ])\n",
    "\n",
    "    if use_cuda:\n",
    "        q_net.cuda()\n",
    "        target_q_net.cuda()\n",
    "    step = 0\n",
    "    # target_q_net.eval()\n",
    "    for episode in range(n_episodes):\n",
    "        reward_container = 0.0\n",
    "        env.reset_game()\n",
    "        state_numpy = get_observation(env).astype(\"float32\")\n",
    "        state = torch.from_numpy(state_numpy).unsqueeze(0)\n",
    "        # print(\"state tensor shape:\", state.shape)\n",
    "        \n",
    "        if use_cuda:\n",
    "            state = state.cuda()\n",
    "        for _ in range(T):\n",
    "            step += 1\n",
    "            # Epsilon-greedy action selection from the online network.\n",
    "            q_values = q_net(state)\n",
    "            action = epsilon_greedy(q_values, epsilon, n_action)\n",
    "            # Action 0 sends key code 119 (presumably the flap key in\n",
    "            # PLE FlappyBird -- TODO confirm); anything else is a no-op.\n",
    "            if action == 0:\n",
    "                action_ = 119\n",
    "            else:\n",
    "                action_ = None\n",
    "            reward = env.act(action_)\n",
    "            new_state_numpy = get_observation(env).astype(\"float32\")\n",
    "            new_state = torch.from_numpy(new_state_numpy).unsqueeze(0)\n",
    "            done = env.game_over()\n",
    "            \n",
    "            if use_cuda:\n",
    "                new_state = new_state.cuda()\n",
    "            reward_container += reward\n",
    "            \n",
    "            # Store the transition, then immediately sample a minibatch.\n",
    "            # NOTE(review): sampling starts from the very first step, so\n",
    "            # early minibatches hold fewer than batch_size transitions.\n",
    "            new_frame = [state_numpy, action, reward, new_state_numpy, done]\n",
    "            replayMemory.append(new_frame)\n",
    "            state_batch_numpy, action_batch, reward_batch, newState_batch_numpy, done_batch = sampleFromReplayMemory(batch_size)\n",
    "            \n",
    "            state = new_state\n",
    "            state_numpy = new_state_numpy\n",
    "\n",
    "            num = state_batch_numpy.shape[0]\n",
    "            # state_batch = transform(state_batch_numpy[0])\n",
    "\n",
    "            state_batch = torch.from_numpy(state_batch_numpy)\n",
    "            \n",
    "            # state_batch = torch.from_numpy(state_batch) / 255.0\n",
    "            action_batch = torch.from_numpy(action_batch).long()\n",
    "            reward_batch = torch.from_numpy(reward_batch)\n",
    "            \n",
    "            newState_batch = torch.from_numpy(newState_batch_numpy)\n",
    "\n",
    "            # newState_batch = torch.from_numpy(newState_batch) / 255.0\n",
    "            # 1 - done: mask that zeroes the bootstrap term for terminal states.\n",
    "            not_done_batch = torch.from_numpy(1 - done_batch)\n",
    "\n",
    "            # state_batch = transforms.RandomResizedCrop(84)(state_batch)\n",
    "            # newState_batch = transforms.RandomResizedCrop(84)(newState_batch)\n",
    "\n",
    "            if use_cuda:\n",
    "                state_batch = state_batch.cuda()\n",
    "                action_batch = action_batch.cuda()\n",
    "                reward_batch = reward_batch.cuda()\n",
    "                newState_batch = newState_batch.cuda()\n",
    "                not_done_batch = not_done_batch.cuda()\n",
    "\n",
    "            # Bellman target: r + gamma * max_a Q_target(s', a); the target-\n",
    "            # network term is detached so gradients flow only into q_net.\n",
    "            next_max_q = target_q_net(newState_batch).detach().max(1)[0] * not_done_batch\n",
    "            y = reward_batch + gamma * next_max_q\n",
    "            # Q-values of the actions that were actually taken.\n",
    "            cur_q_values = q_net(state_batch).gather(1, action_batch.unsqueeze(1)).squeeze(1)\n",
    "            loss:torch.Tensor = nn.MSELoss()(y.float(), cur_q_values.float())\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # Periodically sync the target network with q_net, using the\n",
    "            # checkpoint file on disk as the transfer medium.\n",
    "            if step % renew_steps == 0:\n",
    "                torch.save(q_net.state_dict(), \"DQNv2.pth\")\n",
    "                target_q_net.load_state_dict(torch.load(\"DQNv2.pth\"))\n",
    "\n",
    "            if done:\n",
    "                break\n",
    "        \n",
    "        \n",
    "        \n",
    "        best_reward = max(best_reward, reward_container)\n",
    "        Reward_Recorder.append(reward_container)\n",
    "        if(episode % 10 == 0):\n",
    "            print(\"Epoch {} Episode {}\".format(episode*T // 50000, episode))\n",
    "            print(\"    Episode reward:\", reward_container)\n",
    "            print(\"    Best reward:\", best_reward)\n",
    "        if(episode % 100 == 0):\n",
    "            torch.save(q_net, \"DQNv2_new.pth\")\n",
    "            np.save(\"Reward_Episode_v2.npy\", np.array(Reward_Recorder))\n",
    "\n",
    "\n",
    "def visualizer(env:PLE, q_net, n_episodes, T=1000, use_cuda=True, n_action=2):\n",
    "    \"\"\"Run the learned policy greedily with the game screen shown.\n",
    "\n",
    "    Plays n_episodes episodes of at most T steps each, always taking\n",
    "    the greedy action (epsilon = 0), and prints the length and total\n",
    "    reward of every episode. The screen is hidden again on exit.\n",
    "    \"\"\"\n",
    "    env.display_screen = True\n",
    "    if use_cuda:\n",
    "        q_net.cuda()\n",
    "    for _ in range(n_episodes):\n",
    "        env.reset_game()\n",
    "        obs = torch.from_numpy(get_observation(env).astype(\"float32\")).unsqueeze(0)\n",
    "        if use_cuda:\n",
    "            obs = obs.cuda()\n",
    "        total_reward = 0.0\n",
    "        for t in range(T):\n",
    "            # epsilon = 0 disables exploration: pure greedy play.\n",
    "            act_idx = epsilon_greedy(q_net(obs), 0, n_action)\n",
    "            # Action 0 sends key code 119 (presumably flap); else no-op.\n",
    "            total_reward += env.act(119 if act_idx == 0 else None)\n",
    "            obs = torch.from_numpy(get_observation(env).astype(\"float32\")).unsqueeze(0)\n",
    "            if use_cuda:\n",
    "                obs = obs.cuda()\n",
    "            if env.game_over():\n",
    "                break\n",
    "        print(\"Episode finished after {} timesteps\".format(t+1))\n",
    "        print(\"    Reward of this Episode is:\", total_reward)\n",
    "    env.display_screen = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Build the networks and environment, train the agent, then visualize it.\"\"\"\n",
    "    q_net = DQN()\n",
    "    target_q_net = DQN()\n",
    "    # Initialize the target network with the same weights as q_net,\n",
    "    # using a checkpoint file on disk as the transfer medium.\n",
    "    torch.save(q_net.state_dict(), \"DQNv2.pth\")\n",
    "    target_q_net.load_state_dict(torch.load(\"DQNv2.pth\"))\n",
    "    optimizer = torch.optim.Adam(q_net.parameters(), lr=0.00025)\n",
    "    game = FlappyBird(pipe_gap=125)\n",
    "    env = PLE(game, fps=30, display_screen=False)\n",
    "    env.init()\n",
    "    # Make env.getGameState return the game's own state dict, as\n",
    "    # expected by get_observation.\n",
    "    env.getGameState = game.getGameState\n",
    "    train_DQN(env, target_q_net, q_net, BATCH_SIZE, GAMMA, ReplayMemory, N_EPISODES, EPSILON, N_ACTION, optimizer, USE_CUDA, T)\n",
    "    torch.save(q_net, \"DQNv2_new.pth\")\n",
    "    visualizer(env, q_net, 10, 1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0 Episode 0\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 0 Episode 10\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 0 Episode 20\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 0\n",
      "Epoch 0 Episode 30\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 0 Episode 40\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 1 Episode 50\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 1 Episode 60\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 1 Episode 70\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 1 Episode 80\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 1 Episode 90\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 2 Episode 100\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 0\n",
      "Epoch 2 Episode 110\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 2 Episode 120\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 2 Episode 130\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 2 Episode 140\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 3 Episode 150\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 3 Episode 160\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 0\n",
      "Epoch 3 Episode 170\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 0\n",
      "Epoch 3 Episode 180\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 0\n",
      "Epoch 3 Episode 190\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 0\n",
      "Epoch 4 Episode 200\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 0\n",
      "Epoch 4 Episode 210\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 0\n",
      "Epoch 4 Episode 220\n",
      "    Episode reward: -1.0\n",
      "    Best reward: 0\n",
      "Epoch 4 Episode 230\n",
      "    Episode reward: 0.0\n",
      "    Best reward: 5.0\n",
      "Epoch 4 Episode 240\n",
      "    Episode reward: 10.0\n",
      "    Best reward: 25.0\n",
      "Epoch 5 Episode 250\n",
      "    Episode reward: 14.0\n",
      "    Best reward: 25.0\n",
      "Epoch 5 Episode 260\n",
      "    Episode reward: 15.0\n",
      "    Best reward: 25.0\n",
      "Epoch 5 Episode 270\n",
      "    Episode reward: 0.0\n",
      "    Best reward: 25.0\n",
      "Epoch 5 Episode 280\n",
      "    Episode reward: 2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 5 Episode 290\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 25.0\n",
      "Epoch 6 Episode 300\n",
      "    Episode reward: 15.0\n",
      "    Best reward: 25.0\n",
      "Epoch 6 Episode 310\n",
      "    Episode reward: 18.0\n",
      "    Best reward: 25.0\n",
      "Epoch 6 Episode 320\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 6 Episode 330\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 6 Episode 340\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 7 Episode 350\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 7 Episode 360\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 7 Episode 370\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 7 Episode 380\n",
      "    Episode reward: 10.0\n",
      "    Best reward: 25.0\n",
      "Epoch 7 Episode 390\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 8 Episode 400\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 8 Episode 410\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 8 Episode 420\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 8 Episode 430\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 8 Episode 440\n",
      "    Episode reward: 4.0\n",
      "    Best reward: 25.0\n",
      "Epoch 9 Episode 450\n",
      "    Episode reward: -1.0\n",
      "    Best reward: 25.0\n",
      "Epoch 9 Episode 460\n",
      "    Episode reward: 16.0\n",
      "    Best reward: 25.0\n",
      "Epoch 9 Episode 470\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 9 Episode 480\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 9 Episode 490\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 10 Episode 500\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 10 Episode 510\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 25.0\n",
      "Epoch 10 Episode 520\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 10 Episode 530\n",
      "    Episode reward: 6.0\n",
      "    Best reward: 25.0\n",
      "Epoch 10 Episode 540\n",
      "    Episode reward: -5.0\n",
      "    Best reward: 25.0\n",
      "Epoch 11 Episode 550\n",
      "    Episode reward: 17.0\n",
      "    Best reward: 25.0\n",
      "Epoch 11 Episode 560\n",
      "    Episode reward: 2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 11 Episode 570\n",
      "    Episode reward: 0.0\n",
      "    Best reward: 25.0\n",
      "Epoch 11 Episode 580\n",
      "    Episode reward: 11.0\n",
      "    Best reward: 25.0\n",
      "Epoch 11 Episode 590\n",
      "    Episode reward: -1.0\n",
      "    Best reward: 25.0\n",
      "Epoch 12 Episode 600\n",
      "    Episode reward: 9.0\n",
      "    Best reward: 25.0\n",
      "Epoch 12 Episode 610\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 25.0\n",
      "Epoch 12 Episode 620\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 25.0\n",
      "Epoch 12 Episode 630\n",
      "    Episode reward: 7.0\n",
      "    Best reward: 25.0\n",
      "Epoch 12 Episode 640\n",
      "    Episode reward: 3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 13 Episode 650\n",
      "    Episode reward: 7.0\n",
      "    Best reward: 25.0\n",
      "Epoch 13 Episode 660\n",
      "    Episode reward: 6.0\n",
      "    Best reward: 25.0\n",
      "Epoch 13 Episode 670\n",
      "    Episode reward: 17.0\n",
      "    Best reward: 25.0\n",
      "Epoch 13 Episode 680\n",
      "    Episode reward: 15.0\n",
      "    Best reward: 25.0\n",
      "Epoch 13 Episode 690\n",
      "    Episode reward: 11.0\n",
      "    Best reward: 25.0\n",
      "Epoch 14 Episode 700\n",
      "    Episode reward: 3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 14 Episode 710\n",
      "    Episode reward: 18.0\n",
      "    Best reward: 25.0\n",
      "Epoch 14 Episode 720\n",
      "    Episode reward: 10.0\n",
      "    Best reward: 25.0\n",
      "Epoch 14 Episode 730\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 25.0\n",
      "Epoch 14 Episode 740\n",
      "    Episode reward: 1.0\n",
      "    Best reward: 25.0\n",
      "Epoch 15 Episode 750\n",
      "    Episode reward: 0.0\n",
      "    Best reward: 25.0\n",
      "Epoch 15 Episode 760\n",
      "    Episode reward: 15.0\n",
      "    Best reward: 25.0\n",
      "Epoch 15 Episode 770\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 15 Episode 780\n",
      "    Episode reward: -4.0\n",
      "    Best reward: 25.0\n",
      "Epoch 15 Episode 790\n",
      "    Episode reward: 7.0\n",
      "    Best reward: 25.0\n",
      "Epoch 16 Episode 800\n",
      "    Episode reward: 13.0\n",
      "    Best reward: 25.0\n",
      "Epoch 16 Episode 810\n",
      "    Episode reward: 3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 16 Episode 820\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 16 Episode 830\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 16 Episode 840\n",
      "    Episode reward: 6.0\n",
      "    Best reward: 25.0\n",
      "Epoch 17 Episode 850\n",
      "    Episode reward: 14.0\n",
      "    Best reward: 25.0\n",
      "Epoch 17 Episode 860\n",
      "    Episode reward: -3.0\n",
      "    Best reward: 25.0\n",
      "Epoch 17 Episode 870\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 17 Episode 880\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 17 Episode 890\n",
      "    Episode reward: 2.0\n",
      "    Best reward: 25.0\n",
      "Epoch 18 Episode 900\n",
      "    Episode reward: 13.0\n",
      "    Best reward: 25.0\n",
      "Epoch 18 Episode 910\n",
      "    Episode reward: 11.0\n",
      "    Best reward: 25.0\n",
      "Epoch 18 Episode 920\n",
      "    Episode reward: 25.0\n",
      "    Best reward: 25.0\n",
      "Epoch 18 Episode 930\n",
      "    Episode reward: 5.0\n",
      "    Best reward: 25.0\n",
      "Epoch 18 Episode 940\n",
      "    Episode reward: -1.0\n",
      "    Best reward: 25.0\n",
      "Epoch 19 Episode 950\n",
      "    Episode reward: 9.0\n",
      "    Best reward: 25.0\n",
      "Epoch 19 Episode 960\n",
      "    Episode reward: 10.0\n",
      "    Best reward: 25.0\n",
      "Epoch 19 Episode 970\n",
      "    Episode reward: 18.0\n",
      "    Best reward: 25.0\n",
      "Epoch 19 Episode 980\n",
      "    Episode reward: 18.0\n",
      "    Best reward: 25.0\n",
      "Epoch 19 Episode 990\n",
      "    Episode reward: -2.0\n",
      "    Best reward: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n"
     ]
    }
   ],
   "source": [
    "# Train for N_EPISODES episodes, then replay 10 greedy episodes on screen\n",
    "main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n",
      "libpng warning: iCCP: known incorrect sRGB profile\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n",
      "Episode finished after 1000 timesteps\n",
      "    Reward of this Episode is: 25.0\n"
     ]
    }
   ],
   "source": [
    "# Rebuild the environment with the screen visible and replay the saved model.\n",
    "game = FlappyBird(pipe_gap=125)\n",
    "env = PLE(game, fps=30, display_screen=True)\n",
    "env.init()\n",
    "env.getGameState = game.getGameState\n",
    "# NOTE(review): torch.load of a whole pickled model executes arbitrary code\n",
    "# from the file; prefer state_dict checkpoints for anything untrusted.\n",
    "q_net = torch.load(\"DQNv2_new.pth\")\n",
    "visualizer(env, q_net, 10, 1000)"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "494899efd6527d56ea7f55c588d0081523a17dc3a9ff1107f3394ad815ff2527"
  },
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
