import numpy as np
import torch
from ple import PLE
from ple.games.flappybird import FlappyBird
import torch.nn as nn
from collections import deque
import torch.nn.functional as F

class DQN(nn.Module):
    """Q-network: maps a state vector to one Q-value per action.

    A 4-layer MLP (in_channels -> 32 -> 128 -> 256 -> n_actions) with
    ReLU activations between layers and a linear output head. The output
    is raw Q-values (no softmax) as required for Q-learning targets.
    """

    def __init__(self, in_channels = 8, n_actions = 2):
        """
        Args:
            in_channels: dimensionality of the input state vector
                (FlappyBird's state dict has 8 entries).
            n_actions: number of discrete actions (flap / no-op).
        """
        super(DQN, self).__init__()
        self.fc1 = nn.Linear(in_channels, 32)
        self.fc2 = nn.Linear(32, 128)
        self.fc3 = nn.Linear(128, 256)
        self.fc4 = nn.Linear(256, n_actions)

    def forward(self, x):
        """Return Q-values of shape (batch, n_actions) for states x of
        shape (batch, in_channels)."""
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        # Linear head: Q-values must not be squashed through softmax.
        return self.fc4(x)

def epsilon_greedy(q_values, epsilon, n_action):
    """Select an action index from Q-values with epsilon-greedy exploration.

    With probability 1 - epsilon, returns the greedy action (argmax over
    all elements of q_values); otherwise a uniform random action.

    Args:
        q_values: torch tensor of Q-values (argmax is taken over the
            flattened tensor, so a leading batch dim of 1 is fine).
        epsilon: exploration probability in [0, 1].
        n_action: number of discrete actions.

    Returns:
        int action index in [0, n_action).
    """
    if np.random.uniform(0, 1) < 1 - epsilon:
        # Fixed: was `np.argmax(tensor).numpy()`, which only worked
        # because numpy dispatches argmax back to Tensor.argmax(); use
        # torch's own argmax + .item() (also works for CUDA tensors).
        return int(torch.argmax(q_values.detach()).item())
    return int(np.random.randint(0, n_action))

def get_observation(env):
    """Return the environment's game-state dict values as a flat numpy array.

    Relies on the dict preserving insertion order, so the feature order
    is whatever `env.getGameState()` produces.
    """
    values = env.getGameState().values()
    return np.array(list(values))

def visualizer(env:PLE, q_net, n_episodes, T=1000, use_cuda=True, n_action=2):
    """Render greedy rollouts of a trained Q-network in the PLE environment.

    Runs `n_episodes` episodes of at most `T` steps each with epsilon=0
    (pure greedy policy), printing episode length and total reward.
    Screen display is enabled for the duration and disabled afterwards.
    """
    env.display_screen = True
    if use_cuda:
        q_net.cuda()

    for episode in range(n_episodes):
        env.reset_game()
        obs = torch.from_numpy(get_observation(env).astype("float32")).unsqueeze(0)
        if use_cuda:
            obs = obs.cuda()
        episode_reward = 0.0

        for step in range(T):
            # epsilon = 0 -> always the greedy action during visualization.
            greedy_action = epsilon_greedy(q_net(obs), 0, n_action)
            # Action index 0 maps to key code 119 (flap), anything else
            # to None (no-op) — per the original action mapping; confirm
            # against the game's action set.
            env_action = 119 if greedy_action == 0 else None
            episode_reward += env.act(env_action)

            obs = torch.from_numpy(get_observation(env).astype("float32")).unsqueeze(0)
            if use_cuda:
                obs = obs.cuda()
            if env.game_over():
                break

        print("Episode finished after {} timesteps".format(step+1))
        print("    Reward of this Episode is:", episode_reward)

    env.display_screen = False

def main():
    """Load a trained DQN checkpoint and render 10 greedy FlappyBird episodes."""
    game = FlappyBird(pipe_gap=125)
    env = PLE(game, fps=30, display_screen=False)
    env.init()
    # Expose the game's state dict through the PLE wrapper so that
    # get_observation(env) can call env.getGameState().
    env.getGameState = game.getGameState
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    q_net = torch.load("DQNv2_new.pth")
    visualizer(env, q_net, 10, 1000)


# Guard the entry point so importing this module does not launch the GUI loop.
if __name__ == "__main__":
    main()