from __future__ import print_function

from itertools import count

import gym
from os import path
from torch.autograd import Variable
import gym_ple
import DQN
import torch
import numpy as np
import torch.optim as optim
import torch.nn.functional as F
import math
from OptimizeTensor import OTensor

from matplotlib import pyplot as plt

from Agent import Agent
from RelayMemory import Transition
from env import FlappyEnvironment
from utilenn import tensor_image_to_numpy_image

# --- Environment / model / optimizer setup --------------------------------
# Flappy Bird environment wrapping a replay memory of 20000 transitions.
env = FlappyEnvironment(mem_size=20000)

model = DQN.DQN()

# Resume from a previous checkpoint if one exists on disk.
if path.exists('./dqn.net'):
    model.load_state_dict(torch.load('./dqn.net'))

if torch.cuda.is_available():
    model.cuda()

# Agent wrapping the Q-network; the 2 presumably is the number of discrete
# actions (flap / no-op) -- TODO confirm against Agent's constructor.
agent = Agent(model, 2)

env.reset()

optimizer = optim.Adam(model.parameters(), lr=0.00001)
# optimizer = optim.RMSprop(model.parameters())


# NOTE(review): appears unused below -- losses are tracked in `losses` instead.
total_loss = []


def _optimize_model(memory):
    """Run one DQN gradient step on a mini-batch sampled from *memory*.

    Uses the module-level ``model``, ``optimizer``, ``BATCH_SIZE`` and
    ``GAMMA``.  Written against the pre-0.4 PyTorch API (``Variable`` /
    ``volatile``), so the volatile-flag flips below are load-bearing.

    Returns the scalar Huber loss of the step, or 0 when *memory* holds
    fewer than BATCH_SIZE transitions.  NOTE(review): the caller
    ``optimize_model`` uses -1 as its own "not enough samples" sentinel;
    confirm the 0-vs-(-1) inconsistency is intentional.
    """
    if len(memory) < BATCH_SIZE:
        return 0

    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
    # detailed explanation): a list of Transition tuples becomes a single
    # Transition whose fields are tuples of batch elements.
    actual_batch_size = len(transitions)
    batch = Transition(*zip(*transitions))

    # Compute a mask of non-final states and concatenate the batch elements.
    # Terminal transitions store next_state as None.
    non_final_mask = OTensor.ByteTensor(
        tuple(map(lambda s: s is not None, batch.next_state))
    )

    # volatile=True: inference-only forward pass (pre-0.4 analogue of
    # torch.no_grad()) for the bootstrap target computation.
    # NOTE(review): torch.cat raises on an empty list, i.e. if every
    # sampled transition is terminal -- confirm this cannot happen.
    non_final_next_states = Variable(torch.cat([s for s in batch.next_state
                                                if s is not None]),
                                     volatile=True)

    state_batch = Variable(torch.cat(batch.state))
    action_batch = Variable(torch.cat(batch.action))
    reward_batch = Variable(torch.cat(batch.reward))
    # next_state = Variable(torch.cat(batch.next_state))

    # Q(s, a) for the actions actually taken in the batch.
    state_action_values = model(state_batch).gather(1, action_batch)

    # # Compute V(s_{t+1}) for all next states.
    # Final states keep value 0; non-final slots get max_a' Q(s', a').
    next_state_values = Variable(torch.zeros(actual_batch_size).type(OTensor.FloatTensor))
    next_state_values[non_final_mask] = model(non_final_next_states).max(1)[0]
    # Clear the volatile flag so the target may appear in the loss
    # expression (mirrors the pre-0.4 official DQN tutorial's handling
    # of volatile targets -- effectively a detach).
    next_state_values.volatile = False

    # Bellman target: r + GAMMA * V(s').
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch

    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values)

    next_state_values.volatile = True

    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # prevent value go too big or small
    # for param in model.parameters():
    #     param.grad.data.clamp_(-1, 1)
    optimizer.step()

    # Pre-0.4 idiom for extracting a Python float from a 1-element tensor.
    return loss.data[0]


def optimize_model(memory):
    """Perform one optimisation round and checkpoint the network.

    Runs a single mini-batch update via ``_optimize_model`` and saves the
    model weights to ``./dqn.net`` so training can resume after a restart.

    The original implementation looped with ``itertools.count`` but broke
    unconditionally after the first iteration (``if i >= 0: break``), so
    the loop, the ``losses`` accumulator and the ``np.mean`` over a
    single element were dead weight; this version performs the single
    step directly and returns the same value.

    Args:
        memory: replay memory supporting ``len`` and ``sample``.

    Returns:
        The loss of the single optimisation step, or -1 when *memory*
        holds fewer than BATCH_SIZE transitions.
    """
    if len(memory) < BATCH_SIZE:
        return -1

    loss = _optimize_model(memory)

    # Checkpoint after every optimisation round.
    torch.save(model.state_dict(), './dqn.net')

    return loss


# --- Hyper-parameters -----------------------------------------------------
current_loss = 1  # NOTE(review): never read or updated below -- appears unused

# Discount factor applied to bootstrapped future value in the Q target.
GAMMA = 0.999

exploration = 100  # NOTE(review): not referenced in this file -- verify use
BATCH_SIZE = 64  # mini-batch size sampled from replay memory per step


def test_result(epoch, trials=1):
    """Evaluate the current policy and report the best episode length.

    Plays ``trials`` full episodes with epsilon = -1 passed to
    ``agent.select_action`` (presumably disabling random exploration so
    the greedy action is always taken -- TODO confirm against Agent) and
    returns the largest number of steps survived.

    Args:
        epoch: training epoch index, used only in the progress printout.
        trials: number of evaluation episodes to play; defaults to 1,
            matching the previously hard-coded ``range(1)``.

    Returns:
        The maximum step count reached over all trials.
    """
    best_step = 0
    for _ in range(trials):

        env.reset()
        step = 0
        while True:
            # epsilon = -1: evaluation mode, no exploration.
            action = agent.select_action(
                env.current_state,
                -1
            )
            done = env.step(action)
            step += 1

            if done:
                break
        best_step = max(best_step, step)

    print(epoch, 'best step ', best_step)
    return best_step


#
# plt.figure(1)
# plt.show()

# --- Main training loop ---------------------------------------------------
losses = []
steps = []

# Prime the environment with an initial action so current_state is valid.
env.reset()
env.step(1)

# img = plt.imshow(tensor_image_to_numpy_image(env.current_state), cmap='gray')

# Epsilon-greedy schedule: exploration rate decays exponentially from
# initial_epsilon toward final_epsilon with a time constant of 2000 epochs.
initial_epsilon = 0.06
final_epsilon = 0.001

# Runs forever: one episode per epoch, then one optimisation round.
for epoch in count():

    epsilon = final_epsilon + (initial_epsilon - final_epsilon) * \
                              math.exp(-1. * epoch / 2000)

    env.reset()

    # Play one full episode; transitions are presumably recorded into
    # env.mem by env.step -- TODO confirm against FlappyEnvironment.
    while True:
        action = agent.select_action(
            env.current_state,
            epsilon
        )
        done = env.step(action)

        # im.set_data(tensor_image_to_numpy_image(env.current_state))
        # plt.draw()
        # plt.pause(0.001)

        if done:
            break

    # One learning round on the replay memory; also checkpoints the model.
    new_loss = optimize_model(env.mem)
    # losses.append(loss)
    # step = test_result(epoch)
    # steps.append(step)

    # plt.figure(1)
    # plt.gcf().gca().cla()
    # plt.plot(losses)
    # plt.plot(steps)
    # plt.pause(0.001)

    print('loss', epoch, len(env.mem), new_loss, epsilon)
