# -*- coding: utf-8 -*-
from DoubleDQN_agent import DoubleDQNModel
from atari import AtariPlayer
import paddle.fluid as fluid
import gym
import argparse
import cv2
from tqdm import tqdm
from expreplay import ReplayMemory, Experience
import numpy as np
import os

from datetime import datetime
from atari_wrapper import FrameStack, MapState, FireResetEnv, LimitLength
from collections import deque

# --- DoubleDQN model definition (originally a separate module; the coding
# cookie below the imports suggests two files were concatenated) ---

import math
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from tqdm import tqdm


class DoubleDQNModel(object):
    """Double DQN built on the (deprecated) paddle.fluid static-graph API.

    Three separate fluid Programs are constructed:
      * ``predict_program`` -- policy-network forward pass, used by ``act``.
      * ``train_program``   -- Double-DQN TD loss + Adam update step.
      * ``_sync_program``   -- copies policy parameters into the target net.

    NOTE(review): this class definition shadows the ``DoubleDQNModel``
    imported from ``DoubleDQN_agent`` at the top of the file -- confirm
    which one is actually intended to be used.
    """

    def __init__(self, state_dim, action_dim, gamma, hist_len, use_cuda=False):
        # state_dim is (height, width) of one preprocessed frame.
        self.img_height = state_dim[0]
        self.img_width = state_dim[1]
        self.action_dim = action_dim
        self.gamma = gamma
        # Starts above 1.0 so early actions are purely random until the
        # per-call decay in ``act`` (1e-6) brings it below 1.0.
        self.exploration = 1.1
        # Target network is refreshed every 2500 calls to ``train``.
        self.update_target_steps = 10000 // 4
        self.hist_len = hist_len
        self.use_cuda = use_cuda

        self.global_step = 0
        self._build_net()

    def _get_inputs(self):
        """Declare the feed variables used by the programs.

        Returns a 5-tuple of fluid data layers:
        (state, action, reward, next_state, isOver).
        """
        return fluid.layers.data(
            name='state',
            shape=[self.hist_len, self.img_height, self.img_width],
            dtype='float32'), \
               fluid.layers.data(
                   name='action', shape=[1], dtype='int32'), \
               fluid.layers.data(
                   name='reward', shape=[], dtype='float32'), \
               fluid.layers.data(
                   name='next_s',
                   shape=[self.hist_len, self.img_height, self.img_width],
                   dtype='float32'), \
               fluid.layers.data(
                   name='isOver', shape=[], dtype='bool')

    def _build_net(self):
        """Build predict/train/sync programs and create the executor."""
        self.predict_program = fluid.Program()
        self.train_program = fluid.Program()
        self._sync_program = fluid.Program()

        with fluid.program_guard(self.predict_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            self.pred_value = self.get_DQN_prediction(state)

        with fluid.program_guard(self.train_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            pred_value = self.get_DQN_prediction(state)

            # Standard DQN reward clipping to [-1, 1].
            reward = fluid.layers.clip(reward, min=-1.0, max=1.0)

            action_onehot = fluid.layers.one_hot(action, self.action_dim) # each row is 1 at the taken action's index, 0 elsewhere
            action_onehot = fluid.layers.cast(action_onehot, dtype='float32') # cast so it can be multiplied with float Q-values

            pred_action_value = fluid.layers.reduce_sum(# sum over the action axis ...
                fluid.layers.elementwise_mul(action_onehot, pred_value), dim=1) # ... of the elementwise product, i.e. select Q(s, a)

            # Double DQN: the TARGET network evaluates ...
            targetQ_predict_value = self.get_DQN_prediction(next_s, target=True)

            # ... the greedy action chosen by the POLICY network on next_s.
            next_s_predcit_value = self.get_DQN_prediction(next_s)
            greedy_action = fluid.layers.argmax(next_s_predcit_value, axis=1)
            greedy_action = fluid.layers.unsqueeze(greedy_action, axes=[1])

            predict_onehot = fluid.layers.one_hot(greedy_action, self.action_dim)
            best_v = fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(predict_onehot, targetQ_predict_value),
                dim=1)
            # The TD target must not propagate gradients into the target net.
            best_v.stop_gradient = True

            # target = r + gamma * max_a' Q_target(s', a'), zeroed on terminal.
            target = reward + (1.0 - fluid.layers.cast(
                isOver, dtype='float32')) * self.gamma * best_v
            cost = fluid.layers.square_error_cost(pred_action_value, target)
            cost = fluid.layers.reduce_mean(cost)

            optimizer = fluid.optimizer.Adam(1e-3 * 0.5, epsilon=1e-3)
            optimizer.minimize(cost)

        # Pair up parameters by name: every 'target_*' variable has a
        # 'policy_*' counterpart (see get_DQN_prediction naming).
        vars = list(self.train_program.list_vars())
        target_vars = list(filter(
            lambda x: 'GRAD' not in x.name and 'target' in x.name, vars))

        policy_vars_name = [
            x.name.replace('target', 'policy') for x in target_vars]
        policy_vars = list(filter(
            lambda x: x.name in policy_vars_name, vars))

        # Sort both lists by name so indices line up pairwise below.
        policy_vars.sort(key=lambda x: x.name)
        target_vars.sort(key=lambda x: x.name)

        with fluid.program_guard(self._sync_program):
            sync_ops = []
            for i, var in enumerate(policy_vars):
                # assign(input, output): copy policy param into target param.
                sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])
                sync_ops.append(sync_op)

        # fluid exe
        place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
        self.exe = fluid.Executor(place)
        self.exe.run(fluid.default_startup_program())

    def get_DQN_prediction(self, image, target=False):
        """Q-network forward pass: 4 conv layers + 1 fully-connected head.

        ``target`` selects which parameter set ('target_*' vs 'policy_*')
        the layers bind to; both networks share this same architecture.
        Input pixels are scaled from [0, 255] to [0, 1].
        """
        image = image / 255.0

        variable_field = 'target' if target else 'policy'

        conv1 = fluid.layers.conv2d(
            input=image,
            num_filters=32,
            filter_size=5,
            stride=1,
            padding=2,
            act='relu',
            param_attr=ParamAttr(name='{}_conv1'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv1_b'.format(variable_field)))

        max_pool1 = fluid.layers.pool2d(
            input=conv1, pool_size=2, pool_stride=2, pool_type='max')

        conv2 = fluid.layers.conv2d(
            input=max_pool1,
            num_filters=32,
            filter_size=5,
            stride=1,
            padding=2,
            act='relu',
            param_attr=ParamAttr(name='{}_conv2'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv2_b'.format(variable_field)))

        max_pool2 = fluid.layers.pool2d(
            input=conv2, pool_size=2, pool_stride=2, pool_type='max')

        conv3 = fluid.layers.conv2d(
            input=max_pool2,
            num_filters=64,
            filter_size=4,
            stride=1,
            padding=1,
            act='relu',
            param_attr=ParamAttr(name='{}_conv3'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv3_b'.format(variable_field)))
        max_pool3 = fluid.layers.pool2d(
            input=conv3, pool_size=2, pool_stride=2, pool_type='max')

        conv4 = fluid.layers.conv2d(
            input=max_pool3,
            num_filters=64,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            param_attr=ParamAttr(name='{}_conv4'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv4_b'.format(variable_field)))

        flatten = fluid.layers.flatten(conv4, axis=1)

        out = fluid.layers.fc(
            input=flatten,
            size=self.action_dim,
            param_attr=ParamAttr(name='{}_fc1'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_fc1_b'.format(variable_field)))
        return out

    def act(self, state, train_or_test):
        """Epsilon-greedy action selection.

        In 'train' mode, acts randomly with probability ``self.exploration``
        (decayed by 1e-6 per call, floored at 0.1). Otherwise -- and in
        'test' mode -- it is greedy w.r.t. the policy net, except for a
        fixed 1% random-action rate.
        """
        sample = np.random.random()
        if train_or_test == 'train' and sample < self.exploration:
            act = np.random.randint(self.action_dim)
        else:
            if np.random.random() < 0.01:
                act = np.random.randint(self.action_dim)
            else:
                # Add batch dim; predict_program expects a batched 'state'.
                state = np.expand_dims(state, axis=0)
                pred_Q = self.exe.run(self.predict_program,
                                      feed={'state': state.astype('float32')},
                                      fetch_list=[self.pred_value])[0]
                pred_Q = np.squeeze(pred_Q, axis=0)
                act = np.argmax(pred_Q)
        if train_or_test == 'train':
            self.exploration = max(0.1, self.exploration - 1e-6)
        return act

    def train(self, state, action, reward, next_state, isOver):
        """Run one SGD step on a sampled batch; periodically sync target net."""
        if self.global_step % self.update_target_steps == 0:
            self.sync_target_network()
        self.global_step += 1

        # Feed expects action with a trailing singleton axis (see _get_inputs).
        action = np.expand_dims(action, -1)
        self.exe.run(self.train_program,
                     feed={
                         'state': state.astype('float32'),
                         'action': action.astype('int32'),
                         'reward': reward,
                         'next_s': next_state.astype('float32'),
                         'isOver': isOver
                     })

    def sync_target_network(self):
        """Copy policy-network parameters into the target network."""
        self.exe.run(self._sync_program)

# How many environment steps between successive SGD updates.
# (The original file defined UPDATE_FREQ twice with the same value; the
# duplicate has been removed.)
UPDATE_FREQ = 4

# Replay-buffer capacity and warm-up threshold. Kept as ints (1e6 is a
# float literal) so size comparisons and progress-bar totals stay integral.
MEMORY_SIZE = int(1e6)
MEMORY_WARMUP_SIZE = MEMORY_SIZE // 20
IMAGE_SIZE = (84, 84)       # preprocessed frame size (height, width)
CONTEXT_LEN = 4             # number of stacked frames fed to the network
ACTION_REPEAT = 4  # aka FRAME_SKIP

def run_train_episode(agent, env, exp):
    """Play one training episode.

    Every step is appended to the replay buffer ``exp``; once the buffer
    holds more than MEMORY_WARMUP_SIZE transitions, a training batch is
    sampled every UPDATE_FREQ steps.

    Returns (total_reward, step_count) for the finished episode.
    """
    episode_reward = 0
    obs = env.reset()
    steps = 0
    done = False
    while not done:
        steps += 1
        # Stack the most recent frames plus the current one into the
        # network's input context.
        stacked = exp.recent_state()
        stacked.append(obs)
        stacked = np.stack(stacked, axis=0)
        chosen = agent.act(stacked, train_or_test='train')
        next_obs, reward, done, _ = env.step(chosen)
        exp.append(Experience(obs, chosen, reward, done))
        # Train only after warm-up, and only every UPDATE_FREQ env steps.
        if len(exp) > MEMORY_WARMUP_SIZE and steps % UPDATE_FREQ == 0:
            all_states, batch_actions, batch_rewards, batch_dones = \
                exp.sample_batch(args.batch_size)
            # First CONTEXT_LEN frames are s, last CONTEXT_LEN frames are s'.
            cur_states = all_states[:, :CONTEXT_LEN, :, :]
            nxt_states = all_states[:, 1:, :, :]
            agent.train(cur_states, batch_actions, batch_rewards,
                        nxt_states, batch_dones)
        episode_reward += reward
        obs = next_obs
    return episode_reward, steps


def get_player(rom, viz=False, train=False):
    """Build the wrapped Atari environment for ``rom``.

    Training environments skip the FrameStack wrapper because frame
    context is handled by the experience-replay buffer instead.
    """
    player = AtariPlayer(
        rom,
        frame_skip=ACTION_REPEAT,
        viz=viz,
        live_lost_as_eoe=train,
        max_num_frames=60000)
    player = FireResetEnv(player)
    player = MapState(player, lambda frame: cv2.resize(frame, IMAGE_SIZE))
    if train:
        # in training, context is taken care of in expreplay buffer
        return player
    return FrameStack(player, CONTEXT_LEN)


def eval_agent(agent, env):
    """Run 30 greedy evaluation episodes and return the mean episode reward.

    Parameters
    ----------
    agent : object exposing ``act(state, train_or_test)``
    env   : gym-style environment (frame-stacked; see ``get_player``)

    Returns
    -------
    float : mean total reward over the 30 episodes.
    """
    episode_rewards = []
    for _ in range(30):
        state = env.reset()
        total_reward = 0
        while True:
            action = agent.act(state, train_or_test='test')
            # (unused 'step' counter and 'info' binding from the original
            # were removed -- neither was read anywhere.)
            state, reward, isOver, _ = env.step(action)
            total_reward += reward
            if isOver:
                break
        episode_rewards.append(total_reward)
    return np.mean(episode_rewards)


def train_agent():
    """Main training loop.

    Warm up the replay memory with random-ish experience, then train until
    ``args.epoch`` total environment steps, evaluating every
    ``args.test_every_steps`` steps and checkpointing whenever the
    evaluation reward improves.
    """
    env = get_player(args.rom, train=True)
    test_env = get_player(args.rom)
    exp = ReplayMemory(args.mem_size, IMAGE_SIZE, CONTEXT_LEN)
    action_dim = env.action_space.n

    agent = DoubleDQNModel(IMAGE_SIZE, action_dim, args.gamma, CONTEXT_LEN,
                           args.use_cuda)

    # Fill the replay buffer before any gradient updates happen.
    with tqdm(total=MEMORY_WARMUP_SIZE, desc='Memory warmup') as pbar:
        while len(exp) < MEMORY_WARMUP_SIZE:
            total_reward, step = run_train_episode(agent, env, exp)
            pbar.update(step)

    # train
    # (unused 'save_flag' and 'recent_100_reward' from the original removed)
    test_flag = 1
    pbar = tqdm(total=args.epoch)
    total_step = 0
    max_reward = None
    save_path = os.path.join(args.model_dirname, '{}-{}'.format(
        "DoubleDQN", os.path.basename(args.rom).split('.')[0]))
    while True:
        # start epoch
        total_reward, step = run_train_episode(agent, env, exp)
        total_step += step

        pbar.set_description('[train]exploration:{}'.format(agent.exploration))
        pbar.update(step)
        if total_step >= args.epoch:
            break
        # BUG FIX: the original condition was
        #   total_step // args.test_every_steps - test_flag == 0
        # which fires only when the quotient EXACTLY equals test_flag. One
        # long episode crossing two test boundaries made the quotient
        # overshoot test_flag forever, silently disabling all future
        # evaluations. Using >= and re-syncing test_flag guarantees an
        # evaluation after every crossed boundary.
        if total_step // args.test_every_steps >= test_flag:
            pbar.write("testing")
            eval_reward = eval_agent(agent, test_env)
            print("eval_agent done, (steps, eval_reward): ({}, {})".format(
                total_step, eval_reward))

            if max_reward is None or eval_reward > max_reward:
                max_reward = eval_reward
                # Persist the policy net whenever evaluation improves.
                fluid.io.save_inference_model(save_path, ['state'],
                                              agent.pred_value, agent.exe,
                                              agent.predict_program)
            test_flag = total_step // args.test_every_steps + 1
    pbar.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--use_cuda', action='store_true', help='if set, use cuda')
    parser.add_argument(
        '--gamma',
        type=float,
        default=0.99,
        help='discount factor for accumulated reward computation')
    parser.add_argument(
        '--mem_size',
        type=int,
        default=1000000,
        help='memory size for experience replay')
    parser.add_argument(
        '--batch_size', type=int, default=64, help='batch size for training')
    parser.add_argument('--rom', help='atari rom', required=True)
    parser.add_argument(
        '--model_dirname',
        type=str,
        default='saved_model',
        help='dirname to save model')
    parser.add_argument(
        '--test_every_steps',
        type=int,
        default=5000,
        help='every steps number to run test')
    # Help text fixed: the original was copy-pasted from --test_every_steps.
    parser.add_argument(
        '--epoch',
        type=int,
        default=100000,
        help='total number of environment steps to train for')
    args = parser.parse_args()
    train_agent()
