from collections import deque
import time
import cv2
import sys
import random
import os

sys.path.append("game/")
import game.wrapped_flappy_bird as game
import torchvision.transforms as transforms

# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter('D:/PythonProject/tensorboarddir')
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from tqdm import tqdm

# Hyper Parameters:
FRAME_PER_ACTION = 1  # choose a fresh action every FRAME_PER_ACTION frames
GAMMA = 0.99  # decay rate of past observations
OBSERVE = 200.  # timesteps to observe before training
EXPLORE = 15000.  # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001  # 0.001 # final value of epsilon
INITIAL_EPSILON = 0.3  # 0.01 # starting value of epsilon
REPLAY_MEMORY = 32768  # replay-buffer capacity (2**15; the original note said "full binary tree node count: 2^k - 1", which 32768 is not — presumably a leftover from a sum-tree design)
BATCH_SIZE = 64  # size of minibatch
UPDATE_TIME = 100  # sync the target network every UPDATE_TIME training steps
LR = 1e-3  # base learning rate (the Adam optimizer actually uses LR * 0.5)
N_ACTIONS = 2  # number of available actions


# preprocess raw image to 80*80 gray image
def convert_image(observation):
    """Preprocess a raw game frame into a (1, 80, 80) binary float image.

    The frame is resized to 80x80, converted to grayscale, thresholded to a
    black/white mask and scaled to {0.0, 1.0}. A leading channel axis is
    added because PaddlePaddle expects CHW layout.
    """
    observation = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
    # Any pixel value > 1 becomes 255; the division then maps to {0.0, 1.0}.
    _, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)
    observation = observation / 255.0
    return np.reshape(observation, (1, 80, 80))


class ReplayData(object):
    """One replay-memory transition: (state, action, reward, next state, terminal)."""

    # __slots__ drops the per-instance __dict__; with up to REPLAY_MEMORY
    # (32768) transitions stored at once this is a meaningful memory saving.
    __slots__ = ('s', 'a', 'r', 's_', 't')

    def __init__(self, s, a, r, s_, t):
        self.s = s    # current state (preprocessed frame)
        self.a = a    # action index taken
        self.r = r    # reward received
        self.s_ = s_  # next state
        self.t = t    # terminal flag (True if episode ended)


class QNetwork(object):
    """Double-DQN value network built on the legacy PaddlePaddle fluid API.

    Three fluid programs are assembled:
      * ``predict_program`` - forward pass of the policy network (Q-values),
      * ``train_program``   - Double-DQN loss plus the Adam update,
      * ``_sync_program``   - assign ops copying policy params to the target net.
    """

    def __init__(self, use_cuda=False):
        # Number of discrete actions (flap / do nothing).
        self.action_dim = 2

        # Input frames are 80x80 single-channel images.
        self.img_height = 80
        self.img_width = 80
        # BUG FIX: this used to be hard-coded to True, silently ignoring the
        # constructor argument; honor the caller's choice instead.
        self.use_cuda = use_cuda

        self._build_net()

    def _get_inputs(self):
        """Declare the feed variables (state, action, reward, next_s, isOver)."""
        return fluid.layers.data(
            name='state',
            shape=[1, self.img_height, self.img_width],
            dtype='float32'), \
               fluid.layers.data(
                   name='action', shape=[1], dtype='int32'), \
               fluid.layers.data(
                   name='reward', shape=[], dtype='float32'), \
               fluid.layers.data(
                   name='next_s',
                   shape=[1, self.img_height, self.img_width],
                   dtype='float32'), \
               fluid.layers.data(
                   name='isOver', shape=[], dtype='bool')

    def _build_net(self):
        """Build the predict / train / sync programs and the executor."""
        self.predict_program = fluid.Program()
        self.train_program = fluid.Program()
        self._sync_program = fluid.Program()

        with fluid.program_guard(self.predict_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            self.pred_value = self.get_DQN_prediction(state)

        with fluid.program_guard(self.train_program):
            state, action, reward, next_s, isOver = self._get_inputs()
            pred_value = self.get_DQN_prediction(state)

            reward = fluid.layers.clip(reward, min=-1.0, max=1.0)

            action_onehot = fluid.layers.one_hot(action, self.action_dim)
            action_onehot = fluid.layers.cast(action_onehot, dtype='float32')

            # Q(s, a) for the action actually taken.
            pred_action_value = fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(action_onehot, pred_value), dim=1)

            # Double DQN: the policy net picks the greedy next action,
            # the target net evaluates it.
            targetQ_predict_value = self.get_DQN_prediction(next_s, target=True)

            next_s_predcit_value = self.get_DQN_prediction(next_s)
            greedy_action = fluid.layers.argmax(next_s_predcit_value, axis=1)
            greedy_action = fluid.layers.unsqueeze(greedy_action, axes=[1])

            predict_onehot = fluid.layers.one_hot(greedy_action, self.action_dim)
            best_v = fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(predict_onehot, targetQ_predict_value),
                dim=1)
            # The bootstrap target must not propagate gradients.
            best_v.stop_gradient = True

            # Terminal transitions bootstrap nothing: target reduces to r.
            target = reward + (1.0 - fluid.layers.cast(
                isOver, dtype='float32')) * GAMMA * best_v
            cost = fluid.layers.square_error_cost(pred_action_value, target)
            cost = fluid.layers.reduce_mean(cost)

            # NOTE(review): effective learning rate is LR * 0.5 — confirm the
            # halving is intentional rather than a leftover experiment.
            optimizer = fluid.optimizer.Adam(LR * 0.5, epsilon=1e-3)
            optimizer.minimize(cost)

        # Collect every (non-gradient) target-network parameter and the
        # matching policy-network parameter.
        all_vars = list(self.train_program.list_vars())
        target_vars = list(filter(
            lambda x: 'GRAD' not in x.name and 'target' in x.name, all_vars))

        policy_vars_name = [
            x.name.replace('target', 'policy') for x in target_vars]
        policy_vars = list(filter(
            lambda x: x.name in policy_vars_name, all_vars))

        # BUG FIX: list_vars() gives no ordering guarantee, so sort both lists
        # by name to ensure policy_vars[i] really corresponds to
        # target_vars[i] in the assign loop below.
        policy_vars.sort(key=lambda x: x.name)
        target_vars.sort(key=lambda x: x.name)

        # Clone a program and fill it with assign ops (policy -> target).
        # NOTE(review): this clones predict_program although the variables come
        # from train_program — confirm this is the intended base program.
        self._sync_program = self.predict_program.clone()
        with fluid.program_guard(self._sync_program):
            sync_ops = []
            for i, var in enumerate(policy_vars):
                sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])
                sync_ops.append(sync_op)
        # Prune everything except the assign ops so running the program only
        # performs the parameter copy.
        self._sync_program._prune(sync_ops)

        # Create the executor on GPU or CPU and run parameter initialization.
        place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
        self.exe = fluid.Executor(place)
        self.exe.run(fluid.default_startup_program())

    # Network architecture shared by the policy and target networks.
    def get_DQN_prediction(self, image, target=False):
        """Forward pass: three conv+pool stages and two FC layers mapping an
        80x80 frame to per-action Q-values.

        Args:
            image: input variable of shape [N, 1, 80, 80].
            target: if True, use the target network's parameters (names
                prefixed 'target_'), otherwise the policy network's
                ('policy_'). Parameter sharing is achieved purely through
                these explicit ParamAttr names.
        """
        variable_field = 'target' if target else 'policy'

        conv1 = fluid.layers.conv2d(
            input=image,
            num_filters=8,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            param_attr=ParamAttr(name='{}_conv1'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv1_b'.format(variable_field)))

        max_pool1 = fluid.layers.pool2d(
            input=conv1, pool_size=2, pool_stride=2, pool_type='max')

        conv2 = fluid.layers.conv2d(
            input=max_pool1,
            num_filters=16,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            param_attr=ParamAttr(name='{}_conv2'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv2_b'.format(variable_field)))

        max_pool2 = fluid.layers.pool2d(
            input=conv2, pool_size=2, pool_stride=2, pool_type='max')

        conv3 = fluid.layers.conv2d(
            input=max_pool2,
            num_filters=16,
            filter_size=3,
            stride=1,
            padding=1,
            act='relu',
            param_attr=ParamAttr(name='{}_conv3'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_conv3_b'.format(variable_field)))

        flatten = fluid.layers.flatten(conv3, axis=1)

        fc1 = fluid.layers.fc(
            input=flatten,
            size=512,
            param_attr=ParamAttr(name='{}_fc1'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_fc1_b'.format(variable_field)))

        out = fluid.layers.fc(
            input=fc1,
            size=self.action_dim,
            param_attr=ParamAttr(name='{}_fc2'.format(variable_field)),
            bias_attr=ParamAttr(name='{}_fc2_b'.format(variable_field)))
        return out

    def action(self, state):
        """Return the greedy action index for one (1, 80, 80) state."""
        # Add the batch axis the network expects.
        state = np.expand_dims(state, axis=0)
        pred_Q = self.exe.run(self.predict_program,
                              feed={'state': state.astype('float32')},
                              fetch_list=[self.pred_value])[0]
        pred_Q = np.squeeze(pred_Q, axis=0)
        return np.argmax(pred_Q)

    def train(self, state, action, reward, next_state, is_over):
        """Run one optimization step of the Double-DQN loss on a minibatch."""
        # one_hot expects the actions as a column vector, shape (batch, 1).
        action = np.expand_dims(action, -1)
        self.exe.run(self.train_program,
                     feed={
                         'state': state.astype('float32'),
                         'action': action.astype('int32'),
                         'reward': reward.astype('float32'),
                         'next_s': next_state.astype('float32'),
                         'isOver': is_over.astype('bool')
                     })

    def sync_target_network(self):
        """Copy the policy-network parameters into the target network."""
        self.exe.run(self._sync_program)

    def save_network(self):
        """Save an inference model (feed: 'state', fetch: Q-values)."""
        # Create the save directory if it does not exist yet.
        model_save_dir = r".\model"
        if not os.path.exists(model_save_dir):
            os.makedirs(model_save_dir)

        # Persist the parameters together with a dedicated inference program.
        fluid.io.save_inference_model(model_save_dir,        # directory to save into
                                      ['state'],             # names of variables to feed
                                      self.pred_value,       # variables holding the result
                                      self.exe,              # executor performing the save
                                      self.predict_program)  # program to derive the inference program from



class BrainDQN:
    """DQN agent: epsilon-greedy action selection, experience replay,
    periodic training and target-network synchronisation."""

    def __init__(self):
        # Replay memory. BUG FIX: the deque used to be unbounded, so the
        # REPLAY_MEMORY capacity was never enforced and memory grew without
        # limit; maxlen makes the oldest transitions drop out automatically.
        self.dataset = deque(maxlen=REPLAY_MEMORY)
        # Number of frames processed so far.
        self.timeStep = 0
        # Current exploration rate, annealed in getAction().
        self.epsilon = INITIAL_EPSILON
        # Q network (holds both policy and target nets internally).
        self.eval_net = QNetwork()

    def getAction(self, state):
        """Choose an action for `state` with an annealed epsilon-greedy policy."""
        if self.timeStep % FRAME_PER_ACTION == 0:  # FRAME_PER_ACTION is 1, so always true
            if random.random() <= self.epsilon:
                # Explore: pick a uniformly random action.
                action = random.randrange(N_ACTIONS)
            else:
                # Exploit: greedy action from the policy network.
                action = self.eval_net.action(state)
        else:
            # Unreachable while FRAME_PER_ACTION == 1.
            action = 1  # do nothing

        # Linearly anneal epsilon towards FINAL_EPSILON over EXPLORE frames
        # once the pure-observation phase is over.
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        return action

    def setPerception(self, cur_observ, nextObservation, action, reward, terminal):
        """Store one transition and, after the observation phase, train."""
        data = ReplayData(cur_observ, action, reward, nextObservation, terminal)
        self.dataset.append(data)

        if self.timeStep > OBSERVE:  # enough observations collected: start training
            self.trainQNetwork()

        # Progress report: which phase the agent is currently in.
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        if self.timeStep % 50 == 0:
            print("TIMESTEP", self.timeStep, "/ STATE", state, "/ EPSILON", self.epsilon)

        self.timeStep += 1

    def trainQNetwork(self):
        """Sample a minibatch from replay memory and run one training step."""
        minibatch = random.sample(self.dataset, BATCH_SIZE)
        state_batch = np.array([data.s for data in minibatch])
        action_batch = np.array([data.a for data in minibatch])
        reward_batch = np.array([data.r for data in minibatch])
        next_state_batch = np.array([data.s_ for data in minibatch])
        terminal_batch = np.array([data.t for data in minibatch])

        self.eval_net.train(state_batch, action_batch, reward_batch,
                            next_state_batch, terminal_batch)

        # Persist the network every 10000 iterations.
        if self.timeStep % 10000 == 0:
            self.eval_net.save_network()

        # Periodically copy policy parameters into the target network.
        if self.timeStep % UPDATE_TIME == 0:
            self.eval_net.sync_target_network()


def convert_action(action, n_actions=None):
    """Encode a scalar action index as a one-hot vector.

    Args:
        action: index of the chosen action.
        n_actions: length of the one-hot vector; defaults to the module-level
            N_ACTIONS (2) for backward compatibility.

    Returns:
        np.ndarray of shape (n_actions,) with a single 1.0 at `action`.
    """
    if n_actions is None:
        n_actions = N_ACTIONS
    a = np.zeros(n_actions)
    a[action] = 1
    return a


def playFlappyBird():
    """Run the endless training loop: the agent plays Flappy Bird and learns online."""
    # Step 1: the learning agent.
    brain = BrainDQN()
    # Step 2: the game environment.
    flappy_bird = game.GameState()

    # Step 3.1: obtain the initial state by issuing action 0 once.
    state, _, _ = flappy_bird.frame_step(convert_action(0))
    state = convert_image(state)

    # Step 3.2: interact with the environment forever.
    while True:
        action = brain.getAction(state)
        # frame_step expects a one-hot action array, hence convert_action.
        next_state, reward, terminal = flappy_bird.frame_step(convert_action(action))
        next_state = convert_image(next_state)
        brain.setPerception(state, next_state, action, reward, terminal)
        state = next_state


# Script entry point: start the play/train loop.
if __name__ == '__main__':
    playFlappyBird()
