# DQN agent; partially adapted from sample code found online (2021.11)

# import tensorflow as tf
import random

import numpy as np
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model


class DQN:
    """Deep Q-Network agent with an experience-replay buffer and a target network.

    The agent acts epsilon-greedily, stores (state, action, reward, next_state,
    done) transitions in a bounded replay buffer, and trains the online network
    on TD targets computed from a periodically-synced target network.
    """

    def __init__(self, input_shape, n_output=64, model=None, target_model=None, full_mem_size=50):
        """Create the agent.

        input_shape:   shape of a single state observation (no batch dim).
        n_output:      number of discrete actions (size of the Q-output layer).
        model:         optional pre-built online Keras model; built here if omitted.
        target_model:  optional pre-built target Keras model; built here if omitted.
        full_mem_size: maximum number of transitions kept in the replay buffer.
        """
        # Remember the action-space size so egreedy_action can sample from it.
        self.n_output = n_output
        self.gamma = 0
        self.mem_pool = []
        self.full_mem_size = full_mem_size
        self.epsilon = 0.5
        self.epsilon_min = 0
        self.epsilon_decay = 0

        self.model = model if model else self._build_model(input_shape, n_output)
        self.target_model = target_model if target_model else self._build_model(input_shape, n_output)

        # Q-learning regresses unbounded Q-values, so the loss is MSE.
        # (The original compiled with categorical_crossentropy on a sigmoid
        # head, which cannot represent arbitrary Q-values.)
        self.model.compile(optimizer='adam', loss='mse')
        # The target network is only ever used for prediction, so it needs no
        # compile; start it in sync with the online network.
        self.target_model.set_weights(self.model.get_weights())

    @staticmethod
    def _build_model(input_shape, n_output):
        """Build the default MLP: two ReLU hidden layers and a linear Q-head."""
        in_ = Input(shape=input_shape)
        x = Dense(256, activation='relu')(in_)
        x = Dense(128, activation='relu')(x)
        # Linear output because Q-values are unbounded.
        x = Dense(n_output, activation='linear')(x)
        return Model(in_, x)

    def egreedy_action(self, state):
        """Epsilon-greedy action: random with probability epsilon, else argmax Q."""
        if np.random.rand() <= self.epsilon:
            # Bug fix: the original used np.random.randint(0, 1), which always
            # returned action 0. Sample uniformly over the full action space.
            return np.random.randint(0, self.n_output)
        q_values = self.model.predict(state)[0]
        return np.argmax(q_values)

    def update_epsilon(self):
        """Decay the exploration rate until it reaches epsilon_min."""
        if self.epsilon >= self.epsilon_min:
            self.epsilon *= self.epsilon_decay

    def get_batch(self, batch_size):
        """Sample a minibatch from the replay buffer and build training targets.

        Returns (states, y) where y equals the online network's current
        predictions with the taken action's entry replaced by the TD target
        r + gamma * max_a' Q_target(s', a'), or just r for terminal states.
        """
        data = random.sample(self.mem_pool, batch_size)
        states = np.array([d[0] for d in data])
        next_states = np.array([d[3] for d in data])

        y = self.model.predict(states)
        q = self.target_model.predict(next_states)

        for i, (_, action, reward, _, done) in enumerate(data):
            target = reward
            if not done:
                target += self.gamma * np.amax(q[i])
            y[i][action] = target

        return states, y

    def train(self, step_func, reset_func, epsilon_min=0.1, epsilon_decay=0.95, gamma=0.8, episodes=10
              , batch_size=8):
        """Run the DQN training loop.

        step_func(action) -> (next_state, reward, done): advances the environment.
        reset_func() -> state: resets the environment, returns the initial state.
        Returns the history dict (episode index, episode reward, loss, sampled
        every 5 episodes).
        """
        self.epsilon = 0.5
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.gamma = gamma

        history = {'episode': [], 'Episode_reward': [], 'loss': []}
        count = 0

        for episode in range(episodes):
            state = reset_func()
            reward_sum = 0
            loss = np.infty
            done = False

            while not done:
                # Act epsilon-greedily on the current state.
                action = self.egreedy_action(state)
                next_state, reward, done = step_func(action)
                reward_sum += reward
                # Store the transition, evicting the oldest entry when full.
                if len(self.mem_pool) > self.full_mem_size:
                    self.mem_pool.pop(0)
                self.mem_pool.append((state, action, reward, next_state, done))
                # Bug fix: advance the state. The original never updated it, so
                # every action was chosen from (and every transition stored)
                # the initial state.
                state = next_state
                # Learn from replayed experience once enough is stored.
                if len(self.mem_pool) > batch_size:
                    x, y = self.get_batch(batch_size)
                    loss = self.model.train_on_batch(x, y)

                # Periodically sync the target network with the online one.
                if count != 0 and count % 20 == 0:
                    self.target_model.set_weights(self.model.get_weights())
                count += 1

            # Bug fix: decay epsilon after each episode. update_epsilon was
            # defined but never called, so exploration never decayed despite
            # the epsilon_decay parameter.
            self.update_epsilon()

            if episode % 5 == 0:
                history['episode'].append(episode)
                history['Episode_reward'].append(reward_sum)
                # Bug fix: the dict key is 'loss' (see its creation above);
                # the original appended to 'Loss' and raised KeyError at
                # episode 0.
                history['loss'].append(loss)

                print(f'Episode: {episode} | Episode reward: {reward_sum} | loss: {loss} | epsilon: {self.epsilon}')

        return history


