import random
import numpy as np
from tensorflow.keras import models, layers, optimizers
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dropout, Dense, ZeroPadding2D, BatchNormalization, ReLU, Input, Reshape
from collections import deque
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model

class DQN(object):
    """Deep Q-Network agent for a 24x24 grid game (e.g. Snake) with 4 actions.

    Maintains an online network (`model`) and a periodically-synced target
    network (`target_model`), plus a fixed-size experience-replay buffer.
    """

    def __init__(self):
        self.step = 0
        self.update_freq = 200   # sync target_model every `update_freq` training steps
        self.replay_size = 2000  # replay buffer capacity; training waits until full
        self.replay_queue = deque(maxlen=self.replay_size)
        # To train from scratch, build fresh networks instead of loading:
        # self.model = self.create_model()
        # self.target_model = self.create_model()

        # Resume both networks from the same saved checkpoint.
        self.model = load_model(r"model/m1.h5")
        self.target_model = load_model(r"model/m1.h5")

    def create_model(self):
        """Build and compile the Q-network.

        Modeled on Snake: the board is 24x24 and there are 4 actions
        (up/down/left/right).

        Returns:
            A compiled Keras model mapping a (24, 24) state to 4 Q-values.
        """
        ACTION_DIM = 4
        model = models.Sequential()
        model.add(Input(shape=(24, 24)))
        model.add(Flatten())  # flatten the 24x24 board into a 576-vector
        model.add(Dense(128, activation="relu"))
        model.add(Dense(ACTION_DIM, activation="linear"))
        model.compile(loss='mean_squared_error',
                      optimizer=optimizers.Adam(0.001))
        return model

    def act(self, s, epsilon=1):
        """Pick an action for state `s` with epsilon-greedy exploration.

        Args:
            s: current game state (24x24 board).
            epsilon: nominal starting exploration rate (currently unused, see NOTE).

        Returns:
            An action index in {0, 1, 2, 3}.
        """
        # NOTE(review): the original code computed a decaying exploration rate
        # (epsilon - step * 0.01, floored at 0.05) and then unconditionally
        # overwrote it with 0.02, so the effective rate is a constant 0.02.
        # The dead computation is removed here; restore the decay schedule if
        # that was the intent.
        random_flag = 0.02
        if np.random.uniform() < random_flag:
            print("随即策略！")
            return np.random.choice([0, 1, 2, 3])
        print("模型策略！")
        return np.argmax(self.model.predict(np.array([s]))[0])

    def train(self, batch_size=64, lr=1, factor=0.95):
        """Run one training step on a random minibatch from the replay buffer.

        Args:
            batch_size: number of transitions sampled per step.
            lr: soft-update mixing rate for the Q-target (1 = full replacement).
            factor: discount factor (gamma) for future rewards.

        Side effects: updates `self.model` weights, periodically syncs
        `self.target_model`, and checkpoints the model to model/m2.h5.
        """
        print("Model Train:", len(self.replay_queue), self.replay_size)

        # Do nothing until the replay buffer is full.
        if len(self.replay_queue) < self.replay_size:
            return
        self.step += 1
        # Every update_freq steps, copy the online weights to the target net.
        if self.step % self.update_freq == 0:
            self.target_model.set_weights(self.model.get_weights())

        replay_batch = random.sample(self.replay_queue, batch_size)
        # Each replay entry is (state, action, next_state, reward).
        s_batch = np.array([replay[0] for replay in replay_batch])
        next_s_batch = np.array([replay[2] for replay in replay_batch])

        Q = self.model.predict(s_batch)
        Q_next = self.target_model.predict(next_s_batch)

        # Bellman update:
        #   Q[s,a] <- (1-lr)*Q[s,a] + lr*(r + gamma * max_a' Q_target(s', a'))
        # NOTE(review): there is no terminal-state handling — the bootstrap
        # term is always added. Confirm transitions never carry a `done` flag.
        for i, replay in enumerate(replay_batch):
            _, a, _, reward = replay
            Q[i][a] = (1 - lr) * Q[i][a] + lr * (reward + factor * np.amax(Q_next[i]))

        # Fit the online network toward the updated Q-targets.
        self.model.fit(
            s_batch, Q,
            verbose=0,
            )

        # Checkpoint after every training step.
        self.model.save(r"model/m2.h5")

if __name__ == "__main__":
    # No CLI entry point: this module is intended to be imported, with DQN
    # driven by an external training loop.
    pass