import os
import sys
import tensorflow as tf
import numpy as np

class ChessDQN(tf.keras.models.Model):
    """Deep Q-network for a chess-like board game.

    Three conv/max-pool stages feed a flatten and two dense layers; a final
    linear head emits 9 Q-values, one per board position (3x3 board).
    Training follows a simple one-step Q-learning scheme driven by
    ``network_learn``.
    """

    def __init__(self):
        super(ChessDQN, self).__init__()

        # Feature extractor: three conv/pool stages with shrinking kernels.
        self.conv1 = tf.keras.layers.Conv2D(32, (8, 8), padding='same', activation='relu')
        self.pool1 = tf.keras.layers.MaxPooling2D()

        self.conv2 = tf.keras.layers.Conv2D(64, (4, 4), padding='same', activation='relu')
        self.pool2 = tf.keras.layers.MaxPooling2D()

        self.conv3 = tf.keras.layers.Conv2D(128, (2, 2), padding='same', activation='relu')
        self.pool3 = tf.keras.layers.MaxPooling2D()

        self.flatten = tf.keras.layers.Flatten()

        self.dense4 = tf.keras.layers.Dense(256, activation='relu')
        self.dense5 = tf.keras.layers.Dense(512, activation='relu')

        # Linear head: one Q-value per board cell (no activation — raw values).
        self.output_chess_pos = tf.keras.layers.Dense(9)

        self.optimizer = tf.keras.optimizers.Adam()

        # FIX: the original used CategoricalCrossentropy here, but the
        # targets built in getLoss are real-valued Q-values (possibly
        # negative, not a probability distribution) and the head has no
        # softmax — cross-entropy is mathematically invalid for that.
        # Q-learning regresses TD targets, so use mean squared error.
        self.lossval = tf.keras.losses.MeanSquaredError()

        # Discount factor for future rewards.
        self.gamma = 0.8

    # FIX: override Keras' `call`, not `__call__`. Overriding __call__
    # bypassed tf.keras.Model's build/tracking machinery (automatic
    # building, input specs, the `training` argument). Callers invoking
    # `model(x)` are unaffected: the base __call__ dispatches here.
    def call(self, inputs):
        """Forward pass: image batch -> (batch, 9) Q-values."""
        output = self.conv1(inputs)
        output = self.pool1(output)

        output = self.conv2(output)
        output = self.pool2(output)

        output = self.conv3(output)
        output = self.pool3(output)

        output = self.flatten(output)

        output = self.dense4(output)
        output = self.dense5(output)

        return self.output_chess_pos(output)

    def getLoss(self, input_datas):
        """Compute the Q-learning training loss for a batch of transitions.

        Each element of ``input_datas`` is a tuple
        ``(cur_chess_img, reward, is_done, cur_chess_pos)`` where:

        - ``cur_chess_img``: the board image captured BEFORE this move
          (pixel values 0-255; normalized to [0, 1] here).
        - ``reward``: reward for this move (normal move: 1; overridden
          below to -10 on a loss and +10 on a win/draw-terminal).
        - ``is_done``: -1 lost, 0 still playing, 1 draw, 10 won.
        - ``cur_chess_pos``: one-hot move vector, e.g. [0 0 0 0 1 0 0 0 0].

        :return: scalar loss tensor.
        """
        # Force terminal rewards: -10 when the game was lost, +10 when won.
        input_datas = [
            (img, -10 if done < 0 else (10 if done > 0 else rew), done, pos)
            for (img, rew, done, pos) in input_datas
        ]

        # Forward pass on the normalized image batch.
        results = self(tf.cast(
            np.array([img for (img, _, _, _) in input_datas]) / 255.0,
            tf.float32))

        # Start targets from the network's own predictions so that every
        # action except the one actually played contributes zero loss.
        labels = results.numpy().copy()

        for index, (_, reward, is_done, cur_chess_pos) in enumerate(input_datas):
            # FIX: cur_chess_pos is a one-hot list; the original indexed
            # `labels[index][cur_chess_pos]`, which fancy-indexes rows 0/1
            # repeatedly instead of addressing the chosen move. Convert the
            # one-hot vector to a scalar action index first.
            action = int(np.argmax(cur_chess_pos))
            row = labels[index]

            if is_done < 0:
                # Losing move: anchor its target at the row minimum.
                # NOTE(review): with reward == -10 here, `min - reward`
                # equals min + 10, i.e. ABOVE the minimum — possibly the
                # intent was `min + reward`. Original sign preserved;
                # confirm against training behavior.
                row[action] = row.min() - reward
            else:
                # Standard TD-style target for non-losing moves.
                # NOTE(review): this bootstraps from the CURRENT state's own
                # max Q rather than the successor state's — confirm this is
                # the intended (simplified) update rule.
                row[action] = self.gamma * row.max() + reward

        return self.lossval(labels, results)

    def get_gard(self, input_datas):
        """One optimization step: compute loss, backprop, apply Adam update.

        (Name keeps the historical 'gard' spelling for caller compatibility.)
        """
        with tf.GradientTape() as tape:
            loss = self.getLoss(input_datas)
        g_grad = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(g_grad, self.trainable_variables))

    def network_learn(self, input_datas):
        """Public training entry point — runs one gradient step on a batch."""
        self.get_gard(input_datas)

    def load(self, name):
        """Restore weights from ``name`` if the checkpoint file exists."""
        if os.path.exists(name):
            self.load_weights(name)

    def save(self, name):
        """Persist the current weights to ``name``."""
        self.save_weights(name)