import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from typing import Dict

# Restrict the process to GPU 0. CUDA_VISIBLE_DEVICES expects a
# comma-separated list of device indices (or UUIDs), e.g. '0'.
# The original value '/gpu:0' is a TensorFlow device string, which is
# invalid here and would make NO GPU visible to the process.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def create_vision_net():
    """Build the convolutional branch encoding the 5x5 single-channel
    vision grid into a 16-dimensional feature vector.

    Returns:
        An uncompiled ``keras.models.Sequential`` mapping input of shape
        (batch, 5, 5, 1) to output of shape (batch, 16).
    """
    model = keras.models.Sequential()
    model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(5, 5, 1)))
    # Keras only honors input_shape on the first layer; the original
    # duplicate input_shape on this layer was ignored dead configuration
    # and has been removed.
    model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(32))
    model.add(keras.layers.Dense(16))
    return model


def create_position_net():
    """Build the dense branch that embeds the 2-D player position into a
    16-dimensional feature vector.

    Returns:
        An uncompiled ``keras.models.Sequential`` mapping (batch, 2)
        inputs to (batch, 16) outputs.
    """
    return keras.models.Sequential([
        keras.layers.Dense(8, input_shape=(2,)),
        keras.layers.Dense(16),
    ])


def create_fusion_net():
    """Build the fusion head that maps the 32-dim concatenation of vision
    and position features to 5 output values.

    Returns:
        An uncompiled ``keras.models.Sequential`` mapping (batch, 32)
        inputs to (batch, 5) outputs.
    """
    return keras.models.Sequential([
        keras.layers.Dense(64, input_shape=(32,)),
        keras.layers.Dense(128),
        keras.layers.Dense(5),
    ])


class EvalNet:
    """Aggregate of the three sub-networks that together form one Q-value
    estimator: a vision encoder, a position encoder, and a fusion head
    that combines their feature vectors."""

    vision_net: keras.models.Model
    position_net: keras.models.Model
    fusion_net: keras.models.Model

    def __init__(self, vision_net, position_net, fusion_net):
        # Pure container: store the three sub-networks as-is.
        self.vision_net, self.position_net, self.fusion_net = (
            vision_net,
            position_net,
            fusion_net,
        )


class QNetwork:
    """Double-network Q-learning wrapper.

    Holds an online ("eval") network trained on every step, and a
    "target" network whose weights are periodically synchronized from the
    eval network to stabilize TD targets.
    """

    eval_net: EvalNet
    target_net: EvalNet
    train_count: int
    sync_steps: int
    model_paths: Dict[str, str]

    def __init__(self, sync_steps: int, model_file_dir: str):
        """
        :param sync_steps: number of training steps between target-network
            synchronizations (eval_net weights are copied to target_net).
        :param model_file_dir: directory holding the three sub-network
            checkpoint files (vision.h5 / position.h5 / fusion.h5).
        """
        self.model_file_dir = model_file_dir
        # Keras does not create parent directories on save(); make sure the
        # checkpoint directory exists before the first sync_target_net().
        os.makedirs(self.model_file_dir, exist_ok=True)
        self.model_paths = {'vision': os.path.join(self.model_file_dir, "vision.h5"),
                            'position': os.path.join(self.model_file_dir, "position.h5"),
                            'fusion': os.path.join(self.model_file_dir, "fusion.h5")}

        self.eval_net = EvalNet(create_vision_net(), create_position_net(), create_fusion_net())
        self.target_net = EvalNet(create_vision_net(), create_position_net(), create_fusion_net())
        self.sync_target_net()
        self.train_count = 0
        self.sync_steps = sync_steps
        self.optimizer = tf.keras.optimizers.Adam(1e-4)

    def sync_target_net(self):
        """Checkpoint the eval sub-networks to disk, then copy their
        weights into the target sub-networks.

        The save-then-load round trip also doubles as on-disk persistence
        of the current eval weights.
        """
        self.eval_net.vision_net.save(self.model_paths['vision'])
        self.eval_net.position_net.save(self.model_paths['position'])
        self.eval_net.fusion_net.save(self.model_paths['fusion'])
        self.target_net.vision_net.load_weights(self.model_paths['vision'])
        self.target_net.position_net.load_weights(self.model_paths['position'])
        self.target_net.fusion_net.load_weights(self.model_paths['fusion'])

    def predict_with_eval(self, vision: np.ndarray, player_position: np.ndarray):
        """Forward pass through the eval network.

        :param vision: batch of vision grids (fed to the conv branch).
        :param player_position: batch of 2-D positions (fed to the dense branch).
        :return: Q-values as a numpy array of shape (batch, 5).
        """
        vision_out = self.eval_net.vision_net(vision)
        position_out = self.eval_net.position_net(player_position)
        fusion_in = tf.concat([vision_out, position_out], axis=1)
        fusion_out = self.eval_net.fusion_net(fusion_in)
        return fusion_out.numpy()

    def predict_with_target(self, vision: tf.Tensor, player_position: tf.Tensor):
        """Forward pass through the target network (frozen between syncs).

        :return: Q-value tensor of shape (batch, 5); kept as a tensor so it
            can feed directly into TD-target computation.
        """
        vision_out = self.target_net.vision_net(vision)
        position_out = self.target_net.position_net(player_position)
        fusion_in = tf.concat([vision_out, position_out], axis=1)
        fusion_out = self.target_net.fusion_net(fusion_in)
        return fusion_out

    def train_eval(self, vision, position, target):
        """Run one gradient step of MSE(Q_eval, target) over all three
        eval sub-networks, then sync the target network every
        ``sync_steps`` steps.

        Bug fix: the original never incremented ``train_count``, so
        ``train_count % sync_steps`` was permanently 0 and the expensive
        disk-backed sync ran on every single training step.
        """
        with tf.GradientTape() as tape_loss:
            vision_out = self.eval_net.vision_net(vision)
            position_out = self.eval_net.position_net(position)
            fusion_in = tf.concat([vision_out, position_out], axis=1)
            fusion_out = self.eval_net.fusion_net(fusion_in)
            # convert_to_tensor, unlike tf.constant, also accepts targets
            # that are already tensors.
            target_tensor = tf.convert_to_tensor(target)
            loss = tf.reduce_mean(tf.math.squared_difference(fusion_out, target_tensor))
        variables = self.eval_net.vision_net.trainable_variables + \
                    self.eval_net.position_net.trainable_variables + \
                    self.eval_net.fusion_net.trainable_variables
        gradients = tape_loss.gradient(loss, variables)
        self.optimizer.apply_gradients(zip(gradients, variables))
        self.train_count += 1
        if self.train_count % self.sync_steps == 0:
            self.sync_target_net()


# NOTE(review): constructed at import time, which builds six Keras models
# and (via sync_target_net in __init__) writes checkpoint files under
# ./models — heavy side effects for a module import; presumably intended
# as a module-level singleton, but consider lazy construction.
q_network_instance = QNetwork(200, os.path.join(os.getcwd(), "models"))