import numpy as np
import tensorflow as tf

from .game import Game
from .pos_direct import GamePosition, index_2_action, generate_random_action_index
from .network import create_vision_net
from typing import Union, Tuple, List
from tensorflow.keras import models
from random import random
import os

# Restrict TensorFlow to the first GPU. CUDA_VISIBLE_DEVICES takes
# comma-separated device indices (or UUIDs), e.g. '0' — the previous value
# '/gpu:0' is not a valid entry and would hide every GPU from the process.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def generate_pos(height: int, width: int) -> GamePosition:
    """Return a uniformly random position inside a height x width grid.

    :param height: number of rows in the grid
    :param width: number of columns in the grid
    :return: a GamePosition with 0 <= y < height and 0 <= x < width
    """
    # random() is in [0, 1), so flooring the product keeps each
    # coordinate strictly inside the grid bounds.
    row = int(random() * height)
    col = int(random() * width)
    return GamePosition(row, col)


def get_vision_ele_in_multichannels(char: str):
    """One-hot encode a single map character into a 4-channel list.

    Channel order: '#' -> 0, '~' -> 1, '.' -> 2, '$' -> 3.
    Any other character maps to the all-zero vector.

    :param char: one tile character from the game map
    :return: a fresh list of four 0/1 ints
    """
    channel_of = {'#': 0, '~': 1, '.': 2, '$': 3}
    encoding = [0, 0, 0, 0]
    slot = channel_of.get(char)
    if slot is not None:
        encoding[slot] = 1
    return encoding


def parse_vision(data: np.ndarray) -> np.ndarray:
    """Encode a batch of character visions into a 4-channel float tensor.

    :param data: array of shape (n, v_height, v_width) holding map characters
    :return: array of shape (n, v_height, v_width, 4) of one-hot channels
    """
    batch, v_height, v_width = data.shape[0], data.shape[1], data.shape[2]
    encoded = [
        [
            [get_vision_ele_in_multichannels(data[b][y][x]) for x in range(v_width)]
            for y in range(v_height)
        ]
        for b in range(batch)
    ]
    return np.array(encoded).reshape([-1, v_height, v_width, 4])


class Memory:
    """Fixed-capacity circular replay buffer for (s, s', a, r) transitions.

    Slots are overwritten oldest-first once ``memory_size`` entries have
    been stored; ``count`` keeps growing and doubles as the write cursor.
    """

    def __init__(self, memory_size: int, batch_size: int, game: Game):
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.game = game
        # Total number of transitions ever added (not capped at capacity).
        self.count = 0
        # Parallel slot arrays; index i holds one transition's fields.
        self.prev_vision_list: list = [None] * memory_size
        self.vision_list: list = [None] * memory_size
        self.action_list: list = [None] * memory_size
        self.reward_list: list = [None] * memory_size

    def add(self, prev_vision: np.ndarray, vision: np.ndarray, action: int, reward: float):
        """Store one transition, overwriting the oldest slot when full.

        :param prev_vision: observation before the action
        :param vision: observation after the action
        :param action: index of the action taken
        :param reward: reward received for the action
        """
        slot = self.count % self.memory_size
        side = self.game.vision_range
        self.prev_vision_list[slot] = prev_vision.reshape(side, side)
        self.vision_list[slot] = vision.reshape(side, side)
        self.action_list[slot] = action
        self.reward_list[slot] = reward
        self.count += 1

    def get_batch(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Sample ``batch_size`` stored transitions uniformly, with replacement.

        :return: (prev_visions, visions, actions, rewards) as numpy arrays
        """
        # Only the filled portion of the buffer is eligible for sampling.
        filled = min(self.count, self.memory_size)
        picks = np.random.choice(filled, size=self.batch_size).tolist()
        prev_batch = np.array([self.prev_vision_list[i] for i in picks])
        vision_batch = np.array([self.vision_list[i] for i in picks])
        action_batch = np.array([self.action_list[i] for i in picks])
        reward_batch = np.array([self.reward_list[i] for i in picks])
        return prev_batch, vision_batch, action_batch, reward_batch


class AI:
    """DQN-style agent: an eval net picks actions while a periodically
    synchronized target net supplies bootstrap targets for Q-learning.
    """

    def __init__(self, epochs: int, game: Game, model_path: str, sync_time: int, sigma: float, gamma: float):
        """
        :param epochs: number of episodes to play during train()
        :param game: game environment providing start/move/display/is_finished
        :param model_path: where the eval net is saved when syncing
        :param sync_time: sync target net every this many learn() calls
        :param sigma: probability of acting greedily (epsilon-greedy)
        :param gamma: discount factor for future rewards
        """
        self.epochs = epochs
        self.game = game
        self.model_path = model_path
        self.sync_time = sync_time
        self.sigma = sigma
        self.gamma = gamma

        self.memory = Memory(100, 32, game)
        self.vision: Union[np.ndarray, None] = None
        self.eval_net: models.Model = create_vision_net()
        self.target_net: models.Model = create_vision_net()
        self.eval_net.summary()
        self.sync_target_net()
        self.optimizer: Union[None, tf.optimizers.Optimizer] = None
        self.train_count = 0

    def train(self):
        """Play the configured number of training episodes."""
        for _ in range(self.epochs):
            self.train_one_epoch()

    def train_one_epoch(self):
        """Play one episode from a random start, learning after every move."""
        # Acquire an initial position; retry until the game accepts one
        # (game.start returns None for an invalid spawn).
        while True:
            self.vision = self.game.start(generate_pos(self.game.height, self.game.width))
            if self.vision is not None:
                # NOTE(review): a fresh Adam per episode discards optimizer
                # moment estimates every epoch — confirm this is intended.
                self.optimizer = tf.keras.optimizers.Adam(1e-4)
                break
        while True:
            # Epsilon-greedy action selection on the current observation.
            prev_vision_numpy = parse_vision(self.vision)
            eval_out = self.eval_net(prev_vision_numpy)
            action_index: int
            if random() < self.sigma:
                # Exploit: highest predicted Q-value.
                action_index = np.argmax(eval_out, axis=1).tolist()[0]
            else:
                # Explore: uniformly random action.
                action_index = generate_random_action_index()
            action = index_2_action[action_index]
            # Apply the action and record the transition.
            vision, reward = self.game.move(action)
            self.game.display()
            self.memory.add(self.vision, vision, action_index, reward)
            # Bug fix: advance the agent's current observation. Previously
            # self.vision was never updated, so every step of the episode
            # acted on the initial observation.
            self.vision = vision
            self.learn()
            if self.game.is_finished():
                print("---------------GAME OVER----------------")
                break

    def sync_target_net(self):
        """Save the eval net and load its weights into the target net."""
        self.eval_net.save(self.model_path)
        self.target_net.load_weights(self.model_path)

    def learn(self):
        """Sample a replay batch and take one gradient step on the eval net.

        Targets follow the Q-learning update:
        Q(s, a) <- r + gamma * max_a' Q_target(s', a')
        """
        prev_state, state, action, reward = self.memory.get_batch()
        prev_state_numpy = parse_vision(prev_state)
        state_numpy = parse_vision(state)
        # Start from the eval net's own predictions so only the taken
        # action's entry differs from the prediction (zero loss elsewhere).
        q_target = self.eval_net(prev_state_numpy).numpy()
        # Bootstrap value of the next state comes from the frozen target net.
        q_next = self.target_net(state_numpy).numpy()
        # Bug fix (two issues in the old update):
        #  1. np.argmax returned the *index* of the best next action where
        #     the target needs the max Q-*value* -> np.max.
        #  2. q_target[:, action] with an array `action` is fancy indexing
        #     that addresses a (batch, batch) slice; each sample must update
        #     only its own row/action cell.
        # NOTE(review): no terminal-state masking — the bootstrap term is
        # added even on the final transition; confirm whether a done flag
        # should zero it out.
        rows = np.arange(q_target.shape[0])
        q_target[rows, action] = reward + self.gamma * np.max(q_next, axis=1)
        self.backprop(prev_state_numpy, q_target)
        self.train_count += 1
        # Periodically copy eval weights into the target network.
        if self.train_count % self.sync_time == 0:
            self.sync_target_net()

    def backprop(self, prev_state, q_target):
        """One MSE gradient step pushing eval-net outputs toward q_target.

        :param prev_state: encoded batch of previous states
        :param q_target: desired Q-values for that batch
        """
        with tf.GradientTape() as tape:
            eval_out = self.eval_net(prev_state)
            loss = tf.reduce_mean(tf.math.squared_difference(eval_out, q_target))
        gradients = tape.gradient(loss, self.eval_net.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.eval_net.trainable_variables))