import json
import tensorflow as tf
import websocket

from .q_network import q_network_instance
from .sending_message import ActionMessage
from typing import List, Union, Tuple
import random
import numpy as np

BATCH_SIZE = 32


def parse_vision_unit(c):
    """Map one vision-grid character to its numeric encoding.

    '~' -> 0.1, '.' -> 0.3, '#' -> 0.5, anything else -> 0.7.
    """
    encoding = {'~': 0.1, '.': 0.3, '#': 0.5}
    return encoding.get(c, 0.7)


def parse_vision(vision_strs: List[str], height: int, width: int) -> np.ndarray:
    """Encode the vision strings into a (1, height, width) float array.

    Rows are taken bottom-up (last-drawn row first) and each character is
    converted via parse_vision_unit.
    """
    grid = [
        [parse_vision_unit(vision_strs[row][col]) for col in range(width)]
        for row in range(height - 1, -1, -1)
    ]
    return np.array(grid, dtype=float).reshape((1, height, width))


def debug_parse_vision(vision_strs: List[str], height: int, width: int) -> np.ndarray:
    """Return the raw vision characters as a (1, height, width) array.

    Rows appear in reverse order (last string first), matching the row
    flipping performed by parse_vision, but without numeric encoding —
    useful for printing the grid while debugging.
    """
    grid = [list(row_str) for row_str in reversed(vision_strs)]
    return np.array(grid).reshape((1, height, width))


def parse_location(player_locations: dict) -> np.ndarray:
    """Convert a location mapping (keys 'y' and 'x') into a (1, 2) float array.

    The output order is (y, x): index 0 holds the 'y' value, index 1 holds
    the other key's value.

    Note: the old annotation `List[int]` was wrong — the body iterates dict
    keys and indexes by key, so the input is a mapping.
    """
    coords = [0.0, 0.0]
    for key in player_locations:
        if key == 'y':
            coords[0] = player_locations[key]
        else:
            coords[1] = player_locations[key]
    # np.float was removed in NumPy 1.24; the builtin float dtype is equivalent.
    player_location = np.array(coords, dtype=float)
    return player_location.reshape((1, 2))


class TrainningParam:
    """Hyper-parameters that drive the DQN training loop.

    (Name kept as-is for compatibility with existing callers.)
    """
    sigma: float
    gamma: float
    batch_size: int

    def __init__(self, gamma: float, sigma: float, batch_size: int):
        """
        Build the training configuration.

        :param gamma: discount factor applied to the q_target table
        :param sigma: probability (0~1) of taking the network-chosen action
        :param batch_size: number of samples per training step
        """
        self.batch_size = batch_size
        self.sigma = sigma
        self.gamma = gamma


class AIUnitParams:
    """Static configuration for a single AI unit."""
    # Side length of the (square) vision window received from the game.
    vision_range: int

    def __init__(self, vision_range: int):
        """:param vision_range: side length of the square field of view."""
        self.vision_range = vision_range


class UnitMemory:
    """Fixed-size, ring-style replay memory.

    Each row is one transition flattened as
    [pre_state | state | action_index | reward], where a state is the
    flattened vision (max_vision_dim values) plus the 2-value location.
    """
    memory: np.ndarray
    state_dim: int

    def __init__(self, max_size: int, dims: int, max_vision_dim: int):
        """
        :param max_size: maximum number of stored transitions
        :param dims: total width of one flattened memory row
        :param max_vision_dim: number of values in a flattened vision grid
        """
        self.memory = np.zeros([max_size, dims])
        self.count = 0
        self.max_size = max_size
        # One state = vision values + 2 location values.
        self.state_dim = max_vision_dim + 2

    def get_batch(self, batch_size: int) -> Union[np.ndarray, None]:
        """Sample stored rows with replacement; None while the memory is empty.

        NOTE(review): during warm-up (count < max_size) the sample size is
        `count` rather than `batch_size` — preserved as-is; confirm whether
        min(count, batch_size) was intended.
        """
        if self.count == 0:
            return None
        sample_indexs: np.ndarray
        if self.count < self.max_size:
            sample_indexs = np.random.choice(self.count, size=self.count)
        else:
            sample_indexs = np.random.choice(self.max_size, size=batch_size)
        batch_memory = self.memory[sample_indexs]
        return batch_memory

    def add_unit(self, memory_unit: np.ndarray):
        """Store one flattened transition, overwriting the oldest slot when full."""
        self.memory[self.count % self.max_size, :] = memory_unit
        self.count += 1

    @staticmethod
    def convert_memory_unit(pre_state: np.ndarray, state: np.ndarray, action_index: int, reward: float):
        """Flatten one (pre_state, state, action, reward) transition into a (1, dims) row."""
        action_index_numpy = np.array([action_index]).reshape((1, 1))
        # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
        action_index_numpy = action_index_numpy.astype(int)
        reward_numpy = np.array([reward]).reshape((1, 1))
        memory_unit = np.hstack([pre_state, state, action_index_numpy, reward_numpy])
        return memory_unit

    def parse_memory_unit(self, batch_memory: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Split batch rows back into (pre_state, state, action_index, reward) columns."""
        pre_state = batch_memory[:, :self.state_dim]
        state = batch_memory[:, self.state_dim:self.state_dim * 2]
        action_index_numpy = batch_memory[:, self.state_dim * 2]
        # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
        action_index_numpy = action_index_numpy.astype(int)
        reward_numpy = batch_memory[:, self.state_dim * 2 + 1]
        return pre_state, state, action_index_numpy, reward_numpy


class AIUnit:
    """One websocket-connected DQN agent.

    On every incoming game message it parses the vision/location state,
    stores the completed transition in replay memory, runs one learning
    step, then chooses (epsilon-greedy) and sends the next action.
    """
    url: str
    ws: websocket.WebSocketApp
    unit_memory: UnitMemory
    ai_params: AIUnitParams
    train_params: TrainningParam
    # Buffered pieces of the previous step, used to form (pre_state, state) pairs.
    buf_vision: Union[np.ndarray, None]
    buf_location: Union[np.ndarray, None]
    buf_action: Union[ActionMessage, None]
    buf_reward: Union[float, None]

    def __init__(self, url: str, ws: websocket.WebSocketApp, ai_param: AIUnitParams, train_param: TrainningParam):
        self.url = url
        self.ws = ws
        self.ai_params = ai_param
        self.train_params = train_param
        self.buf_vision = None
        self.buf_location = None
        self.buf_action = None
        # FIX: buf_reward was annotated on the class but never initialized,
        # so reading it before the first message raised AttributeError.
        self.buf_reward = None
        self.max_vision_dim = ai_param.vision_range * ai_param.vision_range
        # One state = flattened vision + 2 location values.
        # A memory row holds pre_state and state plus action and reward:
        # (max_vision_dim + 2) * 2 + 2 columns in total
        # (e.g. vision_range 5 -> 27 per state -> 56 per row).
        self.unit_memory = UnitMemory(100, (self.max_vision_dim + 2) * 2 + 2, self.max_vision_dim)

    def process_connect_message(self, message: str):
        """Handle one JSON game message: learn from the previous step, reply with an action."""
        data = json.loads(message)
        # Parse the unit's vision grid.
        vision = data['vision']
        buf_reward = data['reward']
        vision_debug = debug_parse_vision(vision, self.ai_params.vision_range, self.ai_params.vision_range)
        vision = parse_vision(vision, self.ai_params.vision_range, self.ai_params.vision_range)
        # Parse the player's location.
        player_location = data['player']
        player_location = parse_location(player_location)
        print(vision_debug)

        # Build the current state; if an action was already taken, the
        # (pre_state, action, reward, state) transition is now complete
        # and can be stored for replay.
        state = self.convert_state(vision, player_location)
        if self.buf_action is not None:
            pre_state = self.convert_state(self.buf_vision, self.buf_location)
            self.store_memory(pre_state, state, self.buf_action.to_index(), buf_reward)

        self.buf_vision = vision
        self.buf_location = player_location
        # Train on previously stored transitions.
        self.learn()

        # Epsilon-greedy policy: with probability sigma exploit the eval
        # network, otherwise explore with a random action.
        if random.random() < self.train_params.sigma:
            eval_out = q_network_instance.predict_with_eval(vision, player_location)
            # argmax over the Q-values picks the greedy action index.
            action_index = np.argmax(eval_out, axis=1).tolist()[0]
        else:
            action_index = ActionMessage.random_action_index()

        # Send the chosen action and buffer it for the next transition.
        send_msg = ActionMessage(action_index)
        self.buf_action = send_msg
        self.ws.send(send_msg.to_string())

    def convert_state(self, vision, player_location) -> np.ndarray:
        """Concatenate flattened vision and location into (-1, max_vision_dim + 2) state rows."""
        vision = vision.reshape((-1, self.max_vision_dim))
        player_location = player_location.reshape((-1, 2))
        state = np.hstack([vision, player_location])
        return state

    def parse_state(self, state: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Split state rows back into a (batch, range, range) vision grid and (batch, 2) location."""
        state_vision: np.ndarray = state[:, :self.max_vision_dim]
        state_vision = state_vision.reshape((-1, self.ai_params.vision_range, self.ai_params.vision_range))
        state_location = state[:, self.max_vision_dim: self.max_vision_dim + 2]
        return state_vision, state_location

    def store_memory(self, pre_state: np.ndarray, state: np.ndarray, action_index: int, reward: float):
        """Append one flattened transition to replay memory."""
        memory_unit = UnitMemory.convert_memory_unit(pre_state, state, action_index, reward)
        self.unit_memory.add_unit(memory_unit)

    def learn(self):
        """Run one DQN update on a sampled replay batch (no-op while memory is empty)."""
        batch_memory = self.unit_memory.get_batch(self.train_params.batch_size)
        if batch_memory is None:
            return
        pre_state, state, action, reward = self.unit_memory.parse_memory_unit(batch_memory)
        pre_vision, pre_location = self.parse_state(pre_state)
        vision, location = self.parse_state(state)
        q_target = q_network_instance.predict_with_eval(pre_vision, pre_location)
        q_next = q_network_instance.predict_with_target(vision, location)
        # FIX: update only the taken action of each sample. The previous
        # `q_target[:, action] = ...` used the action array as a column
        # selector, overwriting whole columns for every row and corrupting
        # the targets of actions that were not taken.
        row_indexes = np.arange(q_target.shape[0])
        q_target[row_indexes, action] = reward + self.train_params.gamma * np.max(q_next, axis=1)
        q_network_instance.train_eval(pre_vision, pre_location, q_target)
