import numpy as np
from typing import List, Union, Dict, Tuple
import typing
import gym
import gym.spaces as spaces

# Observation dict returned by reset()/step(); values are per-key state arrays.
OBS_T = Dict[str, np.ndarray]
# (observation, reward, done, info) tuple returned by step().
RETURN_T = Tuple[OBS_T, float, bool, dict]


class NetworkEnv(gym.Env):
    """A 3-user / 2-channel energy-harvesting relay network environment.

    An access point (AP) with a finite energy buffer schedules up to two
    user->channel transmissions per step (or waits). The reward for a step
    is the sum of the link-state values of the scheduled links; each
    forwarded link costs ``transmit_power`` units of stored energy, and the
    AP harvests a random amount of energy every step.
    """

    # Per action (0-11): the channel_state indices whose link values are
    # transmitted. channel_state layout:
    #   [0: UE0->ch0, 1: UE1->ch0, 2: UE2->ch0,
    #    3: UE0->ch1, 4: UE1->ch1, 5: UE2->ch1]
    # Actions 0-5 schedule two users (energy need 2), 6-11 one user
    # (energy need 1); action 12 means "wait".
    _ACTION_LINKS = {
        0: (0, 4),   # UE0->ch0, UE1->ch1
        1: (0, 5),   # UE0->ch0, UE2->ch1
        2: (1, 3),   # UE1->ch0, UE0->ch1
        3: (1, 5),   # UE1->ch0, UE2->ch1
        4: (2, 3),   # UE2->ch0, UE0->ch1
        5: (2, 4),   # UE2->ch0, UE1->ch1
        6: (0,),     # UE0->ch0
        7: (3,),     # UE0->ch1
        8: (1,),     # UE1->ch0
        9: (4,),     # UE1->ch1
        10: (2,),    # UE2->ch0
        11: (5,),    # UE2->ch1
    }

    def __init__(self,
                 num_user: int = 3,
                 num_channel: int = 2,
                 link_probabilities: typing.Optional[List[float]] = None,
                 eng_probabilities: typing.Optional[List[float]] = None,
                 rest_power: int = 3):
        """Initialise the environment.

        :param num_user: number of receiver users (only 3 is supported)
        :param num_channel: number of channels (only 2 is supported)
        :param link_probabilities: pmf over the 4 link states
            (default [0.1, 0.2, 0.5, 0.2])
        :param eng_probabilities: pmf over harvested energy levels 0-3
            (default [0.204, 0.338, 0.266, 0.192])
        :param rest_power: initial stored energy of the AP
        :raises NotImplementedError: for any unsupported topology
        """
        # BUG FIX: the original check used `and`, so a configuration with
        # only ONE wrong dimension (e.g. num_user=5, num_channel=2) slipped
        # through; `or` rejects every unsupported combination.
        if num_user != 3 or num_channel != 2:
            raise NotImplementedError("该环境只支持num_user=3且num_channel=2")
        # Avoid mutable default arguments: build fresh lists per instance.
        if link_probabilities is None:
            link_probabilities = [0.1, 0.2, 0.5, 0.2]
        if eng_probabilities is None:
            eng_probabilities = [0.204, 0.338, 0.266, 0.192]

        # gym plumbing
        super().__init__()
        self.action_space = spaces.Discrete(13)
        self.observation_space = spaces.Dict({
            'obs':
                spaces.Box(low=0., high=1., shape=(8,)),
            # BUG FIX: reset()/step() emit a flat length-6 vector, not a
            # (2, 3) matrix, so declare shape=(6,).  np.int was removed in
            # NumPy 1.24; plain int is the supported spelling.
            'channel_state':
                spaces.Box(low=0, high=3, shape=(6,), dtype=int),
            'rest_power':
                spaces.Discrete(11),
            'harvest_power':
                spaces.Discrete(4)
        })
        self.reward_range = (0., 6.)
        self._max_step = 50
        self._step_counter = 0
        self.done = False

        # environment state
        self.num_user = num_user          # number of receiver users
        self.num_channel = num_channel    # number of channels
        self.emax = 10                    # AP energy-buffer capacity
        self.link_probabilities = link_probabilities  # link-state pmf
        self.link_probabilities_cdf = NetworkEnv._get_cdf(
            self.link_probabilities)      # link-state cdf (kept for compat)
        self.eng_probabilities = eng_probabilities  # harvest-energy pmf
        self.rest_power = rest_power      # current stored energy
        self.reward = 0
        self.harvest_power = 0            # energy harvestable this step
        self.transmit_power = 1           # fixed cost per forwarded link
        self._value_list = [0, 1, 2, 3]   # support of both distributions
        self.channel_state = None
        self.obs = None

    def set_link_prob(self, prob: Union[List[float], np.ndarray]):
        """Replace the link-state distribution and refresh its CDF."""
        assert np.isclose(sum(prob), 1.) and len(prob) == 4
        self.link_probabilities = np.array(prob)
        self.link_probabilities_cdf = NetworkEnv._get_cdf(
            self.link_probabilities)

    def set_eng_prob(self, prob: Union[List[float], np.ndarray]):
        """Replace the energy-harvest distribution."""
        assert np.isclose(sum(prob), 1.) and len(prob) == 4
        self.eng_probabilities = np.asarray(prob)

    def set_emax(self, emax: int):
        """Set the energy-buffer capacity."""
        self.emax = emax

    def _change_harvest_power(self):
        """Resample the harvestable energy after every interaction."""
        self.harvest_power = np.random.choice(self._value_list,
                                              p=self.eng_probabilities)

    def _update_state(self, channel_state: List[int], rest_power: int,
                      harvest_power: int):
        """Store the three state components on the instance."""
        self.channel_state = channel_state
        self.rest_power = rest_power
        self.harvest_power = harvest_power

    @staticmethod
    def _get_cdf(probs: Union[List[float], np.ndarray]) -> np.ndarray:
        """Return the cumulative distribution of ``probs``.

        np.cumsum is equivalent to the original lower-triangular matrix
        product, without building an O(n^2) intermediate.
        """
        return np.cumsum(probs)

    @staticmethod
    def _cvt2observation(channel_state: Union[List[int], np.ndarray],
                         rest_power: int, harvest_power: int,
                         emax: int = 10) -> np.ndarray:
        """Normalise the raw state into a flat float vector for an NN agent.

        :param channel_state: six link states, each in {0..3}
        :param rest_power: stored energy, expected in [0, emax]
        :param harvest_power: harvestable energy, in {0..3}
        :param emax: buffer capacity used to normalise rest_power; the
            default 10 matches the original hard-coded divisor
        :return: length-8 vector with every component scaled into [0, 1]
        """
        # np.asarray also accepts plain lists, which the annotation allows
        # but the original `list / 3` would have rejected.
        return np.concatenate([
            np.asarray(channel_state) / 3,
            [rest_power / emax],
            [harvest_power / 3]
        ])

    def reset(self) -> OBS_T:
        """Randomise the state and return the initial observation dict."""
        n_links = self.num_user * self.num_channel
        # Original did reshape(2, 3).flatten(), a no-op round trip.
        channel_state = np.random.randint(0, 4, size=n_links)
        rest_power = np.random.randint(1, 4)
        harvest_power = np.random.randint(1, 4)
        observation = NetworkEnv._cvt2observation(channel_state, rest_power,
                                                  harvest_power, self.emax)
        obs = {
            'obs': observation,
            'channel_state': channel_state,
            'rest_power': rest_power,
            'harvest_power': harvest_power
        }
        self._update_state(channel_state, rest_power, harvest_power)
        self._step_counter = 0
        self.done = False
        return obs

    def step(self, action: int) -> RETURN_T:
        """Apply ``action``, resample the stochastic state, and return
        ``(obs, reward, done, info)``.

        See ``_ACTION_LINKS`` for the action -> link mapping; action 12 waits.

        :raises StopIteration: if called after the episode has finished
        """
        if self.done:
            raise StopIteration("environment has done, try to reset()")
        assert 0 <= action <= 12, f"expect action in [0, 12],\
            but get{action}"

        if action < 12:
            links = self._ACTION_LINKS[action]
            required = len(links)  # 2 units for dual transmit, 1 for single
            if self.rest_power >= required:
                self.reward = sum(self.channel_state[i] for i in links)
                self.rest_power = min(
                    self.emax,
                    self.rest_power - required * self.transmit_power
                    + self.harvest_power)
            # NOTE(review): when stored power is insufficient, the previous
            # step's reward is returned unchanged and no energy is harvested
            # — preserved from the original; confirm this is intended.
        elif self.rest_power < 2 and self.harvest_power < 1:
            # NOTE(review): "wait" only banks energy when rest_power < 2 AND
            # nothing is harvestable (so the buffer never grows here) —
            # preserved from the original; the guard looks suspicious.
            self.reward = 0
            self.rest_power = min(self.emax,
                                  self.rest_power + self.harvest_power)

        # Resample every link state from the link distribution.  Equivalent
        # to the original CDF-threshold sampling, and consistent with
        # _change_harvest_power; np.int (removed in NumPy 1.24) is gone.
        self.channel_state = np.random.choice(
            self._value_list,
            size=self.num_user * self.num_channel,
            p=self.link_probabilities)

        # Resample the harvestable energy.
        self._change_harvest_power()

        self.obs = NetworkEnv._cvt2observation(self.channel_state,
                                               self.rest_power,
                                               self.harvest_power,
                                               self.emax)
        obs = {
            'obs': self.obs,
            'channel_state': self.channel_state,
            'rest_power': self.rest_power,
            'harvest_power': self.harvest_power
        }

        self._step_counter += 1
        if self._step_counter == self._max_step:
            self.done = True

        info = {'step': self._step_counter}

        return obs, self.reward, self.done, info

    def render(self, mode='human'):
        """No-op rendering (required by the gym.Env interface)."""
        pass