from environment.trace_sample import Trace, BaseTrace
from environment.state import State
from config import DRLParameters
import gym
from gym import spaces
import numpy as np


class Environment(gym.Env):
    """Gym-style environment that steps an agent through process traces.

    Each action selects a step length (``action + 1``); an episode ends
    when the trace is exhausted or ``num_points`` points have been sampled.
    """

    def __init__(self, parameter_space: DRLParameters, trace: Trace = None, training=True, num_points=999):
        """
        :param parameter_space: hyper-parameter container (window size,
            action-space size, case-id splits, ...)
        :param trace: optional pre-built trace; a random one is sampled when None
        :param training: use the train case-id split when True, the test split otherwise
        :param num_points: maximum number of points sampled per episode
        """
        self.parameter_space = parameter_space
        base_trace = BaseTrace(self.parameter_space)
        self.trace_case_id_set = base_trace.train_case_ids if training else base_trace.test_case_ids
        self.state_window_size = parameter_space.WINDOW_SIZE
        self._trace = trace if trace is not None else Trace(self.parameter_space)
        # Size of the discrete action space.
        self.action_space = spaces.Discrete(self.parameter_space.ACTION_SPACE)
        self.state = State(self._trace, self.state_window_size)
        # Shape of the observation: (window size, number of features).
        # NOTE(review): this is a plain shape array, not a gym.spaces.Space —
        # callers appear to rely on that, so it is kept as-is.
        self.observation_space = np.array([self.state_window_size, self.state.feature.shape[1]])

        # Per-case-id bookkeeping: last episode reward and train count,
        # consumed by _pick_less_reward for prioritised sampling.
        self._trace_case_id_dict = self.trace_rewards()

        self.num_points = num_points

        np.random.seed(0)

    def reset(self):
        """Sample a new trace and return the fresh initial state."""
        self._pick_random_trace()
        # Alternative sampling strategy: self._pick_less_reward(random_epsilon=0.7)
        self.state = State(self._trace, self.state_window_size)
        return self.state

    def step(self, action: int):
        """Advance the state by ``action + 1`` events.

        :param action: index into the discrete action space
        :return: ``(state, reward, done, info)`` following the gym API
        """
        step_length = action + 1
        done = (self.state.next_index + step_length >= self._trace.trace_length
                or len(self.state.actions) >= self.num_points)
        if done:
            # Episode over: jump to the trace end and record this episode's
            # reward / train count for prioritised trace sampling.
            reward = self.state.step_to_end()
            record = self._trace_case_id_dict[self._trace.case_id]
            record["reward"] = reward
            record["learn_times"] += 1
            return self.state, reward, True, {}

        reward = self.state.step_next(step_length)
        return self.state, reward, False, {}

    def seed(self, seed=None):
        """No-op: the RNG is seeded once in __init__ (np.random.seed(0))."""
        pass

    def render(self, mode='human'):
        """No-op: this environment has no visual rendering."""
        pass

    def trace_rewards(self):
        """Build the initial per-case-id bookkeeping dict.

        In debug mode only the configured initial case ids are tracked;
        otherwise every case id of the active split is.

        :return: ``{case_id: {"reward": 0, "learn_times": 0}}``
        """
        case_ids = (self.parameter_space.INITIAL_CASE_IDS
                    if self.parameter_space.DEBUG
                    else self.trace_case_id_set)
        return {cid: {"reward": 0, "learn_times": 0} for cid in case_ids}

    def _pick_random_trace(self):
        """Pick a random trace sample for training.

        :return: None (replaces ``self._trace`` in place)
        """
        self._trace = Trace(self.parameter_space)

    def _pick_less_reward(self, random_epsilon=0.5):
        """Pick the training trace with the lowest recorded reward.

        :param random_epsilon: probability of picking by reward priority
            instead of uniformly at random
        :return: None (replaces ``self._trace`` in place)
        """
        sorted_reward_cids = sorted(self._trace_case_id_dict.items(), key=lambda x: x[1]["reward"])
        if np.random.uniform() < random_epsilon:
            worst_cid, worst = sorted_reward_cids[0]
            runner_up_cid, runner_up = sorted_reward_cids[1]
            if worst["learn_times"] > 2 * runner_up["learn_times"]:
                # The worst trace has already been trained on far more often
                # than the runner-up: reset its reward and switch to the
                # runner-up to avoid getting stuck on one hard trace.
                self._trace_case_id_dict[worst_cid]["reward"] = 0
                self._trace = Trace(self.parameter_space, case_id=runner_up_cid)
            else:
                self._trace = Trace(self.parameter_space, case_id=worst_cid)
        else:
            self._trace = Trace(self.parameter_space)


if __name__ == '__main__':
    # Intentionally empty: this module is meant to be imported by the
    # training code, not run as a script.
    pass
