from UTILS.colorful import print亮紫, print亮绿
import numpy as np
import copy
import math
import random
import torch
from operator import itemgetter as iget
from UTILS.tensor_ops import _2cpu2numpy, _2tensor, process_space, np_one_hot, __hash__
from config import ChainVar

class AlgorithmConfig(object): # ADD_TO_CONF_SYSTEM add into the parameter search path, do not remove this comment !!!
    """Hyper-parameters for the value-decomposition algorithms (QMIX / QTRAN / IQL / MAVEN).

    Class attributes are read (and may be overridden) by the external config
    system; an attribute suffixed ``_cv`` is a ChainVar hook that recomputes
    its base attribute whenever one of the fields it is chained with changes.
    """
    step_mul = 8    # how many steps to make an action
    alg = 'qmix'    # which algorithm variant to run ('qmix' | 'qtran' | 'iql' | 'iql_sr')
    print('using alg:', alg)
    def use_one_reward(alg):
        # Return True if `alg` learns from a single team reward,
        # False if it uses agent-wise rewards.
        # NOTE: deliberately has no self/cls parameter -- it is called at
        # class-definition time and again through the ChainVar lambda below.
        if alg in ['qmix', 'qtran', 'iql_sr']:
            return True   # iql:False, qmix:True
        elif alg in ['iql']:
            return False  # iql:False, qmix:True
        else:
            assert False, ('does alg use agent-wise reward ??')
    one_reward = use_one_reward(alg)
    # keep one_reward consistent when the config system changes `alg`
    one_reward_cv = ChainVar(lambda alg:AlgorithmConfig.use_one_reward(alg), chained_with=['alg'])


    n_steps = None # total time steps


    last_action = True # whether to use the last action to choose action
    reuse_network = True # whether to use one network for all agents
    gamma = 0.99 # discount factor
    optimizer = "RMS" # optimizer
    model_dir = None    # filled in later from GlobalConfig.logdir (see StarFoundation.__init__)
    load_model = False  # whether to restore a previously saved model

    # network
    rnn_hidden_dim = 128      # GRU hidden size of each agent network
    qmix_hidden_dim = 64      # hidden size of the QMIX mixing network
    two_hyper_layers = False  # use two-layer hyper-networks in QMIX
    hyper_hidden_dim = 64     # hidden size of the hyper-network layers
    qtran_hidden_dim = 64     # hidden size used by QTRAN
    lr = 1e-4                 # learning rate

    # epsilon greedy
    epsilon = 1               # initial exploration rate
    min_epsilon = 0.05        # floor of the exploration rate
    anneal_steps = 50000
    anneal_epsilon = (epsilon - min_epsilon) / anneal_steps  # per-step decrement
    epsilon_anneal_scale = 'episode'  # anneal once per 'episode' or per 'step'

    train_interval = 1 # the number of episodes before once training
    train_steps = 1 # important !! the number of the train steps in one epoch

    # experience replay
    batch_size = 64
    buffer_size = int(5e3)  # replay capacity, measured in episodes

    # how often to save the model
    save_cycle = 5000

    # how often to update the target_net
    target_update_cycle = 200

    # QTRAN lambda
    lambda_opt = 1
    lambda_nopt = 1

    # prevent gradient explosion
    grad_norm_clip = 10

    # MAVEN
    noise_dim = 16
    lambda_mi = 0.001
    lambda_ql = 1
    entropy_coefficient = 0.001

class StarFoundation():
    """Bridge between the parallel-environment platform and the QMIX/IQL agents.

    Owns the epsilon-greedy schedule, the per-thread RNN hidden states and the
    replay buffer.  The platform drives it through interact_with_env(); because
    rewards are not known until after actions are executed, transitions are
    completed later via the '_hook_' callback protocol (see the long comment
    after interact_with_env).
    """
    def __init__(self, n_agent, n_thread, space, mcv):
        # n_agent:  number of agents per environment
        # n_thread: number of parallel environment threads
        # space:    dict with string-serialized 'obs_space' / 'act_space'
        # mcv:      logging/visualization handle (not used in this class)
        from .agent.agent import Agents
        from .common.arguments import get_mixer_args
        from config import GlobalConfig

        space = process_space(space)
        # SECURITY NOTE(review): eval() on serialized space descriptions --
        # assumed to come from trusted in-repo config, never untrusted input.
        obs_space = eval(space['obs_space'])
        act_space = eval(space['act_space'])

        self.alg_config = AlgorithmConfig
        if self.alg_config.alg in ['qmix','iql','iql_sr']: 
            pass # self.alg_config = get_mixer_args(self.alg_config)
        else:
            assert False, ('blind spot')
        self.alg_config.cuda = ('cuda' in GlobalConfig.device)
        self.alg_config.n_threads = self.n_threads = n_thread

        self.alg_config.model_dir = GlobalConfig.logdir
        self.alg_config.n_agents = self.n_agents = n_agent
        self.alg_config.state_shape = obs_space['state_shape']
        self.alg_config.obs_shape = obs_space['obs_shape']
        self.alg_config.n_actions = act_space['n_actions']
        self.alg_config.map = GlobalConfig.scenario_config.map_
        self.alg_config.episode_limit = GlobalConfig.scenario_config.episode_limit
        # whether the scenario supplies a global state / available-action masks
        self.state_provided = GlobalConfig.scenario_config.state_provided
        self.avail_act_provided = GlobalConfig.scenario_config.avail_act_provided
        # if the algorithm expects a single team reward but the env provides
        # per-agent rewards, they are averaged into one in rollout_frag_hook
        self.multi_reward2one = (self.alg_config.one_reward) and (not GlobalConfig.scenario_config.RewardAsUnity)
        self.agents = Agents(self.alg_config)

        # epsilon-greedy exploration schedule (annealed inside get_actions)
        self.epsilon = self.alg_config.epsilon
        self.anneal_epsilon = self.alg_config.anneal_epsilon
        self.min_epsilon = self.alg_config.min_epsilon

        self.ReplayBuffer = ReplayBuffer(self.alg_config, train_hook=self.agents.train)
        # half-built transition waiting for its reward callback; None when idle
        self.__incomplete_rollout_frag__ = None
        
    def cold_init(self, state_recall):
        """Seed state_recall with zeroed RNN hidden states and last-action one-hots."""
        state_recall['_HiddenState_'] = np.zeros((self.n_threads, self.n_agents, self.alg_config.rnn_hidden_dim))
        state_recall['_LastActionOnehot_'] = np.zeros((self.n_threads, self.n_agents, self.alg_config.n_actions)) 
        state_recall['_PreStep_'] = state_recall['Current-Obs-Step']
        return state_recall


    def interact_with_env(self, state_recall):
        """One platform step: (maybe) train, pick actions, stash recall state.

        Returns (new_actions, state_recall); in training mode also plants the
        '_hook_' callback that the platform will invoke once rewards exist.
        """
        # init if new train session or new test session
        if not '_HiddenState_' in state_recall:  state_recall = self.cold_init(state_recall)
        # recall hidden states
        keys = ['ENV-PAUSE', 'Latest-Obs','Latest-Team-Info','Env-Suffered-Reset','Current-Obs-Step','_HiddenState_','_LastActionOnehot_','Test-Flag']
        ENV_PAUSE, obs, info, Env_Suffered_Reset, step_cnt, hidden_state, last_action_oh, EvalMod = iget(*keys)(state_recall)

        # training is buffer-driven: ReplayBuffer.train decides internally
        # whether enough finished episodes have accumulated
        if not EvalMod: self.ReplayBuffer.train(self.epsilon)

        # this is the core of interaction
        with torch.no_grad():
            new_hidden_state, new_actions, new_action_oh, hook = self.get_actions(ENV_PAUSE, obs, hidden_state, last_action_oh, info, 
                                                                            Env_Suffered_Reset, step_cnt, evaluate=EvalMod)

        # remember and to be recall
        state_recall['_PreStep_'] = state_recall['Current-Obs-Step'].copy()
        state_recall['_HiddenState_'] = new_hidden_state
        state_recall['_LastActionOnehot_'] = new_action_oh
        if not EvalMod: 
            state_recall['_hook_'] = hook 
        return new_actions, state_recall
    ''' <1>                 ↑..........↑
        Push something into platform dictionary, so we can see them again a step later.
        Although you can also do this by creating 'self.' vars instead, but I advise not ^_^
        <2>
        RL alg requires rewards, but rule based alg does not.
        For RL alg the rewards cannot be acquired here 
        because actions has not been fed to env.
        So please just leave a callback hook function here
        when the reward and next obs is ready, 
        the platform will call state_recall['_hook_'](arg),
        with arg={'reward':xxx, 'Latest-Obs':xxx}
    '''


    def get_actions(self, ENV_PAUSE, obs, hidden_state, last_action_oh, info, Env_Suffered_Reset, step_cnt, evaluate=False):
        """Select epsilon-greedy actions for every unpaused thread.

        Also anneals epsilon, resets hidden state / last action for threads
        whose env just reset, and (in training mode) records the half
        transition and returns a completion hook.
        Returns (new_hidden_state, new_actions, new_action_oh, hook);
        new_actions is transposed to agent-major before return -- presumably
        shape (n_agents, n_threads, 1); TODO confirm against the platform.
        """
        # ! change epsilon (per-step annealing mode)
        if (not evaluate) and (self.alg_config.epsilon_anneal_scale == 'step') and (self.epsilon > self.min_epsilon):  
            self.epsilon -= self.anneal_epsilon*self.n_threads

        # allocate numpy array
        n_threads = hidden_state.shape[0]
        new_hidden_state = np.zeros_like(hidden_state)
        new_action_oh = np.zeros_like(last_action_oh)
        all_avail_actions = np.ones_like(new_action_oh)   # default: all actions available
        all_state = np.zeros((n_threads, self.alg_config.state_shape))
        new_actions = np.zeros_like(last_action_oh[:,:,0])  # (n_threads, n_agents)
        # deal with reset environment and load states
        for thread_idx, done in enumerate(Env_Suffered_Reset):
            if ENV_PAUSE[thread_idx]: 
                continue
            if self.state_provided: all_state[thread_idx,:] = info[thread_idx]['state']
            if self.avail_act_provided: all_avail_actions[thread_idx, :] = info[thread_idx]['avail-act']
            if done: 
                # the environment has just been reset: clear recurrent memory
                # and the last-action input (mutates the caller's arrays on purpose)
                hidden_state[thread_idx, :] = 0
                last_action_oh[thread_idx, :] = 0
                # ! change epsilon (per-episode annealing mode)
                if (not evaluate) and (self.alg_config.epsilon_anneal_scale == 'episode') and (self.epsilon > self.min_epsilon):
                    self.epsilon -= self.anneal_epsilon

        # greedy (epsilon=0) when evaluating
        epsilon = self.epsilon if not evaluate else 0

        for thread_idx, done in enumerate(Env_Suffered_Reset):
            # FOR layer 2: loop through agents
            if ENV_PAUSE[thread_idx]: continue
            obs_agent  =                       obs[thread_idx, :]
            hidden_state_agent =      hidden_state[thread_idx, :]
            last_action_oh_agent =  last_action_oh[thread_idx, :]
            avail_action_agent = all_avail_actions[thread_idx, :]    #  self.env.get_avail_agent_actions(agent_id)
            agent_id = np.eye(self.n_agents)    # one-hot agent identity matrix
            assert self.alg_config.alg != 'maven'
            action, new_hidden_state_agent = self.agents.choose_action_batched(obs_agent, hidden_state_agent, last_action_oh_agent, agent_id, avail_action_agent, epsilon, evaluate)

            action = _2cpu2numpy(action)
            new_hidden_state_agent = _2cpu2numpy(new_hidden_state_agent)
            new_hidden_state[thread_idx, :] = new_hidden_state_agent
            # generate onehot vector of th action
            new_action_oh[thread_idx, :] = np_one_hot(action, n=self.alg_config.n_actions)
            # last_action[agent_id] = action_onehot
            new_actions[thread_idx]= action
        pass

        new_actions = np.expand_dims(new_actions, -1)
        rollout_frag_hook = None
        if not evaluate:
            self.__incomplete_rollout_frag__ = {    # wait to be completed with reward in hook function
                '_SKIP_': ENV_PAUSE.copy(),
                'u': new_actions, 
                'o': obs, 
                's': all_state, 
                'u_onehot': new_action_oh, 
                'avail_u': all_avail_actions, 
                'padded': np.zeros((n_threads,1))#[0.]
            }; self.__check_data_hash() # this is important!
            rollout_frag_hook = lambda frag_part2: self.rollout_frag_hook(frag_part2)

        new_actions = new_actions.transpose(1,0,2)
        return new_hidden_state, new_actions, new_action_oh, rollout_frag_hook

    def _collect_states(self, info, n_threads):
        """Gather per-thread global state and avail-action masks from env info dicts."""
        all_state = np.zeros((n_threads, self.alg_config.state_shape))
        all_avail_actions = np.ones((n_threads, self.n_agents, self.alg_config.n_actions))
        for thread_idx in range(n_threads):
            if self.state_provided: all_state[thread_idx,:] = info[thread_idx]['state']
            if self.avail_act_provided: all_avail_actions[thread_idx, :] = info[thread_idx]['avail-act']
        return all_state, all_avail_actions

    def rollout_frag_hook(self, frag_part2):
        """Callback planted by get_actions: merge the reward/next-obs half of
        the transition with the stored half and push it into the replay buffer.
        frag_part2 must contain 'Latest-Obs', 'reward', 'done' (and 'info')."""
        self.__check_data_curruption()
        assert self.__incomplete_rollout_frag__ is not None
        assert '_SKIP_' in self.__incomplete_rollout_frag__
        assert 'Latest-Obs' in frag_part2
        assert 'reward' in frag_part2
        assert 'done' in frag_part2
        self.__incomplete_rollout_frag__.update(frag_part2)
        completed_frag = self.__incomplete_rollout_frag__
        n_threads = len(completed_frag['done'])
        all_state_next, avail_u_next = self._collect_states(completed_frag['info'], n_threads)
        completed_frag['state'] = all_state_next
        completed_frag['avail-act'] = avail_u_next
        # unify agent rewards to team reward, if agent rewards list provided
        if self.multi_reward2one: 
            completed_frag['reward'] = completed_frag['reward'].mean(axis=1) #,keepdims=True)
        # load into replay buffer
        self.ReplayBuffer.store_timestep(completed_frag)
        self.__incomplete_rollout_frag__ = None



    # debugging functions: detect accidental in-place mutation of the pending
    # transition between get_actions and the reward hook (first 1000 steps only)
    def __check_data_hash(self):
        # Snapshot a hash of every (sub)item in the pending fragment.
        if not hasattr(self, 'patience'): self.patience = 1000
        if self.patience > 0: 
            self.hash_debug = {}
            # for debugging, to detect write protection error
            for key in self.__incomplete_rollout_frag__:
                item = self.__incomplete_rollout_frag__[key]
                if isinstance(item, dict):
                    self.hash_debug[key]={}
                    for subkey in item:
                        subitem = item[subkey]
                        self.hash_debug[key][subkey] = __hash__(subitem)
                else:
                    self.hash_debug[key] = __hash__(item)
    def __check_data_curruption(self):
        # (sic: "curruption") Verify the hashes recorded by __check_data_hash
        # are unchanged, i.e. nobody mutated the fragment in the meantime.
        if self.patience > 0: 
            assert self.__incomplete_rollout_frag__ is not None
            assert self.hash_debug is not None
            for key in self.__incomplete_rollout_frag__:
                item = self.__incomplete_rollout_frag__[key]
                if isinstance(item, dict):
                    for subkey in item:
                        subitem = item[subkey]
                        assert self.hash_debug[key][subkey] == __hash__(subitem), ('Currupted data! 发现腐败数据!')
                else:
                    assert self.hash_debug[key] == __hash__(item), ('Currupted data! 发现腐败数据!')
            self.patience -= 1











class ReplayBuffer:
    """Episode-level replay buffer with built-in training cadence.

    Per-step fragments arrive via store_timestep(); they are stitched into
    whole episodes per thread, zero-padded to episode_limit, and stored.
    train() fires the injected train_hook whenever enough episodes finished.
    Padding convention: for padded timesteps both 'padded' and 'terminated'
    are set to 1 so the learner can mask them out.
    """
    def __init__(self, args, train_hook):
        # args: the (mutated) AlgorithmConfig class; train_hook: Agents.train
        self.args = args
        self.n_actions = self.args.n_actions
        self.n_agents = self.args.n_agents
        self.state_shape = self.args.state_shape
        self.obs_shape = self.args.obs_shape
        self.size = self.args.buffer_size   # size = 5000
        self.episode_limit = self.args.episode_limit
        self.n_threads = self.args.n_threads
        # memory management
        self.current_idx = 0        # next write position (wraps around)
        self.current_size = 0       # number of valid episodes stored (<= size)
        self.finished_episode_cnt = 0
        # trainer 
        self.big_train_steps_cnt = 0    # completed training epochs
        self.small_train_steps_cnt = 0  # gradient steps, passed to train_hook
        self.train_hook = train_hook

        # create the buffer to store info
        # one scalar reward per step if the alg uses a team reward,
        # otherwise one reward per agent
        r_coredim = 1 if self.args.one_reward else self.n_agents
        self.buffers = {'o': np.empty([self.size, self.episode_limit, self.n_agents, self.obs_shape]),
                        'u': np.empty([self.size, self.episode_limit, self.n_agents, 1]),
                        's': np.empty([self.size, self.episode_limit, self.state_shape]),
                        'r': np.empty([self.size, self.episode_limit, r_coredim]),
                        'o_next': np.empty([self.size, self.episode_limit, self.n_agents, self.obs_shape]),
                        's_next': np.empty([self.size, self.episode_limit, self.state_shape]),
                        'avail_u': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
                        'avail_u_next': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
                        'u_onehot': np.empty([self.size, self.episode_limit, self.n_agents, self.n_actions]),
                        'padded': np.empty([self.size, self.episode_limit, 1]),
                        'terminated': np.empty([self.size, self.episode_limit, 1])
                        }
        if self.args.alg == 'maven':
            self.buffers['z'] = np.empty([self.size, self.args.noise_dim])
        # per-thread episode under construction (None = waiting for a new episode)
        self.rollout_unfinished = [None for _ in range(self.args.n_threads)]
        return

    def train(self, epsilon):
        """Run pending training epochs; epsilon is only used for logging."""
        # one epoch is due every `train_interval` finished episodes
        train_steps_should_be_done = self.finished_episode_cnt//self.args.train_interval
        while self.big_train_steps_cnt < train_steps_should_be_done:
            self.big_train_steps_cnt += 1
            print('training start! current buffer %.2f percent, current epsilon %.3f'%(self.current_size/self.size *100, epsilon))
            for _ in range(self.args.train_steps):    # defaults to 1
                batch = self.sample(min(self.current_size, self.args.batch_size))
                # access agent train
                self.train_hook(batch, self.small_train_steps_cnt)
                # mark the train progress
                self.small_train_steps_cnt += 1

            print('training end!')

    def store_timestep(self, rollout_frag):
        """Append one completed transition per unpaused thread; when a thread's
        episode terminates, pad it to episode_limit and commit it to storage."""
        for thread in range(self.n_threads):
            if rollout_frag['_SKIP_'][thread]: continue
            if self.rollout_unfinished[thread] is None:
                # first step of a fresh episode on this thread
                self.rollout_unfinished[thread] = {
                    'o':[], 'u':[], 'r':[],'s':[], 'o_next':[], 's_next':[], 'avail_u':[],
                     'avail_u_next':[], 'u_onehot':[], 'padded':[], 'terminated':[], }
            # wrap scalar team reward in a list so np.stack yields shape (T, 1)
            r = [rollout_frag['reward'][thread]] if self.args.one_reward else rollout_frag['reward'][thread]
            self.rollout_unfinished[thread]['o'].append(rollout_frag['o'][thread])
            self.rollout_unfinished[thread]['u'].append(rollout_frag['u'][thread])
            self.rollout_unfinished[thread]['s'].append(rollout_frag['s'][thread])
            self.rollout_unfinished[thread]['r'].append(r)
            self.rollout_unfinished[thread]['avail_u'].append(rollout_frag['avail_u'][thread])
            self.rollout_unfinished[thread]['u_onehot'].append(rollout_frag['u_onehot'][thread])
            self.rollout_unfinished[thread]['padded'].append(rollout_frag['padded'][thread])
            self.rollout_unfinished[thread]['terminated'].append([int(rollout_frag['done'][thread])])

            # special terminal processing for (done, obs, state):
            # after reset 'Latest-Obs'/'state' already belong to the NEXT
            # episode, so the terminal next-obs comes from the echo fields
            if not rollout_frag['done'][thread]:
                o_next=rollout_frag['Latest-Obs'][thread]
                s_next=rollout_frag['state'][thread]
                avu_next=rollout_frag['avail-act'][thread]
            else:
                o_next=rollout_frag['Terminal-Obs-Echo'][thread]
                s_next=rollout_frag['info'][thread]['state-echo']  if ('state-echo' in rollout_frag['info'][thread]) else np.zeros_like(rollout_frag['state'][thread])
                avu_next=np.array(rollout_frag['info'][thread]['avail-act-echo']) if ('avail-act-echo'  in rollout_frag['info'][thread]) else np.zeros_like(rollout_frag['avail-act'][thread])
            assert o_next is not None
            self.rollout_unfinished[thread]['o_next'].append(o_next)
            self.rollout_unfinished[thread]['s_next'].append(s_next)    # ?
            self.rollout_unfinished[thread]['avail_u_next'].append(avu_next)    # ?



            if rollout_frag['done'][thread]:
                # episode finished: stack T-step lists into arrays ...
                for key in self.rollout_unfinished[thread].keys():
                    self.rollout_unfinished[thread][key] = np.stack(self.rollout_unfinished[thread][key])
                step = len(self.rollout_unfinished[thread]['terminated'])
                if self.episode_limit > step:
                    # ... and zero-pad to episode_limit; padded/terminated
                    # rows are set to 1 so the learner masks them out
                    for key in self.rollout_unfinished[thread].keys():
                        paddings = np.zeros(shape=(self.episode_limit-step, *self.rollout_unfinished[thread][key].shape[1:])) 
                        if key == 'padded' or key == 'terminated':
                            paddings += 1
                        self.rollout_unfinished[thread][key] = np.concatenate((self.rollout_unfinished[thread][key], paddings), axis=0)
                self.store_episode(self.rollout_unfinished[thread])
                self.finished_episode_cnt += 1
                # clear, ready for the next episode on this thread
                self.rollout_unfinished[thread] = None
            pass
        return
    '''
        现在少了第一个空维度，但似乎没有影响
        o (1, 60, 3, 30)
        s (1, 60, 48)
        u (1, 60, 3, 1)
        r (1, 60, 1)
        avail_u (1, 60, 3, 9)
        o_next (1, 60, 3, 30)
        s_next (1, 60, 48)
        avail_u_next (1, 60, 3, 9)
        u_onehot (1, 60, 3, 9)
        padded (1, 60, 1)
        terminated (1, 60, 1)
    '''
        # store the episode
    def store_episode(self, episode_batch):
        """Write one padded episode into the ring buffer."""
        batch_size = 1  # episode_number
        idxs = self._get_storage_idx(inc=batch_size)
        # store the informations
        self.buffers['o'][idxs] = episode_batch['o']
        self.buffers['u'][idxs] = episode_batch['u']
        self.buffers['s'][idxs] = episode_batch['s']
        self.buffers['r'][idxs] = episode_batch['r']
        self.buffers['o_next'][idxs] = episode_batch['o_next']
        self.buffers['s_next'][idxs] = episode_batch['s_next']
        self.buffers['avail_u'][idxs] = episode_batch['avail_u']
        self.buffers['avail_u_next'][idxs] = episode_batch['avail_u_next']
        self.buffers['u_onehot'][idxs] = episode_batch['u_onehot']
        self.buffers['padded'][idxs] = episode_batch['padded']
        self.buffers['terminated'][idxs] = episode_batch['terminated']
        if self.args.alg == 'maven':
            self.buffers['z'][idxs] = episode_batch['z']

    def sample(self, batch_size):
        """Uniformly sample `batch_size` stored episodes (with replacement)."""
        temp_buffer = {}
        idx = np.random.randint(0, self.current_size, batch_size)
        for key in self.buffers.keys():
            temp_buffer[key] = self.buffers[key][idx]
        return temp_buffer

    def _get_storage_idx(self, inc=None):
        """Reserve `inc` consecutive slots (wrapping at capacity) and update
        current_idx / current_size; returns an index array, or a scalar if inc==1."""
        inc = inc or 1
        if self.current_idx + inc <= self.size:
            # fits without wrapping
            idx = np.arange(self.current_idx, self.current_idx + inc)
            self.current_idx += inc
        elif self.current_idx < self.size:
            # wraps: tail of the buffer plus the overflow at the front
            overflow = inc - (self.size - self.current_idx)
            idx_a = np.arange(self.current_idx, self.size)
            idx_b = np.arange(0, overflow)
            idx = np.concatenate([idx_a, idx_b])
            self.current_idx = overflow
        else:
            # write head is exactly at the end: restart from slot 0
            idx = np.arange(0, inc)
            self.current_idx = inc
        self.current_size = min(self.size, self.current_size + inc)
        if inc == 1:
            idx = idx[0]
        return idx



# NOTE(review): dead reference code kept as a module-level string literal --
# the live hyper-parameters now come from AlgorithmConfig above.
# Candidate for deletion once nobody needs it for comparison.
'''
def get_mixer_args(args):
    # network
    args.rnn_hidden_dim = 64
    args.qmix_hidden_dim = 32
    args.two_hyper_layers = False
    args.hyper_hidden_dim = 64
    args.qtran_hidden_dim = 64
    args.lr = 5e-4

    # epsilon greedy
    args.epsilon = 1
    args.min_epsilon = 0.05
    anneal_steps = 50000
    args.anneal_epsilon = (args.epsilon - args.min_epsilon) / anneal_steps
    args.epsilon_anneal_scale = 'step'

    # the number of the train steps in one epoch
    args.train_steps = 1

    # experience replay
    args.batch_size = 32
    args.buffer_size = int(5e3)

    # how often to save the model
    args.save_cycle = 5000

    # how often to update the target_net
    args.target_update_cycle = 200

    # QTRAN lambda
    args.lambda_opt = 1
    args.lambda_nopt = 1

    # prevent gradient explosion
    args.grad_norm_clip = 10

    # MAVEN
    args.noise_dim = 16
    args.lambda_mi = 0.001
    args.lambda_ql = 1
    args.entropy_coefficient = 0.001
    return args
'''

