import os, torch
import numpy as np
from numba import njit, jit
from UTILS.colorful import *
from UTILS.tensor_ops import copy_clone, my_view, add_onehot_id_at_last_dim, add_obs_container_subject
import pickle
from config import GlobalConfig
DEBUG = True

# @njit
def distance_matrix(A):
    """Pairwise Euclidean distance between points along axis -2.

    A: (..., n_subject, 2) array of 2-D coordinates.
    Returns a (..., n_subject, n_subject) symmetric distance matrix.
    """
    assert A.shape[-1] == 2 # only the 2-D case is handled
    # broadcast (..., n, 1, 2) against (..., 1, n, 2) to form all deltas
    delta = np.expand_dims(A, -2) - np.expand_dims(A, -3)
    return np.linalg.norm(delta, axis=-1)

def stack_padding(l):
    """Stack variable-length sequences into a 2-D array, right-padding with 0."""
    from itertools import zip_longest
    # transpose with zero fill, then rebuild rows as columns
    transposed = list(zip_longest(*l, fillvalue=0))
    return np.column_stack(transposed)

def dir_to_rad_angle(delta_pos):
    """Heading angle in radians, range (-pi, pi], of 2-D direction vectors.

    delta_pos: (..., 2) array; last axis is (x, y).
    Returns an array of shape delta_pos.shape[:-1].
    """
    # np.angle(x + iy) is exactly arctan2(y, x)
    dx = delta_pos[..., 0]
    dy = delta_pos[..., 1]
    return np.arctan2(dy, dx)

def reg_angle_deg(deg):
    """Wrap angle(s) in degrees into the interval [-180, 180)."""
    shifted = deg + 180
    return shifted % 360 - 180

def reg_angle(rad):
    """Wrap angle(s) in radians into the interval [-pi, pi).

    NaN inputs pass through as NaN; it is OK to see
    "RuntimeWarning: invalid value encountered in remainder" here.
    """
    two_pi = 2 * np.pi
    return (rad + np.pi) % two_pi - np.pi

class ShellEnvWrapper(object):
    """Shell layer between the raw multi-agent environment and the RL core.

    Responsibilities per step:
      * decode the flat observation buffer into per-object fields
        (alive flag, position, angle, velocity, id);
      * build agent / entity embeddings and the attention padding mask;
      * query ``RL_functional`` for the high-level action of each agent;
      * translate "move towards target k" decisions into the environment's
        low-level discrete acceleration / turning commands.

    NOTE(review): the deprecated ``np.int`` / ``np.long`` aliases (removed
    in NumPy 1.20/1.24) have been replaced with the builtin ``int``.
    """

    def __init__(self, n_agent, n_thread, space, mcv, RL_functional,
                                          alg_config, scenario_config):
        self.n_agent = n_agent
        self.n_thread = n_thread
        self.space = space
        self.mcv = mcv
        self.RL_functional = RL_functional
        self.n_cluster = alg_config.n_cluster
        self.n_basic_dim = scenario_config.obs_vec_length
        self.n_entity = scenario_config.num_entity
        self.num_guards = scenario_config.num_guards
        self.num_attackers = scenario_config.num_attackers
        # uid ranges index into the flat object axis of the observation
        self.agent_uid = scenario_config.uid_dictionary['agent_uid']
        self.entity_uid = scenario_config.uid_dictionary['entity_uid']
        self.f_uid = scenario_config.uid_dictionary['guards_uid']     # friendly guards
        self.h_uid = scenario_config.uid_dictionary['attackers_uid']  # hostile attackers
        self.dec = scenario_config.dec_dictionary  # field name -> column slice decoder
        self.n_object = scenario_config.num_object
        self.load_checkpoint = alg_config.load_checkpoint
        self.cold_start = True
        self._division_obsR_init = None
        self._division_obsL_init = None

    @staticmethod
    def get_binary_array(n, n_bits, dtype=np.float32):
        """Encode non-negative integer *n* as a little-endian bit vector.

        Element i of the result is the i-th least significant bit of *n*;
        the vector has fixed length *n_bits* (high bits stay zero).
        """
        arr = np.zeros(n_bits, dtype=dtype)
        pointer = 0
        while True:
            arr[pointer] = int(n%2==1)
            n = n >> 1
            pointer += 1
            if n == 0: break
        return arr

    def interact_with_env(self, State_Recall):
        """Run one decision step.

        Reads ``State_Recall['Latest-Obs']``, ``['ENV-PAUSE']`` and
        ``['Test-Flag']``, queries the RL core on active threads only,
        and returns ``(actions_list, State_Recall)`` where actions_list
        has shape (n_agent, n_thread, 1).
        """
        # initialise every action slot to -1 (placeholder for paused threads)
        act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=int) - 1
        obs_range = 2.0
        # decode the flat observation into (thread, object, field)
        obs = State_Recall['Latest-Obs']
        obs = my_view(obs, [0, self.n_object, -1])
        alive = obs[:,:,self.dec['alive']]
        falive = alive[:, self.f_uid]
        halive = alive[:, self.h_uid]
        pos = obs[:,:,self.dec['pos']]
        ang = obs[:,:,self.dec['ang']]
        vel = obs[:,:,self.dec['vel']]
        idx = obs[:,:,self.dec['id']]
        dis = distance_matrix(pos)
        f2h_dis = dis[:, self.f_uid, :][:, :, self.h_uid]  # guard -> attacker distances
        f2f_dis = dis[:, self.f_uid, :][:, :, self.f_uid]
        agent_emb = obs[:,self.f_uid]
        h_emb = obs[:,self.h_uid]
        agent_emb = add_onehot_id_at_last_dim(agent_emb)  # append one-hot agent id
        n_act = 8
        fpos = pos[:,self.f_uid]
        fvel = vel[:,self.f_uid]
        fang = ang[:,self.f_uid]
        hpos = pos[:,self.h_uid]

        # per thread: indices of attackers that are alive and within obs_range
        # of at least one guard; the 8 raw actions are appended later as
        # dummy target entities
        n_thread = pos.shape[0]
        observable = [np.where(
                (f2h_dis[i] < obs_range).any(axis=0) & (halive[i] == 1)
            )[0] for i in range(n_thread)]    # $n_thread.
        target_emb = np.zeros(shape=(n_thread, 50, self.n_basic_dim), dtype=obs.dtype)
        # attention convention (compatibility[mask.bool()] = -inf):
        # mask==1 marks padding, mask==0 marks a valid entry
        padding_mask = np.ones(shape=(n_thread, self.n_agent, 50), dtype=int)
        for i in range(n_thread):
            target_emb[i, :len(observable[i])] = h_emb[i, observable[i]]
            padding_mask[i, :, :len(observable[i])] = 0  # valid (non-pad) section

        # each raw action gets a distinct binary code as its embedding
        dummy_action_target = np.zeros(shape=(n_thread, n_act, self.n_basic_dim), dtype=obs.dtype)
        for i in range(n_act):
            dummy_action_target[:,i] = self.get_binary_array(n=i+15, n_bits=7, dtype=obs.dtype)

        # the first 8 entities are the raw actions and are never padded out
        action_c_padding = np.zeros(shape=(n_thread, self.n_agent, 8), dtype=int)
        entity_emb = np.concatenate((dummy_action_target, target_emb), -2)
        padding_mask = np.concatenate((action_c_padding, padding_mask), -1)

        # forward only the non-paused threads to the RL core
        ENV_PAUSE = State_Recall['ENV-PAUSE']
        all_emb = {'agent_final_emb':agent_emb[~ENV_PAUSE], 
                    'entity_final_emb':entity_emb[~ENV_PAUSE], 
                    'padding_mask':padding_mask[~ENV_PAUSE]}
        Internal_State_Recall = {'threads_active_flag':~ENV_PAUSE, 'all_emb':all_emb, 'Test-Flag':State_Recall['Test-Flag']}
        act_active, internal_recall = self.RL_functional.interact_with_env_genuine(Internal_State_Recall)
        act[~ENV_PAUSE] = act_active

        # actions < 8 are raw env actions; action a >= 8 selects the
        # (a-8)-th currently-observable target to move towards
        use_raw_action = (act < 8)
        if (~use_raw_action).any():
            t_act = np.where(use_raw_action, 0, act-8)
            observable_padded = stack_padding(observable)   # (n_thread, max_n_observable)
            T_type_uid = np.take_along_axis(observable_padded, axis=1, indices=t_act)
            target_pos = np.take_along_axis(hpos, axis=1, 
                indices=np.repeat(np.expand_dims(T_type_uid,-1), 2, axis=-1))
            # NaN-out positions of agents that picked a raw action so the
            # direction logic below leaves them untouched
            use_raw_action_=np.repeat(np.expand_dims(use_raw_action,-1), 2, axis=-1)
            target_pos = np.where(use_raw_action_, np.nan, target_pos)
            delta_pos = target_pos - fpos

            to_target_acc = self.dir_to_action(delta_pos, fvel)
            to_target_acc = to_target_acc.squeeze(-1)
            # if the heading error exceeds pi/8, turn instead of accelerating
            rad_angle = dir_to_rad_angle(delta_pos)
            delta_angle = reg_angle(rad_angle - fang)
            delta_angle = np.where(np.abs(delta_angle) < np.pi/8, np.nan, delta_angle)
            delta_angle = reg_angle(delta_angle)
            to_target_acc = np.where(delta_angle<0, 6, to_target_acc)  # presumably 6 = turn one way — TODO confirm against env action table
            to_target_acc = np.where(delta_angle>0, 5, to_target_acc)  # presumably 5 = turn the other way
            act = np.where(use_raw_action, act, to_target_acc)

        # reshape to the platform's expected (n_agent, n_thread, 1) layout
        actions_list = []
        act = np.expand_dims(act, -1)
        for i in range(self.n_agent): actions_list.append(act[:,i,:])
        actions_list = np.array(actions_list)

        if self.cold_start: self.cold_start = False

        # keep the hook so the platform can call an empty frame to gather reward
        State_Recall['_hook_'] = internal_recall['_hook_']
        assert State_Recall['_hook_'] is not None
        return actions_list, State_Recall # state_recall dictionary will preserve states for next action making

    @staticmethod
    def __random_select_init_value_(n_container, n_subject):
        """Randomly assign *n_subject* items to *n_container* bins.

        Draws 20 candidate assignments and keeps the one with maximum
        entropy; assignments leaving any bin empty score -inf and are
        therefore avoided whenever possible.
        """
        t_final = []; entropy = np.array([])
        for _ in range(20): # max entropy in samples
            tmp = np.random.randint(low=0, high=n_container, size=(n_subject,), dtype=int); t_final.append(tmp)
            entropy = np.append(entropy, sum([ -(sum(tmp==i)/n_subject)*np.log(sum(tmp==i)/n_subject) if sum(tmp==i)!=0 else -np.inf for i in range(n_container)]))
        return t_final[np.argmax(entropy)]

    @staticmethod
    @jit(forceobj=True)
    def dir_to_action(vec, vel):
        """Map desired direction vectors to discrete movement actions.

        vec: (thread, agent, 2) desired displacement; NaN entries or
             distances <= 0.05 yield action 0.
        vel: (thread, agent, 2) current velocity, used to bias the choice
             towards the direction the agent is already moving in.
        Returns a (thread, agent, 1) int array:
        0 stay, 1 right, 2 left, 3 up, 4 down.
        """
        def np_mat3d_normalize_each_line(mat):
            # row-wise L2 normalisation; epsilon guards against division by 0
            return mat / np.expand_dims(np.linalg.norm(mat, axis=2) + 1e-16, axis=-1)
        dis2target = np.linalg.norm(vec, axis=2)
        vec = np_mat3d_normalize_each_line(vec)

        e_u = np.array([0,1])
        e_d = np.array([0,-1])
        e_r = np.array([1,0])
        e_l = np.array([-1,0])

        # velocity nudged slightly towards each cardinal direction
        vel_u = np_mat3d_normalize_each_line(vel + e_u * 0.1)
        vel_d = np_mat3d_normalize_each_line(vel + e_d * 0.1)
        vel_r = np_mat3d_normalize_each_line(vel + e_r * 0.1)
        vel_l = np_mat3d_normalize_each_line(vel + e_l * 0.1)

        # alignment of each nudged velocity with the desired direction
        proj_u = (vel_u * vec).sum(-1)
        proj_d = (vel_d * vec).sum(-1)
        proj_r = (vel_r * vec).sum(-1)
        proj_l = (vel_l * vec).sum(-1)

        # +2 bonus for directions with a positive component along vec,
        # so a clearly-correct cardinal direction always dominates
        _u = ((vec * e_u).sum(-1)>0).astype(int)
        _d = ((vec * e_d).sum(-1)>0).astype(int)
        _r = ((vec * e_r).sum(-1)>0).astype(int)
        _l = ((vec * e_l).sum(-1)>0).astype(int)

        proj_u = proj_u + _u*2
        proj_d = proj_d + _d*2
        proj_r = proj_r + _r*2
        proj_l = proj_l + _l*2

        dot_stack = np.stack([proj_u, proj_d, proj_r, proj_l])
        direct = np.argmax(dot_stack, 0)

        action = np.where(direct == 0, 3, 0)    # 3 up
        action += np.where(direct == 1, 4, 0)   # 4 down
        action += np.where(direct == 2, 1, 0)   # 1 right
        action += np.where(direct == 3, 2, 0)   # 2 left

        # close enough to the target: stand still
        action = (dis2target>0.05).astype(int)*action
        # make sure that all nan vec become invalid act 0, 
        # be careful when a different numpy version is used
        assert (action[np.isnan(np.sum(dot_stack,0))] == 0).all()
        return np.expand_dims(action, axis=-1)
