import os,sys


class ScenarioConfig(object): # ADD_TO_CONF_SYSTEM add to the parameter search path, do not remove this comment !!!
    # Static configuration for the MACA 10v10 air-fight scenario.
    # Consumed by Env_Compat_Wrapper when constructing the underlying Environment.
    map_path = './maps/1000_1000_fighter10v10.map'  # map file, resolved relative to ./MISSIONS/air_fight
    max_step = 1000       # episode step limit passed to the Environment
    render = True         # enable the Environment's built-in rendering
    random_pos = False    # randomize initial unit positions
    log_flag = False      # forwarded as the Environment's `log` flag
    ext_render = False    # forwarded as the Environment's `external_render` flag

    N_TEAM = 2                     # two opposing sides
    N_AGENT_EACH_TEAM = [10, 10]   # agents per team
    AGENT_ID_EACH_TEAM = [range(0,10), range(10,20)]  # global agent-id ranges per team
    TEAM_NAMES = [  
                    'algorithm.MACA.maca_rule_foundation->RuleFoundation',
                    'algorithm.MACA.maca_rule_foundation->RuleFoundation',
                ] 
    ActAsUnity = True     # each team submits one joint action dict per step
    RewardAsUnity = True  # each team receives a single (summed) team reward per step


def make_air_fight_env(env_id, rank):
    """Factory that builds one air-fight environment instance.

    ``env_id`` is accepted for signature compatibility with the framework's
    env-maker convention but is not used; only the worker ``rank`` matters.
    """
    env = Env_Compat_Wrapper(rank)
    return env


# Yet another wrapper layer... this one aligns the `info` payload and the
# argument/return conventions with the rest of the framework.
class Env_Compat_Wrapper():
    def __init__(self, rank):
        """Construct the underlying MACA air-fight ``Environment``.

        The Environment package must be imported and built from inside
        ``./MISSIONS/air_fight``, so the working directory is switched
        temporarily. The original code leaked the chdir on failure; the
        ``try/finally`` guarantees the caller's cwd is restored even when
        the import or the constructor raises.

        Parameters:
            rank: worker index, stored as ``self.id``.
        """
        save_cwd = os.getcwd()
        os.chdir('./MISSIONS/air_fight')
        try:
            sys.path.append('./environment')
            from interface import Environment
            self.env = Environment(
                ScenarioConfig.map_path, 'raw', 'raw',
                max_step=ScenarioConfig.max_step,
                render=ScenarioConfig.render,
                random_pos=ScenarioConfig.random_pos,
                log=ScenarioConfig.log_flag,
                external_render=ScenarioConfig.ext_render)
            # Spaces are unknown at this level; the framework reads them lazily.
            self.observation_space = None
            self.action_space = None
            self.id = rank
        finally:
            # Always restore the process-wide working directory.
            os.chdir(save_cwd)

    def step(self, act):
        """Advance the environment one step.

        Parameters:
            act: two-element sequence (one dict per team), each with keys
                 'detector_act' and 'fighter_act'.

        Returns:
            (ob, reward, done, info) where ``ob`` is always None (raw
            observations travel in ``info``), ``reward`` is
            ``[side1_sum, side2_sum]`` — each side's detector + fighter
            rewards summed into one team reward — ``done`` comes from the
            Environment, and ``info`` is ``[side1_obs_dict, side2_obs_dict]``.
        """
        # team 1
        side1_detector_action = act[0]['detector_act']
        side1_fighter_action = act[0]['fighter_act']
        # team 2
        side2_detector_action = act[1]['detector_act']
        side2_fighter_action = act[1]['fighter_act']

        self.env.step(side1_detector_action, side1_fighter_action,
                      side2_detector_action, side2_fighter_action)
        side1_obs_dict, side2_obs_dict = self.env.get_obs()
        info = [side1_obs_dict, side2_obs_dict]
        ob = None
        # get_reward() yields per-unit rewards for both sides; the game-level
        # rewards are ignored here (preserved from the original behavior).
        (o_detector_reward, o_fighter_reward, _o_game_reward,
         e_detector_reward, e_fighter_reward, _e_game_reward) = self.env.get_reward()
        # Collapse per-unit rewards into one scalar per team (RewardAsUnity).
        side1_step_reward = sum(o_detector_reward) + sum(o_fighter_reward)
        side2_step_reward = sum(e_detector_reward) + sum(e_fighter_reward)
        reward = [side1_step_reward, side2_step_reward]
        done = self.env.get_done()

        return (ob, reward, done, info)

    def reset(self):
        """Reset the environment; return (None, [side1_obs, side2_obs])."""
        self.env.reset()
        side1_obs_dict, side2_obs_dict = self.env.get_obs()
        info = [side1_obs_dict, side2_obs_dict]
        ob = None
        return ob, info

    def render(self):
        # Rendering is handled internally by the Environment; nothing to do.
        return

