from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel
from mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel
from numpy.core.shape_base import block
from cores.environment_setting_channel import EnvironmentSettingChannel, EnvPara, EnvParaType
from cores.string_log_channel import StringLogChannel
from cores.environment_state_channel import EnvironmentStateChannel
# Side channels for out-of-band communication with the Unity build:
# engine configuration, built-in env parameters, custom settings,
# environment state (done flag), and free-form string logging.
ECC = EngineConfigurationChannel()
EPC = EnvironmentParametersChannel()
ESTC = EnvironmentSettingChannel()
ESC = EnvironmentStateChannel()
SLC = StringLogChannel()

# The following two (commented) lines load a built environment on Windows and Linux respectively.
#env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=0, no_graphics=False, file_name="absolute_path\\WindowsBuild")
#env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=0, no_graphics=True, file_name="absolute_path/LinuxBuild/LinuxBuild.x86_64")
# Engine settings are queued on the channel before the environment connects.
ECC.set_configuration_parameters(time_scale = 1.0, width=1200, height=675)
# env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=0,
#                                           no_graphics=True, file_name="/home/cxg6/FlyAndDodge/Env/LinuxBuild_new/LinuxBuild.x86_64")
# No file_name given — presumably this connects to the Unity Editor in Play
# mode (standard ML-Agents behavior); confirm against the local setup.
env = UnityEnvironment(side_channels=[ECC, EPC, ESTC, SLC, ESC], worker_id=0,
                                          no_graphics=False)

# Smoke-test the custom string-log side channel.
SLC.send_string("Test StringLogChannel.")

#############################################################################################
###----------------------------Environment Parameter Settings-----------------------------###
#############################################################################################
# Every setting is pushed through the EnvironmentSettingChannel as a
# (parameter, type, value) triple.  The triples are sent in exactly the
# order listed below.
# All positions are localPosition, i.e. relative to the playground gameObject.
_ENV_SETTINGS = [
    (EnvPara.seed, EnvParaType.INT, 5050),
    (EnvPara.playground_number, EnvParaType.INT, 8),

    # Target position.
    (EnvPara.target_pos_x, EnvParaType.FLOAT, 0.0),
    (EnvPara.target_pos_y, EnvParaType.FLOAT, 20.0),
    (EnvPara.target_pos_z, EnvParaType.FLOAT, 150.0),

    # Agent's spawn position.
    (EnvPara.spawn_pos_x, EnvParaType.FLOAT, 0.0),
    (EnvPara.spawn_pos_y, EnvParaType.FLOAT, 10.0),
    (EnvPara.spawn_pos_z, EnvParaType.FLOAT, 0.0),

    # Whether the agent gets a random rotation every time it respawns.
    (EnvPara.enable_random_airplane_spawn_rotation, EnvParaType.BOOL, False),

    # Agents should only fly inside a fixed cuboid area; leaving it fails
    # the task.
    (EnvPara.enable_boundary_check, EnvParaType.BOOL, True),
    (EnvPara.boundary_forward, EnvParaType.FLOAT, 170.0),
    (EnvPara.boundary_backward, EnvParaType.FLOAT, -10.0),
    (EnvPara.boundary_up, EnvParaType.FLOAT, 100.0),
    (EnvPara.boundary_down, EnvParaType.FLOAT, -10.0),
    (EnvPara.boundary_left, EnvParaType.FLOAT, -40.0),
    (EnvPara.boundary_right, EnvParaType.FLOAT, 40.0),

    # If the agent strays too far from the target, the task fails.
    (EnvPara.max_distance_to_target, EnvParaType.FLOAT, 1000.0),

    # Agents per playground; only 1 is currently supported.
    (EnvPara.agent_number_per_playground, EnvParaType.INT, 1),

    # z coordinates of the badSpheres; the list length also fixes their count.
    (EnvPara.badspheres_pos_z, EnvParaType.LIST_FLOAT, [70, 75, 80, 85]),

    # x, y coordinates and scale of each badSphere are sampled uniformly.
    (EnvPara.badsphere_pos_x, EnvParaType.RANDOM_UNIFORM, [0, 0, 0]),
    (EnvPara.badsphere_pos_y, EnvParaType.RANDOM_UNIFORM, [10, 10, 10]),
    (EnvPara.badsphere_scale, EnvParaType.RANDOM_UNIFORM, [20, 20, 20]),

    # Distance reward (per action) = Tanh(distanceToTarget * 0.001f) * scale.
    (EnvPara.distance_reward_scale, EnvParaType.FLOAT, 0.001),

    # Angular-velocity punish (per action)
    #   = -Tanh(Abs(angularVelocity.sqrMagnitude * 0.1f)) * scale.
    (EnvPara.angular_velocity_punish_scale, EnvParaType.FLOAT, 0.001),

    # Time-consume punish (per action) = -scale * 1.
    (EnvPara.time_consume_punish_scale, EnvParaType.FLOAT, 0.001),

    # With unified_episode=True you must call env.reset() to begin an episode:
    # an agent does NOT respawn immediately after finishing/failing the task.
    # With unified_episode=False it respawns immediately.
    # Choose False for mlagents-style training code, True for gym-style code.
    (EnvPara.unified_episode, EnvParaType.BOOL, True),

    # Whether colliding with a badSphere ends the episode.
    (EnvPara.is_crash_end_episode, EnvParaType.BOOL, True),
]

for _param, _ptype, _value in _ENV_SETTINGS:
    ESTC.set_environment_parameter(_param, _ptype, _value)
#############################################################################################
###----------------------------Environment Parameter Settings-----------------------------###
#############################################################################################

# Start the first episode (required because unified_episode=True above).
env.reset()

# Behavior names registered by the Unity build ("name?team=id").
BEHAVIOR_NAME = "FlyAndDodge?team=0"
BEHAVIOR_NAME0 = "DoNothing?team=0"
print(list(env.behavior_specs.keys()))
'''
bhspec = env.behavior_specs[BEHAVIOR_NAME0]
print(bhspec)
#print(next(iter(env.behavior_specs.keys())))
decision_steps, terminal_steps = env.get_steps(BEHAVIOR_NAME0)
print(decision_steps.obs)
print(decision_steps.reward)
print(decision_steps.agent_id)
print(decision_steps.action_mask)
print()
print(terminal_steps.obs)
print(terminal_steps.reward)
print(terminal_steps.agent_id)
print(terminal_steps.interrupted)
'''
# Dump the behavior spec and the first batch of step data for inspection.
bhspec = env.behavior_specs[BEHAVIOR_NAME]
print(bhspec)
#print(next(iter(env.behavior_specs.keys())))
decision_steps, terminal_steps = env.get_steps(BEHAVIOR_NAME)
print(decision_steps.obs)
print(decision_steps.obs[0].shape)


print(decision_steps.reward)
print(decision_steps.agent_id)
print(decision_steps.action_mask)
print()
print(terminal_steps.obs)
print(terminal_steps.reward)
print(terminal_steps.agent_id)
print(terminal_steps.interrupted)

import matplotlib.pyplot as plt

# Visual sanity check of the first agent's visual observation.
# Indexing [0][0,:,:,-3:] presumably selects the last 3 channels of a
# (batch, H, W, C) stacked visual observation as RGB — TODO confirm shape.
plt.imshow(decision_steps.obs[0][0,:,:,-3:])
plt.show()

# Drive the environment with random actions for up to 1000 steps, printing
# the full decision/terminal step data each iteration for inspection.
for i in range(1000):
    decision_steps, terminal_steps = env.get_steps(BEHAVIOR_NAME)
    # One random action per agent currently requesting a decision.
    actions = bhspec.action_spec.random_action(len(decision_steps))
    env.set_actions(BEHAVIOR_NAME, actions)

    print(f'---------------------loop{i}-------------------')
    print(decision_steps.obs)
    print(decision_steps.reward)
    print(decision_steps.agent_id)
    print(decision_steps.action_mask)
    print()
    print(terminal_steps.obs)
    print(terminal_steps.reward)
    print(terminal_steps.agent_id)
    print(terminal_steps.interrupted)

    print("isDone:", ESC.done)
    env.step()
    # unified_episode is enabled in this script, so once the state channel
    # reports done the episode must be restarted manually with env.reset().
    if ESC.done:  # PEP 8: never compare a boolean with `== True`
        env.reset()
        decision_steps, terminal_steps = env.get_steps(BEHAVIOR_NAME)
        print("...")
    #input_str = input()


env.close()
print("Done.")

