# prerequisites: tracing_1, tracing_2, tracing_3, balanced_1, balanced_2, balanced_3

import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')  # change this to your own project's root directory


import math

from env import combat_env, discrete_env
from utils.define import PI, COMBAT_OBS_INFO
from trainer import sac_trainer, dqn_trainer
import random
import tf_agents
import functools
import numpy as np
import tensorflow as tf
import utils.define as ud

from absl import app
from absl import logging
from typing import Tuple
from policy.lazy_policy import LazyPolicy
from policy.random_policy import RandomPolicy
from policy.greedy_policy import GreedyPolicy
from policy.policy_loader import PolicyLoader
from policy.enemy_policy_selection import generate_enemy_policy

# Address/port of the combat simulation server.
_IP = '10.119.10.174'
_PORT = 10089

# TODO environment hyper-parameters
MAX_ENV_STEP = 2500
# Bounds of the random spawn box (also used to derive MAX_DIST below).
MAX_POS_X = MAX_POS_Y = 5000
MAX_POS_Z = 2500
MIN_POS_X = MIN_POS_Y = -5000
MIN_POS_Z = 500
# 2.5x the diagonal of the spawn box — used as the normalizer for the
# distance-change reward (see get_dist_change_rwd).
MAX_DIST = np.sqrt(np.sum(np.square(np.array([MAX_POS_X, MAX_POS_Y, MAX_POS_Z], dtype='int64') -
                                    np.array([MIN_POS_X, MIN_POS_Y, MIN_POS_Z], dtype='int64')))) * 2.5

# Horizon (in environment steps) for the constant-velocity position
# prediction used in get_state.
PREDICT_LENGTH = 15


def not_zero(x):
    """Clamp *x* away from zero so it is safe to divide by.

    Values with magnitude below 1e-7 are replaced by +1e-7.
    Note: the sign of tiny negative inputs is NOT preserved.
    """
    return x if math.fabs(x) >= 1e-7 else 1e-7


def get_state(obs: np.ndarray) -> np.ndarray:
    """Convert a raw 28-dim observation into the 34-dim training state.

    Raw observation layout:
        [0:3]   own position x, y, z
        [3:6]   own Euler angles (roll, pitch, yaw)
        [6:9]   own linear velocity Vx, Vy, Vz
        [9:12]  own angular velocity
        [12]    damage we suffered during the last step
        [13]    own current health
        [14:17] enemy position x, y, z
        [17:20] enemy Euler angles (roll, pitch, yaw)
        [20:23] enemy linear velocity
        [23:26] enemy angular velocity
        [26]    damage the enemy suffered during the last step
        [27]    enemy current health

    Produced state layout:
        [0:2]   own pitch, yaw
        [2:5]   own linear velocity
        [5]     damage we suffered last step
        [6]     own health
        [7:10]  own heading unit vector (x, y, z components)
        [10]    own z coordinate
        [11:13] greedy ideal pitch/yaw pointing at the enemy
        [13:15] greedy ideal pitch/yaw at the enemy's inertia-predicted position
        [15:30] the same 15 features mirrored for the enemy side
        [30:33] enemy position relative to us
        [33]    Euclidean distance between the two aircraft

    :param obs: raw observation vector (length 28)
    :return: state vector (length 34), intentionally NOT normalized here
    """

    def _pitch_yaw(dx, dy, dz):
        # Pitch/yaw of direction (dx, dy, dz). Inputs must already be
        # clamped away from zero (see not_zero) so the divisions are safe.
        # Yaw is mapped into [0, 2*pi).
        pitch = math.atan(dz / math.sqrt(dx ** 2 + dy ** 2))
        yaw = math.atan(dy / dx) if dx > 0 else math.atan(dy / dx) + math.pi
        return pitch, yaw if yaw > 0 else 2 * math.pi + yaw

    state = np.zeros(34, dtype=float)

    # --- own side ---
    state[0:2] = obs[4:6]
    state[2:5] = obs[6:9]
    state[5] = obs[12]
    state[6] = obs[13]
    state[7:10] = np.array([math.cos(obs[4]) * math.cos(obs[5]),
        math.cos(obs[4]) * math.sin(obs[5]), math.sin(obs[4])])
    state[10] = obs[2]

    # Vector from us to the enemy, clamped componentwise away from zero.
    rx = not_zero(obs[14] - obs[0])
    ry = not_zero(obs[15] - obs[1])
    rz = not_zero(obs[16] - obs[2])
    state[11], state[12] = _pitch_yaw(rx, ry, rz)

    # Enemy position predicted PREDICT_LENGTH steps ahead assuming
    # constant relative velocity.
    vx, vy, vz = obs[6], obs[7], obs[8]
    vx_enemy, vy_enemy, vz_enemy = obs[20], obs[21], obs[22]
    rx_predict = not_zero(rx + (vx_enemy - vx) * PREDICT_LENGTH)
    ry_predict = not_zero(ry + (vy_enemy - vy) * PREDICT_LENGTH)
    rz_predict = not_zero(rz + (vz_enemy - vz) * PREDICT_LENGTH)
    state[13], state[14] = _pitch_yaw(rx_predict, ry_predict, rz_predict)

    # --- enemy side (mirrored features) ---
    state[15:17] = obs[18:20]
    state[17:20] = obs[20:23]
    state[20] = obs[26]
    state[21] = obs[27]
    state[22:25] = np.array([math.cos(obs[18]) * math.cos(obs[19]),
        math.cos(obs[18]) * math.sin(obs[19]), math.sin(obs[18])])
    state[25] = obs[16]

    # Vector from the enemy to us (still nonzero since rx/ry/rz were clamped).
    rx, ry, rz = -rx, -ry, -rz
    state[26], state[27] = _pitch_yaw(rx, ry, rz)

    # BUGFIX: the original code skipped the not_zero clamp on this branch,
    # so a zero predicted component could raise ZeroDivisionError; guard
    # exactly like the own-side prediction above.
    rx_predict = not_zero(rx + (vx - vx_enemy) * PREDICT_LENGTH)
    ry_predict = not_zero(ry + (vy - vy_enemy) * PREDICT_LENGTH)
    rz_predict = not_zero(rz + (vz - vz_enemy) * PREDICT_LENGTH)
    state[28], state[29] = _pitch_yaw(rx_predict, ry_predict, rz_predict)

    # --- relative geometry ---
    state[30:33] = obs[14:17] - obs[0:3]
    state[33] = math.sqrt(rx ** 2 + ry ** 2 + rz ** 2)

    return state  # no normalization needed here


# TODO: compute the minimum and maximum of each element of the custom State.

# NOTE(review): the original comment claimed get_state returns the raw
# Observation unchanged — that is stale. These bounds match the 34-dim state
# built in get_state: 15 per-side features repeated twice, then 4 relative
# features (relative position xyz + Euclidean distance).
state_min = [-PI / 3, 0, -950, -950, -950, 0., 0., -1, -1, -math.sin(math.pi/3), \
    0, -PI / 3, 0, -PI / 3, 0] * 2 + [-2e5, -2e5, -1e5, 0]
state_max = [PI / 3, PI * 2, 950, 950, 950, 5., 100., 1, 1, math.sin(math.pi/3), \
    1e5, PI / 3, PI * 2, PI / 3, PI * 2] * 2 + [2e5, 2e5, 1e5, 3e5]

def get_game_win_rwd():
    """Terminal reward magnitude for winning an episode (negated on defeat)."""
    return 0.4


def get_damage_cause_rwd(damage_cause):
    """Reward proportional to damage dealt this step (0.6 total over 100 HP)."""
    scale = 0.6 / 100
    return scale * damage_cause


def get_damage_suffer_rwd(damage_suffer):
    """Penalty proportional to damage taken this step (mirror of the cause reward)."""
    return damage_suffer * (-0.6 / 100)


def get_dist_change_rwd(prev_dist, dist, max_dist):
    """Shaping reward for closing distance: positive when we moved closer."""
    delta = prev_dist - dist
    return 0.2 * delta / max_dist


def get_exist_rwd():
    """Small per-step existence penalty; sums to -0.85 over a full-length episode."""
    return -(0.85 / MAX_ENV_STEP)


def _fire_angle_rwd_scale(pitch, yaw, vec_point):
    """Closeness score in [..1] of the heading (pitch, yaw) to vec_point.

    Computes the angle between the aircraft's nose direction and vec_point,
    then maps it linearly so that 1 means perfectly aligned and 0 means at
    the edge of the widened fire cone (4x DAMAGE_ANGLE_MAX); negative beyond.
    """
    max_fire_angle = ud.DAMAGE_ANGLE_MAX * 4
    vec_head = np.array([math.cos(pitch) * math.cos(yaw), math.cos(pitch) * math.sin(yaw), math.sin(pitch)])
    dot_res = np.dot(vec_head, vec_point)
    norm_res = np.linalg.norm(vec_head) * np.linalg.norm(vec_point)
    # Degenerate zero-length vector -> treat as fully misaligned (pi);
    # clamp the cosine into [-1, 1] to guard against float drift.
    angle = np.pi if norm_res == 0. else np.arccos(max(min(dot_res / norm_res, 1.), -1.))
    return (max_fire_angle - angle) / max_fire_angle


def get_self_fire_angle_rwd(pitch, yaw, dist_vec):
    """Positive shaping reward when our nose points toward the enemy.

    :param pitch: own pitch angle
    :param yaw: own yaw angle
    :param dist_vec: vector from us to the enemy
    """
    return max(0, 0.15 / MAX_ENV_STEP * _fire_angle_rwd_scale(pitch, yaw, dist_vec))


def get_enemy_fire_angle_rwd(pitch, yaw, dist_vec):
    """Negative shaping reward when the enemy's nose points toward us.

    :param pitch: enemy pitch angle
    :param yaw: enemy yaw angle
    :param dist_vec: vector from us to the enemy (negated internally)
    """
    return min(0, -0.15 / MAX_ENV_STEP * _fire_angle_rwd_scale(pitch, yaw, -dist_vec))


def get_reward(obs: np.ndarray, prev_obs: np.ndarray) -> Tuple[float, bool]:
    """Compute the per-step reward and the episode-termination flag.

    The total absolute reward over an episode should generally stay below
    1000 (preferably below 2000).

    :param obs: current raw observation (index layout documented in get_state)
    :param prev_obs: previous raw observation; None at the start of an episode
    :return: (reward, done)
    """
    damage_cause = obs[26]
    damage_suffer = obs[12]
    health = obs[13]
    opp_health = obs[27]

    # Terminal case: either aircraft destroyed -> win/lose bonus, end episode.
    if health <= 0. or opp_health <= 0.:
        won = health >= opp_health
        return (get_game_win_rwd() if won else -get_game_win_rwd()), True

    # Relative geometry for the shaping terms below.
    dist_vec = obs[14:17] - obs[0:3]
    dist = np.sqrt(np.sum(np.square(dist_vec)))

    # First step of an episode: no previous frame to compare against.
    if prev_obs is None:
        return 0., False

    prev_dist = np.sqrt(np.sum(np.square(prev_obs[0:3] - prev_obs[14:17])))

    # Sum of all shaping terms (damage dealt/taken, distance change,
    # fire-cone alignment for both sides, existence penalty).
    r = (get_damage_cause_rwd(damage_cause)
         + get_damage_suffer_rwd(damage_suffer)
         + get_dist_change_rwd(prev_dist, dist, MAX_DIST)
         + get_self_fire_angle_rwd(obs[4], obs[5], dist_vec)
         + get_enemy_fire_angle_rwd(obs[18], obs[19], dist_vec)
         + get_exist_rwd())

    return r, False


def _gen_random_init_pos():
    """Generate the random initial position and attitude of both aircraft.

    xy is limited to [MIN_POS_X, MAX_POS_X] x [MIN_POS_Y, MAX_POS_Y] and z to
    [MIN_POS_Z, MAX_POS_Z]; roll/pitch/yaw are drawn over the full ranges the
    environment allows (see utils.define.COMBAT_OBS_INFO).
    TODO[optional]: shrink the xyz ranges for easier early exploration, then
    enlarge them and resume training once the algorithm converges.

    :return: two lists of six floats each: [x, y, z, roll, pitch, yaw]
    """
    def _one_aircraft():
        position = [random.uniform(MIN_POS_X, MAX_POS_X),   # x
                    random.uniform(MIN_POS_Y, MAX_POS_Y),   # y
                    random.uniform(MIN_POS_Z, MAX_POS_Z)]   # z
        attitude = [random.uniform(COMBAT_OBS_INFO[1][i], COMBAT_OBS_INFO[2][i])
                    for i in range(3, 6)]                   # roll, pitch, yaw
        return position + attitude

    return [_one_aircraft() for _ in range(2)]


def env_constructor():
    """
    Build the simulated combat environment used for training.

    max_step: too large prolongs training; too small may end episodes before
    a winner emerges — usually within 1000~5000. The remaining arguments
    normally stay unchanged.
    """
    # Train against one of the preset opponent policies:
    # GreedyPolicy.max_rot_speed = 600
    # return combat_env.CombatEnv(
    #     ip=_IP, port=_PORT,
    #     # mock_policy_fcn=RandomPolicy,
    #     mock_policy_fcn=GreedyPolicy,
    #     # mock_policy_fcn=LazyPolicy,
    #     state_size=len(state_min), state_min=state_min, state_max=state_max,
    #     get_state_fcn=get_state, get_reward_fcn=get_reward,
    #     max_step=MAX_ENV_STEP,
    #     gen_init_pos_fcn=_gen_random_init_pos,
    #     introduce_damage=True
    # )
    # print("JYZZZZZ call generate_enemy_policy")
    # dir = generate_enemy_policy()
    # print(dir)
    # Load a previously trained DQN policy as the opponent.
    # NOTE: parallel_num MUST be set to 1 when using PolicyLoader!
    return combat_env.CombatEnv(
        random_mock_policy=True,
        ip=_IP, port=_PORT,
        mock_policy_fcn=PolicyLoader('./save/example/dqn/eval_policy'),
        mock_policy_info=[get_state, discrete_env.DirectionActionWrapper._convert_back, state_min, state_max],
        state_size=len(state_min), state_min=state_min, state_max=state_max,
        get_state_fcn=get_state, get_reward_fcn=get_reward,
        max_step=MAX_ENV_STEP,
        gen_init_pos_fcn=_gen_random_init_pos,
        introduce_damage=True
    )

    # Alternative (unreachable — placed after the return above): load a
    # previously trained SAC policy as the opponent.
    # NOTE: parallel_num MUST be set to 1 when using PolicyLoader!
    # return combat_env.CombatEnv(
    #     ip=_IP, port=_PORT,
    #     mock_policy_fcn=PolicyLoader('./save/example/sac/eval_policy'),
    #     mock_policy_info=[get_state, lambda _:_, state_min, state_max],
    #     state_size=len(state_min), state_min=state_min, state_max=state_max,
    #     get_state_fcn=get_state, get_reward_fcn=get_reward,
    #     max_step=MAX_ENV_STEP,
    #     gen_init_pos_fcn=_gen_random_init_pos,
    #     introduce_damage=True
    # )


def eval_printer(trajectory):
    """Decompose an eval-step reward into its components and log them to TensorBoard.

    NOTE(review): a fresh CombatEnv and a fresh summary file writer are
    created on EVERY call — both look expensive; consider hoisting them to
    module scope (confirm CombatEnv construction has no side effects first).
    """
    env = env_constructor()
    # Specify a directory for logging data
    logdir = "./save/example/logs"
    # Create a file writer to write data to our logdir
    file_writer = tf.summary.create_file_writer(logdir)
    step = tf.compat.v1.train.get_or_create_global_step()
    # Recover the state from the first observation of the trajectory —
    # presumably convert_state(..., True) undoes normalization; the index
    # layout below matches get_state's documentation. TODO confirm.
    rstate = env.convert_state(trajectory.observation[0], True)
    health = rstate[6]
    opp_health = rstate[21]
    damage_suffer = rstate[5]
    damage_cause = rstate[20]
    my_pitch = rstate[0]
    my_yaw = rstate[1]
    enemy_pitch = rstate[15]
    enemy_yaw = rstate[16]
    dist_vec = rstate[30:33]

    # Terminal cases: log only the win/defeat bonus.
    if health <= 0.:
        defeat_rwd = -get_game_win_rwd()
        with file_writer.as_default():
            tf.summary.scalar('reward defeat', defeat_rwd, step)
    if opp_health <= 0.:
        win_rwd = get_game_win_rwd()
        with file_writer.as_default():
            tf.summary.scalar('reward win', win_rwd, step)
    
    # Non-terminal step: recompute each shaping term; the distance-change
    # term is recovered as (total reward - all other terms) since prev_dist
    # is not available here.
    if health > 0. and opp_health > 0.:
        reward_all = trajectory.reward[0]
        # r1
        damage_cause_rwd = get_damage_cause_rwd(damage_cause)
        # r2
        damage_suffer_rwd = get_damage_suffer_rwd(damage_suffer)
        # r4
        in_my_fire_rwd = get_self_fire_angle_rwd(my_pitch, my_yaw, dist_vec)
        # r5
        in_enemy_fire_rwd = get_enemy_fire_angle_rwd(enemy_pitch, enemy_yaw, dist_vec)
        # r6
        exist_rwd = get_exist_rwd()

        with file_writer.as_default():
            tf.summary.scalar('reward damage_cause', damage_cause_rwd, step)
            tf.summary.scalar('reward damage_suffer', damage_suffer_rwd, step)
            tf.summary.scalar('reward dist_change',
                                reward_all - damage_cause_rwd - damage_suffer_rwd - 
                                in_my_fire_rwd - in_enemy_fire_rwd - exist_rwd, step)
            tf.summary.scalar('reward self_fire_angle', in_my_fire_rwd, step)
            tf.summary.scalar('reward enemy_fire_angle', in_enemy_fire_rwd, step)
            tf.summary.scalar('reward exist', exist_rwd, step)
            tf.summary.scalar('reward win', 0., step)
            tf.summary.scalar('reward defeat', 0., step)


def get_dqn_trainer():
    """
    Train with the DQN method.
    The action space is discretized; see 'env/discrete_env.py'.
    TODO[optional]: replace _convert_back with a better discretization
    strategy if you have one.
    parallel_num: number of processes sampling from the environment — more is
    usually faster but uses a lot of memory; tune for your machine.
    """
    parallel_num = 15

    # TODO: tune hyper-parameters
    return dqn_trainer.DQNTrainer(
        env_constructor=env_constructor,  # no need to change
        # eval_observer=[eval_printer],
        env_wrappers=[discrete_env.DirectionActionWrapper],  # no need to change
        collect_episodes_per_iter=parallel_num,  # no need to change
        train_rounds_per_iter=parallel_num * 8,  # no need to change
        initial_collect_episodes=parallel_num * 3,  # no need to change
        metric_buffer_num=parallel_num,  # no need to change
        fc_layer_params=(256, 256),  # Critic fully-connected hidden layers (two layers of 256 units)
        start_epsilon=0.121,  # random-exploration probability at the start of training
        end_epsilon=0.12,  # random-exploration probability at the end of training
        epsilon_decay_steps=300000,  # number of steps over which epsilon decays
        q_net_lr=3e-4,  # Critic network learning rate
        gamma=0.996,  # discount factor
        reward_scale_factor=1000,  # reward scaling; keep the scaled per-episode total below 1000 in absolute value
        target_update_tau=0.005,  # soft-update coefficient for the target network
        target_update_period=1,  # interval between target-network updates
        batch_size=512,  # replay batch size per network update
        replay_cap=128 * 1000,  # replay buffer capacity
        train_summary_dir='./save/example/dqn/summary/train',  # training logs for tensorboard
        eval_summary_dir='./save/example/dqn/summary/eval',  # evaluation logs for tensorboard
        train_checkpoint_dir='./save/example/dqn/checkpoint/train',  # for resuming interrupted training
        policy_checkpoint_dir='./save/example/dqn/checkpoint/policy',  # for resuming interrupted training
        replay_checkpoint_dir='./save/example/dqn/checkpoint/replay',  # for resuming interrupted training
        eval_policy_save_dir='./save/example/dqn/eval_policy',  # where the trained policy is saved
        using_ddqn=True,  # whether to use the Double-DQN algorithm
        parallel_num=parallel_num  # number of parallel sampling processes
    )


def get_sac_trainer():
    """
    Train with the SAC method.
    parallel_num: number of processes sampling from the environment — more is
    usually faster but uses a lot of memory; tune for your machine.
    """
    parallel_num = 5

    # TODO: tune hyper-parameters
    return sac_trainer.SACTrainer(
        env_constructor=env_constructor,  # no need to change
        eval_observer=[eval_printer],
        collect_episodes_per_iter=parallel_num,  # no need to change
        train_rounds_per_iter=parallel_num * 8,  # training rounds per environment interaction; increase if interaction is slow
        initial_collect_episodes=parallel_num * 3,  # no need to change
        metric_buffer_num=parallel_num,  # no need to change
        train_summary_dir='./save/example/sac/summary/train',  # training logs for tensorboard
        eval_summary_dir='./save/example/sac/summary/eval',  # evaluation logs for tensorboard
        train_checkpoint_dir='./save/example/sac/checkpoint/train',  # for resuming interrupted training
        policy_checkpoint_dir='./save/example/sac/checkpoint/policy',  # for resuming interrupted training
        replay_checkpoint_dir='./save/example/sac/checkpoint/replay',  # for resuming interrupted training
        eval_policy_save_dir='./save/example/sac/eval_policy',  # where the trained policy is saved
        actor_fc_layer_params=(256, 256),  # Actor fully-connected hidden layers (two layers of 256 units)
        critic_observation_fc_layer_params=(128, 128),  # Critic observation-encoder hidden layers
        critic_action_fc_params=(128, 128),  # Critic action-encoder hidden layers
        critic_joint_fc_layer_params=(256, 256),  # Critic joint hidden layers after the encoders
        actor_lr=3e-4,  # Actor learning rate
        critic_lr=3e-4,  # Critic learning rate
        alpha_lr=3e-4,  # learning rate for SAC's alpha (entropy temperature) variable
        gamma=0.99,  # discount factor
        reward_scale_factor=1.0,  # reward scaling; keep the scaled per-episode total below 1000 in absolute value
        target_update_tau=0.005,  # soft-update coefficient for the target network
        target_update_period=1,  # interval between target-network updates
        batch_size=256,  # training batch size
        replay_cap=1000000,  # replay buffer capacity
        parallel_num=parallel_num  # number of parallel sampling processes
    )


def train(_):
    """absl.app entry point: build the DQN trainer and run training."""
    get_dqn_trainer().train()


if __name__ == '__main__':
    logging.set_verbosity(logging.INFO)
    # handle_main lets tf-agents manage the parallel sampling subprocesses
    # before handing control to absl's app.run(train).
    tf_agents.system.system_multiprocessing.handle_main(functools.partial(app.run, train))
