# 时间 : 2021/10/9 16:25 
# 作者 : Dixit
# 文件 : main_train.py 
# 说明 : 
# 项目 : moziai
# 版权 : 北京华戍防务技术有限公司

import argparse
from gym.spaces import Dict, Box
import os
import json

import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.search import ConcurrencyLimiter
from ray.tune.search.hyperopt import HyperOptSearch

from ray.rllib.algorithms.ddpg.ddpg import DDPG, DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.algorithms.ddpg.ddpg_torch_policy import DDPGTorchPolicy
from ray.rllib.agents.trainer import Trainer
from ray.rllib.utils.test_utils import check_learning_achieved

from env.env_uav import UAVEnv

import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"

parser = argparse.ArgumentParser()
# Ray cluster address (ip:port), used when joining an existing cluster.
parser.add_argument("--address", type=str, default='172.17.94.77:6001')

# ------------------ Parameters used only for online training ------------------
# Online training task ID.
parser.add_argument("--training_id", type=int, default=14)
# Online training agent ID.
parser.add_argument("--agent_id", type=int, default=23)
# Online scenario name.
parser.add_argument("--scenario_name", type=str, default='-1')
# Online training only: IP and port of the web site backend.
parser.add_argument("--backend_ip_port", type=str, default='59.110.236.231:8023')

# ------------------ General training parameters ------------------
# Number of rollout workers.
parser.add_argument("--num_workers", type=int, default=1)
# Stop condition: number of training iterations.
parser.add_argument("--training_iteration", type=int, default=5)
# Checkpoint frequency (in training iterations).
parser.add_argument("--checkpoint_freq", type=int, default=1)
# Number of checkpoints to keep.
parser.add_argument("--keep_checkpoints_num", type=int, default=10)
# Run mode: 'development', 'train_online', or anything else (e.g. 'eval').
parser.add_argument("--platform_mode", type=str, default='eval')
# Server -> docker-count map; the number of dockers created should be
# num_workers+1 (e.g. num_workers=3 requires 4 dockers). JSON-encoded string.
parser.add_argument("--server_docker_dict", type=str, default='{"172.17.94.77": 2}')
# Number of GPUs for the trainer process.
parser.add_argument("--num_gpus", type=int, default=0)
# Number of GPUs per worker.
parser.add_argument("--num_gpus_per_worker", type=int, default=0)
# Number of Tune samples (trials).
parser.add_argument("--num_samples", type=int, default=1)
# Directory where training results are written.
parser.add_argument("--result_dir", type=str, default='/root/ray_training_result')
# Checkpoint path; when set, training resumes from that result.
parser.add_argument("--restore", type=str, default=None)
# Name of the side controlled by the agent.
parser.add_argument("--side", type=str, default="红方")


# --------------------- Training hyper-parameters ---------------------
parser.add_argument("--framework", type=str, default="torch")
parser.add_argument("--train_batch_size", type=int, default=512)
parser.add_argument("--learning_starts", type=int, default=1000)
# Exploration noise type (see RLlib DDPG exploration_config).
parser.add_argument("--type", type=str, default="OrnsteinUhlenbeckNoise")
parser.add_argument("--random_timesteps", type=int, default=1000)
parser.add_argument("--ou_base_scale", type=float, default=0.1)
parser.add_argument("--ou_theta", type=float, default=0.15)
parser.add_argument("--ou_sigma", type=float, default=0.2)
parser.add_argument("--initial_scale", type=float, default=1.0)
parser.add_argument("--final_scale", type=float, default=0.02)
parser.add_argument("--scale_timesteps", type=int, default=10000)

# NOTE(review): --torch and --mixed-torch-tf are parsed but never referenced
# in this file — confirm they are unused before removing them.
parser.add_argument("--as-test", action="store_true")
parser.add_argument("--torch", action="store_true")
parser.add_argument("--mixed-torch-tf", action="store_true")
parser.add_argument("--stop-reward", type=float, default=150.0)
parser.add_argument("--stop-timesteps", type=int, default=100000)
# Path to the local Mozi server install (exported as MOZIPATH in local mode).
parser.add_argument("--mozi_server_path", type=str, default='D:\\mozi_4p\\mozi\\Mozi\\MoziServer\\bin')

# The number of dockers created should be num_workers+1 (e.g. num_workers=3
# requires 4 dockers).
# NOTE(review): this constant is only referenced from a commented-out line
# further down — likely dead; confirm before removing.
SERVER_DOCKER_DICT = {'127.0.0.1': 0, }  # {'8.140.121.210': 2, '123.57.137.210': 2}

if __name__ == "__main__":

    args = parser.parse_args()

    # Bring up (or attach to) a Ray runtime according to the platform mode.
    mode = args.platform_mode
    if mode == 'train_online':
        # Online training: join the remote cluster at the configured address.
        ray.init(address=args.address)
    elif mode == 'development':
        # Development: attach to an already-running local cluster.
        ray.init(address="auto")
        # ray.init(local_mode=True)
    else:
        # Any other mode (e.g. the default 'eval'): run single-process and
        # point the Mozi client at the local server installation.
        os.environ['MOZIPATH'] = args.mozi_server_path
        ray.init(local_mode=True)

    # Extra DDPG settings layered on top of RLlib's defaults.  All values come
    # from the command line (previously they were hard-coded here, silently
    # ignoring the parsed hyper-parameter flags); the defaults are identical
    # to the old constants, so behavior is unchanged unless flags are passed.
    ddpg_extra_config = {
        "framework": args.framework,
        "train_batch_size": args.train_batch_size,
        # Number of env steps to sample before learning starts.
        "learning_starts": args.learning_starts,
        "exploration_config": {
            # DDPG uses OrnsteinUhlenbeck (stateful) noise to be added to NN-output
            # actions (after a possible pure random phase of n timesteps).
            "type": args.type,
            # For how many timesteps should we return completely random actions,
            # before we start adding (scaled) noise?
            "random_timesteps": args.random_timesteps,
            # The OU-base scaling factor to always apply to action-added noise.
            "ou_base_scale": args.ou_base_scale,
            # The OU theta param.
            "ou_theta": args.ou_theta,
            # The OU sigma param.
            "ou_sigma": args.ou_sigma,
            # The initial noise scaling factor.
            "initial_scale": args.initial_scale,
            # The final noise scaling factor.
            "final_scale": args.final_scale,
            # Timesteps over which to anneal scale (from initial to final values).
            "scale_timesteps": args.scale_timesteps,
        },
    }
    # Merge onto RLlib's DDPG defaults; unknown keys are tolerated.
    ddpg_config = Trainer.merge_trainer_configs(DDPG_CONFIG, ddpg_extra_config, _allow_unknown_configs=True)

    # Observation space: a single unbounded 14-dimensional feature vector
    # exposed under the "obs" key.
    inf = float("inf")
    obs_space = Dict({
        "obs": Box(low=-inf, high=inf, shape=(14,)),
        # "action_mask": Box(0, 1, shape=(self.action_size,)),
    })

    # Action space: one continuous action in [-1, 1].
    act_space = Box(low=-1, high=1, shape=(1,))

    # A single torch-DDPG policy shared by the UAV agent.
    policies = {
        "uav_ddpg_policy": (DDPGTorchPolicy, obs_space, act_space, ddpg_config),
    }


    def policy_mapping_fn(agent_id):
        """Map an env agent id to its policy id; only "agent_0" is known."""
        if agent_id != "agent_0":
            # Any other agent id is unexpected in this single-agent setup.
            raise NotImplementedError
        return "uav_ddpg_policy"


    policies_to_train = ["uav_ddpg_policy", ]

    # server_ip_port = ['127.0.0.1:6060', ]
    # Environment settings.  The controlled side now comes from --side
    # (previously hard-coded, silently ignoring the flag); the opponent is
    # derived as the other of the two sides.  Defaults reproduce the old
    # behavior (红方 vs 蓝方).
    env_config = {'mode': 'eval',
                  'avail_docker_ip_port': ['127.0.0.1:6060', ],
                  'side_name': args.side,
                  'enemy_side_name': '蓝方' if args.side == '红方' else '红方',
                  # 'sever_docker_dict': SERVER_DOCKER_DICT
                  }
    if args.platform_mode == 'train_online':
        # Online training: forward the platform bookkeeping ids and the
        # docker topology ({server_ip: docker_count}) to the environment.
        env_config['mode'] = 'train_online'
        env_config['training_id'] = args.training_id
        env_config['agent_id'] = args.agent_id
        env_config['scenario_name'] = args.scenario_name
        env_config['backend_ip_port'] = args.backend_ip_port
        # NOTE(review): key is spelled 'sever_docker_dict' (sic) — kept as-is
        # because the env presumably reads this exact key; confirm before fixing.
        env_config['sever_docker_dict'] = json.loads(args.server_docker_dict)

    # Tune/RLlib run configuration.  Hyper-parameters parsed above are now
    # threaded through (previously hard-coded with identical values, so CLI
    # overrides had no effect); GPU counts from --num_gpus/--num_gpus_per_worker
    # are also forwarded (both default to 0, matching the old implicit default).
    config = {
        "env": UAVEnv,
        "num_workers": args.num_workers,
        "num_gpus": args.num_gpus,
        "num_gpus_per_worker": args.num_gpus_per_worker,
        # Learning rate is sampled by the HyperOpt search.
        "lr": tune.uniform(5e-5, 5e-4),
        "multiagent": {
            "policies": policies,
            "policy_mapping_fn": policy_mapping_fn,
            "policies_to_train": policies_to_train,
        },
        "framework": args.framework,
        "env_config": env_config,
        # "num_sgd_iter": 10,
        # "sgd_minibatch_size": 8,
        # "rollout_fragment_length": 32,
        "train_batch_size": args.train_batch_size,
        "learning_starts": args.learning_starts,
        "target_network_update_freq": 1500,
        "timesteps_per_iteration": 32,
    }

    # Stop each trial after --training_iteration iterations.  This dict was
    # previously built but never passed to tune.run, so the flag had no effect.
    stop = {
        "training_iteration": args.training_iteration,
    }

    # HyperOpt search limited to one concurrent trial, with ASHA early stopping.
    algo = ConcurrencyLimiter(HyperOptSearch(), max_concurrent=1)
    scheduler = AsyncHyperBandScheduler(max_t=3000)
    results = tune.run(DDPG,
                       metric="episode_reward_mean",
                       mode="max",
                       search_alg=algo,
                       scheduler=scheduler,
                       num_samples=args.num_samples,
                       checkpoint_freq=args.checkpoint_freq,
                       keep_checkpoints_num=args.keep_checkpoints_num,
                       config=config,
                       stop=stop,
                       local_dir=args.result_dir,       # where training results are stored
                       restore=args.restore             # checkpoint to resume from, if any
                       )

    if args.as_test:
        check_learning_achieved(results, args.stop_reward)

    ray.shutdown()
