import os
import random
import json
import rl_envs
from net.gpt_simple_tcp_server import main as net_run
from net.gpt_simple_tcp_server import Flag
import threading
from net.msg_handle import msg_handle
import gym
from tools.my_log import log
import numpy as np
import torch
from tqdm import tqdm
from bdtime import tt
import sys


# --- Switch the working directory to the script's own directory so that
# relative paths (saved model params, csv replay memory) resolve consistently.
curr_dir_name = os.path.dirname(__file__)
if curr_dir_name:
    # dirname is '' when the script is launched by bare filename; skip then.
    os.chdir(curr_dir_name)


class Policy:
    """Baseline action source: uniform random sampling or interactive human input."""

    # Keyboard input -> action id; -1 means "quit the game".
    action_conv_dc = {
        'exit': -1,
        'e': -1,
        '0': 0,
        'w': 1,
        's': 2,
        'a': 3,
        'd': 4,
    }

    class ChooseActionPolicy:
        # Enumerates the available action-selection modes.
        sample = 0
        human = 1
        dqn = 2

    model = ChooseActionPolicy.sample

    def _choose_action_by_human(self, state):
        """Prompt the operator for a key and map it to an action id.

        Returns None when the input is not a recognized key.
        """
        key = input("输入[wsad], 对应[上下左右]. 输入[exit]退出 >>>")
        return self.action_conv_dc.get(key.lower())

    def _choose_action_by_sample(self, state=None):
        """Return a uniformly random action in [0, 4]."""
        return random.randint(0, 4)

    def choose_action(self, state=None):
        """Dispatch to the selector configured via `model`; falls back to action 0."""
        selectors = {
            self.ChooseActionPolicy.sample: self._choose_action_by_sample,
            self.ChooseActionPolicy.human: self._choose_action_by_human,
        }
        selector = selectors.get(self.model)
        return selector(state) if selector is not None else 0


if __name__ == '__main__':
    seed = 1

    # Seed numpy/torch early; everything is re-seeded again further down
    # once `args` has been assembled.
    np.random.seed(seed)
    torch.manual_seed(seed)

    first_achieve = False   # becomes True the first time the reward threshold is reached

    # Custom environment; its registration happens as a side effect of the
    # `rl_envs` import at the top of the file.
    env = gym.make('Car2dEnv-v0')

    from policy.dqn import DQN
    from policy.dqn import linear_schedule
    import torch.nn as nn

    class My2LayerNet(nn.Module):
        """MLP with two hidden layers mapping a state vector to per-action Q-values.

        Fix: the original accepted `neuron_number` but ignored it and
        hard-coded the hidden sizes (64, 32).  The parameter is now honored;
        its default is (64, 32) so default construction behaves exactly as
        before.

        Args:
            n_states: input feature dimension (observation size).
            n_actions: output dimension (one Q-value per action).
            neuron_number: hidden-layer widths — an int (used for both
                layers) or a pair (h1, h2).  Defaults to (64, 32).
        """

        def __init__(self, n_states, n_actions, neuron_number=(64, 32)):
            super().__init__()

            # Accept a bare int for convenience (same width for both layers).
            if isinstance(neuron_number, int):
                neuron_number = (neuron_number, neuron_number)
            h1, h2 = neuron_number

            self.network = nn.Sequential(
                nn.Linear(n_states, h1),
                nn.ReLU(),
                nn.Linear(h1, h2),
                nn.ReLU(),
                nn.Linear(h2, n_actions),
            )

        def forward(self, x):
            """Return Q-values of shape (..., n_actions) for input states x."""
            return self.network(x)

    # Build the DQN agent; the commented values are alternatives tried
    # during tuning.
    # policy = DQN(env)
    policy = DQN(
        env,
        # n_actions=None,
        # n_states=None,
        # env_a_shape=None,
        # neuron_number=30,
        # neuron_number=(30, 10),
        gamma=0.99,                  # discount factor
        lr=0.01,                     # learning rate
        memory_capacity=2000,        # replay-buffer size; learning starts once it fills
        # memory_capacity=500,
        target_replace_iter=500,     # steps between target-network syncs
        train_frequency=10,
        batch_size=128,
        net=My2LayerNet,             # Q-network class (instantiated by DQN)
        is_save_memory_to_csv=True,
        # is_save_model_params=True,
    )
    # policy = Policy()              # manual/random baseline policy

    # is_load_memory_from_csv = True
    is_load_memory_from_csv = False      # whether to preload replay memory from a saved csv
    if is_load_memory_from_csv:
        policy.load_memory_from_csv()

    is_save_model_params = True
    # is_load_model = False
    is_load_model = True
    if is_load_model:
        print('=== load model!')
        # NOTE(review): torch.load without map_location will fail on a
        # CPU-only machine if the checkpoint was saved on GPU — confirm.
        _model = torch.load(policy.model_params_save_path)
        policy.target_net.load_state_dict(_model)
        # Aliases eval_net to target_net (same object, not a copy) —
        # presumably intentional so acting uses the loaded weights; verify.
        policy.eval_net = policy.target_net

    # Register environment callbacks for messages arriving from the client.
    msg_handle.add_listener('MsgTest', env.on_msg_test)
    msg_handle.add_listener('MsgState', env.on_msg_state)

    # Run the TCP server in a daemon thread so it dies with the main process.
    # Fix: Thread.setDaemon() is deprecated (since Python 3.10); use the
    # `daemon=` constructor argument instead.
    thread_net = threading.Thread(target=net_run, daemon=True)
    thread_net.start()

    # Block until a client connects; print a heartbeat roughly every 5s.
    while msg_handle.client_connection is None:
        if tt.now(0) and tt.now(0) % 5 == 0:
            print('等待客户端连接中...', tt.now(1))
        tt.sleep(1)

    print('--- 已和客户端建立连接!')

    total_timesteps = 50000   # hard step budget for the whole run
    global_step = 0
    current_epoch = 0         # episode counter

    tq = tqdm(total=total_timesteps)
    msg = "初始化tqdm"

    tt.__init__()             # reset the shared timer so the speed report at the end starts from 0
    reward_epoch = 0
    max_reward = -float('inf')       # best rolling-mean reward seen so far
    max_i = -1                       # episode index at which `max_reward` occurred
    error_state_counts = 0           # steps where env.step returned no observation

    recent_reward_ls = []
    recent_mean_reward = 0      # mean reward over the last `recent_mean_len` episodes
    recent_mean_len = 100

    # Lightweight namespace holding the epsilon-greedy schedule parameters.
    class args:
        seed = seed
        start_e = 1          # initial exploration rate
        # start_e = 0.06
        end_e = 0.05         # final exploration rate
        if is_load_model:
            # Skip the exploration ramp when resuming from a checkpoint.
            start_e = end_e
        exploration_fraction = 0.1   # fraction of total steps spent annealing epsilon
        total_timesteps = total_timesteps

    # Re-seed everything (including the env and the policy) now that args is fixed.
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        env.seed(args.seed)
        env.action_space.seed(args.seed)
        env.observation_space.seed(args.seed)
        policy.set_seed(args.seed)

    # Initial reset; this env's reset returns a full (obs, reward, done, info)
    # tuple rather than only the observation.
    observation, r, done, info = env.reset()
    while True:
        if global_step > total_timesteps:
            print('\n*** Main: `global_step`超过最大值`total_timesteps`')
            break

        if not msg_handle.client_connection:
            print('\n*** Main: 客户端连接已断开...')
            break

        # Wait until the env has processed the previous action before issuing
        # the next one.  NOTE(review): this is a busy spin (no sleep) — it
        # pegs a core while waiting; confirm that is acceptable.
        if env.i_action < global_step:     # finish handling the current action before the next
            print('******************', env.i_action, '---', global_step)
            continue

        # Linearly anneal epsilon from start_e down to end_e over the first
        # `exploration_fraction` of the step budget.
        epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction * args.total_timesteps,
                                  global_step)
        # epsilon = 0.05

        action = policy.choose_action(observation, epsilon)

        if action is not None:
            if action == -1:
                # -1 is the manual 'exit' action: tell the TCP server to stop.
                # NOTE(review): `EXIST_FLAG` looks like a typo for EXIT_FLAG
                # in the net module's API — verify there.
                print("--- 输入[exit], 退出游戏, 结束服务端进程.")
                Flag.EXIST_FLAG = True
                break

            observation_, r, done, info = env.step(action)
            if observation_ is not None:

                reward_epoch += r

                # Store the transition and start learning once the replay
                # buffer has filled up.
                policy.store_transition(observation, action, r, done, observation_)
                if policy.memory_counter > policy.memory_capacity:
                    policy.learn()

                observation = observation_

                if done:
                    # Episode finished: update the rolling-mean reward stats.
                    reward_epoch = round(reward_epoch, 4)
                    recent_reward_ls.append(reward_epoch)
                    if len(recent_reward_ls) < recent_mean_len:
                        recent_mean_reward = sum(recent_reward_ls) / len(recent_reward_ls)
                    else:
                        recent_mean_reward = sum(recent_reward_ls[-recent_mean_len:]) / recent_mean_len
                    recent_mean_reward = round(recent_mean_reward, 4)
                    # Track the best rolling mean, but only once enough
                    # episodes have accumulated for the window to be full.
                    if current_epoch >= recent_mean_len and recent_mean_reward > max_reward:
                        max_reward = recent_mean_reward
                        max_i = current_epoch

                    msg = f"current_e[{current_epoch}], r: {reward_epoch} |" \
                          f" recent_mean_r[{recent_mean_len}]: {recent_mean_reward} |" \
                          f" max_r: {max_reward}, max_i: {max_i} |" \
                          f" net_error: {error_state_counts}, epsilon: {round(epsilon, 4)} "

                    observation, r, done, info = env.reset()

                    reward_epoch = 0
                    current_epoch += 1

                    # First time the rolling mean clears the env's reward
                    # threshold: save the model and ask the client to slow
                    # its reporting interval down.
                    if current_epoch >= recent_mean_len and recent_mean_reward > env.spec.reward_threshold and first_achieve is False:
                        first_achieve = True

                        print(f'\n\n=== 第`{current_epoch}`回合, `{global_step}`步的`近{recent_mean_len}场平均奖励`为[{recent_mean_reward}]'
                              f'达到最大奖励阈值`{env.spec.reward_threshold}`, 结束运行.')

                        if is_save_model_params:
                            torch.save(policy.target_net.state_dict(), policy.model_params_save_path)

                        from net.msg_class import MsgInterval
                        msg_cls = MsgInterval(200)
                        msg_handle.send(msg_cls)
                        # env.exceed_time = 2
                        env.set_exceed_time(1.5)

            else:
                # env.step produced no observation (e.g. network hiccup);
                # count it and keep going.
                msg = f"*** ret is None!"
                print(msg)
                error_state_counts += 1
            tq.desc = msg

            tq.update(1)
            global_step += 1
        else:
            # Only reachable with the human policy, which returns None for
            # unrecognized input.
            print(f"--- 输入错误... 输入[wsad]移动, [0]退出...")

    # Report throughput and shut the server connection down.
    print(f'速度: {round(tq.n / tt.now(), 3)} 条/秒')
    msg_handle.close()

    tt.sleep(1)

    # Give the network thread a moment to exit on its own; it is a daemon
    # thread, so terminating the interpreter will kill it regardless.
    if thread_net.is_alive():
        print('等待线程thread_net释放...')
        tt.sleep(3)
        if thread_net.is_alive():
            print('*** 无法自动释放, 强制结束线程!')
            sys.exit()

