import argparse
import threading
import time
import json
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import io

from env.env_client import EnvClient
from env.env_cmd import EnvCmd
from env.env_runner import EnvRunner

from agent.red.red_RL_agent import RedAgent
from agent.blue.blue_rule_agent import BlueAgent
from record.attach2docker import net_todocker_init


def connect_loop(rpyc_port):
    """Establish an rpyc connection to the simulation platform in the container.

    ``rpyc_port`` is the host-side port mapped out of the container. Keeps
    retrying every 3 seconds until a client can be built and the red side
    reports a non-empty airport list, then returns the connected ``EnvClient``.
    """
    while True:
        try:
            client = EnvClient('127.0.0.1', rpyc_port)
            obs = client.get_observation()
            print("连接前的环境检测：")
            print(obs)
            # Readiness used to be judged from 'units' (which looped forever
            # before any aircraft took off); it is now judged from 'airports'.
            if obs['red']['airports']:
                return client
        except Exception as err:
            print(err)
            print("rpyc connect failed")
        time.sleep(3)


def print_info(units):
    """Print one formatted status line per unit dict, skipping type-41 units."""
    template = ('id: {:3d}, tmid: {:4d}, speed: {:3.0f}, x: {:6.0f}, y: {:6.0f}, z: {:5.0f}, '
                'type: {:3d}, state: {:3d}, alive: {:2d}, hang: {:3.0f}')
    for unit in units:
        if unit['LX'] == 41:
            continue
        print(template.format(unit['ID'], unit['TMID'], unit['SP'], unit['X'], unit['Y'],
                              unit['Z'], unit['LX'], unit['ST'], unit['WH'], unit['Hang']))


# Print intelligence (qb) information
def print_qb(units):
    """Print one formatted intelligence line per contact dict, skipping type 42."""
    for item in units:
        if item['LX'] == 42:
            continue
        line = 'id: {:3d}, tmid: {:4d}, jb:{:2d}, x: {:6.0f}, y: {:6.0f}, z: {:5.0f}, type: {:3d}, dam: {:2d}'.format(
            item['ID'], item['TMID'], item['JB'], item['X'], item['Y'], item['Z'], item['LX'], item['DA'])
        print(line)


class WarRunner(EnvRunner):
    """Runs red-vs-blue battle episodes against the containerised simulator.

    Thin wrapper around ``EnvRunner`` whose ``run`` method drives the whole
    experiment: starting/connecting the environment, stepping agents,
    tallying wins, and persisting per-episode rewards/results to text files
    and ``result.mat``.
    """

    def __init__(self, env_id, server_port, agents, config, replay):
        # 780 6100
        EnvRunner.__init__(self, env_id, server_port, agents, config, replay)  # simulation environment init
        print("————————————仿真环境初始化完成！————————————")

    def run(self, num_episodes, speed):
        """Battle scheduling loop.

        Plays up to ``num_episodes`` episodes at simulation ``speed`` and
        returns ``battle_results`` as ``[red_wins, draws, blue_wins]``.
        """
        # Start the simulation environment and open the rpyc connection
        # to the server side.
        self._start_env()
        print("————————————仿真环境启动完成！————————————")
        self.env_client = connect_loop(self.env_manager.get_server_port())
        self.env_client.take_action([EnvCmd.make_simulation("SPEED", "", speed)])
        print("————————————仿真环境远程连接完成！————————————")
        # f = open("state.json", "w")
        battle_results = [0, 0, 0]  # [red wins, draws, blue wins]

        red_result = []  # per-episode red outcome history (1 / 0 / -1)
        red_reward = []  # per-episode red average-reward history
        x_episode = []  # plot x-axis: episode number
        y_reward = []  # plot y-axis: red reward value
        battle_results[0] = 0
        battle_results[1] = 0
        battle_results[2] = 0
        lunci = 0  # "round count": episode index to resume from
        
        # Reload previously recorded rewards (one float per line) so a run
        # can be resumed.
        # NOTE(review): this raises FileNotFoundError on a fresh run if
        # result_onlyreward.txt does not exist yet — confirm it is pre-created.
        with open("result_onlyreward.txt",'r+') as f:
            while True:
                lines = f.readline()
                if not lines:
                    break
                lines = lines.split()
                p = float(lines[0])
                y_reward.append(p)


        # NOTE(review): lunci is never advanced above, so this loop is a
        # no-op and the episode loop below always starts at 0 — presumably
        # lunci was meant to be len(y_reward) when resuming. TODO confirm.
        for i in range(lunci):
             x_episode.append(i + 1)

        for i in range(lunci,num_episodes):
            num_frames = 0
            self.logger.debug("开始第{}局对战\n".format(i + 1))
            self._run_env()

            # Start the two threads that record this episode's replay data.
            if self.save_replay:
                data_port = self.env_manager.get_data_port()
                folder = self.replay_dir
                net_todocker_init('127.0.0.1', data_port, self.agents[0].name, self.agents[1].name, i, folder)

            while True:
                # try:
                num_frames += 1
                observation = self._get_observation()  # fetch the current situation
                print("————————————双方得到的情报信息————————————")
                print('red')
                print_qb(observation['red']['qb'])
                print()
                print_info(observation['red']['units'])
                print()
                print('blue')
                print_qb(observation['blue']['qb'])
                print()
                print_info(observation['blue']['units'])
                print()
                print("—————————————————————————————————————")
                # print("(x,y):" ,observation['red']['units'])
                sim_time = observation['sim_time']
                print(i + 1, sim_time)
                print()

                # if len(observation['red']['rockets']) > 0:
                # The JSON written here ends up on a single line; Ctrl+Alt+L
                # in the IDE reformats it into a readable dict layout.
                # f.write(json.dumps(observation, ensure_ascii=False))

                self._run_agents(observation,i)  # dispatch agent commands

                # NOTE(review): `done` is only assigned when a red agent
                # exists; it would be unbound on the next line otherwise.
                for agent in self.agents:
                    if agent.side == 'red':
                        done = self._get_done(observation,agent.get_final_dis())  # episode over (winner decided or max length reached)
                if done[0]:  # reset the environment once the battle ends
                    # Tally win / draw / loss from the two score entries.
                    if done[1] > done[2]:
                        battle_results[0] += 1
                        red_res = 1
                    elif done[1] == done[2]:
                        battle_results[1] += 1
                        red_res = 0
                    else:
                        battle_results[2] += 1
                        red_res = -1

                    # Pull the red agent's average reward and final distance.
                    for agent in self.agents:
                        if agent.side == 'red':
                            average_reward = agent.get_average_reward()
                            final_dis = agent.get_final_dis()
                    # print("average_reward", average_reward)

                    # Append this episode's result to the log files.
                    with open('result2.txt', 'a') as f:
                        str1 = "第%d局： red: %d  blue: %d.....红色奖励: %f  红方追踪成功：%d局  红方追踪失败：%d局  最终距离：%f\n" % (
                            i + 1, done[1], done[2], average_reward, battle_results[0],
                            battle_results[2], final_dis)
                        f.write(str1)

                    with open('result_onlyreward.txt', 'a') as f:
                        str2 = "%f\n" % average_reward
                        f.write(str2)
                    with open('result_finaldis.txt', 'a') as f:
                        str3 = "%f\n" % final_dis
                        f.write(str3)

                    # # 绘图
                    # x_episode.append(i + 1)
                    # y_reward.append(average_reward)
                    # # 每50回合保存打印一次图像
                    # if (i + 1) % 5 == 0:
                    #     print("保存图像")
                    #     plt.title('reward at episode: %d' % (i + 1))
                    #     plt.plot(x_episode, y_reward, color='red', label='reward')
                    #     # plt.legend()  # 显示图例
                    #     plt.xlabel('x episode')
                    #     plt.ylabel('reward')
                    #     plt.savefig('img/reward_episode_%d.png' % (i + 1))

                    # Persist cumulative histories for offline analysis.
                    red_result.append(red_res)
                    red_reward.append(average_reward)
                    io.savemat('result.mat', {'red_result': red_result, 'red_reward': red_reward})

                    # Reset the environment and reconnect for the next episode.
                    self.env_manager.reset()
                    self.env_client = connect_loop(self.env_manager.get_server_port())
                    self.env_client.take_action([EnvCmd.make_simulation("SPEED", "", speed)])
                    self._reset()  # reset the agents
                    break
                self._run_env()
                # except Exception as e:
                #     print(e)
                #     print("容器运行出现异常需要重启")
                #     # 记录错误信息
                #     self.logger.debug("程序运行{}秒后出错, 错误信息为: {}\n".format(sim_time, e))
                #     self._start_env()
                #     self.env_client = connect_loop(self.env_manager.get_server_port())
                #     self.env_client.take_action([EnvCmd.make_simulation("SPEED", "", speed)])
                #     self._reset()  # 智能体重置
                #     break
        # close file
        # f.close()
        with open('result.txt', 'a') as f:
            str1 = "最终结果： 红方追踪成功：%d  平局：%d 蓝方追踪成功：%d\n" % (battle_results[0], battle_results[1], battle_results[2])
            f.write(str1)

        return battle_results


# Runner configuration unpacked into WarRunner(env_id, **config).
config = {
    'server_port': 6100,
    'config': {
        'scene_name': '/home/Joint_Operation_scenario_fusai.ntedt',  # absolute path of the scenario file inside the container
        'prefix': './',  # path of the container-management script manage_client (relative path here)
        'image_name': 'sim_fast:v3.1',  # docker image name
        # 'volume_list': [],
        'volume_list': [(os.path.join(os.path.dirname(os.path.abspath(__file__)), "scen/default8v8.xml"),
                         '/home/TSServer/bin/ScenFile/default.xml')],
        'max_game_len': 350  # maximum number of decision steps
    },
    'agents': {
        'red_name': {  # team name
            'class': RedAgent,  # agent class
            'side': 'red'  # agent's military side (do not change!)
        },
        'blue_name': {  # team name
            'class': BlueAgent,  # agent class
            'side': 'blue'  # agent's military side (do not change!)
        }
    },
    'replay': {  # replay-recording settings
        'save_replay': False,  # whether to record
        'replay_dir': './replays'  # replay save path
    }
}


def main(env_id, num_episodes, speed):
    """Build a WarRunner from the module-level config for environment
    ``env_id`` and play ``num_episodes`` episodes at simulation ``speed``."""
    runner = WarRunner(env_id, **config)
    battle_results = runner.run(num_episodes, speed)
    print('battle results: {}'.format(battle_results))


if __name__ == '__main__':
    # Command-line entry point: parse options and launch the run loop.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_id', type=int, required=False, default=801, help='id of environment')
    parser.add_argument('--speed', type=int, required=False, default=10, help='simulation speed')
    parser.add_argument('--num_episode', type=int, required=False, default=480, help='num episodes per env')

    parsed = parser.parse_args()

    main(parsed.env_id, parsed.num_episode, parsed.speed)
