import sys
import zmq
import socket
import struct
import multiprocessing as mp
from typing import Dict, Tuple, Sequence, List
from utils.utils import *
import argparse
import pickle
import time
import numpy as np
import random
from copy import deepcopy
from enum import Enum
import math
import os, shutil
import torch

# Case 1 : run a single Decision program that receives and processes data in one loop   (√)
# Case 2 : launch one Decision program per ID and process them in a distributed fashion

def States_Recv(_states_list : list, recv_port : int, _update : mp.Value):
    """Receive state lists forwarded by the DataServer over UDP.

    Runs forever in its own process: each datagram is unpickled into a list,
    copied into the shared `_states_list`, and `_update` is set to 1 to tell
    the paired Decision_Make process that fresh data is available.

    Args:
        _states_list: manager-backed shared list the latest state is written to.
        recv_port: local UDP port to bind on 127.0.0.1.
        _update: shared flag (mp.Value('i')); set to 1 here on new data,
            reset to 0 by the consumer (Decision_Make).
    """
    print("Into_State_Recv!")
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Non-blocking so recvfrom never stalls the loop while holding the lock.
    udp_socket.setblocking(False)
    try:
        udp_socket.bind(("127.0.0.1", recv_port))
    except OSError:
        # Keep the process alive; recvfrom will simply never succeed.
        print("Ports_occupied!: ", recv_port)
    lock = _update.get_lock()  # hoisted: same lock object every iteration
    while True:
        time.sleep(0.001)
        with lock:
            try:
                data, address = udp_socket.recvfrom(4096)
            except BlockingIOError:
                # No datagram waiting. Fix vs. the original: do NOT clear
                # _update on idle ticks — zeroing it here could mark a state
                # as stale before Decision_Make had a chance to consume it.
                continue
            # SECURITY NOTE: pickle.loads on network data is unsafe for
            # untrusted peers; tolerable here only because the sender is a
            # local DataServer bound to 127.0.0.1.
            try:
                states = pickle.loads(data)  # expected to be a list
            except Exception:
                continue  # malformed datagram: drop it
            _states_list[:] = states
            _update.value = 1

def Action_Feedback(_action_dict : dict, send_port : int, _decides : dict, total_steps: mp.Value, opt=None, agent=None):
    """Collect every vehicle's decided action and push the batch back to the Env.

    Runs forever in its own process. Waits until every per-vehicle decide flag
    is 1 (i.e. all Decision_Make workers produced an action this round), then
    pickles the action list and sends it over a ZMQ PUSH socket.

    Args:
        _action_dict: manager dict {vehicle_id: action list} filled by Decision_Make.
        send_port: local TCP port the PUSH socket binds on.
        _decides: {vehicle_id: mp.Value} sync flags; reset to 0 after a send.
        total_steps: shared step counter (unused here; kept for interface parity).
        opt, agent: unused here; kept for interface parity with the other workers.
    """
    context = zmq.Context(1)
    send_socket = context.socket(zmq.PUSH)
    send_socket.bind("tcp://127.0.0.1:" + str(send_port))

    while True:
        time.sleep(0.001)  # avoid a pure busy-wait while decisions are pending
        # Synchronization barrier: only send once every vehicle has decided.
        if any(val.value == 0 for val in _decides.values()):
            continue

        send_msg = list(_action_dict.values())

        try:
            send_pkg = pickle.dumps(send_msg)
            send_socket.send(send_pkg, zmq.NOBLOCK)
        except zmq.ZMQError:
            # No receiver connected yet (EAGAIN) or transient socket error.
            # Leave the decide flags set so this round is retried instead of
            # being silently dropped.
            continue
        # Fix vs. the original: reset the flags only AFTER a successful send,
        # so a failed send does not lose a whole decision round.
        for val in _decides.values():
            val.value = 0

def Decision_Make(_states_list : list, _action_dict : dict, _update : mp.Value, _decide : mp.Value, decision_cfg : dict, total_steps : mp.Value, opt=None, agent=None):    # total_steps : mp.Value,
    # 单机决策模块，获取配置
    formation_cfg = decision_cfg['formation_cfg']
    general_cfg = decision_cfg['general_cfg']
    algo_cfg = decision_cfg['algo_cfg']
    # 初始化记忆模块
    last_memory = {'s':None, 'a':None}
    
    lock = _decide.get_lock()
    
    time_start = time.time()
    time_pick = time.time()
    total_steps.value, total_episodes, total_scores = 0, 0, 0.
    disable_flag = False
    
    while True:
        time.sleep(0.001)
        # print("Decision Make!", _update.value)
        # 消息接收未更新则跳过
        if _update.value == 0:
            continue
        # 决策动作  # id, 动作，编队 # [[id], [dvel, dangvel], [leader_id, formation_type, formation_dist]]
        lock.acquire()
        _decide.value = 0
        # print("Decision Maker!", _states_list)
        idx = _states_list[0]
        
        if (time.time() - time_pick)/10. >= 1.:
            if random.random() < 0.1:
                disable_flag = True
            time_pick = time.time()
        
        if all(item == None for item in _states_list[1:6]):
            _action_dict[idx] = [None for _ in range(5)]
            disable_flag = False
        else:
            # 获取整理后的状态向量 vector(16) [[self_states:4][observation_map:12]]
            # 强化学习的观测空间处理
            cur_state = generate_collision_map(_states_list[1:6])
            # 取出单机stateList内容，存入记忆模块，形成马尔可夫链 
            
            s = last_memory['s']
            a = last_memory['a']
            s_next = cur_state    
            r = _states_list[6]
            # print("rewards: ", _states_list)
            dw = _states_list[7]   # done or win
            tr = _states_list[8]   # terminate
            steps = _states_list[9]
            cur_eps = _states_list[10]
            done = (dw or tr)
            
            if not steps:
                total_scores = 0
            
            # print("Memory_block:", _memory_block[idx])
            
            if idx == formation_cfg.leader_id:
                # 长机  # 规则控制 # 之后加避障（以及任务点）
                stage_flag = int((time.time() - time_start)/20.)
                if stage_flag % 4 == 1:
                    action = [0.0, 0.01]
                elif stage_flag % 4 == 3:
                    action = [0.0, -0.01]
                else:
                    action = [0.0, 0.0]
                # action = [0.0, 0.0]    
                _action_dict[idx] = [idx] + action + [None for _ in range(3)]  # 朴素飞行
                
            else:
                # 僚机
                if general_cfg.algo_name == 'APF':
                    if disable_flag:
                        a = [-0.1, 0.]
                    else:
                        a = agent.compute_action(_states_list)
                    # action = list(np.clip([random.uniform(-1, 1), random.uniform(-1, 1)], *[-1, 1]))
                    
                else:
                    '''Interact & train'''
                    if general_cfg.mode.lower() == 'train':
                        
                        if steps and s is not None and a is not None: # 非reset状态
                            # replay_buffer
                            # print("states: ", np.array(s))
                            # print("actions: ",np.array(a))
                            print("rewards:", r)
                            agent.replay_buffer.add(np.array(s), np.array(a), r, np.array(s_next), dw)
                        
                        if total_steps.value < (10*opt.max_e_steps): 
                            # data warming
                            # a = [0., 0.]
                            a = list(np.clip([random.uniform(-1, 1), random.uniform(-1, 1)], *[-1, 1])) # warm up
                        # elif mode == 'eval':
                        #     # evaluating
                        #     total_scores += r
                        #     a = agent.select_action(np.array(cur_state), deterministic=True)
                        else: 
                            # training
                            a = agent.select_action(np.array(cur_state), deterministic=False)

                        total_steps.value += 1
                        
                        # if done:

                        # RL 的计数
                        '''train if its time'''
                        # train 50 times every 50 steps rather than 1 training per step. Better!
                        if (total_steps.value >= 2*opt.max_e_steps) and (total_steps.value % (opt.update_every) == 0):
                            for j in range(opt.update_every):
                                # print(total_steps.value)
                                print("TRAIN!")
                                agent.train()
                        
                        '''record & log'''
                        # if total_steps.value % opt.eval_interval == 0:
                        #     agent.explore_noise *= opt.explore_noise_decay
                        #     # ep_r = evaluate_policy(eval_env, agent, turns=3)
                        #     if opt.write: writer.add_scalar('ep_r', ep_r, global_step=total_steps)
                        #     print(f'EnvName:{EnvName[opt.EnvIdex]}, Steps: {int(total_steps/1000)}k, Episode Reward:{ep_r}')

                        '''save model'''
                        # if total_steps.value % opt.save_interval == 0:
                        #     agent.save(EnvName[opt.EnvIdex], int(total_steps/1000))
                        
                    elif general_cfg.mode.lower() == 'test':
                        pass
                    
                _action_dict[idx] = [idx] + list(a) # list(np.clip([random.uniform(-1, 1), random.uniform(-1, 1)], *[-1, 1]))
                _action_dict[idx].append(formation_cfg)
            
            last_memory["s"] = cur_state
            if a is not None:
                last_memory['a'] = list(a)
        
        # print("Decision Make!", _action_dict[idx])
        _decide.value = 1
        _update.value = 0
        lock.release()

if __name__ == '__main__':
    # Entry point: load configs, build the RL agent, then spawn one UDP
    # receiver + one decision worker per vehicle plus a single ZMQ sender.

    mp.set_start_method('spawn')

    # Load the initialization configuration from YAML.
    init_cfgs = process_yaml_cfg(path_to_configs='/root/code/workspace/src/MVS/Configs/', filename='InitConfigs.yaml')

    veh_params = init_cfgs.vehicle_init_msg['params']
    recv_ports_head = init_cfgs.decider_ports_cfg['recv_ports_head']
    send_port = init_cfgs.decider_ports_cfg['send_port']
    # Decision-side configuration.
    decision_cfg = init_cfgs.decision_cfg
    # General configuration pieces, merged into one namespace for the agent.
    formation_cfg = decision_cfg['formation_cfg']
    general_cfg = decision_cfg['general_cfg']
    algo_cfg = decision_cfg['algo_cfg']
    merge_cfg = MergedConfig()
    merge_cfg = merge_class_attrs(merge_cfg, general_cfg)
    merge_cfg = merge_class_attrs(merge_cfg, algo_cfg)

    # Dynamically import the decision-algorithm module named in the config.
    agent_mod = __import__(f"algos.{merge_cfg.algo_name}.agent", fromlist=['Agent'])

    parser = argparse.ArgumentParser()
    parser.add_argument('--dvc', type=str, default='cuda', help='running device: cuda or cpu')
    parser.add_argument('--EnvIdex', type=int, default=0, help='PV1, Lch_Cv2, Humanv4, HCv4, BWv3, BWHv3')
    parser.add_argument('--write', type=str2bool, default=False, help='Use SummaryWriter to record the training')
    parser.add_argument('--render', type=str2bool, default=False, help='Render or Not')
    parser.add_argument('--Loadmodel', type=str2bool, default=False, help='Load pretrained model or Not')
    parser.add_argument('--ModelIdex', type=int, default=30, help='which model to load')

    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--update_every', type=int, default=50, help='training frequency')
    parser.add_argument('--Max_train_steps', type=int, default=int(5e6), help='Max training steps')
    parser.add_argument('--save_interval', type=int, default=int(1e5), help='Model saving interval, in steps.')
    parser.add_argument('--eval_interval', type=int, default=int(10), help='Model evaluating interval, in steps.')

    parser.add_argument('--delay_freq', type=int, default=1, help='Delayed frequency for Actor and Target Net')
    parser.add_argument('--gamma', type=float, default=0.99, help='Discounted Factor')
    parser.add_argument('--net_width', type=int, default=256, help='Hidden net width, s_dim-400-300-a_dim')
    parser.add_argument('--a_lr', type=float, default=1e-4, help='Learning rate of actor')
    parser.add_argument('--c_lr', type=float, default=1e-4, help='Learning rate of critic')
    parser.add_argument('--batch_size', type=int, default=256, help='batch_size of training')
    parser.add_argument('--explore_noise', type=float, default=0.15, help='exploring noise when interacting')
    parser.add_argument('--explore_noise_decay', type=float, default=0.998, help='Decay rate of explore noise')
    opt = parser.parse_args()
    opt.dvc = torch.device(opt.dvc) # from str to torch.device
    print(opt)

    EnvName = ["CFF"]
    opt.state_dim = 16
    opt.action_dim = 2
    opt.max_action = 1.   # remark: action space is [-max, max]
    opt.max_e_steps = 1000
    print(f'Env:{EnvName[opt.EnvIdex]}  state_dim:{opt.state_dim}  action_dim:{opt.action_dim}  '
          f'max_a:{opt.max_action}  min_a:{-1.}  max_e_steps:{opt.max_e_steps}')

    # Seed everything for reproducibility.
    env_seed = opt.seed
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    print("Random Seed: {}".format(opt.seed))

    # Build SummaryWriter to record training curves.
    if opt.write:
        from torch.utils.tensorboard import SummaryWriter
        # NOTE(review): `datetime` is not imported in this file explicitly;
        # presumably re-exported by 'from utils.utils import *' — confirm.
        timenow = str(datetime.now())[0:-10]
        timenow = ' ' + timenow[0:13] + '_' + timenow[-2::]
        writepath = 'runs/{}'.format(EnvName[opt.EnvIdex]) + timenow
        if os.path.exists(writepath): shutil.rmtree(writepath)
        writer = SummaryWriter(log_dir=writepath)

    if not os.path.exists('model'): os.mkdir('model')
    agent = agent_mod.TD3_agent(**vars(opt)) # vars(): argparse Namespace -> dict
    if opt.Loadmodel: agent.load(EnvName[opt.EnvIdex], opt.ModelIdex)

    # Cross-process shared data structures.
    mgr = mp.Manager()
    _action_dict = mgr.dict()
    _memory_dict = mgr.dict()
    _total_steps = mp.Value('i', 0)

    pcs_recv, pcs_decd = {}, {}
    _states_dict, _updates, _decides, recv_ports = {}, {}, {}, {}

    for i, params in enumerate(veh_params):
        # Per-vehicle shared state and sync flags.
        idx = int(params[0])

        _states_dict[idx] = mgr.list()
        _memory_dict[idx] = {}
        _updates[idx] = mp.Value('i', 0)
        _decides[idx] = mp.Value('i', 0)

        recv_ports[idx] = recv_ports_head + idx
        PORT = recv_ports[idx]

        # Free the UDP port if a stale process still holds it.
        procs = find_procs_by_port(PORT)
        if procs:
            for proc in procs:
                print(f"Found process {proc.pid} occupying port {PORT}. Attempting to kill.")
                try:
                    kill_proc_tree(proc.pid)
                # NOTE(review): `psutil` is not imported here directly; presumably
                # re-exported by 'from utils.utils import *' — confirm.
                except psutil.NoSuchProcess:
                    pass
        else:
            print(f"No process found occupying port {PORT}.")

        # One receiver process + one decision process per vehicle.
        pcs_recv[idx] = mp.Process(target=States_Recv, args=(_states_dict[idx], recv_ports[idx], _updates[idx], ))
        pcs_recv[idx].start()

        pcs_decd[idx] = mp.Process(target=Decision_Make, args=(_states_dict[idx], _action_dict, _updates[idx], _decides[idx], decision_cfg, _total_steps, opt, agent))
        pcs_decd[idx].start()

    # Single sender process aggregating all vehicles' actions.
    pcs_send = mp.Process(target=Action_Feedback, args=(_action_dict, send_port, _decides, _total_steps, opt, agent))
    pcs_send.start()

    # Workers loop forever; these joins effectively block the main process.
    for id, pc in pcs_recv.items():
        pc.join()

    for id, pc in pcs_decd.items():
        pc.join()

    pcs_send.join()