# Runs one episode cycle and returns the reward, etc., depending on the specific situation.
"""Actor to generate trajectories"""

import torch

import hfo
import torch.multiprocessing as mp
from hfo_game import SelectAction, HFOGameState, StartHFOServer, GetAction, GetRandomHFOAction
from model import Net
from utils import v_wrap, record, push_and_pull
import logging, random
module_logger = logging.getLogger("mainModule.actor")

class Actor(mp.Process):
    """A3C worker process.

    Each worker owns one HFO server instance, interacts with it to collect
    trajectories, and periodically pushes gradients to / pulls weights from
    the shared global network. Multiple workers train asynchronously.
    """

    def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name, args):
        """
        Args:
            gnet: global (shared) network.
            opt: shared optimizer that updates `gnet`.
            global_ep: shared counter of finished episodes.
            global_ep_r: shared running episode-reward statistic.
            res_queue: queue this worker pushes episode results into.
            name: integer worker index; also selects the server port.
            args: config namespace (HFODir, epsilon, MAX_EP,
                UPDATE_GLOBAL_ITER, GAMMA).
        """
        super(Actor, self).__init__()
        self.args = args
        self.name = 'w%i' % name
        self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        # gnet is the global network; lnet is the local learner network.
        self.gnet, self.opt = gnet, opt
        self.lnet = Net()           # local network
        self.lnet.load_state_dict(gnet.state_dict())
        # One dedicated HFO server per worker; ports spaced 1000 apart.
        self.port = 6000 + name * 1000
        StartHFOServer(str(self.port), '1', '0', '0', '0', args.HFODir)

    def run(self):
        '''
        Main worker loop: choose actions (epsilon-greedy between a random
        HFO action and the local policy), step the environment, accumulate
        transitions, and sync with the global network every
        UPDATE_GLOBAL_ITER steps or at episode end.
        '''
        self.hfo_env = hfo.HFOEnvironment()
        self.hfo_env.connectToServer(hfo.LOW_LEVEL_FEATURE_SET,
                      self.args.HFODir + '/bin/teams/base/config/formations-dt', self.port,
                      'localhost', 'base_left', False)
        self.game = HFOGameState(self.hfo_env.getUnum())
        # HFOGameState holds no environment data itself; it derives
        # everything from the hfo environment passed to update().
        total_step = 1
        while self.g_ep.value < self.args.MAX_EP:
            s = self.hfo_env.getState()  # low-level state feature vector
            # Buffers for state, action type, action parameters, reward.
            buffer_s, buffer_at, buffer_ap, buffer_r = [], [], [], []
            ep_r = 0.  # cumulative reward for this episode
            while True:
                # Epsilon-greedy: random action vs. local-network policy,
                # yielding an action type (at) and its parameters (ap).
                if random.random() < self.args.epsilon:
                    at, ap = GetRandomHFOAction()
                else:
                    # s[None, :] adds a batch dimension: shape [59] -> [1, 59]
                    at, ap = self.lnet.choose_action(v_wrap(s[None, :]))

                module_logger.debug(at)

                action = GetAction(at, ap)
                # TURN and TACKLE take a single parameter; all other
                # actions take two.
                if action[0] == hfo.TURN or action[0] == hfo.TACKLE:
                    self.hfo_env.act(action[0], float(action[1]))
                else:
                    self.hfo_env.act(action[0], float(action[1]), float(action[2]))

                s_ = self.hfo_env.getState()
                # The environment advanced, so refresh the game-state
                # wrapper (one refresh per environment step, not two).
                self.game.update(self.hfo_env)
                r = self.game.reward()
                module_logger.debug("reward" + str(r))

                done = self.game.episode_over
                # NOTE(review): a flat -1 on every episode end also
                # penalizes goal-scoring terminations — confirm this is the
                # intended terminal reward.
                if done: r = -1

                ep_r += r
                buffer_at.append(at)
                buffer_ap.append(ap)
                buffer_s.append(s)
                buffer_r.append(r)

                if total_step % self.args.UPDATE_GLOBAL_ITER == 0 or done:
                    # Update the global net and sync the local net from it.
                    push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_at, buffer_ap, buffer_r, self.args.GAMMA)
                    # BUG FIX: buffer_ap was previously not cleared here, so
                    # action parameters accumulated across updates and fell
                    # out of alignment with the other three buffers.
                    buffer_s, buffer_at, buffer_ap, buffer_r = [], [], [], []

                    if done:  # episode finished: record stats, start a new one
                        record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)
                        break
                s = s_
                total_step += 1
        # Sentinel: signal the consumer that this worker has finished.
        self.res_queue.put(None)


