import copy
import os, re
from absl import app
import numpy as np

from reinforce import Reinforce
from rl_environment import Environment
from utils import get_logger, BufferForNet


class Evaluator(object):
    """Evaluates a checkpointed REINFORCE player via self-play.

    Loads both players' weights from ``checkpoint_dir``, fine-tunes the
    player identified by ``current_player_id`` (``part_train``), then
    measures its mean terminal reward (``evaluate``) and writes the result
    to a shared nash-conv log, serialized by an optional lock.
    """

    def __init__(self, **kwargs):
        # Configuration handed in by the driver process.
        self.n = kwargs["n"]
        self.checkpoint_dir = kwargs["checkpoint_dir"]
        self.checkpoint = kwargs["checkpoint"]
        # Optional multiprocessing/threading lock guarding the shared nash log.
        self.lock = kwargs["lock"]
        self.current_player_id = kwargs["current_player_id"]
        self.train_dir = kwargs["train_dir"]
        self.debug = kwargs["debug"]
        self.game = kwargs["game"]
        self.game_setting = kwargs["game_setting"]
        self.max_update_times = kwargs["max_update_times"]
        self.max_eval_eps = kwargs["max_eval_eps"]
        self.sample_eps = kwargs["sample_eps"]

        self.env = Environment(self.game, **self.game_setting)
        self.info_state_size = self.env.observation_spec()["info_state"][0]
        self.num_actions = self.env.action_spec()["num_actions"]

        self.agents = [
            Reinforce(player_id=idx,
                      info_state_size=self.info_state_size,
                      num_actions=self.num_actions,
                      **kwargs) for idx in range(2)
        ]
        # Restore both players' weights from the checkpoint directory.
        for index, agent in enumerate(self.agents):
            agent.load_checkpoint(os.path.join(self.checkpoint_dir, f"{index}.pkg"))

        # Same name within a group, different across groups, so loggers are shared per group.
        self.nash_logger = get_logger(name=f"nash_conv_{self.train_dir}",
                                      log_dir=f"train_info/{self.train_dir}/log/nash_conv.log")

        path = f"train_info/{self.train_dir}/log/trajs/"
        # exist_ok avoids the check-then-create race when several evaluators start concurrently.
        os.makedirs(path, exist_ok=True)
        # Each evaluator must get a distinct logger name and trajectory file.
        name = f"{self.checkpoint}_{self.current_player_id + 1}"
        self.logger = get_logger(name=f"{name}_{self.train_dir}", log_dir=f"{path}{name}.log", to_stream=self.debug)
        self.buffer = BufferForNet(self.n)

    def evaluate(self):
        """Return the mean terminal reward of the current player over
        ``max_eval_eps`` evaluation-only episodes (nothing is buffered)."""
        rewards = [
            self.sample_one_eps(is_evaluation=True).rewards[self.current_player_id]
            for _ in range(self.max_eval_eps)
        ]
        return np.mean(rewards)

    def sample_one_eps(self, is_evaluation=False):
        """Play one full episode and return the terminal time step.

        When ``is_evaluation`` is False, per-step agent outputs and the final
        rewards are pushed into the replay buffer for the next update.
        """
        time_step = self.env.reset()
        while not time_step.last():
            agents_output = [agent.step(time_step) for agent in self.agents]
            action_list = [agent_output.action for agent_output in agents_output]

            if not is_evaluation:
                self.buffer.push(time_step, agents_output)

            time_step = self.env.step(action_list)
        # Episode is over, step all agents with final info state.
        for agent in self.agents:
            agent.step(time_step)

        if not is_evaluation:
            self.buffer.push(reward=time_step.rewards)  # also record the episode's final rewards

        return time_step

    def part_train(self):
        """Self-play and update the current player until it has been updated
        more than ``max_update_times`` times or its win rate exceeds 0.98."""
        while True:
            self.sample_one_eps()

            # Update once enough episodes have accumulated in the buffer.
            if len(self.buffer) >= self.sample_eps:
                agent = self.agents[self.current_player_id]
                agent.update(self.buffer.get_rewards(), self.buffer.get_trajectories(self.current_player_id))

                update_counter = agent.get_counter()
                win_rate = self.stats(self.current_player_id, update_counter)  # log progress
                self.buffer.initial()

                if update_counter > self.max_update_times or win_rate > 0.98:
                    break

    def run(self):
        """Train the current player, evaluate it, and log the result.

        The shared nash log write is serialized by ``self.lock`` when one is
        provided; try/finally guarantees the lock is released even if the
        logging call raises.
        """
        self.part_train()
        er = self.evaluate()
        if self.lock is not None:
            self.lock.acquire()
        try:
            self.nash_logger.info("检查点: %s  玩家%s: %s", self.checkpoint, self.current_player_id + 1, er)
        finally:
            if self.lock is not None:
                self.lock.release()

    def stats(self, player_id, update_counter):
        """Log win/lose/draw counts for ``player_id`` over the buffered
        episodes and return the win rate (0.0 for an empty buffer)."""
        win, draw, lose = 0, 0, 0
        for rew in self.buffer.get_rewards():  # during training, uses this player's reward
            r = rew[player_id]
            if r == 1:
                win += 1
            elif r == -1:
                lose += 1
            else:
                draw += 1
        self.logger.info("更新次数: %s: win: %s lose: %s draw: %s",
                         update_counter, win, lose, draw)
        total = len(self.buffer)
        # Guard against an empty buffer instead of raising ZeroDivisionError.
        return win / total if total else 0.0
