import os

import numpy as np
from spiel_env import Env
from utils import Buffer, softmax_grad, gen_softmax, ModelSaver, get_logger


class Evaluator(object):
    """Evaluates a pair of tabular softmax policies on the spiel ``Env`` and
    fine-tunes one player's policy with REINFORCE-style gradient updates.

    The instance is bound to one player (``current_player_id``); ``self.policy``
    aliases that player's table so in-place updates also mutate
    ``policy1``/``policy2``.
    """

    def __init__(self, **kwargs):
        self.checkpoint_dir = kwargs["checkpoint_dir"]
        self.checkpoint = kwargs["checkpoint"]
        self.lock = kwargs["lock"]
        self.player_id = kwargs["current_player_id"]
        self.train_dir = kwargs["train_dir"]
        self.debug = kwargs["debug"]
        self.n = kwargs["n"]
        self.k = kwargs["k"]
        self.max_eval_eps = kwargs["max_eval_eps"]
        self.learn_rate = kwargs["max_learn_rate"]
        self.learn_eps = kwargs["learn_eps"]
        self.max_update_times = kwargs["max_update_times"]

        self.env = Env(self.n, self.k)
        self.saver = ModelSaver(lock=self.lock)  # used to load policy weights
        self.buffer = Buffer()
        self.policy1, self.policy2 = self.saver.load(self.checkpoint_dir)
        # self.policy is a reference (shared storage), not a copy: in-place
        # updates in update_policy() also change policy1/policy2.
        if self.player_id == 0:
            self.policy = self.policy1
        else:
            self.policy = self.policy2

        log_dir = f"train_info/{self.train_dir}/log/"
        # Multiple evaluation groups need distinct logger names, but runs in the
        # same group must share one name, so no timestamp is appended.
        self.nash_logger = get_logger(name=f"nash_conv_{self.train_dir}", log_dir=log_dir + "nash_conv.log")
        self.log_dir = log_dir + "trajs/"

        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(self.log_dir, exist_ok=True)

        name = f"{self.checkpoint}_{self.player_id + 1}"
        self.eval_logger = get_logger(name=name + f"_{self.train_dir}", log_dir=f"{self.log_dir}{name}.log",
                                      to_stream=self.debug)

        self.update_counter = 0  # number of policy updates performed so far

    def evaluate(self, policy1, policy2):
        """Play ``max_eval_eps`` episodes and return this player's mean reward."""
        _, mean_reward, _ = self.evaluate1(policy1, policy2)
        return mean_reward

    def evaluate1(self, policy1, policy2, max_evaluate_eps=None):
        """Play episodes and return (mean, mean, variance) of this player's reward.

        The first two values are identical by construction (the original API
        returned sum/len and mean separately); both are kept for caller
        compatibility. ``max_evaluate_eps`` defaults to ``self.max_eval_eps``.
        """
        if max_evaluate_eps is None:
            max_evaluate_eps = self.max_eval_eps

        rewards = []
        for _ in range(max_evaluate_eps):
            obs, done = self.env.init()
            while not done:
                actions = self.get_actions(obs, policy1, policy2)
                obs, done = self.env.step(actions)
            rewards.append(self.env.get_reward()[self.player_id])

        mean_reward = np.mean(rewards)
        return mean_reward, mean_reward, np.var(rewards)

    def get_actions(self, obs, policy1, policy2):
        """Sample one action per player from their softmax policies at ``obs``.

        Action 0 is legal; the sampled index IS the action.
        """
        softmax1 = gen_softmax(policy1[obs])
        action1 = np.random.choice(len(softmax1), p=softmax1)

        softmax2 = gen_softmax(policy2[obs])
        action2 = np.random.choice(len(softmax2), p=softmax2)

        return action1, action2

    def get_loss(self):
        """Compute the REINFORCE gradient per observation, averaged over the buffer.

        Returns:
            dict: obs -> accumulated ``(d log pi(a|obs) / d theta) * reward``,
            divided by the number of trajectories in the buffer.
        """

        def cal(thetas, a, r):
            # Zero reward contributes nothing; skip the softmax work entirely.
            if r == 0:
                return np.zeros_like(thetas)
            softmax = gen_softmax(thetas)
            grad = softmax_grad(softmax)[a]
            p = softmax[a]
            # grad / p is the gradient of log softmax(thetas)[a]; scale by reward.
            return grad / p * r

        grad_map = {}

        rewards = self.buffer.get_rewards()
        for i, trajectory in enumerate(self.buffer.get_trajectories()):
            rew = rewards[i]
            for obs, act in trajectory:
                # Each player's gradient uses its own reward during evaluation.
                g = cal(self.policy[obs], act[self.player_id], rew[self.player_id])
                if obs in grad_map:
                    grad_map[obs] += g
                else:
                    grad_map[obs] = g

        # Gradients are summed within a trajectory and across trajectories;
        # the averaging happens once, here at the end.
        for obs in grad_map:
            grad_map[obs] /= len(self.buffer)

        return grad_map

    def update_policy(self):
        """Apply one gradient-ascent step (theta += lr * grad) to the trained policy."""
        grad_map = self.get_loss()
        for obs, grad in grad_map.items():
            self.policy[obs] += self.learn_rate * grad

    def stats(self):
        """Log win/lose/draw counts for the current buffer and return the win rate."""
        draw = win = lose = 0
        for rew in self.buffer.get_rewards():
            r = rew[self.player_id]
            if r == 0:
                draw += 1
            elif r == 1:
                win += 1
            elif r == -1:
                lose += 1
        self.eval_logger.info("检查点: %s 玩家%s 更新次数: %s  win: %s lose: %s draw: %s",
                              self.checkpoint, self.player_id + 1, self.update_counter, win, lose, draw)
        return win / len(self.buffer)

    def part_train(self):
        """Self-play training loop.

        Collects full episodes into the buffer, updates the policy every
        ``learn_eps`` episodes, and stops once ``max_update_times`` updates have
        been exceeded or the win rate passes 90%.
        """
        while True:
            obs, done = self.env.init()
            while not done:
                actions = self.get_actions(obs, self.policy1, self.policy2)

                next_obs, done = self.env.step(actions)
                self.buffer.push(pair=(obs, actions))  # store (s, a) pairs of one trajectory
                obs = next_obs
            self.buffer.push(reward=self.env.get_reward())
            self.buffer.append()

            if len(self.buffer) % self.learn_eps == 0:
                self.update_policy()
                self.update_counter += 1
                win_rate = self.stats()
                self.buffer.initial()

                if self.update_counter > self.max_update_times or win_rate > 0.9:
                    break

    def run(self):
        """Evaluate, fine-tune via ``part_train``, then evaluate again and log."""
        # Pre-training evaluation. Its logging was disabled upstream, but the
        # rollout is kept so RNG consumption (and thus training trajectories)
        # stays identical to the original behavior.
        sr = self.evaluate(self.policy1, self.policy2)

        self.part_train()
        er = self.evaluate(self.policy1, self.policy2)
        if self.lock is not None:
            self.lock.acquire()
        self.nash_logger.info("检查点: %s  玩家%s er: %s", self.checkpoint, self.player_id + 1, er)
        if self.lock is not None:
            self.lock.release()
