import copy
import datetime
import json
import os
import numpy as np
from spiel_env import Env
from utils import gen_state_policy_pair, Buffer, softmax_grad, gen_softmax, ModelSaver, get_logger


class Trainer(object):
    """Policy-gradient (REINFORCE) self-play trainer for a two-player zero-sum game.

    Player 1 performs gradient ascent and player 2 gradient descent on the
    same reward signal (a min-max optimization). Optionally uses the
    extragradient method: a probe half-step followed by a corrected
    half-step restarted from the saved starting policies.
    """

    def __init__(self, **kwargs):
        """Build the environment, tabular policies, logger, saver and buffer.

        All configuration is passed via keyword arguments; see the attribute
        comments below for the meaning of each key.
        """
        self.learn_eps = kwargs["learn_eps"]  # episodes sampled per policy update
        self.is_extra_grad_method = kwargs["is_extra_grad_method"]
        self.learn_rate = kwargs["max_learn_rate"]
        self.debug = kwargs["debug"]
        self.train_dir = kwargs["train_dir"]
        self.save_interval = kwargs["save_interval"]  # number of updates between checkpoints
        self.game_setting = kwargs["game_setting"]
        self.max_update_times = kwargs["max_update_times"]  # maximum number of full updates
        self.max_learn_rate = kwargs["max_learn_rate"]
        self.min_learn_rate = kwargs["min_learn_rate"]
        self.learn_rate_decay_interval = kwargs["learn_rate_decay_interval"]
        self.to_decay = kwargs["to_decay"]  # whether learning-rate decay is enabled
        self.n = kwargs["n"]
        self.k = kwargs["k"]

        # Linear decay step chosen so the rate reaches min_learn_rate after
        # max_update_times updates.
        self.learn_rate_decay = (self.max_learn_rate - self.min_learn_rate) / self.max_update_times

        self.env = Env(self.n, self.k)
        self.policy1, self.policy2 = gen_state_policy_pair(self.n, self.k)

        log_dir = f"train_info/{self.train_dir}/log/"
        checkpoint_dir = f"train_info/{self.train_dir}/checkpoints/"
        # exist_ok=True avoids the check-then-create race of
        # os.path.exists() followed by os.makedirs().
        os.makedirs(log_dir, exist_ok=True)
        os.makedirs(checkpoint_dir, exist_ok=True)

        self.logger = get_logger(name=self.train_dir, log_dir=f"{log_dir}reward.log", to_stream=False)
        self.saver = ModelSaver(default_path=checkpoint_dir)
        self.buffer = Buffer()

        # Extragradient bookkeeping: snapshot of the policies at the start of
        # a full update, plus a flag telling whether both half-steps of the
        # current update have completed.
        self.first_policy1 = None
        self.first_policy2 = None
        self.is_update_over = True

    def get_action(self, obs):
        """Sample one action per player from the softmax of its policy logits.

        Action 0 is a legal move, so the sampled index is the action itself.
        Returns a tuple ``(action1, action2)``.
        """
        softmax1 = gen_softmax(self.policy1[obs])
        action1 = np.random.choice(range(len(softmax1)), p=softmax1)

        softmax2 = gen_softmax(self.policy2[obs])
        action2 = np.random.choice(range(len(softmax2)), p=softmax2)

        return action1, action2

    def cal_grad(self):
        """Compute REINFORCE gradients for both players, averaged over the buffer.

        Returns:
            (grad_map1, grad_map2): dicts mapping observation -> gradient
            array for player 1 and player 2 respectively. Both players use
            the same (player-1) reward because the game is zero-sum.
        """
        grad_map1 = {}  # player-1 gradients, keyed by observation
        grad_map2 = {}
        rewards = self.buffer.get_rewards()

        def cal(thetas, a, r):
            # Zero reward contributes nothing; skip the softmax work entirely.
            if r == 0:
                return np.zeros_like(thetas)
            softmax = gen_softmax(thetas)
            grad = softmax_grad(softmax)[a]
            p = softmax[a]
            # grad(log pi(a)) * r == grad(pi(a)) / pi(a) * r
            return grad / p * r

        for i, trajectory in enumerate(self.buffer.get_trajectories()):
            rew = rewards[i]

            for obs, act in trajectory:
                # A state can repeat within a single trajectory when action 0
                # is legal, so accumulate via dict.get instead of branching on
                # membership (single lookup, no duplicated code).
                grad_map1[obs] = grad_map1.get(obs, 0) + cal(self.policy1[obs], act[0], rew[0])
                # Both players train on the same reward rew[0].
                grad_map2[obs] = grad_map2.get(obs, 0) + cal(self.policy2[obs], act[1], rew[0])

        # Sum within and across trajectories first; divide by the episode
        # count as the final step.
        for obs in grad_map1:
            grad_map1[obs] /= len(self.buffer)
            grad_map2[obs] /= len(self.buffer)

        return grad_map1, grad_map2

    def update_policy(self):
        """Apply one (half-)step of the simultaneous ascent/descent update.

        With the extragradient method each full update takes two calls: the
        first is a probe step from the saved starting point; the second
        restores the starting policies and steps with half the learning rate
        using the gradient measured at the probe point.
        """
        grad_map1, grad_map2 = self.cal_grad()

        if self.is_extra_grad_method:
            if self.is_update_over:
                # First half-step: remember where we started.
                self.first_policy1 = copy.deepcopy(self.policy1)
                self.first_policy2 = copy.deepcopy(self.policy2)
                learn_rate = self.learn_rate
            else:
                # Second half-step: restart from the saved starting point.
                self.policy1 = copy.deepcopy(self.first_policy1)
                self.policy2 = copy.deepcopy(self.first_policy2)
                learn_rate = 0.5 * self.learn_rate

            self.is_update_over = not self.is_update_over

        else:
            learn_rate = self.learn_rate

        # policy1 and policy2 share the same key set (all reachable states).
        for obs in grad_map1:
            # Player 1 ascends (maximizes) on player-1 reward; player 2
            # descends (minimizes) on the same reward.
            self.policy1[obs] += learn_rate * grad_map1[obs]
            self.policy2[obs] -= learn_rate * grad_map2[obs]

    def stats(self, update_times):
        """Log win/lose/draw counts for the episodes currently in the buffer."""
        draw = 0
        win = 0
        lose = 0
        for r, _ in self.buffer.get_rewards():
            if r == 0:
                draw += 1
            elif r == 1:
                win += 1
            elif r == -1:
                lose += 1

        self.logger.info("更新次数: %s: win: %s lose: %s draw: %s",
                         update_times, win, lose, draw)

    def get_checkpoint_paths(self):
        """Return the list of saved checkpoint paths from the model saver."""
        return self.saver.get_checkpoints()

    def run(self):
        """Main training loop: sample episodes, update policies, save and log.

        Runs until max_update_times full updates have been performed. On any
        failure the exception is logged (with traceback) and the loop exits;
        the final policies are saved if the last update completed cleanly.
        """
        update_times = 0
        try:
            while update_times < self.max_update_times:
                obs, done = self.env.init()
                while not done:
                    # Action 0 is legal, so the action value is its index.
                    actions = self.get_action(obs)

                    next_obs, done = self.env.step(actions)
                    self.buffer.push(pair=(obs, actions))  # record (s, a) of this trajectory
                    obs = next_obs
                self.buffer.push(reward=self.env.get_reward())
                self.buffer.append()

                # Update once a full batch of episodes has been collected.
                if len(self.buffer) % self.learn_eps == 0:
                    self.update_policy()

                    # For the plain min-max method is_update_over stays True,
                    # so every call counts as a full update; for extragradient
                    # only every second call does.
                    if self.is_update_over:
                        update_times += 1

                        self.stats(update_times)

                        # After each full update, check the save condition.
                        if update_times % self.save_interval == 0:
                            self.saver.save(self.policy1, self.policy2, update_times)

                        # After each full update, check the decay condition.
                        if self.to_decay and update_times % self.learn_rate_decay_interval == 0:
                            self.learn_rate = self.max_learn_rate - self.learn_rate_decay * update_times

                    self.buffer.initial()
        except Exception:
            # Top-level boundary: log the full traceback instead of silently
            # printing the message and losing the stack.
            self.logger.exception("training aborted by exception")

        finally:
            # Only persist the policies if the last update completed fully;
            # mid-extragradient policies are intermediate probe values.
            if self.is_update_over:
                self.saver.save(self.policy1, self.policy2, update_times)
            self.logger.info("结束时间：%s", datetime.datetime.now().strftime("%m-%d-%H_%M"))

            # Dump state/action visit counts for offline analysis.
            stat_path = f"train_info/{self.train_dir}/stats.json"
            with open(stat_path, 'w', encoding='utf-8') as fp:
                json.dump(self.buffer.get_stats(), fp, indent=4)
