from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os

from absl import app
import rl_environment
from reinforce import Reinforce
from utils import BufferForNet, get_logger, get_time


class Trainer:
    """Self-play trainer: runs two REINFORCE agents against each other in an
    OpenSpiel-style environment, collects trajectories into a buffer, and
    periodically updates, checkpoints, and logs.

    All configuration comes in through ``**kwargs``; see ``__init__`` for the
    required keys.
    """

    def __init__(self, **kwargs):
        """Build the environment, agents, buffer, and logging from kwargs.

        Required keys: n, game, game_setting, max_update_times, debug,
        is_extra_grad_method, sample_eps, train_dir, save_interval, to_decay,
        learn_rate_decay_interval, max_learn_rate, min_learn_rate.
        Optional keys: is_part_train (default False), checkpoint (default
        None; only used for evaluation runs).
        """
        self.n = kwargs["n"]
        self.game = kwargs["game"]
        self.game_setting = kwargs["game_setting"]
        self.max_update_times = kwargs["max_update_times"]
        self.debug = kwargs["debug"]
        self.is_extra_grad_method = kwargs["is_extra_grad_method"]
        self.sample_eps = kwargs["sample_eps"]
        self.train_dir = kwargs["train_dir"]
        self.save_interval = kwargs["save_interval"]
        self.is_part_train = kwargs.get("is_part_train", False)
        self.checkpoint = kwargs.get("checkpoint", None)  # only set for evaluation

        # Learning-rate schedule: linear decay from max to min over
        # max_update_times updates (applied stepwise in run()).
        self.to_decay = kwargs["to_decay"]
        self.learn_rate_decay_interval = kwargs["learn_rate_decay_interval"]
        self.learn_rate = kwargs["max_learn_rate"]
        self.max_learn_rate = kwargs["max_learn_rate"]
        self.min_learn_rate = kwargs["min_learn_rate"]
        self.learn_rate_decay = (self.max_learn_rate - self.min_learn_rate) / self.max_update_times

        # Used during training.
        # NOTE(review): save_path itself is never created here — presumably the
        # agents' save() handles that; verify before relying on it.
        self.save_path = f"train_info/{self.train_dir}/checkpoints/"
        self.json_path = f"train_info/{self.train_dir}/json/"
        os.makedirs(self.json_path, exist_ok=True)

        self.env = rl_environment.Environment(self.game, **self.game_setting)
        self.info_state_size = self.env.observation_spec()["info_state"][0]
        self.num_actions = self.env.action_spec()["num_actions"]

        # Two-player self-play: one REINFORCE agent per seat.
        self.agents = [
            Reinforce(player_id=idx,
                      info_state_size=self.info_state_size,
                      num_actions=self.num_actions,
                      **kwargs) for idx in range(2)
        ]

        self.buffer = BufferForNet(self.n)

        log_dir = f"train_info/{self.train_dir}/log/"
        os.makedirs(log_dir, exist_ok=True)
        # Records win/loss stats for player 1 during updates.
        # Timestamped logger name avoids clashes when training multiple times.
        self.logger = get_logger(name=f"train_{get_time()}", log_dir=log_dir + "train.log",
                                 to_stream=True)

    def run(self):
        """Main training loop.

        Plays full episodes, buffering each step; once the buffer holds at
        least ``sample_eps`` episodes, updates both agents, logs stats,
        checkpoints on the save interval, and applies learning-rate decay.
        On exit (normal or exception) both agents are saved unless debugging.
        """
        try:
            update_counter = 0
            while update_counter < self.max_update_times:
                # time_step is a namedtuple: fields accessible via
                # time_step.observations or by index, e.g. time_step[0].
                time_step = self.env.reset()
                while not time_step.last():
                    agents_output = [agent.step(time_step) for agent in self.agents]
                    action_list = [agent_output.action for agent_output in agents_output]

                    self.buffer.push(time_step, agents_output)

                    time_step = self.env.step(action_list)

                # Let agents observe the terminal step (no action taken).
                for agent in self.agents:
                    agent.step(time_step)
                self.buffer.push(reward=time_step.rewards)  # also finalizes the trajectory

                # Check whether enough episodes are buffered to update.
                if len(self.buffer) >= self.sample_eps:
                    for i, agent in enumerate(self.agents):
                        loss = agent.update(self.buffer.get_rewards(), self.buffer.get_trajectories(i))

                        if agent.check_update_over():
                            update_counter = agent.get_counter()
                            self.stats(i, update_counter, loss)  # log progress
                            # Checkpoint on the save interval.
                            if update_counter % self.save_interval == 0:
                                agent.save()
                                if i == 0:
                                    # NOTE(review): "smaple" is a typo in the
                                    # BufferForNet API; fix it there, not here.
                                    infos = self.buffer.smaple_for_display()
                                    with open(f'{self.json_path}/{update_counter}.json', 'w') as fp:
                                        json.dump(infos, fp, indent=4)

                            # After each full update, check the LR-decay condition.
                            if self.to_decay and update_counter % self.learn_rate_decay_interval == 0:
                                self.learn_rate = self.max_learn_rate - self.learn_rate_decay * update_counter

                        if self.is_extra_grad_method and self.to_decay:
                            agent.set_new_lr(self.learn_rate)  # set_new_lr only exists on EG agents

                    self.buffer.initial()

        finally:
            if not self.debug:
                for agent in self.agents:
                    agent.save()

    def get_checkpoint_paths(self):
        """Return the unique checkpoint paths recorded by player 1.

        Each checkpoint directory holds two pkg files; player 1 saves them,
        player 2 does not, so only agent 0 is consulted.
        """
        return list(set(self.agents[0].get_checkpoint_paths()))

    def stats(self, player_id, update_counter, loss):
        """Log win/draw/lose counts from player 1's perspective.

        Only runs for player_id == 0; rewards of +1/-1 count as win/lose,
        anything else as a draw.
        """
        if player_id != 0:
            return
        win, draw, lose = 0, 0, 0
        for rew in self.buffer.get_rewards():  # during training, use player 1's rewards
            r = rew[0]
            if r == 1:
                win += 1
            elif r == -1:
                lose += 1
            else:
                draw += 1
        self.logger.info("更新次数: %s: win: %s lose: %s draw: %s | loss: %s",
                         update_counter, win, lose, draw, loss)
