from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import shutil

import rl_agent
from utils import *


class BaseAlgo(rl_agent.AbstractAgent):
    """Base class for policy-gradient RL agents.

    Owns the policy network plus shared bookkeeping: action sampling
    restricted to legal actions, checkpoint save/load, an update counter,
    and optional learning-rate control for extra-gradient optimizers.

    Subclasses are expected to assign ``self.pi_network`` (a torch module
    mapping info states to policy logits) and ``self.pi_optimizer``, and to
    implement :meth:`step`.
    """

    def __init__(self, player_id, num_actions, info_state_size, **kwargs):
        """Store configuration shared by all concrete algorithms.

        Args:
            player_id: index of the player this agent controls.
            num_actions: size of the full action space.
            info_state_size: length of the flattened info-state vector.
            **kwargs: must contain ``hidden_layers_sizes``,
                ``is_extra_grad_method``, ``max_learn_rate``, ``debug`` and
                ``train_dir``; the full dict is forwarded to the parent class.
        """
        super().__init__(player_id, **kwargs)

        self.player_id = player_id
        self.num_actions = num_actions
        self.info_state_size = info_state_size
        self.hidden_layers_sizes = kwargs["hidden_layers_sizes"]
        self.is_extra_grad_method = kwargs["is_extra_grad_method"]
        self.learn_rate = kwargs["max_learn_rate"]
        self.debug = kwargs["debug"]
        self.train_dir = kwargs["train_dir"]
        self.save_path = f"train_info/{self.train_dir}/checkpoints/"  # used during training

        self.update_counter = 0
        self.is_update_over = True
        # self.device = "cuda:1" if torch.cuda.is_available() else "cpu"
        self.device = "cpu"
        self.checkpoint_paths = []  # populated by save() for player 0 only

        # Must be assigned by the concrete subclass before act()/save() are used.
        self.pi_network = None
        self.pi_optimizer = None

    def act(self, info_state, legal_actions):
        """Sample an action from the policy, restricted to legal actions.

        Args:
            info_state: flattened observation (any array-like); reshaped
                to a (1, -1) float tensor before the network forward pass.
            legal_actions: iterable of legal action indices into
                ``[0, num_actions)``.

        Returns:
            Tuple ``(action, probs)``: the sampled action index and the
            full ``num_actions``-length probability vector (zero mass on
            illegal actions).
        """
        info_state = torch.tensor(np.reshape(info_state, [1, -1]), dtype=torch.float).to(self.device)
        policy_logits = self.pi_network(info_state)

        policy_probs = F.softmax(policy_logits.cpu(), dim=1).detach()

        # Mask out illegal actions, then re-normalize. The mask must span
        # the FULL action space: legal_actions holds indices into
        # [0, num_actions), so a len(legal_actions)-sized array would raise
        # IndexError (or silently misalign the distribution) whenever the
        # legal actions are not a prefix of the action space.
        probs = np.zeros(self.num_actions)

        probs[legal_actions] = policy_probs[0][legal_actions]
        total = probs.sum()
        if total != 0:
            probs /= total
        else:
            # Degenerate case (all legal mass is zero, e.g. softmax
            # underflow): fall back to a uniform distribution over legals.
            probs[legal_actions] = 1 / len(legal_actions)

        action = np.random.choice(len(probs), p=probs)

        return action, probs

    def step(self, time_step, is_evaluation=False):
        """Training/evaluation step; concrete algorithms override this."""
        pass

    def save(self):
        """Checkpoint the policy network under save_path/<update_counter>/.

        The file is named ``<player_id>.pkg``; the directory path is recorded
        in ``self.checkpoint_paths`` once per checkpoint (player 0 only, to
        avoid duplicate entries when several agents share the counter).
        """
        parameters = self.pi_network.state_dict()
        path = self.save_path + f"{self.update_counter}/"
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(path, exist_ok=True)

        torch.save(parameters, path + f"{self.player_id}.pkg")
        if self.player_id == 0:
            self.checkpoint_paths.append(path)

    def get_is_update_over(self):
        """Return whether the current update has finished (alias: check_update_over)."""
        return self.is_update_over

    def get_update_counter(self):
        """Return the number of completed updates (alias: get_counter)."""
        return self.update_counter

    def get_checkpoint_paths(self):
        """Return the list of checkpoint directories recorded by save()."""
        return self.checkpoint_paths

    def load_checkpoint(self, path):
        """Load policy-network weights from a file produced by save()."""
        self.pi_network.load_state_dict(torch.load(path))

    def check_update_over(self):
        """Alias of get_is_update_over(), kept for caller compatibility."""
        return self.is_update_over

    def get_counter(self):
        """Alias of get_update_counter(), kept for caller compatibility."""
        return self.update_counter

    def set_new_lr(self, lr):
        """Set a new learning rate on the extra-gradient optimizer.

        Raises:
            ValueError: if the configured optimizer is not an
                extra-gradient method (message: "没有衰减功能",
                i.e. "no decay capability").
        """
        if self.is_extra_grad_method:
            self.pi_optimizer.set_new_lr(lr)
        else:
            raise ValueError("没有衰减功能")