from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy import stats

import datetime

from absl import logging
import logging

from logging.handlers import RotatingFileHandler

# A single environment step: observation, chosen action, reward, discount,
# and the mask of legal actions at that state.
Transition = collections.namedtuple(
    "Transition",
    ["info_state", "action", "reward", "discount", "legal_actions_mask"])


class MLPTorso(nn.Module):
    """A specialized half-MLP module for building multi-headed networks.

    Stacks one SonnetLinear layer per entry of `hidden_sizes`; every layer
    therefore ends in a ReLU non-linearity.
    """

    def __init__(self, input_size, hidden_sizes):
        """Create the MLPTorso.

        Args:
          input_size: (int) number of inputs
          hidden_sizes: (list) sizes (number of units) of each hidden layer
        """
        super(MLPTorso, self).__init__()
        layers = []
        prev_size = input_size
        for hidden_size in hidden_sizes:
            layers.append(SonnetLinear(in_size=prev_size, out_size=hidden_size))
            prev_size = hidden_size
        self._layers = layers
        # ModuleList registers the layers so their parameters are tracked.
        self.model = nn.ModuleList(self._layers)

    def forward(self, x):
        """Apply each hidden layer in sequence and return the result."""
        out = x
        for hidden_layer in self.model:
            out = hidden_layer(out)
        return out


class SonnetLinear(nn.Module):
    """A Sonnet-style linear module.

    Always includes biases and only supports ReLU activations.
    """

    def __init__(self, in_size, out_size, activate_relu=True):
        """Creates a Sonnet linear layer.

        Args:
          in_size: (int) number of inputs
          out_size: (int) number of outputs
          activate_relu: (bool) whether to include a ReLU activation layer
        """
        super(SonnetLinear, self).__init__()
        self._activate_relu = activate_relu
        # Truncated-normal weight initialization inspired by Sonnet's Linear
        # layer (cites https://arxiv.org/abs/1502.03167v3): stddev = 1/sqrt(fan_in),
        # truncated at +/- 2 stddev around a zero mean. For reference, the
        # pytorch default is uniform(-sqrt(1/in_features), sqrt(1/in_features)).
        stddev = 1.0 / math.sqrt(in_size)
        mean = 0
        lower = (-2 * stddev - mean) / stddev
        upper = (2 * stddev - mean) / stddev
        initial_weights = stats.truncnorm.rvs(
            lower, upper, loc=mean, scale=stddev, size=[out_size, in_size])
        self._weight = nn.Parameter(
            torch.tensor(initial_weights, dtype=torch.float))
        self._bias = nn.Parameter(torch.zeros([out_size]))

    def forward(self, tensor):
        """Compute W x + b, followed by ReLU when activation is enabled."""
        output = F.linear(tensor, self._weight, self._bias)
        if self._activate_relu:
            return F.relu(output)
        return output


class BufferForNet:
    """Buffer recording two players' trajectories plus per-episode rewards.

    Steps are accumulated via `push(time_step, agents_output)`; a call to
    `push(reward=...)` closes the current episode, storing one trajectory per
    player and the final reward.
    """

    def __init__(self, n):
        """Create an empty buffer.

        Args:
          n: (int) size parameter; info-state vectors are treated as three
             concatenated one-hot blocks of length n + 1 by `info_state_to_obs`.
        """
        self.n = n
        self.trajectories1 = []  # finished trajectories for player 0
        self.trajectories2 = []  # finished trajectories for player 1
        self.rewards = []        # one reward entry per finished trajectory
        self._reset_current()

    def _reset_current(self):
        # Clear the in-progress (current episode) per-step records.
        self.info_state = []
        self.la1 = []
        self.la2 = []
        self.a1 = []
        self.a2 = []
        self.probs1 = []
        self.probs2 = []

    def _append(self):
        # Finalize the current episode into both players' trajectory lists,
        # then reset the per-step records for the next episode.
        self.trajectories1.append(
            dict(info_state=self.info_state, legal_actions=self.la1, actions=self.a1, probs=self.probs1)
        )
        self.trajectories2.append(
            dict(info_state=self.info_state, legal_actions=self.la2, actions=self.a2, probs=self.probs2)
        )
        self._reset_current()

    def push(self, time_step=None, agents_output=None, reward=None):
        """Record one step and/or the episode-final reward.

        Args:
          time_step: object exposing `observations["info_state"]` and
            `observations["legal_actions"]` lists indexed by player.
          agents_output: per-player outputs exposing `.action` and `.probs`
            (anything with a `.tolist()`, e.g. a numpy array or tensor).
          reward: episode reward; when given, the current episode is finalized.
        """
        if time_step is not None and agents_output is not None:
            # Both players share player 0's info state here; presumably the
            # game is fully observable — confirm against the caller.
            info_state = time_step.observations["info_state"][0]

            # Per-player legal actions, chosen actions and action probabilities
            # (probabilities rounded to 2 decimals for compact display).
            la1 = time_step.observations["legal_actions"][0]
            la2 = time_step.observations["legal_actions"][1]
            a1 = agents_output[0].action
            a2 = agents_output[1].action
            probs1 = [round(p, 2) for p in agents_output[0].probs.tolist()]
            probs2 = [round(p, 2) for p in agents_output[1].probs.tolist()]

            self.info_state.append(info_state)
            self.la1.append(la1)
            self.la2.append(la2)
            self.a1.append(a1)
            self.a2.append(a2)
            self.probs1.append(probs1)
            self.probs2.append(probs2)

        if reward is not None:
            # Rewards only arrive at the end of a trajectory; store the reward
            # and finalize the episode's trajectory records.
            self.rewards.append(reward)
            self._append()

    def initial(self):
        """Reset the buffer to its empty state (keeps `n`)."""
        self.trajectories1 = []
        self.trajectories2 = []
        self.rewards = []
        self._reset_current()

    def get_trajectories(self, player_id):
        """Return the finished trajectories for `player_id` (0 or 1)."""
        return self.trajectories1 if player_id == 0 else self.trajectories2

    def get_rewards(self):
        """Return the list of per-episode rewards."""
        return self.rewards

    def sample_for_display(self):
        """Sample one stored episode and return its summary as strings.

        Shows the (pre-update) policy probabilities of a randomly chosen
        trajectory, with both players' data taken from the same episode.

        Returns:
          dict mapping obs/act1/act2/probs1/probs2/rewards to `str` values.

        Raises:
          ValueError: if no trajectory has been stored yet.
        """
        if not self.trajectories1:
            # The original raised ValueError here too (via random.sample on an
            # empty range); make the failure mode explicit.
            raise ValueError("no trajectories stored")
        index = random.randrange(len(self.trajectories1))
        traj1 = self.trajectories1[index]
        traj2 = self.trajectories2[index]
        rewards = self.rewards[index]
        obs = self.info_state_to_obs(traj1["info_state"])

        data = dict(
            obs=obs,
            act1=traj1["actions"],
            act2=traj2["actions"],
            probs1=traj1["probs"],
            probs2=traj2["probs"],
            rewards=rewards,
        )
        return {k: str(v) for k, v in data.items()}

    # Backward-compatible alias for the original (misspelled) method name.
    smaple_for_display = sample_for_display

    def __len__(self):
        """Number of finished episodes stored."""
        return len(self.rewards)

    def info_state_to_obs(self, info_states):
        """Convert one-hot info states to tuples of per-block indices.

        Each info state is assumed to be three concatenated one-hot blocks of
        length n + 1; the result is the hot index within each block.
        """
        offsets = np.array([0, self.n + 1, (self.n + 1) * 2])
        obs = []
        for info_state in info_states:
            hot = np.where(np.array(info_state) == 1)[0]
            obs.append(tuple(hot - offsets))
        return obs


def get_time():
    """Return the current local time formatted as an 8-digit 'DDHHMMSS' string."""
    now = datetime.datetime.now()
    return now.strftime('%d%H%M%S')


def get_logger(name=__name__, log_dir="./log.log", to_stream=False, to_file=True):
    """Build (or fetch) a named logger writing to a rotating file and/or the console.

    One logger per actor: create it at the program entry point (the trainer)
    and pass it into the actor loop, where it becomes an attribute of the
    player, model and coordinator. `to_stream` additionally echoes messages
    to the console for debugging.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')  # %(name)s:
    # Attach handlers only once so repeated calls don't duplicate output.
    if not logger.handlers:
        if to_file:
            file_handler = RotatingFileHandler(
                filename=log_dir, maxBytes=5 * 1024 * 1024, backupCount=20,
                encoding='utf-8')
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            logger.propagate = False
        if to_stream:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.DEBUG)
            stream_handler.setFormatter(formatter)
            logger.addHandler(stream_handler)
    return logger


def get_sub_paths(path):
    """List checkpoint subdirectories of `path`, ordered numerically.

    Args:
      path: parent directory of the checkpoint files; every entry inside is
        expected to have a purely numeric name.

    Returns:
      Full paths of the entries, sorted by their integer value (so '10'
      comes after '2', unlike a lexicographic sort).

    Raises:
      Exception: if `path` does not exist.
    """
    if not os.path.exists(path):
        raise Exception("训练文件路径不存在")
    numeric_names = sorted(int(name) for name in os.listdir(path))
    return [os.path.join(path, str(name)) for name in numeric_names]
