import copy
import datetime
import json
import logging
import os
import pickle
from logging.handlers import RotatingFileHandler

import numpy as np


def softmax_grad(softmax):
    """Return the Jacobian of a softmax output: diag(s) - s s^T.

    `softmax` is a 1-D numpy array of softmax probabilities; the result is
    a square (len, len) matrix.
    """
    col = softmax.reshape(-1, 1)
    return np.diagflat(col) - col @ col.T


def gen_softmax(ll):
    """Return the softmax of a 1-D sequence of logits as a numpy array.

    Numerically stable: the maximum logit is subtracted before
    exponentiation, which leaves the result mathematically unchanged but
    prevents overflow (the original overflowed to NaN for large logits).
    Accepts any array-like input (list, tuple, numpy array).
    """
    logits = np.asarray(ll, dtype=float)
    e_x = np.exp(logits - logits.max())
    return e_x / e_x.sum()


def gen_state_policy_pair(n, k):
    """Build zero-initialised tabular policies for two players.

    State keys are (player1_count, player2_count, position) with counts in
    [0, n] (iterated in reverse) and position in [0, 2k+2]. Action 0 is
    included so that action value == action index; any min_bid truncation
    is applied later, when the softmax is taken. The logits for a state are
    a zero vector whose length is that player's count + 1.
    """
    states = (
        (i, j, p)
        for i in reversed(range(n + 1))
        for j in reversed(range(n + 1))
        for p in range(2 * k + 3)
    )

    policy1, policy2 = {}, {}
    for i, j, p in states:
        policy1[(i, j, p)] = np.zeros(i + 1)
        policy2[(i, j, p)] = np.zeros(j + 1)

    return policy1, policy2


def get_time():
    """Return the current local time as a 'DDHHMMSS' digit string."""
    now = datetime.datetime.now()
    return f"{now:%d%H%M%S}"


def get_logger(name=__name__, log_dir="./log.log", to_stream=False, to_file=True):
    """Create (or fetch) a message-only logger for one actor.

    One logger per actor: created at the program entry point (the trainer),
    passed into the actor loop, and attached there as an attribute of
    player/model/coordinator. `to_stream` mirrors output to the console for
    debugging; `log_dir` is the log *file* path despite the name.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # Handlers already attached means this logger was configured before;
    # adding more would duplicate every line.
    if logger.handlers:
        return logger

    fmt = logging.Formatter('%(message)s')

    if to_file:
        file_handler = RotatingFileHandler(
            filename=log_dir,
            maxBytes=5 * 1024 * 1024,
            backupCount=20,
            encoding='utf-8',
        )
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
        # Propagation is disabled only in the file branch (matches original
        # behaviour: a stream-only logger still propagates to the root).
        logger.propagate = False

    if to_stream:
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(fmt)
        logger.addHandler(stream_handler)

    return logger


def get_checkpoints_path(path):
    """List checkpoint file paths under `path`, sorted by numeric prefix.

    Every file except 'terminal.pkg' is assumed to be named
    '<number>.<ext>'. Paths are built by plain string concatenation, so
    `path` is expected to end with a separator. Raises if `path` does not
    exist.
    """
    if not os.path.exists(path):
        raise Exception("训练文件路径不存在")

    # numeric prefix -> filename, skipping the terminal marker file
    numbered = {
        int(name.split(".")[0]): name
        for name in os.listdir(path)
        if name != "terminal.pkg"
    }
    return [path + numbered[idx] for idx in sorted(numbered)]


class Buffer(object):
    """On-policy buffer collecting trajectories for N-step TD updates."""

    def __init__(self):
        # (state, action) pairs of the trajectory currently being played.
        self.sa_pairs = []
        # Finished trajectories, each a list of (state, action) pairs.
        self.trajectories = []
        self.rewards = []
        # Visit counts keyed by str(obs) then str(action).
        self.stats = dict()

    def append(self):
        """Close the current trajectory and start collecting a new one."""
        self.trajectories.append(self.sa_pairs)
        self.sa_pairs = []

    def push(self, pair=None, reward=None):
        """Record a (state, action) pair and/or a reward.

        Pushing a pair also bumps its visit counter in `stats`.
        """
        if pair is not None:
            self.sa_pairs.append(pair)
            obs, act = str(pair[0]), str(pair[1])
            per_obs = self.stats.setdefault(obs, dict())
            per_obs[act] = per_obs.get(act, 0) + 1

        if reward is not None:
            self.rewards.append(reward)

    def initial(self):
        """Reset trajectory data; visit statistics are intentionally kept."""
        self.sa_pairs = []
        self.trajectories = []
        self.rewards = []

    def get_trajectories(self):
        return self.trajectories

    def get_rewards(self):
        return self.rewards

    def get_stats(self):
        return self.stats

    def __len__(self):
        return len(self.trajectories)


class ModelSaver(object):
    """Persists (policy1, policy2) checkpoint pairs to disk via pickle.

    An optional lock serialises file access (e.g. across processes); it is
    now released in a `finally` block so an I/O error can no longer leave
    the lock held forever.
    """

    def __init__(self, default_path=".", lock=None):
        self.lock = lock

        if not os.path.exists(default_path):
            os.makedirs(default_path)
        self.path = default_path
        # Paths of checkpoints saved so far (may contain duplicates).
        self.recorder = []

    def save(self, policy1, policy2, eps):
        """Pickle [policy1, policy2] to '<path>/<eps>.pkl' and record it.

        Uses os.path.join instead of string concatenation: plain `+`
        produced e.g. '.5.pkl' for the default path "." (missing
        separator); join yields identical paths for callers that already
        pass a trailing separator.
        """
        policy = [policy1, policy2]
        path = os.path.join(self.path, f"{eps}.pkl")
        if self.lock is not None:
            self.lock.acquire()
        try:
            with open(path, "wb") as file:
                # protocol=True (== 1) kept for compatibility with
                # checkpoints written by earlier runs
                pickle.dump(policy, file, True)
        finally:
            if self.lock is not None:
                self.lock.release()
        self.recorder.append(path)

    def load(self, path=None):
        """Unpickle and return (policy1, policy2) from `path`.

        NOTE(review): with path=None this opens self.path, which __init__
        creates as a *directory* — that branch looks broken; confirm the
        intended default before relying on it.
        """
        if path is None:
            with open(self.path, "rb") as file:
                policy1, policy2 = pickle.load(file)
            return policy1, policy2

        if self.lock is not None:
            self.lock.acquire()
        try:
            with open(path, "rb") as file:
                policy1, policy2 = pickle.load(file)
        finally:
            if self.lock is not None:
                self.lock.release()
        return policy1, policy2

    def get_checkpoints(self):
        """Unique recorded checkpoint paths (order not preserved)."""
        return list(set(self.recorder))
