import ast
import json
import os

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from fpdf import FPDF

from reinforce import Reinforce
from rl_environment import Environment
from utils import get_time, BufferForNet


def get_data(path):
    """Load one trajectory json and parse each value back into a Python object.

    The json stores every field as the string repr of a Python literal
    (e.g. ``"[1, 2]"``); each value is parsed back here.

    :param path: path to one json file
    :return: dict with the same keys, values parsed into Python objects
    """
    with open(path, "r") as f:
        traj = json.load(f)
    # Security fix: ast.literal_eval only accepts Python literals, unlike
    # eval() which would execute arbitrary code read from the file.
    # NOTE(review): assumes every stored value is a literal repr (list /
    # number / tuple) — confirm against the code that writes these jsons.
    return {key: ast.literal_eval(value) for key, value in traj.items()}


def plot_action(path):
    """Plot one game's per-step actions for both players as stacked bars
    and save the figure as a PNG.

    :param path: path to a trajectory json readable by ``get_data``
    """
    # Bug fix: the original unpacked the dict itself, which yields *keys*.
    # Unpack the values in insertion order instead.
    # NOTE(review): assumes the json holds exactly three entries in the
    # order (player-1 actions, player-2 actions, reward) — TODO confirm
    # against the writer of these files.
    a1s, a2s, r = get_data(path).values()
    if r == 1:
        label1 = "玩家1 win"
        label2 = "玩家2"
    elif r == -1:
        label1 = "玩家1"
        label2 = "玩家2 win"
    else:
        label1 = "玩家1"
        label2 = "玩家2"

    # CJK-capable font so the Chinese labels render correctly.
    matplotlib.rc("font", family='MicroSoft YaHei')
    list1 = np.array(a1s)  # first bar series (player 1)
    list2 = np.array(a2s)  # second bar series (player 2)
    length = len(list1)
    x = np.arange(1, length + 1)  # x axis: 1-based step index

    plt.figure()
    width = 0.4

    plt.title("单局博弈图")  # chart title
    plt.ylabel("动作")  # y label: action
    plt.xlabel("步数")  # x label: step count

    # Player 1's bars are stacked on top of player 2's (bottom=list2).
    plt.bar(x, list1, width=width, label=label1, bottom=list2)
    plt.bar(x, list2, width=width, label=label2)
    plt.xticks(range(1, 16))
    plt.yticks(range(0, 13))

    plt.legend()
    plt.grid(alpha=0.2)
    # Bug fix: the original referenced the builtin ``dir`` (AttributeError
    # on .split); derive the sub-directory from the json file name instead.
    save_path = rf"E:\CodeHub\实验室项目\alesia\Alesia\net\train_info\debug\pic\{os.path.basename(path).split('.')[0]}"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    plt.savefig(save_path + f"/{get_time()}.png", dpi=300)
    plt.close()  # release the figure so repeated calls don't leak memory


def get_policy(data):
    """Format each step's observation, actions and both players' policies
    into one printable line.

    :param data: dict of parallel lists under the keys "obs", "probs1",
        "probs2", "act1" and "act2"
    :return: list of formatted strings, one per step
    """
    decimals = 2  # decimal places each probability is padded to
    lines = []
    for step, obs in enumerate(data["obs"]):
        parts = [f"{obs},  {data['act1'][step]}, {data['act2'][step]}:  "]
        for prob in data["probs1"][step]:
            text = str(prob)
            # Right-pad with zeros to a fixed width ("0.5" -> "0.50").
            text += "0" * (decimals + 2 - len(text))
            parts.append(text + "  ")
        parts.append(" |   ")
        for prob in data["probs2"][step]:
            text = str(prob)
            text += "0" * (decimals + 2 - len(text))
            parts.append(text + "  ")
        lines.append("".join(parts))
    return lines


def write_pdf(record, position):
    """Write each string in *record* as one row of a PDF saved at *position*.

    :param record: iterable of text lines
    :param position: output file path for the PDF
    """
    doc = FPDF()
    doc.add_page()
    # A font must be selected before drawing any cell: family, style
    # (B bold / I italic / '' regular), size in points.
    doc.set_font('Times', '', 13)
    for text in record:
        # cell(width, height, ...); ln=1 moves the cursor to the next line.
        doc.cell(40, 10, txt=text, ln=1)
    doc.output(position, 'F')


class Evaluator():
    """Runs trained Reinforce agents in the oshi_zumo environment purely to
    sample episodes for policy inspection (no learning updates here)."""

    def __init__(self):
        # Hyper-parameters forwarded verbatim to each Reinforce agent.
        agent_kwargs = dict(
            game="oshi_zumo",
            n=6,
            k=1,
            max_learn_rate=1e-3,
            min_learn_rate=1e-6,
            is_extra_grad_method=True,
            to_decay=False,
            debug=False,
            hidden_layers_sizes=(32, 16),
            policy_type="net"  # direct parameterisation vs. a neural network
        )
        agent_kwargs["train_dir"] = ""

        self.env = Environment("oshi_zumo", **dict(coins=6, size=1, horizon=15))
        self.info_state_size = self.env.observation_spec()["info_state"][0]
        self.num_actions = self.env.action_spec()["num_actions"]
        self.buffer = BufferForNet(agent_kwargs["n"])

        # One agent per player of the two-player game.
        self.agents = [
            Reinforce(player_id=pid,
                      info_state_size=self.info_state_size,
                      num_actions=self.num_actions,
                      **agent_kwargs) for pid in range(2)
        ]

    def sample_one_eps(self):
        """Play a single full episode by sampling both agents' policies."""
        step = self.env.reset()
        while not step.last():
            outputs = [agent.step(step) for agent in self.agents]
            self.buffer.push(step, outputs)
            step = self.env.step([out.action for out in outputs])
        # Episode is over: step every agent once more with the final state.
        for agent in self.agents:
            agent.step(step)

        self.buffer.push(reward=step.rewards)  # also records trajectory data

        # NOTE(review): "smaple" is the BufferForNet API's own spelling.
        return self.buffer.smaple_for_display()

    def reset_checkpoint(self, checkpoint_path):
        """Load per-player weights from ``checkpoint_path``/<player>.pkg."""
        for player, agent in enumerate(self.agents):
            agent.load_checkpoint(checkpoint_path + f"/{player}.pkg")


def json_display():
    """Render every saved checkpoint policy (stored as json) into one PDF."""
    ppath = r"E:\CodeHub\实验室项目\alesia\Alesia\analysis\train11\train_info\eg_0.01_-_solid_net\json"
    # File names are "<update-count>.json"; order them numerically and
    # rebuild the names from the sorted numbers.
    numbers = sorted(int(name.split(".")[0]) for name in os.listdir(ppath))
    paths = [ppath + rf"\{num}.json" for num in numbers]
    record = []
    for path in paths:
        header = "*" * 10 + os.path.basename(path).split(".")[0] + "*" * 10
        record.append(header)
        record.extend(get_policy(get_data(path)))
    position = os.path.dirname(ppath) + "/policy.pdf"
    write_pdf(record, position)


def sample_display():
    """Sample one episode per checkpoint, then render the policies to a PDF."""
    checkpoint_path = r"/root/projects/jzhou/Alesia/0518/net/train_info/eg_0.01_-_solid_net/checkpoints/"
    paths = os.listdir(checkpoint_path)
    # Checkpoint directories are named by update count; sort numerically.
    paths = [str(j) for j in sorted([int(i) for i in paths])]
    record = []
    evaluator = Evaluator()

    for path in paths:
        record.append("*" * 20 + "update times: " + path + "*" * 20)
        evaluator.reset_checkpoint(checkpoint_path + path)
        info = evaluator.sample_one_eps()
        # Security fix: values are string reprs; parse with literal_eval
        # instead of eval so the data cannot execute arbitrary code.
        # NOTE(review): assumes every value is a Python literal repr —
        # confirm against BufferForNet.smaple_for_display.
        new_info = {k: ast.literal_eval(v) for k, v in info.items()}
        record.extend(get_policy(new_info))
    position = checkpoint_path.split("/checkpoints")[0] + ".pdf"
    write_pdf(record, position)


if __name__ == '__main__':
    # Entry point: sample each checkpoint and write the policy PDF.
    sample_display()
