import os
import time

import numpy as np
from matplotlib import pyplot as plt

import Agents
from tools import basic_environment as be

def plot_lines(y_data_list, save_to_file=False, filename="plot.png", title="Multiple Lines Plot",
               x_label="X Axis", y_label="Y Axis", line_labels=None, figsize=(10, 6)):
    """Plot one line per sequence in *y_data_list* on a shared figure.

    Each line's x values are simply the indices 0..len(y)-1, so the
    sequences may have different lengths.

    Args:
        y_data_list: iterable of y-value sequences, one per line.
        save_to_file: if True, save the figure to *filename* and close it;
            otherwise show it interactively.
        filename: output path used when save_to_file is True; any missing
            parent directories are created.
        title, x_label, y_label: chart text.
        line_labels: optional list of legend labels; lines beyond its length
            (or all lines, if None) fall back to "Line N".
        figsize: matplotlib figure size in inches.
    """
    plt.figure(figsize=figsize)

    for i, y_data in enumerate(y_data_list):
        # x values are generated automatically from the length of each series.
        x_data = range(len(y_data))

        # Use the caller-supplied label when available, else a default one.
        label = line_labels[i] if line_labels and i < len(line_labels) else f"Line {i + 1}"
        plt.plot(x_data, y_data, label=label)

    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.grid(True)

    # Show a legend when explicit labels were given or several lines exist.
    if line_labels or len(y_data_list) > 1:
        plt.legend()

    if save_to_file:
        # Only create the parent directory when the path actually has one:
        # os.makedirs("") raises FileNotFoundError for a bare name like "plot.png".
        directory = os.path.dirname(filename)
        if directory:
            os.makedirs(directory, exist_ok=True)
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        plt.close()
        # Bug fix: previously printed a literal placeholder instead of the path.
        print(f"图表已保存到: {filename}")
    else:
        plt.show()

#================================ Hyper-parameters & setup ================================#
# Minimum number of transitions to collect with random actions before training starts.
min_buffer_size = 20000
# Total number of gradient-update steps to run.
max_train = 200000

# Mini-batch size sampled from the replay buffer per training step.
batchSize = 128

# Path to the Unity executable hosting the environment.
# NOTE(review): hard-coded absolute Windows path — consider making this configurable.
exe_path = 'C:/Users/XuShunJiang/Desktop/Game/project2/ML_project.exe'
# Replay buffer is sized to hold every transition of the whole run (warm-up + training).
env = be.EnvironmentV1(time_scale=20, no_graphics=True, buffer_size=max_train + min_buffer_size, agent_num=4,exe_path=exe_path)
state_dim = env.observation_dim
action_dim = env.action_dim
max_action = env.max_action  # presumably the symmetric action bound per dimension — confirm in Agents.SAC

agent = Agents.SAC(state_dim=state_dim, hidden_dim=256, action_dim=action_dim, action_bound=max_action)

print("Device:",agent.device)

if __name__ == "__main__":
    # Gradient-update steps completed so far.
    trained_steps = 0
    # Wall-clock time of the first real training step; set lazily once the
    # warm-up phase has filled the replay buffer, so the ETA excludes warm-up.
    train_start = None

    obs, agent_ids = env.reset()
    while trained_steps < max_train:
        if env.exp_buffer.size() < min_buffer_size:
            # Warm-up phase: act with clipped Gaussian noise to seed the buffer.
            random_actions = np.random.normal(
                0, max_action * 0.5, size=(len(obs), action_dim)
            ).clip(-max_action, max_action)
            obs, agent_ids = env.step(random_actions, agent_ids)
            print(f'准备阶段{env.exp_buffer.size()}/{min_buffer_size}')
            continue

        if train_start is None:
            train_start = time.time()

        # env.step collects the experience into env.exp_buffer internally.
        obs, agent_ids = env.step(agent.select_action(obs), agent_ids)

        agent.train(env.exp_buffer, batchSize)
        trained_steps += 1

        # Periodic progress report with a simple linear ETA estimate.
        if trained_steps % 5000 == 0:
            elapsed_min = (time.time() - train_start) / 60  # minutes spent training
            steps_per_min = trained_steps / elapsed_min     # training throughput
            eta_min = (max_train - trained_steps) / steps_per_min
            print(f"训练进度: {trained_steps}/{max_train}, 已用时间: {elapsed_min:.2f} 分钟,"
                  f" 预计剩余时间: {eta_min:.2f} 分钟")

    env.unity_env.close()

    # Plot the per-recorder episode returns accumulated during the run.
    data_list = [cell.all_returns for cell in env.recorder]
    plot_lines(data_list)

    # Export the trained policy network for deployment.
    Agents.export_to_onnx(model=agent.actor, state_dim=state_dim)
