import numpy as np
import matplotlib.pyplot as plt
import sys

sys.path.append('../')  # make sibling/parent project modules importable when run as a script


def smooth(x, timestamps=9):
    """Trailing moving average: each output point is the mean of the current
    sample and up to `timestamps` preceding samples (window of
    `timestamps + 1`, truncated at the start of the series).

    Parameters
    ----------
    x : array-like
        1-D sequence of values to smooth (lists are accepted as well as
        ndarrays).
    timestamps : int
        Number of preceding samples included in each window (default 9,
        i.e. a 10-point window).

    Returns
    -------
    np.ndarray
        Float array with the same length as `x`.
    """
    x = np.asarray(x, dtype=float)  # original crashed on plain lists (.sum() on a list slice)
    n = len(x)
    if n == 0:
        return np.zeros(0)
    window = timestamps + 1
    # O(n) via cumulative sums instead of re-summing every window (was O(n*window)).
    csum = np.cumsum(x)
    y = np.empty(n)
    head = min(window, n)
    # Growing window at the start: mean of the first i+1 samples.
    y[:head] = csum[:head] / np.arange(1, head + 1)
    if n > window:
        # Full windows: sum of x[i-window+1 .. i] divided by the window size.
        y[window:] = (csum[window:] - csum[:-window]) / window
    return y
def smooth1(x, timestamps=149):
    """Trailing moving average with a 150-point window by default.

    Each output point is the mean of the current sample and up to
    `timestamps` preceding samples; the window is truncated near the
    start of the series. Returns a float ndarray the same length as `x`.
    """
    length = len(x)
    out = np.empty(length)
    for idx in range(length):
        lo = max(0, idx - timestamps)
        segment = x[lo:idx + 1]
        out[idx] = segment.sum() / len(segment)
    return out

def smooth2(x, timestamps=199):
    """Trailing moving average with a 200-point window by default.

    Point i is the mean of x[max(0, i - timestamps) : i + 1], so the
    window shrinks near the beginning of the series. Returns a float
    ndarray the same length as `x`.
    """
    windows = [x[max(0, j - timestamps):j + 1] for j in range(len(x))]
    return np.array([float(w.sum()) / len(w) for w in windows])


def ewma1(x, alpha=0.005):
    """Exponentially weighted moving average of the input data.

    Parameters
    ----------
    x : array-like
        Input data to smooth.
    alpha : float
        Smoothing factor in (0, 1); smaller values give a smoother curve.

    Returns
    -------
    np.ndarray
        Smoothed float array with the same length as `x`.
    """
    # Bug fix: np.zeros_like(x) inherited x's dtype, so an integer input
    # silently truncated every update to int. Force float instead.
    x = np.asarray(x, dtype=float)
    n = len(x)
    y = np.zeros(n)
    if n == 0:
        return y  # original crashed on y[0] for empty input
    y[0] = x[0]
    for i in range(1, n):
        y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]
    return y

def plot_reward():
    """Load per-episode reward logs for three training runs, smooth them
    with an EWMA, and save a comparison figure.

    NOTE(review): all input/output paths are hard-coded absolute paths;
    consider promoting them to parameters or CLI arguments.
    NOTE(review): the directory names (mappo_pis / maacktr / mappo) do not
    obviously match the legend labels (MAA2C / MAPPO / SE-MAPPO) — confirm
    the run-to-label mapping against the experiment layout.
    """
    out_path = '/home/tianxj/code/MARL_CAVs/MARL/results/hard1.png'

    # allow_pickle=True: the logs were saved as dtype=object arrays
    # (nested per-episode reward lists).
    reward_maa2c = np.load('/home/tianxj/code/MARL_CAVs/MARL/results/videos/mappo_pis/hard-0-8/episode_rewards.npy', allow_pickle=True)
    reward_mappo = np.load('/home/tianxj/code/MARL_CAVs/MARL/results/videos/maacktr/hard-2025/episode_rewards.npy', allow_pickle=True)
    reward_new = np.load('/home/tianxj/code/MARL_CAVs/MARL/results/videos/mappo/hard-0/episode_rewards.npy', allow_pickle=True)

    # Flatten the object arrays into plain 1-D float arrays.
    reward_maa2c = np.hstack(reward_maa2c).astype(float)
    reward_mappo = np.hstack(reward_mappo).astype(float)
    reward_new = np.hstack(reward_new).astype(float)

    plt.figure()
    plt.xlabel("Episodes", fontsize=12)
    plt.ylabel("Reward", fontsize=12)
    plt.title("Training Rewards Comparison", fontsize=14)

    plt.plot(ewma1(reward_maa2c), label='MAA2C', color='#A8CAE8', linewidth=1.3)
    plt.plot(ewma1(reward_mappo), label='MAPPO', color='#FFBE7A', linewidth=1.3)
    plt.plot(ewma1(reward_new), label='SE-MAPPO', color='#F58383', linewidth=1.3)

    plt.ylim([-50, 50])
    plt.legend(loc="lower right", ncol=2, frameon=True)
    plt.savefig(out_path)
    # Bug fix: the old message claimed 'reward_plot.png' but the figure is
    # actually written to hard1.png — report the real destination.
    print(f"Saved as {out_path}")
    

# Script entry point: generate and save the reward-comparison figure.
if __name__ == '__main__':
    plot_reward()