import numpy as np
import pandas as pd
import torch

# NOTE(review): the class/module pairing below looks swapped — PPOAgent is
# imported from dqn_agent and DQNAgent from ppo_agent. Presumably the class
# definitions live in mismatched files upstream; confirm against those modules
# before renaming anything here.
from app.services.energy_dispatch.rl_energy_dispatch.a2c_agent import A2CAgent
from app.services.energy_dispatch.rl_energy_dispatch.dqn_agent import PPOAgent
from app.services.energy_dispatch.rl_energy_dispatch.micro_grid_env import MicroGridEnv
from app.services.energy_dispatch.rl_energy_dispatch.ppo_agent import DQNAgent

# --------------------------
# 5. 统一训练（100回合）
# --------------------------
if __name__ == "__main__":
    # --------------------------
    # 5. Unified training: 100 episodes for each of DQN / PPO / A2C,
    #    keeping the best episode's dispatch actions and statistics.
    # --------------------------

    def _snapshot(env, episodes):
        """Build one comparison-table row from the episode that just finished.

        Reads the environment's accumulated cost/carbon totals, its battery
        metrics, and the supply-shortage counter.  Keys stay in Chinese
        because the CSV output consumers expect them.
        """
        battery_metrics = env.calculate_battery_metrics()
        return {
            "总成本": env.total_cost,
            "总碳排": env.total_carbon,
            "平均电池电量百分比": battery_metrics["平均电池电量百分比"],
            "充放电次数": battery_metrics["充放电次数"],
            "最大电池电量(kWh)": battery_metrics["最大电池电量(kWh)"],
            "最小电池电量(kWh)": battery_metrics["最小电池电量(kWh)"],
            "深度放电次数": battery_metrics["深度放电次数"],
            "供电不足次数": env.power_shortage_count,
            "训练回合数": episodes,
        }

    # Microgrid configuration consumed by MicroGridEnv and the agents.
    config = {
        "初始电量(kWh)": 50.0,
        "电池容量(kWh)": 100.0,
        "最大充放电功率(kW)": 10.0,
        "碳排放系数(kg/kWh)": 0.5,
        "充电效率": 0.9,
        "放电效率": 0.9
    }

    # Load the simulated environment inputs; on a missing file, generate a
    # reproducible synthetic 24-hour profile and persist it.  The original
    # bare `except:` also swallowed parse errors and KeyboardInterrupt, so
    # only the missing-file case is caught now.
    try:
        env_df = pd.read_csv("模拟环境输入数据.csv")
    except FileNotFoundError:
        np.random.seed(42)  # fixed seed -> deterministic fallback data
        env_df = pd.DataFrame({
            "太阳辐照度(W/m²)": np.random.uniform(0, 1000, 24),
            "风速(m/s)": np.random.uniform(0, 10, 24),
            "负荷功率(kW)": np.random.uniform(5, 20, 24),
            "电价_国网(元/kWh)": np.random.uniform(0.5, 1.0, 24),
            "电价_风能(元/kWh)": np.random.uniform(0.3, 0.8, 24),
            "电价_太阳能(元/kWh)": np.random.uniform(0.2, 0.7, 24)
        })
        env_df.to_csv("模拟环境输入数据.csv", index=False)

    state_dim = 7
    episodes = 100  # unified episode budget for all three agents
    results = []

    # ---------------- DQN ----------------
    print("训练DQN智能体（100回合）...")
    env = MicroGridEnv(env_df, config)
    dqn_agent = DQNAgent(state_dim, config, env_df)
    best_dqn_reward = -float('inf')
    best_dqn_stats = None
    best_dqn_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action = dqn_agent.act(state)
            next_state, reward, done, _ = env.step(action)
            dqn_agent.memory.append((state, action, reward, next_state, done))
            total_reward += reward
            state = next_state

            if done:
                # One replay pass per episode once the buffer is warm enough.
                if len(dqn_agent.memory) > 64:
                    dqn_agent.replay(64)

                if total_reward > best_dqn_reward:
                    best_dqn_reward = total_reward
                    best_dqn_stats = _snapshot(env, episodes)
                    best_dqn_actions = env.get_action_history()

                if (e + 1) % 20 == 0:  # progress log every 20 episodes
                    print(f"DQN 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    # Persist the best DQN dispatch schedule.
    best_dqn_actions.to_csv("DQN策略调度动作.csv", index=False)
    print("DQN策略调度动作已保存为 DQN策略调度动作.csv")

    # ---------------- PPO ----------------
    print("\n训练PPO智能体（100回合）...")
    env = MicroGridEnv(env_df, config)
    ppo_agent = PPOAgent(state_dim, config, env_df)
    best_ppo_reward = -float('inf')
    best_ppo_stats = None
    best_ppo_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action, log_prob, value = ppo_agent.act(state)
            next_state, reward, done, _ = env.step(action)
            ppo_agent.store_transition(state, action, log_prob, reward, value)
            total_reward += reward
            state = next_state

            if done:
                ppo_agent.update()  # one policy update per episode

                if total_reward > best_ppo_reward:
                    best_ppo_reward = total_reward
                    best_ppo_stats = _snapshot(env, episodes)
                    best_ppo_actions = env.get_action_history()

                if (e + 1) % 20 == 0:
                    print(f"PPO 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    # Persist the best PPO dispatch schedule.
    best_ppo_actions.to_csv("PPO策略调度动作.csv", index=False)
    print("PPO策略调度动作已保存为 PPO策略调度动作.csv")

    # ---------------- A2C ----------------
    print("\n训练A2C智能体（100回合）...")
    env = MicroGridEnv(env_df, config)
    a2c_agent = A2CAgent(state_dim, config, env_df)
    best_a2c_reward = -float('inf')
    best_a2c_stats = None
    best_a2c_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action, log_prob, value = a2c_agent.act(state)
            next_state, reward, done, _ = env.step(action)

            # Bootstrap the next-state value for the TD target; 0 at terminal.
            next_value = a2c_agent.critic(torch.FloatTensor(next_state).unsqueeze(0)).item() if not done else 0
            a2c_agent.update(reward, log_prob, value, next_value, done)  # per-step update

            total_reward += reward
            state = next_state

            if done:
                if total_reward > best_a2c_reward:
                    best_a2c_reward = total_reward
                    best_a2c_stats = _snapshot(env, episodes)
                    best_a2c_actions = env.get_action_history()

                if (e + 1) % 20 == 0:
                    print(f"A2C 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    # Persist the best A2C dispatch schedule.
    best_a2c_actions.to_csv("A2C策略调度动作.csv", index=False)
    print("A2C策略调度动作已保存为 A2C策略调度动作.csv")

    # Assemble the comparison table, one row per strategy.
    results = [
        {"策略名称": "优化后的DQN策略", **best_dqn_stats},
        {"策略名称": "优化后的PPO策略", **best_ppo_stats},
        {"策略名称": "优化后的A2C策略", **best_a2c_stats}
    ]

    # Save results with a fixed column order and a 1-based index column.
    result_df = pd.DataFrame(results)
    result_df = result_df[["策略名称", "总成本", "总碳排", "平均电池电量百分比", "充放电次数",
                           "最大电池电量(kWh)", "最小电池电量(kWh)", "深度放电次数", "供电不足次数", "训练回合数"]]
    result_df.index = result_df.index + 1
    result_df.to_csv("优化后策略对比结果.csv", index_label="序号")

    print("\n最终策略对比（100回合）：")
    print(result_df.to_string(index=True))