from decimal import Decimal

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal

from app.services.energy_dispatch.energy_dispatch_simple.energy_dispatch_simple import calculate_wind_output
from app.services.energy_dispatch.energy_management_input import EnergyInputSeries
from app.services.energy_dispatch.rl_energy_dispatch.micro_grid_env import MicroGridEnv


# --------------------------
# 2. Optimized PPO algorithm
# --------------------------
class PPOAgent:
    """PPO agent controlling a micro-grid battery.

    The actor network emits three raw values per state — a charge-probability
    logit, the log of the power mean, and a raw std component — and the critic
    emits a scalar state value. Action sampling plus a grid-purchase heuristic
    live in ``act``; clipped-surrogate PPO training lives in ``update``.
    """

    def __init__(self, state_dim, config, env_df, lr_actor=3e-4, lr_critic=1e-3, gamma=0.95, eps_clip=0.2):
        # config is a dict keyed by Chinese labels, e.g. "电池容量(kWh)"
        # (battery capacity), "最大充放电功率(kW)" (max charge/discharge power),
        # "充电效率"/"放电效率" (charge/discharge efficiency).
        self.state_dim = state_dim
        self.config = config
        self.env_df = env_df  # environment dataframe; stored but never read in this class
        self.gamma = gamma
        self.eps_clip = eps_clip  # slightly smaller clip range for stability

        # Policy network (extra layer for more expressive power)
        self.actor = nn.Sequential(
            nn.Linear(state_dim, 256),  # widened from 128 to 256
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 3)  # [charge-prob logit, power mean (log-space), power std (raw)]
        )

        # Value network (same trunk shape as the actor)
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

        self.optimizer_actor = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.optimizer_critic = optim.Adam(self.critic.parameters(), lr=lr_critic)
        self.memory = []  # rollout buffer of (state, action, log_prob, reward, value)

    def act(self, state):
        """Sample an action for ``state`` and derive the grid purchase.

        Returns:
            ``([charge_flag, power, grid_purchase], total_log_prob, value)`` —
            ``charge_flag`` is 0/1, ``power`` is clamped to
            [0, max charge/discharge power], ``grid_purchase`` is the estimated
            grid energy to buy, ``total_log_prob`` is the joint log-prob of the
            sampled action, and ``value`` is the critic's state-value estimate.
        """
        state = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            actor_out = self.actor(state)
            value = self.critic(state).item()

        # Decode the action distribution from the raw actor outputs.
        charge_prob_tensor = torch.sigmoid(actor_out[0, 0])
        power_mean = torch.exp(actor_out[0, 1])  # exp keeps the mean non-negative
        power_std = torch.sigmoid(actor_out[0, 2]) * 1.5 + 0.05  # bounded std in (0.05, 1.55) to limit extreme actions

        # Sample the action (shrunken std biases toward conservative moves).
        charge_flag = 1 if np.random.rand() < charge_prob_tensor.item() else 0
        power_dist = Normal(power_mean, power_std)
        power = power_dist.sample().item()
        power = max(0, min(power, self.config["最大充放电功率(kW)"]))  # clamp to the plant power limit

        # De-normalize the state. max_vals are the per-feature normalization
        # maxima: indices 0-2 and 6 are irradiance (W/m²), wind speed (m/s),
        # load (kW) and battery charge (kWh). Indices 3-5 are unused here —
        # presumably price channels; TODO confirm against MicroGridEnv's layout.
        max_vals = [1000, 20, 50, 2, 1, 1, self.config["电池容量(kWh)"]]
        solar_irr = state[0, 0].item() * max_vals[0]
        wind_spd = state[0, 1].item() * max_vals[1]
        load = state[0, 2].item() * max_vals[2]
        battery = state[0, 6].item() * max_vals[6]

        # Renewable generation and load calculation.
        # Fixed plant parameters.
        solar_area = float("500.0")  # PV array area, m²
        solar_efficiency = float("0.20")  # PV conversion efficiency
        wind_turbine_count = 3  # number of wind turbines
        single_wind_power = float("30.0")  # rated power per turbine, kW

        # Environment readings (already de-normalized above).
        solar_irradiance = float(solar_irr)
        wind_speed = float(str(wind_spd))  # str round-trip before the Decimal conversion below

        # PV output.
        solar_output = (solar_irradiance * solar_area * solar_efficiency) / float("1000")  # kW

        # Wind output (per-turbine helper scaled by turbine count).
        wind_output = float(calculate_wind_output(Decimal(wind_speed), Decimal(single_wind_power)) * wind_turbine_count)

        # Total renewable output.
        total_renewable = solar_output + wind_output

        # Grid purchase: conservative estimate with a 5% safety margin.
        if charge_flag == 1:
            # Charging: cap by both the power limit and the remaining capacity.
            max_charge = min(self.config["最大充放电功率(kW)"], self.config["电池容量(kWh)"] - battery)
            input_power = min(power, max_charge / self.config["充电效率"])
            # purchase = load + charging demand - renewables, padded by 5% to avoid shortfall
            grid_purchase = max(0, (load + input_power) - total_renewable) * 1.05
        else:
            # Discharging: cap by both the power limit and the stored energy.
            max_discharge = min(self.config["最大充放电功率(kW)"], battery)
            discharge_power = min(power, max_discharge)
            output_power = discharge_power * self.config["放电效率"]
            # purchase = load - (renewables + discharge), padded by 5%
            grid_purchase = max(0, load - (total_renewable + output_power)) * 1.05

        # Joint log-probability of the sampled (flag, power) action.
        # NOTE(review): the power log-prob is evaluated at the clamped value,
        # not the raw sample — verify this is intended.
        log_prob_charge = torch.log(charge_prob_tensor) if charge_flag else torch.log(1 - charge_prob_tensor)
        log_prob_power = power_dist.log_prob(torch.tensor(power, dtype=torch.float32))
        total_log_prob = (log_prob_charge + log_prob_power).item()

        return [charge_flag, power, grid_purchase], total_log_prob, value

    def update(self, batch_size=64):  # larger batch size for stability
        """Run PPO clipped-objective training over the stored transitions.

        No-op while fewer than ``batch_size`` transitions are stored (memory
        keeps accumulating); otherwise runs 15 epochs of mini-batch updates
        and clears the buffer.
        """
        if len(self.memory) < batch_size:
            return
        states, actions, old_log_probs, rewards, values = zip(*self.memory)

        # Convert the state list to one numpy array first (silences torch's
        # slow-tensor-creation warning).
        states_np = np.array(states)
        states = torch.FloatTensor(states_np)

        # GAE advantage estimation (more stable than raw returns).
        # NOTE(review): values[i+1] bootstraps across episode boundaries when
        # the buffer spans several episodes — no done-mask is stored; confirm
        # this is acceptable for this environment.
        returns = []
        advantages = []
        gae = 0
        for i in reversed(range(len(rewards))):
            delta = rewards[i] + self.gamma * (values[i+1] if i+1 < len(values) else 0) - values[i]
            gae = delta + self.gamma * 0.97 * gae  # lambda = 0.97 for extra smoothing
            returns.insert(0, gae + values[i])
            advantages.insert(0, gae)

        # Normalize the advantages.
        advantages = torch.tensor(advantages, dtype=torch.float32)
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # To tensors.
        old_log_probs = torch.tensor(old_log_probs, dtype=torch.float32)
        returns = torch.tensor(returns, dtype=torch.float32)

        # Multiple update epochs over the same rollout (standard PPO reuse).
        for _ in range(15):
            for i in range(0, len(states), batch_size):
                batch_states = states[i:i+batch_size]
                batch_old_log_probs = old_log_probs[i:i+batch_size]
                batch_advantages = advantages[i:i+batch_size]
                batch_returns = returns[i:i+batch_size]

                # Current policy/value outputs for this mini-batch.
                actor_out = self.actor(batch_states)
                current_values = self.critic(batch_states).squeeze()

                # Re-evaluate each stored action's log-prob under the current
                # policy (mirrors the decoding done in act()).
                current_log_probs = []
                for j in range(len(batch_states)):
                    action = actions[i+j]
                    charge_flag = action[0]
                    power = action[1]

                    charge_prob_tensor = torch.sigmoid(actor_out[j, 0])
                    power_mean = torch.exp(actor_out[j, 1])
                    power_std = torch.sigmoid(actor_out[j, 2]) * 1.5 + 0.05
                    power_dist = Normal(power_mean, power_std)

                    log_prob_charge = torch.log(charge_prob_tensor) if charge_flag else torch.log(1 - charge_prob_tensor)
                    log_prob_power = power_dist.log_prob(torch.tensor(power, dtype=torch.float32))
                    current_log_probs.append(log_prob_charge + log_prob_power)

                current_log_probs = torch.stack(current_log_probs)

                # PPO clipped surrogate objective.
                ratio = torch.exp(current_log_probs - batch_old_log_probs)
                surr1 = ratio * batch_advantages
                surr2 = torch.clamp(ratio, 1 - self.eps_clip, 1 + self.eps_clip) * batch_advantages
                actor_loss = -torch.min(surr1, surr2).mean()

                # Value loss (weighted up by 0.7 factor).
                critic_loss = 0.7 * nn.MSELoss()(current_values, batch_returns)

                # Optimize both networks.
                self.optimizer_actor.zero_grad()
                actor_loss.backward(retain_graph=True)
                nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)  # gradient clipping to prevent explosion
                self.optimizer_actor.step()

                self.optimizer_critic.zero_grad()
                critic_loss.backward()
                nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.optimizer_critic.step()

        self.memory = []  # clear the rollout buffer

    def store_transition(self, state, action, log_prob, reward, value):
        """Append one (state, action, log_prob, reward, value) transition."""
        self.memory.append((state, action, log_prob, reward, value))


def convert_inputs_to_dataframe(input_series: EnergyInputSeries) -> pd.DataFrame:
    """Assemble the raw input series into the environment DataFrame.

    Column labels are the Chinese headers expected by MicroGridEnv
    (irradiance, wind speed, load, and the three price channels).
    """
    columns = {
        "太阳辐照度(W/m²)": input_series.solar_values,
        "风速(m/s)": input_series.wind_values,
        "负荷功率(kW)": input_series.load_values,
        "电价_国网(元/kWh)": input_series.normal_price_values,
        "电价_风能(元/kWh)": input_series.wind_price_values,
        "电价_太阳能(元/kWh)": input_series.solar_price_values,
    }
    return pd.DataFrame(columns)

def train(
        input_list:EnergyInputSeries,
        initial_capacity,
        battery_info,
        episodes: int = 100
):
    """Train a PPO agent on the micro-grid environment and return the best episode.

    Args:
        input_list: time series of solar/wind/load/price inputs.
        initial_capacity: initial battery charge in kWh.
        battery_info: dict with "capacity" (kWh) and "max_output_power" (kW).
        episodes: number of training episodes.

    Returns:
        dict with "best_stats" (metrics of the highest-reward episode) and
        "best_actions" (that episode's action history). Both are None if no
        episode completed.
    """
    env_df = convert_inputs_to_dataframe(input_list)
    battery_capacity = battery_info["capacity"]
    battery_max_discharge_power = battery_info["max_output_power"]

    # Plant configuration; keys are the Chinese labels that PPOAgent and
    # MicroGridEnv read.
    config = {
        "初始电量(kWh)": float(initial_capacity),
        "电池容量(kWh)": float(battery_capacity),
        "最大充放电功率(kW)": float(battery_max_discharge_power),
        "碳排放系数(kg/kWh)": 0.5,
        "充电效率": 0.9,
        "放电效率": 0.9
    }

    state_dim = 7

    # Train the PPO agent. The progress message previously hard-coded
    # "100回合"; it now reflects the actual `episodes` argument.
    print(f"训练PPO智能体（{episodes}回合）...")
    env = MicroGridEnv(env_df, config)
    ppo_agent = PPOAgent(state_dim, config, env_df)
    best_reward = -float('inf')
    best_stats = None
    best_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action, log_prob, value = ppo_agent.act(state)
            next_state, reward, done, _ = env.step(action)
            ppo_agent.store_transition(state, action, log_prob, reward, value)
            total_reward += reward
            state = next_state

            if done:
                ppo_agent.update()  # PPO update on the accumulated rollout

                # Track the best-scoring episode's statistics and actions.
                if total_reward > best_reward:
                    best_reward = total_reward
                    battery_metrics = env.calculate_battery_metrics()
                    best_stats = {
                        "总成本": env.total_cost,
                        "总碳排": env.total_carbon,
                        "平均电池电量百分比": battery_metrics["平均电池电量百分比"],
                        "充放电次数": battery_metrics["充放电次数"],
                        "最大电池电量(kWh)": battery_metrics["最大电池电量(kWh)"],
                        "最小电池电量(kWh)": battery_metrics["最小电池电量(kWh)"],
                        "深度放电次数": battery_metrics["深度放电次数"],
                        "供电不足次数": env.power_shortage_count,
                        "训练回合数": episodes
                    }
                    best_actions = env.get_action_history()

                if (e + 1) % 20 == 0:
                    print(f"PPO 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    return {
        "best_stats": best_stats,
        "best_actions": best_actions
    }
