from decimal import Decimal

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal

from app.services.energy_dispatch.energy_dispatch_simple.energy_dispatch_simple import calculate_wind_output
from app.services.energy_dispatch.energy_management_input import EnergyInputSeries
from app.services.energy_dispatch.rl_energy_dispatch.micro_grid_env import MicroGridEnv


# --------------------------
# 3. Optimized A2C algorithm
# --------------------------
class A2CAgent:
    """Advantage Actor-Critic (A2C) agent for micro-grid energy dispatch.

    The actor emits three raw values per state:
      [0] charge/discharge logit -> sigmoid -> probability of charging
      [1] raw power mean         -> tanh-scaled into [0, max power]
      [2] raw power std          -> sigmoid-scaled into [0.05, 1.55]
    The critic emits a scalar state-value estimate.
    """

    def __init__(self, state_dim, config, env_df, lr=3e-4, gamma=0.95):
        self.state_dim = state_dim
        self.config = config
        self.env_df = env_df
        self.gamma = gamma
        # Value tensor from the most recent act() call, kept attached to the
        # autograd graph so update() can train the critic (see update()).
        self._last_value = None

        # Actor-Critic networks (deepened MLPs).
        self.actor = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 3)
        )
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 1)
        )

        # One optimizer over both networks; weight decay adds mild L2
        # regularization against overfitting.
        self.optimizer = optim.Adam(
            list(self.actor.parameters()) + list(self.critic.parameters()),
            lr=lr,
            weight_decay=1e-5
        )

    def act(self, state):
        """Sample an action for `state`.

        Returns:
            ([charge_flag, power, grid_purchase], total_log_prob, value)
            where total_log_prob is a grad-carrying tensor and value is a
            plain float from the critic.
        """
        state = torch.FloatTensor(state).unsqueeze(0)
        actor_out = self.actor(state)
        critic_out = self.critic(state)
        # Keep the critic output tensor so update() can backprop through it.
        self._last_value = critic_out.squeeze()

        # Probability of choosing to charge (vs. discharge).
        charge_prob_tensor = torch.sigmoid(actor_out[0, 0])

        # Numerically stable power mean: tanh bounds the raw output to
        # [-1, 1], then shift/scale into [0, max_power].
        max_power = self.config["最大充放电功率(kW)"]
        power_mean = (torch.tanh(actor_out[0, 1]) + 1) * max_power / 2

        # Std kept in [0.05, 1.55] so the Normal distribution stays valid.
        power_std = torch.sigmoid(actor_out[0, 2]) * 1.5 + 0.05

        # Sample the discrete charge flag and the continuous power level,
        # clipping power into the feasible range.
        charge_flag = 1 if np.random.rand() < charge_prob_tensor.item() else 0
        power_dist = Normal(power_mean, power_std)
        power = power_dist.sample().item()
        power = max(0, min(power, max_power))

        # De-normalize state features back to physical units.
        # NOTE(review): max_vals must match the env's normalization constants
        # — verify against MicroGridEnv.
        max_vals = [1000, 20, 50, 2, 1, 1, self.config["电池容量(kWh)"]]
        solar_irr = state[0, 0].item() * max_vals[0]
        wind_spd = state[0, 1].item() * max_vals[1]
        load = state[0, 2].item() * max_vals[2]
        battery = state[0, 6].item() * max_vals[6]

        # Renewable-generation parameters.
        solar_area = 500.0          # PV area, m^2
        solar_efficiency = 0.20     # PV conversion efficiency
        wind_turbine_count = 3      # number of wind turbines
        single_wind_power = 30.0    # rated power per turbine, kW

        # PV output in kW (irradiance is W/m^2, hence the /1000).
        solar_output = (solar_irr * solar_area * solar_efficiency) / 1000.0

        # Wind output: the helper expects Decimal inputs; scale by turbine count.
        wind_output = float(
            calculate_wind_output(Decimal(wind_spd), Decimal(single_wind_power)) * wind_turbine_count
        )

        # Total renewable generation available this step.
        total_renewable = solar_output + wind_output

        # Conservative grid purchase with a 5% safety margin (same as PPO).
        if charge_flag == 1:
            max_charge = min(max_power, self.config["电池容量(kWh)"] - battery)
            input_power = min(power, max_charge / self.config["充电效率"])
            grid_purchase = max(0, (load + input_power) - total_renewable) * 1.05
        else:
            max_discharge = min(max_power, battery)
            discharge_power = min(power, max_discharge)
            output_power = discharge_power * self.config["放电效率"]
            grid_purchase = max(0, load - (total_renewable + output_power)) * 1.05

        # Joint log-probability of the sampled action; clamp the charge
        # probability away from {0, 1} so log() cannot return -inf.
        safe_prob = charge_prob_tensor.clamp(1e-6, 1 - 1e-6)
        log_prob_charge = torch.log(safe_prob) if charge_flag else torch.log(1 - safe_prob)
        log_prob_power = power_dist.log_prob(torch.tensor(power, dtype=torch.float32))
        total_log_prob = log_prob_charge + log_prob_power  # stays a tensor

        return [charge_flag, power, grid_purchase], total_log_prob, critic_out.item()

    def update(self, reward, log_prob, value, next_value, done):
        """One-step A2C update from a single transition.

        Args:
            reward: scalar reward for the transition.
            log_prob: grad-carrying log-probability tensor from act().
            value: critic estimate for the current state (float).
            next_value: critic estimate for the next state (float; 0 if done).
            done: 1/True at episode end, else 0/False.
        """
        # TD target and advantage as plain floats: the advantage must be a
        # constant w.r.t. the graph (no gradient through the baseline).
        target = reward + (1 - done) * self.gamma * next_value
        advantage = target - value

        # Policy-gradient loss; log_prob carries the actor's graph from act().
        actor_loss = -log_prob * advantage

        # BUG FIX: critic loss was previously the MSE of two freshly created
        # float tensors, i.e. a constant with no path to the critic's
        # parameters — the critic never trained. Use the value tensor stored
        # by act(), which is still attached to the critic's graph.
        value_pred = self._last_value
        if value_pred is None:  # act() not called yet; fall back to a constant
            value_pred = torch.tensor(value, dtype=torch.float32)
        target_tensor = torch.tensor(target, dtype=torch.float32)
        critic_loss = 0.7 * nn.functional.mse_loss(value_pred, target_tensor)

        total_loss = actor_loss + critic_loss

        # Gradient clipping keeps single-transition updates stable.
        self.optimizer.zero_grad()
        total_loss.backward()
        nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
        nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
        self.optimizer.step()
        # Invalidate the cached value so a stale graph is never reused.
        self._last_value = None



def convert_inputs_to_dataframe(input_series: EnergyInputSeries) -> pd.DataFrame:
    """Flatten an EnergyInputSeries into the column layout the env expects."""
    # Column names are consumed downstream (MicroGridEnv) — keep them verbatim.
    column_pairs = (
        ("太阳辐照度(W/m²)", input_series.solar_values),
        ("风速(m/s)", input_series.wind_values),
        ("负荷功率(kW)", input_series.load_values),
        ("电价_国网(元/kWh)", input_series.normal_price_values),
        ("电价_风能(元/kWh)", input_series.wind_price_values),
        ("电价_太阳能(元/kWh)", input_series.solar_price_values),
    )
    return pd.DataFrame(dict(column_pairs))

def train(
        input_list: EnergyInputSeries,
        initial_capacity,
        battery_info,
        episodes: int = 100
):
    """Train an A2C agent on the micro-grid environment.

    Args:
        input_list: time series of solar/wind/load/price inputs.
        initial_capacity: initial battery charge (kWh).
        battery_info: dict with "capacity" and "max_output_power" entries.
        episodes: number of training episodes.

    Returns:
        dict with "best_stats" (metrics of the best-reward episode) and
        "best_actions" (that episode's action history).
    """
    env_df = convert_inputs_to_dataframe(input_list)
    battery_capacity = battery_info["capacity"]
    battery_max_discharge_power = battery_info["max_output_power"]

    # Environment/agent configuration; keys are consumed by MicroGridEnv and
    # A2CAgent, so they must stay exactly as written.
    config = {
        "初始电量(kWh)": float(initial_capacity),
        "电池容量(kWh)": float(battery_capacity),
        "最大充放电功率(kW)": float(battery_max_discharge_power),
        "碳排放系数(kg/kWh)": 0.5,
        "充电效率": 0.9,
        "放电效率": 0.9
    }

    state_dim = 7

    # Train the A2C agent. (Message previously hard-coded "100回合" even when
    # `episodes` differed.)
    print(f"训练A2C智能体（{episodes}回合）...")
    env = MicroGridEnv(env_df, config)
    a2c_agent = A2CAgent(state_dim, config, env_df)
    best_reward = -float('inf')
    best_stats = None
    best_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action, log_prob, value = a2c_agent.act(state)
            next_state, reward, done, _ = env.step(action)

            # Bootstrap value of the next state; zero at episode end.
            next_value = a2c_agent.critic(torch.FloatTensor(next_state).unsqueeze(0)).item() if not done else 0
            a2c_agent.update(reward, log_prob, value, next_value, done)

            total_reward += reward
            state = next_state

            if done:
                # Keep the stats/action history of the best episode so far.
                if total_reward > best_reward:
                    best_reward = total_reward
                    battery_metrics = env.calculate_battery_metrics()
                    best_stats = {
                        "总成本": env.total_cost,
                        "总碳排": env.total_carbon,
                        "平均电池电量百分比": battery_metrics["平均电池电量百分比"],
                        "充放电次数": battery_metrics["充放电次数"],
                        "最大电池电量(kWh)": battery_metrics["最大电池电量(kWh)"],
                        "最小电池电量(kWh)": battery_metrics["最小电池电量(kWh)"],
                        "深度放电次数": battery_metrics["深度放电次数"],
                        "供电不足次数": env.power_shortage_count,
                        "训练回合数": episodes
                    }
                    best_actions = env.get_action_history()

                # Progress log every 20 episodes.
                if (e + 1) % 20 == 0:
                    print(f"A2C 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    return {
        "best_stats": best_stats,
        "best_actions": best_actions
    }
