from decimal import Decimal
from typing import List

import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
import random

from app.services.energy_dispatch.energy_dispatch_simple.energy_dispatch_simple import calculate_wind_output
from app.services.energy_dispatch.energy_management_input import EnergyInputSeries
from app.services.energy_dispatch.rl_energy_dispatch.micro_grid_env import MicroGridEnv

# --------------------------
# 4. DQN algorithm (kept consistent with the earlier version)
# --------------------------
class DQNAgent:
    """Deep Q-Network agent for micro-grid battery dispatch.

    The Q-network maps a 7-dimensional normalized state to 3 outputs that
    ``act`` decodes heuristically: output 0 selects charge vs. discharge,
    output 1 scales the charge/discharge power; output 2 is currently unused.
    """

    def __init__(self, state_dim, config, env_df):
        self.state_dim = state_dim
        self.config = config              # plant parameters (battery size, efficiencies, ...)
        self.env_df = env_df              # environment time series (kept for reference)
        self.memory = deque(maxlen=4000)  # replay buffer (enlarged experience pool)
        self.gamma = 0.95                 # discount factor
        self.epsilon = 1.0                # exploration rate, decayed once per replay() call
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.model = self._build_model()
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
        self.criterion = nn.MSELoss()

    def _build_model(self):
        """Return the Q-network: a 4-layer MLP, state_dim -> 256 -> 128 -> 64 -> 3."""
        return nn.Sequential(
            nn.Linear(self.state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 3)
        )

    def act(self, state, deterministic=False):
        """Choose a dispatch action for the given normalized state.

        Args:
            state: 7-element normalized state vector; index 0 is solar
                irradiance, 1 wind speed, 2 load, 6 battery level
                (indices 3-5 are not read here — presumably prices).
            deterministic: when True, skip epsilon-greedy exploration.

        Returns:
            ``[charge_flag, power, grid_purchase]`` — charge_flag is 1 for
            charging / 0 for discharging, power is the battery input or
            discharge power (kW), grid_purchase is grid import (kW).
        """
        # De-normalization bounds; indices 3-5 are unused by this decoder.
        max_vals = [1000, 20, 50, 2, 1, 1, self.config["电池容量(kWh)"]]
        solar_irr = state[0] * max_vals[0]
        wind_spd = state[1] * max_vals[1]
        load = state[2] * max_vals[2]
        battery = state[6] * max_vals[6]

        # Fixed plant parameters (hard-coded here, not taken from config).
        solar_area = 500.0         # PV panel area, m^2
        solar_efficiency = 0.20    # PV conversion efficiency
        wind_turbine_count = 3     # number of identical wind turbines
        single_wind_power = 30.0   # rated power per turbine, kW

        # PV output in kW (irradiance is W/m^2, hence the /1000).
        solar_output = (float(solar_irr) * solar_area * solar_efficiency) / 1000.0

        # Wind output: the project helper expects Decimal arguments; scale by
        # the number of turbines.
        wind_output = float(
            calculate_wind_output(Decimal(float(wind_spd)), Decimal(single_wind_power))
            * wind_turbine_count
        )

        # Total renewable generation available this step.
        total_renewable = solar_output + wind_output

        # Exploration: sample a random but feasible action.
        if not deterministic and np.random.rand() <= self.epsilon:
            charge_flag = np.random.choice([0, 1])
            if charge_flag == 1:
                # Charge power bounded by the power rating and remaining capacity.
                max_charge = min(self.config["最大充放电功率(kW)"], self.config["电池容量(kWh)"] - battery)
                input_power = np.random.uniform(0, max_charge / self.config["充电效率"])
                grid_purchase = max(0, (load + input_power) - total_renewable) * 1.05  # 5% margin
            else:
                # Discharge bounded by the power rating and stored energy.
                max_discharge = min(self.config["最大充放电功率(kW)"], battery)
                discharge_power = np.random.uniform(0, max_discharge)
                output_power = discharge_power * self.config["放电效率"]
                grid_purchase = max(0, load - (total_renewable + output_power)) * 1.05
            return [charge_flag, input_power if charge_flag else discharge_power, grid_purchase]

        # Exploitation: decode the greedy action from the Q-network outputs.
        state_tensor = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            q_values = self.model(state_tensor).numpy()[0]

        charge_flag = 1 if q_values[0] > 0.5 else 0
        power = abs(q_values[1]) * self.config["最大充放电功率(kW)"]

        if charge_flag == 1:
            max_charge = min(self.config["最大充放电功率(kW)"], self.config["电池容量(kWh)"] - battery)
            input_power = min(power, max_charge / self.config["充电效率"])
            grid_purchase = max(0, (load + input_power) - total_renewable) * 1.05
        else:
            max_discharge = min(self.config["最大充放电功率(kW)"], battery)
            discharge_power = min(power, max_discharge)
            output_power = discharge_power * self.config["放电效率"]
            grid_purchase = max(0, load - (total_renewable + output_power)) * 1.05

        return [charge_flag, input_power if charge_flag else discharge_power, grid_purchase]

    def replay(self, batch_size=64):
        """Run one optimization pass over a random minibatch; decay epsilon.

        No-op while the buffer holds fewer than ``batch_size`` transitions.
        """
        if len(self.memory) < batch_size:
            return
        batch = random.sample(self.memory, batch_size)

        for state, action, reward, next_state, done in batch:
            state_tensor = torch.FloatTensor(state)

            current_q = self.model(state_tensor)

            # Build the TD target without gradient tracking. Bug fix: the
            # target was previously a plain clone() of current_q (and the
            # next-state Q-max was computed with grad enabled), so MSELoss
            # back-propagated through the target as well as the prediction.
            with torch.no_grad():
                target_q = torch.tensor(reward, dtype=torch.float32)
                if not done:
                    next_state_tensor = torch.FloatTensor(next_state)
                    target_q = target_q + self.gamma * torch.max(self.model(next_state_tensor))

                target = current_q.detach().clone()
                # Only the Q-value of the taken action is pushed toward target_q;
                # action slot 0 corresponds to charging, slot 1 to discharging.
                action_idx = 0 if action[0] == 1 else 1
                target[action_idx] = target_q

            self.optimizer.zero_grad()
            loss = self.criterion(current_q, target)
            loss.backward()
            self.optimizer.step()

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

def convert_inputs_to_dataframe(input_series: EnergyInputSeries) -> pd.DataFrame:
    """Assemble the raw input series into a single environment DataFrame.

    Column labels are the Chinese names expected by the downstream
    environment; column order follows the mapping below.
    """
    column_to_attr = {
        "太阳辐照度(W/m²)": "solar_values",
        "风速(m/s)": "wind_values",
        "负荷功率(kW)": "load_values",
        "电价_国网(元/kWh)": "normal_price_values",
        "电价_风能(元/kWh)": "wind_price_values",
        "电价_太阳能(元/kWh)": "solar_price_values",
    }
    data = {label: getattr(input_series, attr) for label, attr in column_to_attr.items()}
    return pd.DataFrame(data)

def train(
        input_list:EnergyInputSeries,
        initial_capacity,
        battery_info,
        episodes: int = 100
):
    """Train a DQN agent on the micro-grid environment.

    Args:
        input_list: time series of solar/wind/load/price inputs.
        initial_capacity: battery charge at the start of each episode (kWh).
        battery_info: dict with "capacity" (kWh) and "max_output_power" (kW).
        episodes: number of training episodes.

    Returns:
        dict with "best_stats" (metrics of the best-reward episode) and
        "best_actions" (the action history of that episode).
    """
    env_df = convert_inputs_to_dataframe(input_list)
    battery_capacity = battery_info["capacity"]
    battery_max_discharge_power = battery_info["max_output_power"]

    # Plant configuration shared by the environment and the agent.
    config = {
        "初始电量(kWh)": float(initial_capacity),
        "电池容量(kWh)": float(battery_capacity),
        "最大充放电功率(kW)": float(battery_max_discharge_power),
        "碳排放系数(kg/kWh)": 0.5,
        "充电效率": 0.9,
        "放电效率": 0.9
    }

    state_dim = 7  # matches the 7-element state the agent de-normalizes in act()

    # Bug fix: the message previously hard-coded "100回合" even when a
    # different episode count was requested.
    print(f"训练DQN智能体（{episodes}回合）...")
    env = MicroGridEnv(env_df, config)
    dqn_agent = DQNAgent(state_dim, config, env_df)
    print("开始训练...")
    best_reward = -float('inf')
    best_stats = None
    best_actions = None

    for e in range(episodes):
        state = env.reset()
        total_reward = 0
        while True:
            action = dqn_agent.act(state)
            next_state, reward, done, _ = env.step(action)
            dqn_agent.memory.append((state, action, reward, next_state, done))
            total_reward += reward
            state = next_state

            if done:
                # Learn once per finished episode, once the buffer is warm.
                if len(dqn_agent.memory) > 64:
                    dqn_agent.replay(64)

                # Keep the statistics and action trace of the best episode so far.
                if total_reward > best_reward:
                    best_reward = total_reward
                    battery_metrics = env.calculate_battery_metrics()
                    best_stats = {
                        "总成本": env.total_cost,
                        "总碳排": env.total_carbon,
                        "平均电池电量百分比": battery_metrics["平均电池电量百分比"],
                        "充放电次数": battery_metrics["充放电次数"],
                        "最大电池电量(kWh)": battery_metrics["最大电池电量(kWh)"],
                        "最小电池电量(kWh)": battery_metrics["最小电池电量(kWh)"],
                        "深度放电次数": battery_metrics["深度放电次数"],
                        "供电不足次数": env.power_shortage_count,
                        "训练回合数": episodes
                    }
                    best_actions = env.get_action_history()

                if (e + 1) % 20 == 0:  # progress report every 20 episodes
                    print(f"DQN 回合 {e+1}/{episodes}，总奖励: {total_reward:.2f}，供电不足: {env.power_shortage_count}次")
                break

    return {
        "best_stats": best_stats,
        "best_actions": best_actions
    }

