import gymnasium as gym
from gymnasium import spaces
import numpy as np

class WRPEnvMultiAgent(gym.Env):
    """Multi-agent water-resource planning (WRP) environment for a reservoir cascade.

    Each agent controls the water release ``Re_i`` of one reservoir per period.
    Reservoirs are coupled by the river topology: reservoir 3 (index 2) receives
    reservoir 2's release, and reservoir 4 (index 3) receives the releases of
    reservoirs 1 and 3.

    Per-agent observation (5-dim): ``[Qr_i, Storage_i, time, up1, up2]`` where
    ``up1``/``up2`` are the previous releases of up to two upstream reservoirs,
    zero-padded. A global state of shape ``(3 * num_agents + 1,)`` is exposed
    via :meth:`prepare_global_state` for a centralized critic.
    """

    # Upstream topology: reservoir index -> indices of reservoirs whose
    # releases flow into it. Shared by step() and get_agent_obs() so the
    # storage balance and the observations can never disagree.
    _UPSTREAM = {2: [1], 3: [0, 2]}

    def __init__(self, benefit_df, Remin, Remax, Smin, Smax, Sinitial,
                 Starget, Qr, punish_rate, horizon=12):
        """Create the environment.

        Parameters
        ----------
        benefit_df : array-like
            Benefit coefficients; indexed as ``benefit[agent, period]`` in step().
        Remin, Remax : array-like
            Per-reservoir release bounds (one entry per agent).
        Smin, Smax : array-like
            Per-reservoir storage bounds; violations are penalized, not clipped.
        Sinitial : array-like
            Initial storage per reservoir.
        Starget : array-like
            Target end-of-horizon storage per reservoir.
        Qr : array-like
            External inflow per reservoir (constant over time).
        punish_rate : float
            Weight applied to constraint-violation penalties.
        horizon : int, optional
            Number of decision periods (default 12, preserving the original
            hard-coded yearly/monthly horizon).
        """
        super().__init__()

        # Static environment parameters.
        self.benefit = np.array(benefit_df)
        self.Remin = np.array(Remin)
        self.Remax = np.array(Remax)
        self.Smin = np.array(Smin)
        self.Smax = np.array(Smax)
        self.Sinitial = np.array(Sinitial)
        self.Starget = np.array(Starget)
        self.Qr = np.array(Qr)
        self.punish_rate = punish_rate
        self.horizon = horizon
        self.num_agents = len(Remin)
        # NOTE(review): step() indexes benefit[agent, t], which implies rows are
        # agents — so len(benefit_df) may be num_agents rather than the number
        # of periods. Kept unchanged for compatibility; confirm with callers.
        self.num_time = len(benefit_df)

        # One independent scalar Box action space per agent.
        self.action_spaces = [
            spaces.Box(low=Remin[i], high=Remax[i], shape=(1,), dtype=np.float32)
            for i in range(self.num_agents)
        ]

        # Local 5-dim observation per agent: Qr_i, Storage_i, time, and the
        # last releases of up to two upstream reservoirs.
        self.observation_spaces = [
            spaces.Box(low=-np.inf, high=np.inf, shape=(5,), dtype=np.float32)
            for _ in range(self.num_agents)
        ]

        # Global state for the centralized critic:
        # [Qr_1..n, Storage_1..n, Re_1..n, time].
        self.global_state_space = spaces.Box(
            low=-np.inf, high=np.inf,
            shape=(self.num_agents * 3 + 1,),
            dtype=np.float32,
        )

        self.reset()

    def prepare_global_state(self):
        """Return the concatenated global state [Qr, Storage, last Re, time]."""
        return np.concatenate([
            self.state['Qr'],
            self.state['Storage'],
            self.last_actions,
            [self.state['time']],
        ], dtype=np.float32)

    def get_agent_obs(self, agent_id):
        """Build the 5-dim local observation for ``agent_id``.

        Layout: ``[Qr_i, Storage_i, time, upstream release 1, upstream
        release 2]``, zero-padded when the reservoir has fewer than two
        upstream neighbours.
        """
        upstream = [self.last_actions[j]
                    for j in self._UPSTREAM.get(agent_id, [])]
        upstream += [0.0] * (2 - len(upstream))  # pad to fixed length 2

        return np.array([
            self.state['Qr'][agent_id],       # inflow to this reservoir
            self.state['Storage'][agent_id],  # current storage
            self.state['time'],               # current period (1-based)
            upstream[0],
            upstream[1],
        ], dtype=np.float32)

    def reset(self, seed=None, options=None):
        """Reset storages and time; return the list of per-agent observations.

        ``seed`` and ``options`` are accepted for gymnasium API compatibility
        (both default to None, so existing ``reset()`` callers are unaffected);
        the return format — a plain list of observations — is kept unchanged.
        """
        super().reset(seed=seed)
        self.state = {
            'Qr': self.Qr.copy(),
            'Storage': self.Sinitial.copy().astype(np.float64),
            'time': 1,
        }
        self.last_actions = np.zeros(self.num_agents)
        return [self.get_agent_obs(i) for i in range(self.num_agents)]

    def step(self, actions):
        """Advance one period.

        Parameters
        ----------
        actions : sequence
            One scalar (or length-1 array/list) release per agent.

        Returns
        -------
        tuple
            ``(observations, rewards, dones, info)`` where ``info`` carries
            the summed ``global_reward`` for the centralized critic.
        """
        # Flatten each action to a scalar; keep the raw (unclipped) values so
        # out-of-bound releases can be penalized below. (Previously the bound
        # penalty was computed on the *clipped* releases and could never fire.)
        raw = np.array([a[0] if isinstance(a, (np.ndarray, list)) else a
                        for a in actions], dtype=np.float64)
        Re = np.clip(raw, self.Remin, self.Remax)
        self.last_actions = Re.flatten()

        # Mass balance: storage += inflow - own release + upstream releases.
        upstream_in = np.zeros(self.num_agents)
        for i, ups in self._UPSTREAM.items():
            if i < self.num_agents:
                upstream_in[i] = sum(Re[j] for j in ups)
        new_storage = self.state['Storage'] + self.state['Qr'] - Re + upstream_in
        self.state['Storage'] = new_storage
        # Storage is deliberately NOT clipped to [Smin, Smax]; violations are
        # discouraged through the penalty term instead.

        # Per-agent reward: release benefit minus weighted constraint penalties.
        t = self.state['time'] - 1
        rewards = []
        for i in range(self.num_agents):
            local_reward = self.benefit[i, t] * Re[i]
            punish = (
                max(raw[i] - self.Remax[i], 0) + max(self.Remin[i] - raw[i], 0) +
                max(new_storage[i] - self.Smax[i], 0) +
                max(self.Smin[i] - new_storage[i], 0)
            )
            if self.state['time'] == self.horizon:
                # Final period: penalize deviation from the target storage.
                punish += abs(self.Starget[i] - new_storage[i])
            rewards.append(local_reward - punish * self.punish_rate)

        # Shared team reward (sum of individual rewards; a global penalty
        # could be added here).
        global_reward = sum(rewards)

        self.state['time'] += 1
        done = self.state['time'] > self.horizon

        return (
            [self.get_agent_obs(i) for i in range(self.num_agents)],
            rewards,
            [done] * self.num_agents,
            {'global_reward': global_reward},
        )

# Usage example
