import torch
import torch.optim as optim
import numpy as np


from network_V4_OURS import ConditionedAgentNetwork, MultiObjectiveMixingNetwork
from environmen_V10 import ElectricBusChargingEnv
import torch.nn as nn
import time
from collections import deque
import os
import pandas as pd
import torch.nn.functional as F


# Enable autograd anomaly detection to pinpoint the operation that produced a
# NaN/Inf gradient. NOTE: this slows training noticeably; debugging aid only.
torch.autograd.set_detect_anomaly(True)
'''
#*****************************************************************************    更 改 后 的 MOMIX 代 码 （对训练机制进行修改 增加了分段权重调整和优先经验回放机制 ******************************************************************************************************************************
'''
# (The bare string above is a banner comment: "revised MOMIX code — training
# mechanism modified with staged weight adjustment and prioritized replay".)
# Check if a GPU is available and if so, use it
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class ExplorationGuide:
    """Adaptive sampler over preference-weight subspaces.

    Maintains a discretised grid of preference intervals and samples one of
    them with probability inversely proportional to how densely the current
    non-dominated set covers each interval, so under-explored regions of the
    preference space are sampled more often.
    """

    def __init__(self, num_subspaces, sampling_interval=0.025):
        """Build the subspace grid and start from a uniform distribution.

        Args:
            num_subspaces (int): nominal subspace count (stored for reference;
                the actual grid size is derived from ``sampling_interval``).
            sampling_interval (float): step used to build the intervals
                ``[k * d, 1 - k * d]`` for ``k`` in ``0..int(1/d)``.
        """
        self.num_subspaces = num_subspaces
        self.sampling_interval = sampling_interval
        step = sampling_interval
        grid_size = int(1 / step) + 1
        # Nested intervals shrinking toward the centre of [0, 1].
        self.subspaces = [(k * step, 1 - k * step) for k in range(grid_size)]
        # Start uniform: every subspace equally likely.
        self.probabilities = np.ones(len(self.subspaces)) / len(self.subspaces)

    def update_probabilities(self, non_dominated_set):
        """Recompute sampling probabilities from the non-dominated archive.

        Each archived solution is assigned to its nearest subspace (Euclidean
        distance between the solution's ``pref`` tuple and the subspace
        tuple); subspaces that collected fewer solutions receive a
        proportionally higher sampling probability (inverse density, with a
        +1 smoothing term so empty subspaces do not divide by zero).

        Args:
            non_dominated_set: iterable of dicts, each carrying a ``'pref'``
                preference tuple.
        """
        occupancy = np.zeros(len(self.subspaces))
        for entry in non_dominated_set:
            pref_vec = np.array(entry['pref'])
            # First index with minimal distance (same tie-break as argmin).
            nearest = min(
                range(len(self.subspaces)),
                key=lambda k: np.linalg.norm(pref_vec - np.array(self.subspaces[k])),
            )
            occupancy[nearest] += 1

        inverse_density = 1 / (occupancy + 1)
        self.probabilities = inverse_density / inverse_density.sum()

    def sample_preference(self):
        """Draw one subspace tuple according to the current probabilities.

        Returns:
            tuple: the sampled ``(low, high)`` subspace.
        """
        chosen = np.random.choice(len(self.subspaces), p=self.probabilities)
        return self.subspaces[chosen]


# 示例使用：
# exploration_guide = ExplorationGuide(num_subspaces=10)
# non_dominated_set = [{'pref': (0.3, 0.7)}, {'pref': (0.4, 0.6)}]
# exploration_guide.update_probabilities(non_dominated_set)
# preference = exploration_guide.sample_preference()
# print(preference)


class M3RL:
    def __init__(self, env, max_size, num_agents, num_objectives, global_obs_dim, bus_obs_dim, storge_obs_dim,
                 history_action_dim, bus_action_dim, storge_action_dim, hidden_dim, lr, batch_size):
        """Multi-objective multi-agent RL coordinator (MOMIX-style).

        Args:
            env: environment exposing ``num_lines``, ``reset``, ``step``,
                ``state_to_tensor`` etc.
            max_size (int): nominal replay capacity (stored for reference; the
                actual buffers below use fixed maxlens of 3000 / 7000).
            num_agents (int): total agents — bus agents plus one storage agent.
            num_objectives (int): number of reward objectives (e.g. 2).
            global_obs_dim (int): global observation size.
            bus_obs_dim (int): per-bus-agent observation size.
            storge_obs_dim (int): storage-agent observation size.
            history_action_dim (int): size of the previous-action input.
            bus_action_dim (int): discrete action count for bus agents.
            storge_action_dim (int): discrete action count for storage agent.
            hidden_dim (int): hidden width for every network.
            lr (float): Adam learning rate shared by all networks.
            batch_size (int): minibatch size fed through the networks.
        """
        self.env = env
        self.num_lines = env.num_lines
        # One of the agents is the storage agent; all others drive buses.
        self.num_bus_agents = num_agents - 1
        self.num_objectives = num_objectives
        self.hidden_dim = hidden_dim
        self.bus_action_dim = bus_action_dim

        # Online networks: one conditioned agent network per bus line, one
        # network for the storage agent, plus the mixing network on top.
        # NOTE: the construction order below is kept identical to the
        # original so RNG-dependent weight initialisation is unchanged.
        self.can_networks_bus = self._create_networks(self.num_lines, bus_obs_dim, history_action_dim, bus_action_dim,
                                                      hidden_dim, num_objectives, batch_size)
        self.can_networks_storge = ConditionedAgentNetwork(
            storge_obs_dim, history_action_dim, storge_action_dim, hidden_dim, num_objectives, batch_size
        ).to(device)
        self.mom_network = MultiObjectiveMixingNetwork(num_agents, num_objectives, global_obs_dim, hidden_dim).to(
            device)

        # Target twins used for stable TD targets; hard-synced periodically.
        self.target_networks_bus = self._create_networks(self.num_lines, bus_obs_dim, history_action_dim,
                                                         bus_action_dim, hidden_dim, num_objectives, batch_size)
        self.target_networks_storge = ConditionedAgentNetwork(
            storge_obs_dim, history_action_dim, storge_action_dim, hidden_dim, num_objectives, batch_size
        ).to(device)

        # Single optimizer over every trainable parameter (mixing net first,
        # then the storage net, then each bus net — same ordering as before).
        trainable_params = list(self.mom_network.parameters())
        trainable_params += list(self.can_networks_storge.parameters())
        for bus_net in self.can_networks_bus:
            trainable_params += list(bus_net.parameters())
        self.optimizer = optim.Adam(trainable_params, lr=lr)

        # Preference sampler over objective-weight subspaces.
        self.exploration_guide = ExplorationGuide(num_subspaces=2)
        self.mae_loss = nn.L1Loss()

        # Split replay storage: extreme-weather days vs. normal days.
        self.extreme_replay_buffer = deque(maxlen=3000)
        self.normal_replay_buffer = deque(maxlen=7000)
        # Training phase; switched to "mixed" inside ``train``.
        self.training_phase = "normal"
        self.max_size = max_size

        # Archive of non-dominated (Pareto-optimal) solutions.
        self.non_dominated_set = deque(maxlen=10000)

        # Discount factor (attribute name kept as-is for compatibility).
        self.discount_facto = 0.99
        # Target-network sync period, in episodes.
        self.update_frequency = 10
        self.batch_size = batch_size
        # Penalty coefficient used by the objective.
        self.penalty_term = 0.1

    # (Note: weight loading is implemented further below in ``load_saved_weights``.)

    def _create_networks(self, num_networks, obs_dim, history_action_dim, action_dim, hidden_dim, num_objectives,
                         batch_size):
        """Build ``num_networks`` identical ConditionedAgentNetwork instances.

        Each network is moved to the module-level ``device``. Returns a plain
        list so callers can index networks per line / per task head.

        Args:
            num_networks (int): how many networks to build.
            obs_dim (int): observation size.
            history_action_dim (int): previous-action input size.
            action_dim (int): discrete action count.
            hidden_dim (int): hidden-layer width.
            num_objectives (int): number of reward objectives.
            batch_size (int): minibatch size the networks are configured for.

        Returns:
            list[ConditionedAgentNetwork]: freshly constructed networks.
        """
        networks = []
        for _ in range(num_networks):
            net = ConditionedAgentNetwork(
                obs_dim, history_action_dim, action_dim, hidden_dim, num_objectives, batch_size
            )
            networks.append(net.to(device))
        return networks

    def store_experience(self, obs, bus_hidden_states, storge_hidden_state, pre_bus_action, pre_storge_action,
                         bus_actions, storge_action, reward1, reward2, next_obs, bus_new_hidden_states,
                         storge_new_hidden_state, pref, step_count1, date=None, is_extreme=False, violate_constraint=0):
        # 创建经验元组，现在包含日期和极端天气字段
        experience = (
            obs, bus_hidden_states, storge_hidden_state, pre_bus_action, pre_storge_action, bus_actions, storge_action,
            reward1, reward2, next_obs, bus_new_hidden_states, storge_new_hidden_state, pref, step_count1,date, is_extreme, violate_constraint
        )

        # 存储经验到对应的经验池，超出 buffer_size 时移除旧数据
        if is_extreme:
            self.extreme_replay_buffer.append(experience)
        else:
            self.normal_replay_buffer.append(experience)
            # print("开始测定h1", len(self.normal_replay_buffer))


    def sample_experiences(self, batch_size, extreme_ratio_normal=0.1, extreme_ratio_extreme=0.5, task_type="normal"):
        """Sample a minibatch mixing normal-day and extreme-day transitions.

        Parameters:
        - batch_size: total number of transitions to draw.
        - extreme_ratio_normal: share of extreme data for the "normal" task
          head during the mixed training phase.
        - extreme_ratio_extreme: share of extreme data for the "extreme" task
          head during the mixed training phase.
        - task_type: which task head is being trained ("normal"/"extreme").

        Returns a 16-tuple of batch components in this order:
        obs, bus_hidden, storge_hidden, pre_bus_action, pre_storge_action,
        bus_actions, storge_actions, rewards, next_obs, bus_new_hidden,
        storge_new_hidden, pref, date, step_count1, is_extreme,
        violate_constraint.

        NOTE(review): the caller ``update`` unpacks
        ``..., pref_batch, step_batch, date_batch, ...`` — i.e. with the
        date/step positions swapped relative to the return order here.
        Confirm which ordering is intended before relying on either tensor.
        """

        # Decide how many extreme transitions this batch should contain.
        if self.training_phase == "normal":
            # Early phase: both task heads train on normal-day data only.
            normal_batch_size = batch_size
            extreme_batch_size = 0
        else:
            # Mixed phase: the extreme share depends on the task head.
            if task_type == "normal":
                extreme_batch_size = int(batch_size * extreme_ratio_normal)
            else:
                extreme_batch_size = int(batch_size * extreme_ratio_extreme)

            normal_batch_size = batch_size - extreme_batch_size

        # Sample the normal and extreme parts separately.
        normal_batch, extreme_batch = [], []

        # Draw the normal-day quota; if that buffer is too small, shift the
        # whole quota onto the extreme buffer instead.
        if normal_batch_size > 0:
            if len(self.normal_replay_buffer) < normal_batch_size:
                print("Not enough samples in normal replay buffer.")
                normal_batch_size = 0
                extreme_batch_size = batch_size - normal_batch_size

            normal_indices = np.random.choice(len(self.normal_replay_buffer), normal_batch_size, replace=False)
            normal_batch = [self.normal_replay_buffer[i] for i in normal_indices]


        # Draw the extreme-day quota; if that buffer is too small, fall back
        # entirely to the normal buffer (re-sampling it).
        # NOTE(review): if BOTH buffers hold fewer than ``batch_size``
        # transitions this raises from ``np.random.choice`` — confirm the
        # caller guarantees enough data before the first update.
        if extreme_batch_size > 0:
            if len(self.extreme_replay_buffer) < extreme_batch_size:
                print("Not enough samples in extreme replay buffer.")
                extreme_batch_size = 0
                normal_batch_size = batch_size - extreme_batch_size
                normal_indices = np.random.choice(len(self.normal_replay_buffer), normal_batch_size, replace=False)
                normal_batch = [self.normal_replay_buffer[i] for i in normal_indices]
            extreme_indices = np.random.choice(len(self.extreme_replay_buffer), extreme_batch_size, replace=False)
            extreme_batch = [self.extreme_replay_buffer[i] for i in extreme_indices]

        # Concatenate the two parts into one batch.
        batch = normal_batch + extreme_batch

        # Unzip each 17-tuple experience into per-field lists.
        obs_batch, bus_hidden_states_batch, storge_hidden_states_batch, pre_bus_action_batch = [], [], [], []
        pre_storge_action_batch, bus_actions_batch, storge_actions_batch = [], [], []
        rewards_batch, next_obs_batch, bus_new_hidden_states_batch, storge_new_hidden_state_batch = [], [], [], []
        pref_batch, date_batch, step_count1_batch, is_extreme_batch, violate_constraint_batch = [], [], [], [], []

        for experience in batch:
            (obs, bus_hidden, storge_hidden, pre_bus_action, pre_storge_action, bus_action, storge_action,
             reward1, reward2, next_obs, bus_new_hidden, storge_new_hidden, pref, step_count1, date, is_extreme,
             violate_constraint) = experience

            # Per-bus hidden states are stored as a list of tensors; join them
            # along dim 0 so every sample contributes one stacked tensor.
            bus_hidden_change = torch.cat(bus_hidden, dim=0)
            bus_new_hidden_change = torch.cat(bus_new_hidden, dim=0)

            obs_batch.append(obs)
            bus_hidden_states_batch.append(bus_hidden_change)
            storge_hidden_states_batch.append(storge_hidden)
            pre_bus_action_batch.append(pre_bus_action)
            pre_storge_action_batch.append(pre_storge_action)
            bus_actions_batch.append(bus_action)
            storge_actions_batch.append(storge_action)
            rewards_batch.append([reward1, reward2])
            next_obs_batch.append(next_obs)
            bus_new_hidden_states_batch.append(bus_new_hidden_change)
            storge_new_hidden_state_batch.append(storge_new_hidden)
            pref_batch.append(pref)
            date_batch.append(date)
            step_count1_batch.append(step_count1)
            is_extreme_batch.append(is_extreme)
            violate_constraint_batch.append(violate_constraint)

        # Tensorise the numeric fields and return.
        # NOTE(review): the ``date`` entries come from ``train`` as (1, 1)
        # torch tensors; re-wrapping them with ``torch.tensor`` may warn or
        # fail depending on the torch version — verify.
        return (
            obs_batch,
            torch.stack(bus_hidden_states_batch),
            torch.stack(storge_hidden_states_batch),
            pre_bus_action_batch,
            pre_storge_action_batch,
            bus_actions_batch,
            storge_actions_batch,
            torch.tensor(rewards_batch, dtype=torch.float32),
            next_obs_batch,
            torch.stack(bus_new_hidden_states_batch),
            torch.stack(storge_new_hidden_state_batch),
            torch.tensor(pref_batch, dtype=torch.float32),
            torch.tensor(date_batch, dtype=torch.float32),
            torch.tensor(step_count1_batch, dtype=torch.float32),
            torch.tensor(is_extreme_batch, dtype=torch.float32),
            torch.tensor(violate_constraint_batch, dtype=torch.float32)
        )

    def update_non_dominated_set_and_probabilities(self, is_extreme):
        # 将回放缓冲区合并，并分批处理
        if is_extreme:
            latest_experience = list(self.extreme_replay_buffer)[-1440:]
        else:
            latest_experience = list(self.normal_replay_buffer)[-1440:]
        start_time20 = time.time()
        # 分批次更新非支配集合
        # print("未更新前非支配解集的长度是", len(self.non_dominated_set))
        for experience in latest_experience:
                obs, _, _, _, _, _, _, reward1, reward2, _, _, _, pref,_, _, _, _ = experience
                reward = np.array([reward1, reward2])
                new_solution = {'pref': pref, 'obs': obs, 'objectives': reward}
                self.update_non_dominated_set(new_solution)
        # print("更新后非支配解集的长度是", len(self.non_dominated_set))

        end_time20 = time.time()
        # print("更新非支配解共用时", end_time20 - start_time20)
        # 更新采样概率
        start_time21 = time.time()
        self.exploration_guide.update_probabilities(self.non_dominated_set)
        end_time21 = time.time()
        # print("更新偏好权重用时", end_time21 - start_time21)

    def update_non_dominated_set(self, new_solution):
        dominated = False
        to_remove = []

        # 遍历非支配解集合中的每个解
        for solution in self.non_dominated_set:
            current_objectives = solution['objectives']
            new_objectives = new_solution['objectives']

            # 检查支配关系
            if new_solution['obs'] == solution['obs']:
                if np.all(new_objectives >= current_objectives) and np.any(new_objectives > current_objectives):
                    # 如果 new_solution 支配 solution
                    to_remove.append(solution)
                elif np.all(current_objectives >= new_objectives) and np.any(current_objectives > new_objectives):
                    # 如果 solution 支配 new_solution
                    dominated = True
                    break

        # 如果新解没有被支配
        if not dominated:
            # 移除所有被 new_solution 支配的解
            for sol in to_remove:
                self.non_dominated_set.remove(sol)

            # 检查 non_dominated_set 中是否已存在等同于 new_solution 的解
            already_exists = any(
                np.array_equal(new_solution['objectives'], sol['objectives']) and new_solution['obs'] == sol['obs']
                for sol in self.non_dominated_set
            )

            # 如果不存在，则添加 new_solution
            if not already_exists:
                self.non_dominated_set.append(new_solution)

    def update_target_networks(self):
        """Hard-sync every target network with its online counterpart.

        Bug fix: the original hard-coded ``range(3)``, which silently skipped
        any bus network beyond the third and crashed with fewer than three.
        There is one online/target pair per bus line (``self.num_lines`` of
        them) plus one pair for the storage agent; ``zip`` covers them all
        regardless of count.
        """
        # Copy weights from each online bus network into its target twin.
        for online_net, target_net in zip(self.can_networks_bus, self.target_networks_bus):
            target_net.load_state_dict(online_net.state_dict())
        # The single storage agent has its own target network.
        self.target_networks_storge.load_state_dict(self.can_networks_storge.state_dict())

    def load_saved_weights(self):
        """Restore every network's weights and the optimizer state from disk.

        Files are expected in the working directory under the same fixed names
        used by ``train`` when saving. Missing files (or any other failure)
        are reported on stdout without raising, so training can proceed from
        scratch.
        """
        try:
            # Assemble (checkpoint path, module) pairs in load order:
            # online bus nets, storage net, mixing net, then target twins.
            checkpoints = []
            for i, net in enumerate(self.can_networks_bus):
                checkpoints.append((f'can_network_bus_{i}.pth', net))
            checkpoints.append(('can_network_storge.pth', self.can_networks_storge))
            checkpoints.append(('mom_network.pth', self.mom_network))
            for i, net in enumerate(self.target_networks_bus):
                checkpoints.append((f'target_network_bus_{i}.pth', net))
            checkpoints.append(('target_network_storge.pth', self.target_networks_storge))

            for saved_path, net in checkpoints:
                print(f"Loading {saved_path}")
                net.load_state_dict(torch.load(saved_path))

            # The optimizer state (moments, step counts) is restored last.
            optimizer_saved_path = 'optimizer_state_dict.pth'
            print(f"Loading optimizer state from {optimizer_saved_path}")
            self.optimizer.load_state_dict(torch.load(optimizer_saved_path))

            print("All weights and optimizer state loaded successfully.")
        except FileNotFoundError as e:
            print(f"Error: Some files not found. {e}")
        except Exception as ex:
            print(f"An unexpected error occurred: {ex}")


    def train(self, num_episodes, batch_size):
        """Main training loop over ``num_episodes`` environment episodes.

        Per episode: reset the environment to one day, roll the day out step
        by step (each bus agent and the storage agent greedily pick the action
        maximising preference-weighted Q-values), store every transition,
        call ``self.update`` at a dynamic interval, periodically hard-sync
        the target networks, refresh the Pareto archive and preference
        sampler, then log per-episode metrics. After all episodes, saves all
        network/optimizer weights and a CSV of the training curves.

        Returns:
            (loss_data, reward1_data, reward2_data, reward3_data,
             charge_fee_night_data) — one entry per episode.
        """
        self.batch_size = batch_size

        # Per-episode logging accumulators.
        loss_data = []
        reward1_data = []
        reward2_data = []
        reward3_data = []
        reward4_data = []
        all_reward1 = []
        all_reward2 = []
        charge_fee_night_data = []
        pv_self_use = []
        # Linear epsilon schedule; epsilon-greedy exploration is currently
        # disabled in the action-selection code below, so epsilon is unused.
        epsilon_start = 1.0
        epsilon_end = 0.01

        # Resume from previously saved checkpoints (prints an error and
        # continues from scratch if the files are missing).
        self.load_saved_weights()

        # Extreme-data sampling ratios for the two task heads.
        # extreme_ratio_normal = 0.1
        # extreme_ratio_extreme = 0.5
        extreme_ratio_normal = 0.2  # extreme-data share for the "normal" task head
        extreme_ratio_extreme = 0.8  # extreme-data share for the "extreme" task head

        for episode in range(num_episodes):
            epsilon = epsilon_start - (epsilon_start - epsilon_end) * (episode / num_episodes)
            pv_supply_daily, pv_power_daily, storge_charge_daily = 0, 0,  0
            start_time = time.time()
            print(f"第 {episode + 1} 轮开始运行: {start_time} 秒")
            obs, ini_action, current_date, is_extreme = self.env.reset(episode)
            print("训练日期", current_date, "是否是恶劣天气", is_extreme)
            # Collapse (year, month, day) into one scalar feature, then
            # broadcast it across the batch dimension.
            date = torch.tensor([current_date.year, current_date.month, current_date.day],
                                dtype=torch.float32).view(1, 3)  # year, month, day
            date = date.mean(dim=-1).view(1, 1)  # merged into a single value, shape (1, 1)
            date_change = date.repeat(self.batch_size, 1)  # tiled to (batch_size, 1)
            _, buses_tensor, storage_tensor, global_state = self.env.state_to_tensor(obs)
            buses_tensor = buses_tensor.to(device)
            storage_tensor = storage_tensor.to(device)
            # Sample the objective-weight preference for this episode.
            pref = self.exploration_guide.sample_preference()
            print("pref2", pref)
            pref_tensor = torch.tensor(pref, dtype=torch.float32).to(device)

            ini_bus_hidden_state = [None] * self.num_bus_agents
            ini_storge_hidden_state = None
            done = False
            step_count = 0
            charging_fee_night = 0
            reward1_text, reward2_text, reward3_text, reward4_text = [], [], [], []
            all_reward1_text, all_reward2_text = [], []
            loss1 = []
            date_tensor = date_change.to(device)
            violate_constraint_input = 0

            # Phase control: the staged "normal" -> "mixed" switch is
            # disabled; every episode trains in the mixed phase.
            # if self.training_phase == "normal" and episode >= 50:
            #     self.training_phase = "mixed"
            #     print("切换到混合训练阶段")

            self.training_phase = "mixed"
            print("切换到混合训练阶段")

            # Route this episode to the task head matching its weather type.
            if is_extreme:
                task_type = "extreme"
            else:
                task_type = "normal"

            '''00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'''
            # NOTE(review): this reads the module-level ``env`` rather than
            # ``self.env`` — confirm they are the same object, otherwise the
            # energy total below comes from the wrong environment.
            total_energy_consumption = sum(trip_info.get('energy_consumption', 0)
                                           for route in env.trip_weather_info
                                           for trip_info in env.trip_weather_info[route])
            print("训练日期", current_date, "总耗能", total_energy_consumption)


            while not done:
                start_time1 = time.time()
                bus_actions = []
                bus_hidden_states = []
                violate_constraint_input = torch.tensor(violate_constraint_input, dtype=torch.float32).to(device)

                # Greedy action selection for every bus agent.
                for agent_idx in range(self.num_bus_agents):
                    buses_tensor_change = buses_tensor[agent_idx].expand(self.batch_size, 6).to(device)
                    action_change = torch.tensor([ini_action[0][agent_idx]], dtype=torch.float32).expand(self.batch_size, 1).to(device)
                    pref_tensor_change = pref_tensor.expand(self.batch_size, 2)
                    date_tensor = date_tensor.expand(self.batch_size, 1)
                    step_count1 = torch.tensor(step_count).expand(self.batch_size, 1).to(device)
                    violate_constraint_input = violate_constraint_input.expand(self.batch_size, 1)
                    # Task-head routing: agents map onto at most three line
                    # networks, eight agents per network.
                    can_network = self.can_networks_bus[min(agent_idx // 8, 2)]
                    q_values, bus_hidden_state = can_network(True, date_tensor, buses_tensor_change,
                                                             action_change, pref_tensor_change,
                                                             violate_constraint_input,  step_count1,
                                                             ini_bus_hidden_state[agent_idx],
                                                             task_type=task_type)

                    q_values = q_values[0]
                    # Scalarise per-objective Q-values with the sampled preference.
                    expanded_pref_tensor = pref_tensor.unsqueeze(1).expand_as(q_values)
                    weighted_q_value = (q_values * expanded_pref_tensor).sum(dim=0)
                    # Epsilon-greedy exploration is disabled: always greedy.
                    # if np.random.rand() < epsilon:
                    #     action = np.random.choice(self.bus_action_dim)
                    # else:
                    #     action = weighted_q_value.argmax().item()
                    action = weighted_q_value.argmax().item()
                    bus_actions.append(action)
                    bus_hidden_states.append(bus_hidden_state)

                # Storage agent: same greedy scalarised-Q selection.
                storge_action_change = torch.tensor([ini_action[1]], dtype=torch.float32).expand(self.batch_size, 1).to(device)
                pref_tensor_change = pref_tensor.expand(self.batch_size, 2)
                date_tensor = date_tensor.expand(self.batch_size, 1)
                violate_constraint_input = violate_constraint_input.expand(self.batch_size, 1)

                storge_q_values, storge_hidden_state = self.can_networks_storge(True, date_tensor,
                                                                                storage_tensor.expand(self.batch_size, 2),
                                                                                storge_action_change,
                                                                                pref_tensor_change,
                                                                                violate_constraint_input,step_count1,
                                                                                ini_storge_hidden_state,
                                                                                task_type=task_type)  # pass the task-head type
                storge_q_values = storge_q_values[0]
                expanded_pref_tensor = pref_tensor.unsqueeze(1).expand_as(storge_q_values)
                storge_weighted_q_value = (storge_q_values * expanded_pref_tensor).sum(dim=0)
                storge_action = storge_weighted_q_value.argmax().item()

                # Step the environment with the joint action.
                (next_obs, reward1, reward2, reward_3, reward_1, reward_2, done, charging_fee, fina_bus_actions,
                 fina_storge_action, violate_constraint, pv_supply, storge_charge_energy, pv_power, reward_4) = self.env.step(obs, ini_action, bus_actions, storge_action, episode)

                reward1_text.append(reward1)
                reward2_text.append(reward2)
                reward3_text.append(reward_3)
                reward4_text.append(reward_4)
                all_reward1_text.append(reward1)
                all_reward2_text.append(reward2)

                pv_supply_daily += pv_supply
                pv_power_daily += pv_power
                storge_charge_daily += storge_charge_energy

                # Skip the very first step (no recurrent state yet) so every
                # stored transition carries a valid hidden state.
                if ini_storge_hidden_state is not None:
                    self.store_experience(obs, ini_bus_hidden_state, ini_storge_hidden_state, ini_action[0], ini_action[1],
                                          fina_bus_actions, fina_storge_action, reward_1, reward_2,
                                          next_obs, bus_hidden_states, storge_hidden_state, pref, step_count, date, is_extreme, violate_constraint)

                # Roll state/action/hidden-state forward for the next step.
                obs = next_obs
                _, buses_tensor, storage_tensor, global_state = self.env.state_to_tensor(obs)
                buses_tensor = buses_tensor.to(device)
                storage_tensor = storage_tensor.to(device)
                ini_action = (fina_bus_actions, fina_storge_action)
                ini_bus_hidden_state = bus_hidden_states
                ini_storge_hidden_state = storge_hidden_state
                violate_constraint_input = violate_constraint

                step_count += 1

                # Dynamic update interval: train less often as episodes grow
                # (stabilised model needs fewer updates; capped at 720 steps).
                dynamic_interval = min(720, 240 + 10 * episode)
                # dynamic_interval = 360
                if len(list(self.extreme_replay_buffer) + list(self.normal_replay_buffer)) > self.batch_size and step_count % dynamic_interval == 0:

                    start_time2 = time.time()
                    # The named task head is the one being trained on this
                    # update slot; the extreme head takes most slots.
                    task_type1 = "normal" if step_count / dynamic_interval == 4 else "extreme"
                    loss2 = self.update(False, extreme_ratio_normal, extreme_ratio_extreme, batch_size, task_type1)
                    loss1.append(loss2)
                    end_time2 = time.time()
                    execution_time2 = end_time2 - start_time2
                    print(f"第 {step_count / dynamic_interval} 次更新阶段的运行时间: {execution_time2} 秒")

                if done:
                    charging_fee_night = charging_fee

            start_time3 = time.time()

            # Periodic hard sync of the target networks.
            if episode % self.update_frequency == 0:
                self.update_target_networks()
                end_time3 = time.time()
                execution_time3 = end_time3 - start_time3
                print(f"第 {episode / self.update_frequency} 次更新目标网络的运行时间: {execution_time3} 秒")

            start_time4 = time.time()

            buffer_length = len(list(self.extreme_replay_buffer) + list(self.normal_replay_buffer))
            print("经验池总长度是", buffer_length)
            self.update_non_dominated_set_and_probabilities(is_extreme)
            end_time4 = time.time()
            execution_time4 = end_time4 - start_time4
            print(f"第 {episode + 1} 次更新非支配解集和采样概率的运行时间: {execution_time4} 秒")

            # (Dynamic growth of the extreme sampling ratios is disabled.)
            # if self.training_phase == "mixed" and episode % 5 == 0:
            #     extreme_ratio_normal = min(extreme_ratio_normal + 0.05, 0.2)
            #     extreme_ratio_extreme = min(extreme_ratio_extreme + 0.1, 0.8)

            # Aggregate per-episode statistics for logging.
            loss3 = np.mean(loss1) if loss1 else 0
            reward1_all = np.sum(reward1_text)
            reward2_all = np.sum(reward2_text)
            reward3_all = np.sum(reward3_text)
            reward4_all = np.sum(reward4_text)
            reward11_all = np.sum(all_reward1_text)
            reward22_all = np.sum(all_reward2_text)
            end_time = time.time()
            execution_time = end_time - start_time
            print(f"第 {episode + 1} 轮的运行时间: {execution_time} 秒")
            print(f"第 {episode + 1} 轮, 平均损失: {loss3}, 总奖励1（日间充电费用）: {reward1_all}, 总奖励2（日间充电碳排放）: {reward2_all}, "
                  f"惩罚函数3： {reward3_all}, 夜间充电成本：{charging_fee_night}, "
                  f"公交车所用光伏数：{pv_supply_daily- storge_charge_daily }, 储能光伏所用数：{storge_charge_daily}, 光伏发电量：{pv_power_daily}光伏自用率：{pv_supply_daily   / pv_power_daily}")
            loss_data.append(loss3)
            reward1_data.append(reward1_all)
            reward2_data.append(reward2_all)
            reward3_data.append(reward3_all)
            reward4_data.append(reward4_all)
            all_reward1.append(reward11_all)
            all_reward2.append(reward22_all)
            charge_fee_night_data.append(charging_fee_night)
            # NOTE(review): this self-use ratio includes storage charging,
            # while the printout above uses pv_supply_daily alone — confirm
            # which definition of "PV self-use" is intended.
            pv_self_use.append((pv_supply_daily +storge_charge_daily)  / pv_power_daily)

        print("训练结束，开始保存网络参数")
        # Persist every network's weights and the optimizer state.

        for i, can_network in enumerate(self.can_networks_bus):
            torch.save(can_network.state_dict(), f'can_network_bus_{i}.pth')

        torch.save(self.can_networks_storge.state_dict(), 'can_network_storge.pth')
        torch.save(self.mom_network.state_dict(), 'mom_network.pth')

        for i, target_network in enumerate(self.target_networks_bus):
            torch.save(target_network.state_dict(), f'target_network_bus_{i}.pth')

        torch.save(self.target_networks_storge.state_dict(), 'target_network_storge.pth')
        torch.save(self.optimizer.state_dict(), 'optimizer_state_dict.pth')

        print("参数保存成功")

        self.save_training_results(loss_data, reward1_data, reward2_data, reward3_data,  reward4_data, all_reward1, all_reward2, charge_fee_night_data, pv_self_use)

        return loss_data, reward1_data, reward2_data, reward3_data, charge_fee_night_data

    def save_training_results(self, loss_data, reward1_data, reward2_data, reward3_data, reward4_data, all_reward1, all_reward2, charge_fee_night_data, pv_self_use):
        # 创建保存结果的文件夹
        output_folder = 'training_results_go_on'
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

        # 将数据组织为字典
        data = {
            'Episode': list(range(1, len(loss_data) + 1)),
            'Loss': loss_data,
            'Reward1': reward1_data,
            'Reward2': reward2_data,
            'Reward3': reward3_data,
            'Reward4': reward4_data,
            'Reward1_all': all_reward1,
            'Reward2_all': all_reward2,
            'Charge_fee_night': charge_fee_night_data,
            "pv_self_use": pv_self_use
        }

        # 转换为 DataFrame 并保存为 CSV
        df = pd.DataFrame(data)
        csv_file_path = os.path.join(output_folder, 'training_results.csv')
        df.to_csv(csv_file_path, index=False)

        print(f'Training results saved to {csv_file_path}')

    def update(self, u, extreme_ratio_normal, extreme_ratio_extreme, batch_size, task_type="normal"):
        """Run one multi-objective TD-learning step over a sampled mini-batch.

        Samples experiences from the replay buffer, computes per-agent
        multi-objective Q-values (bus agents plus one storage agent) with
        Double-DQN style action selection, mixes them into a total Q via the
        mixing network, and optimizes a per-objective L1 TD loss.

        Args:
            u: auxiliary input forwarded unchanged to every agent network
                (semantics defined by the network module — not visible here).
            extreme_ratio_normal: sampling ratio forwarded to
                ``sample_experiences`` (used for the "normal" task type).
            extreme_ratio_extreme: sampling ratio forwarded to
                ``sample_experiences`` (used for the "extreme" task type).
            batch_size (int): number of experiences to sample.
            task_type (str): forwarded to the sampler and the agent networks;
                defaults to "normal".

        Returns:
            float: the scalar TD-loss value of this update step.
        """
        # Sample a batch of experiences from the replay buffer (the sampler
        # mixes normal/extreme experiences according to the given ratios).
        obs_batch, bus_hidden_states_batch, storge_hidden_states_batch, pre_bus_action_batch, pre_storge_action_batch, \
            bus_actions_batch, storge_actions_batch, rewards_batch, next_obs_batch, \
            bus_new_hidden_state_batch, storge_new_hidden_state_batch, pref_batch, step_batch,\
            date_batch, is_extreme_batch,  violate_constraint_batch = self.sample_experiences(batch_size, extreme_ratio_normal, extreme_ratio_extreme, task_type)

        # Move the sampled preference vectors, rewards and auxiliary flags to
        # the compute device.
        rewards_batch = rewards_batch.to(device)
        pref_batch = pref_batch.to(device)
        date_batch = date_batch.to(device)
        step_batch = step_batch.to(device)
        violate_constraint_batch = violate_constraint_batch.to(device)

        # Convert the current observations into per-component tensors.
        environment_tensor_batch, buses_tensor_batch, storage_tensor_batch, global_state_batch = self.env.state_to_tensor(
            obs_batch)
        buses_tensor_batch = buses_tensor_batch.to(device)
        storage_tensor_batch = storage_tensor_batch.to(device)
        global_state_batch = global_state_batch.to(device)

        # Same conversion for the next observations (target-side inputs).
        environment_tensor_target_batch, buses_tensor_target_batch, storage_tensor_target_batch, global_state_target_batch = self.env.state_to_tensor(
            next_obs_batch)
        buses_tensor_target_batch = buses_tensor_target_batch.to(device)
        storage_tensor_target_batch = storage_tensor_target_batch.to(device)
        global_state_target_batch = global_state_target_batch.to(device)

        # Convert action arrays to tensors once here, instead of re-creating
        # them inside the per-agent loop.
        pre_bus_action_batch_tensor = torch.tensor(pre_bus_action_batch).to(device)
        bus_actions_batch_tensor = torch.tensor(bus_actions_batch).to(device)
        pre_storge_action_batch_tensor = torch.tensor(pre_storge_action_batch).to(device)
        storge_actions_batch_tensor = torch.tensor(storge_actions_batch).to(device)

        q_values_batch = []
        q_values_target_batch = []
        bus_q_values_batch = []
        # Update every bus agent.
        for agent_idx in range(self.num_bus_agents):
            # Current Q-values for this agent.
            batch_action_change = pre_bus_action_batch_tensor[:, agent_idx].unsqueeze(1)
            # NOTE(review): the three squeeze/unsqueeze calls below are
            # loop-invariant (idempotent after the first iteration) and could
            # be hoisted out of the agent loop.
            date_batch = date_batch.squeeze().unsqueeze(1)
            step_batch = step_batch.squeeze().unsqueeze(1)
            violate_constraint_batch = violate_constraint_batch.squeeze().unsqueeze(1)

            # Pick the shared network for this agent's group
            # (index agent_idx // 8 while agent_idx <= 16, otherwise 2).
            can_network = self.can_networks_bus[(agent_idx // 8) if agent_idx <= 16 else 2]
            q_values_ini, _ = can_network(u, date_batch, buses_tensor_batch[:, agent_idx, :], batch_action_change,
                                          pref_batch, step_batch, violate_constraint_batch, bus_hidden_states_batch[0][agent_idx].unsqueeze(0), task_type=task_type )

            # Gather the Q-values (one per objective) of the action taken.
            action_batch_temporary = bus_actions_batch_tensor[:, agent_idx].unsqueeze(1).unsqueeze(2).repeat(1, 2, 1)
            q_values = q_values_ini.gather(2, action_batch_temporary).squeeze(2)
            q_values_batch.append(q_values)
            bus_q_values_batch.append(q_values_ini)

            # Double-DQN: pick the next-state greedy action with the online
            # network, then evaluate it with the target network.
            next_batch_action_change = bus_actions_batch_tensor[:, agent_idx].unsqueeze(1)

            # NOTE(review): this next-state forward pass reuses the *current*
            # hidden state (bus_hidden_states_batch) while the target-network
            # call below uses bus_new_hidden_state_batch — possibly
            # intentional, but it looks inconsistent; confirm which hidden
            # state is meant here.
            next_q_values_w_1, _ = can_network(u, date_batch, buses_tensor_target_batch[:, agent_idx, :],
                                                  next_batch_action_change, pref_batch,step_batch, violate_constraint_batch,
                                               bus_hidden_states_batch[0][agent_idx].unsqueeze(0), task_type=task_type)

            q_values_transposed = next_q_values_w_1.permute(0, 2, 1)  # Shape: (batch_size, action_dim, num_objectives)
            weighted_q_values = torch.matmul(q_values_transposed, pref_batch.unsqueeze(2)).squeeze(2)  # Shape: (batch_size, action_dim)
            max_actions = weighted_q_values.argmax(dim=1)  # Shape: (batch_size,)
            expanded_max_actions = max_actions.unsqueeze(1).unsqueeze(2).repeat(1, next_q_values_w_1.size(1), 1)

            target_network = self.target_networks_bus[(agent_idx // 8) if agent_idx <= 16 else 2]
            next_q_values_w, _ = target_network(u, date_batch, buses_tensor_target_batch[:, agent_idx, :],
                                                next_batch_action_change, pref_batch, step_batch, violate_constraint_batch,
                                                bus_new_hidden_state_batch[0][agent_idx].unsqueeze(0), task_type=task_type)
            selected_q_values = next_q_values_w.gather(2, expanded_max_actions).squeeze(
                2)  # Shape: (batch_size, num_objectives)
            q_values_target_batch.append(selected_q_values)

        combined_bus_q_values_batch = torch.stack(bus_q_values_batch, dim=1).to(device)

        # The storage agent is updated the same way as the bus agents.
        storge_batch_action_change = pre_storge_action_batch_tensor.unsqueeze(1)

        storge_q_values_ini, _ = self.can_networks_storge(u, date_batch, storage_tensor_batch,
                                                          storge_batch_action_change, pref_batch,step_batch,
                                                          violate_constraint_batch, storge_hidden_states_batch, task_type=task_type)
        storge_action_batch_temporary = storge_actions_batch_tensor.unsqueeze(1).unsqueeze(2).repeat(1, 2, 1).to(device)
        q_values = storge_q_values_ini.gather(2, storge_action_batch_temporary).squeeze(2)
        q_values_batch.append(q_values)

        next_storge_batch_action_change = storge_actions_batch_tensor.unsqueeze(1)

        # Double-DQN action selection for the storage agent's next state.
        next_q_values_w_1, _ = self.can_networks_storge(u, date_batch, storage_tensor_target_batch,
                                                        next_storge_batch_action_change, pref_batch,step_batch,
                                                        violate_constraint_batch, storge_new_hidden_state_batch, task_type=task_type)
        q_values_transposed = next_q_values_w_1.permute(0, 2, 1)  # Shape: (batch_size, action_dim, num_objectives)
        weighted_q_values = torch.matmul(q_values_transposed, pref_batch.unsqueeze(2)).squeeze(
            2)  # Shape: (batch_size, action_dim)
        max_actions = weighted_q_values.argmax(dim=1)  # Shape: (batch_size,)
        expanded_max_actions = max_actions.unsqueeze(1).unsqueeze(2).repeat(1, next_q_values_w_1.size(1), 1)

        next_q_values_w, _ = self.target_networks_storge(u, date_batch, storage_tensor_target_batch,
                                                         next_storge_batch_action_change, pref_batch,step_batch,
                                                         violate_constraint_batch, storge_new_hidden_state_batch, task_type=task_type)
        selected_q_values = next_q_values_w.gather(2, expanded_max_actions).squeeze(2)
        q_values_target_batch.append(selected_q_values)

        combined_q_values_batch = torch.stack(q_values_batch, dim=1).to(device)
        combined_q_values_target_batch = torch.stack(q_values_target_batch, dim=1).to(device)

        # Mix the per-agent chosen-action Q-values into q_tot.
        q_tot = self.mom_network(combined_q_values_batch, global_state_batch)  # (batch_size, num_objectives)

        # Target-side q_tot from next-state inputs.
        y_tot = self.mom_network(combined_q_values_target_batch,
                                 global_state_target_batch)  # (batch_size, num_objectives)
        # NOTE(review): no terminal-state (done) masking is applied to the
        # bootstrap term, and the target path is not wrapped in
        # torch.no_grad() — confirm both are intentional. `discount_facto`
        # looks like a typo for `discount_factor` but must match the
        # attribute name defined elsewhere in this class.
        td_targets = rewards_batch + self.discount_facto * y_tot  # (batch_size, num_objectives)

        # Per-objective L1 TD loss. NOTE(review): the original comment claimed
        # importance-sampling weights are applied here, but no such weighting
        # is visible in this code path.
        td_loss = self.adaptive_weighted_loss(q_tot, td_targets)
        td_loss = td_loss.mean()

        # Backpropagate and step the optimizer.
        # NOTE(review): retain_graph=True plus zero_grad() *after* step() is
        # unconventional — gradients are cleared for the next call, which
        # works, but confirm the retained graph is actually reused.
        td_loss.backward(retain_graph=True)
        self.optimizer.step()
        self.optimizer.zero_grad()

        return td_loss.item()


    def adaptive_weighted_loss(self, q_t, q_target):
        """Sum of per-objective L1 (MAE) losses between current and target Q.

        Fixes the previous docstring, which documented `preference` and
        `normalize` parameters that do not exist, and generalizes the
        implementation from a hard-coded 2-objective case to any number of
        objective columns. For 2 objectives the result is identical to the
        old ``loss_1 + loss_2``.

        Args:
            q_t (Tensor): current mixed Q-values, shape [batch, num_objectives].
            q_target (Tensor): TD-target Q-values, same shape as ``q_t``.

        Returns:
            Tensor: scalar loss — the sum over objectives of the batch-mean
            absolute error.
        """
        # Element-wise absolute error, averaged over the batch dimension to
        # give one L1 loss per objective column, then summed across objectives
        # (equals loss_1 + loss_2 for the original 2-objective case).
        per_objective_loss = F.l1_loss(q_t, q_target, reduction='none').mean(dim=0)
        return per_objective_loss.sum()


# Initialize the environment.
env = ElectricBusChargingEnv()

# Train MO-MIX. The module-level `device` defined at the top of the file is
# reused here; the former duplicate re-definition at this point was redundant
# and has been removed.
momix = M3RL(env, max_size=10000, num_agents=25, num_objectives=2, global_obs_dim=151, bus_obs_dim=6, storge_obs_dim=2, history_action_dim=1, bus_action_dim=4, storge_action_dim=2, hidden_dim=128, lr=0.001, batch_size=16)
momix.train(num_episodes=500, batch_size=16)
