# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
"""
#*****************************************************************************   网 络 搭 建 部 分   ******************************************************************************************************************************
"""

class MLP(nn.Module):
    """Three-layer fully connected network with ReLU activations.

    Maps (batch_size, input_dim) -> (batch_size, output_dim) through two
    hidden layers of width hidden_dim; no activation on the output layer.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(MLP, self).__init__()
        # Two hidden layers followed by a linear read-out.
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Forward pass: fc1 -> ReLU -> fc2 -> ReLU -> fc3."""
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)


class GRUNet(nn.Module):
    """Single-layer GRU followed by a linear read-out of the last time step.

    Args:
        input_dim: feature size of each input time step.
        hidden_dim: GRU hidden-state size.
        output_dim: size of the final linear projection.
        batch_size: batch size of the persistent zero hidden state ``hn``
            that is used whenever ``u`` is falsy in :meth:`forward`.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, batch_size):
        super(GRUNet, self).__init__()
        self.gru = nn.GRU(input_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.batch_size = batch_size
        self.hidden_dim = hidden_dim
        # Kept for backward compatibility with code that reads self.device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # FIX: register the persistent zero state as a buffer so it follows
        # the module across .to()/.cuda() calls and is saved in state_dict.
        # The previous plain attribute stayed on the construction-time device
        # forever, crashing (device mismatch) after the model was moved.
        self.register_buffer("hn", torch.zeros(1, self.batch_size, self.hidden_dim))

    def forward(self, u, x, h):
        """Run the GRU over ``x`` and project the last time step.

        Args:
            u: flag selecting the hidden state; truthy -> use ``h``
               (initialized to zeros when None), falsy -> restart from the
               persistent zero state ``self.hn``.
            x: input of shape (batch_size, seq_len, input_dim).
            h: optional hidden state of shape (num_layers, batch_size, hidden_dim).

        Returns:
            Tuple ``(out, h1)`` where ``out`` has shape
            (batch_size, output_dim) and ``h1`` is the new hidden state.
        """
        if h is None:
            # FIX: create the fresh state on x's device (not the init-time
            # self.device) so the module works correctly after being moved.
            h = torch.zeros(1, x.size(0), self.hidden_dim, device=x.device)
        if u:  # idiomatic truth test (was `u == True`)
            out, h1 = self.gru(x, h)
        else:
            # Ignore the caller-supplied state and restart from zeros.
            out, h1 = self.gru(x, self.hn)
        # Project only the last time step: (batch_size, output_dim).
        out = self.fc(out[:, -1, :])
        return out, h1


# Multi-task agent network with per-objective task heads.
class ConditionedAgentNetwork(nn.Module):
    """Agent network: shared trunk plus per-objective multi-task heads.

    The trunk concatenates the input features, passes them through an MLP,
    a GRU (temporal context) and a second MLP. Two separate head lists are
    kept — "normal" and "extreme" task regimes — with one MLP head per
    objective in each.
    """

    def __init__(self, obs_dim, history_action_dim, action_dim, hidden_dim, num_objectives, batch_size):
        super(ConditionedAgentNetwork, self).__init__()
        self.num_objectives = num_objectives

        # Shared trunk. The extra +2+1+1 widths presumably correspond to the
        # date (2), preference (1) and constraint-violation (1) features
        # concatenated in forward() — TODO confirm against the caller.
        self.mlp_obs = MLP(obs_dim + history_action_dim + 2 + 1 + 1, hidden_dim, hidden_dim)
        self.gru = GRUNet(hidden_dim, hidden_dim, hidden_dim, batch_size)
        self.mlp_obs2 = MLP(hidden_dim, hidden_dim, hidden_dim)

        # One head per objective for each task regime.
        self.normal_task_head = nn.ModuleList(
            [MLP(hidden_dim, hidden_dim, action_dim) for _ in range(num_objectives)]
        )
        self.extreme_task_head = nn.ModuleList(
            [MLP(hidden_dim, hidden_dim, action_dim) for _ in range(num_objectives)]
        )

    def forward(self, u, date, obs, action, preference, violate_constraint, hidden_state=None, task_type="normal"):
        """Return per-objective Q-values and the updated GRU hidden state.

        Returns:
            Tuple ``(stacked_q_values, hidden_state)`` where the first item
            has shape (batch_size, num_objectives, action_dim).
        """
        features = torch.cat([date, obs, action, preference, violate_constraint], dim=-1)
        trunk = self.mlp_obs(features)
        trunk = trunk.unsqueeze(1)  # add a seq_len dimension for the GRU
        trunk, hidden_state = self.gru(u, trunk, hidden_state)
        trunk = self.mlp_obs2(trunk)
        trunk = trunk.squeeze(1)

        # Select the head set for the requested task regime.
        heads = self.normal_task_head if task_type == "normal" else self.extreme_task_head

        # One (batch_size, action_dim) tensor per objective.
        per_objective_q = [head(trunk) for head in heads]

        # -> (batch_size, num_objectives, action_dim)
        stacked_q_values = torch.stack(per_objective_q, dim=1)

        return stacked_q_values, hidden_state


class MultiObjectiveMixingNetwork(nn.Module):
    """QMIX-style mixing network with one hypernetwork set per objective.

    For each objective, the per-agent Q-values are mixed in two stages whose
    weights and biases are produced by hypernetworks conditioned on the
    global state. Both stages' weights are passed through ``abs`` so the
    mixed value is monotone in each agent's Q-value.
    """

    def __init__(self, num_agents, num_objectives, global_state_dim, hidden_dim):
        super(MultiObjectiveMixingNetwork, self).__init__()
        self.num_agents = num_agents
        self.num_objectives = num_objectives

        # Stage-1 weight generators: global_state -> one weight per agent.
        self.hypernetworks_w1 = nn.ModuleList(
            [nn.Linear(global_state_dim, num_agents) for _ in range(num_objectives)]
        )
        # Stage-2 weight generators, same shape as stage 1.
        self.hypernetworks_w2 = nn.ModuleList(
            [nn.Linear(global_state_dim, num_agents) for _ in range(num_objectives)]
        )
        # Stage-1 bias generators: one bias per agent.
        self.hypernetworks_b1 = nn.ModuleList(
            [nn.Sequential(nn.Linear(global_state_dim, num_agents)) for _ in range(num_objectives)]
        )
        # Stage-2 scalar bias via a small two-layer network.
        self.hypernetworks_b2 = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Linear(global_state_dim, hidden_dim),
                    nn.ReLU(),
                    nn.Linear(hidden_dim, 1),
                )
                for _ in range(num_objectives)
            ]
        )

    def forward(self, q_values, global_state):
        """Mix per-agent Q-values into one total per objective.

        Args:
            q_values: tensor sliced as ``q_values[:, :, obj]`` below, i.e.
                indexed [batch, agent, objective] — assumed from the slicing;
                TODO confirm against the caller.
            global_state: (batch, global_state_dim) conditioning input.

        Returns:
            Tensor of shape (batch, num_objectives), one mixed Q per objective.
        """
        mixed_totals = []
        for idx in range(self.num_objectives):
            # Generate this objective's mixing parameters; abs keeps the
            # mixing weights non-negative (monotonicity constraint).
            w1 = torch.abs(self.hypernetworks_w1[idx](global_state))
            b1 = self.hypernetworks_b1[idx](global_state)
            w2 = torch.abs(self.hypernetworks_w2[idx](global_state))
            b2 = self.hypernetworks_b2[idx](global_state)

            agent_q = q_values[:, :, idx]  # (batch, num_agents)

            # Stage 1: elementwise affine transform per agent.
            stage1 = agent_q * w1 + b1
            # Stage 2: weighted sum over agents plus scalar bias.
            stage2 = (stage1 * w2).sum(dim=1, keepdim=True) + b2
            mixed_totals.append(stage2)

        # Stack all objectives along the last dimension.
        return torch.cat(mixed_totals, dim=-1)
