# NOTE: The commented-out block below (through the first QMixer) is an earlier
# experimental variant that encoded the global state with a 3D convolution and
# a spatial pyramid pooling (SPP) layer before mixing. It is kept for
# reference only; the active implementation starts after it.
# import torch as th
# import torch.nn as nn
# import torch.nn.functional as F
# import numpy as np
# import math
#
# # state computation
# class ConvNet3D1(nn.Module):
#     def __init__(self):
#         super(ConvNet3D1, self).__init__()
#         # 3D卷积层1
#         self.conv1 = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(1,6,6), padding=0)
#         # 批次标准化层1
#         self.bn1 = nn.BatchNorm3d(num_features=1)
#         # 激活函数层1
#         self.relu1 = nn.ReLU(inplace=True)
#         # # 最大池化层
#         # self.pool = nn.MaxPool3d(kernel_size=(1,5,5))
#         # 空间金字塔池化层
#         self.pyramid_pooling = SPPLayer(5)
#         # 全连接层
#         self.fc = nn.Linear(in_features=55, out_features=55)
#     def forward(self, x):
#         # 前向传播
#         # 3D卷积
#         x = self.conv1(x)
#         # 批次标准化
#         x = self.bn1(x)
#         # 激活函数
#         x = self.relu1(x)
#         # 最大池化
#         # x = self.pool(x)
#         # 空间金字塔池化
#         x = self.pyramid_pooling(x)
#         # 展平张量
#         x = x.view(-1, 55)
#         # 全连接层（分类层）
#         x = self.fc(x)
#         return x
#
#
# # 构建SPP层(空间金字塔池化层)
# class SPPLayer(th.nn.Module):
#
#     def __init__(self, num_levels, pool_type='max_pool'):
#         super(SPPLayer, self).__init__()
#         self.num_levels = num_levels
#         self.pool_type = pool_type
#
#     def forward(self, x):
#         num, c, d,h, w = x.size() # num:样本数量 c:通道数d:深度 h:高 w:宽
#         for i in range(self.num_levels):
#             level=i+1
#             kernel_size = (1,math.ceil(h / level), math.ceil(w / level))
#             stride = (1,math.ceil(h / level), math.ceil(w / level))
#             pooling = (0,math.ceil((math.ceil(h / level)*level-w)/2), math.ceil((math.ceil(h / level)*level-w)/2))
#
#             # 选择池化方式
#             if self.pool_type == 'max_pool':
#                 tensor = F.max_pool3d(x, kernel_size=kernel_size, stride=stride,padding=pooling).view(num, -1)
#             else:
#                 tensor = F.avg_pool3d(x, kernel_size=kernel_size, stride=stride,padding=pooling).view(num, -1)
#
#             # 展开、拼接
#             if (i== 0):
#                 x_flatten = tensor.view(num, d,-1)
#             else:
#                 x_flatten = th.cat((x_flatten, tensor.view(num, d,-1)), 2)
#         return x_flatten
#
# class QMixer(nn.Module):
#     def __init__(self, args):
#         super(QMixer, self).__init__()
#
#         self.args = args
#         self.n_agents = args.n_agents
#         self.state_dim = int(np.prod(args.state_shape))
#
#         self.embed_dim = args.mixing_embed_dim  #32
#
#         if getattr(args, "hypernet_layers", 1) == 1:
#             self.hyper_w_1 = nn.Linear(55, self.embed_dim * self.n_agents)
#             self.hyper_w_final = nn.Linear(55, self.embed_dim)
#         elif getattr(args, "hypernet_layers", 1) == 2:
#             hypernet_embed = self.args.hypernet_embed
#             self.hyper_w_1 = nn.Sequential(nn.Linear(55, hypernet_embed),
#                                            nn.ReLU(),
#                                            nn.Linear(hypernet_embed, self.embed_dim * 8))
#             self.hyper_w_final = nn.Sequential(nn.Linear(55, hypernet_embed),
#                                            nn.ReLU(),
#                                            nn.Linear(hypernet_embed, self.embed_dim))
#         elif getattr(args, "hypernet_layers", 1) > 2:
#             raise Exception("Sorry >2 hypernet layers is not implemented!")
#         else:
#             raise Exception("Error setting number of hypernet layers.")
#
#         # State dependent bias for hidden layer
#         self.hyper_b_1 = nn.Linear(55, self.embed_dim)
#
#         # V(s) instead of a bias for the last layers
#         self.V = nn.Sequential(nn.Linear(55, self.embed_dim),
#                                nn.ReLU(),
#                                nn.Linear(self.embed_dim, 1))
#         self.conv1=SPPLayer(5)
#
#     def forward(self, agent_qs, states):
#
#         bs = agent_qs.size(0)
#         # self.state_dim = states.shape[2]
#         # 对states进行变换
#         states = states.transpose(0, 1).unsqueeze(1)
#         states = states.unsqueeze(4)
#         states = states.expand([states.shape[0], states.shape[1], states.shape[2],
#                                 states.shape[3], states.shape[3]])
#         # states=th.tensor(np.tile(states,self.state_dim))
#         # conv1=ConvNet3D1().cuda()
#         states = self.conv1(states)  # (batch_size,steps,60)
#
#         agent_qs = agent_qs.view(-1, 1, self.n_agents)
#         # agent_qs1=0.9*agent_qs
#         # agent_qs2=1.1*agent_qs
#         # agent_qs=th.cat((agent_qs1,agent_qs,agent_qs2),dim=2)
#         # agent_qs=agent_qs[:,:,:-1]
#         # agent_qs_tuple=th.chunk(agent_qs,2,dim=0)
#         # agent_qs_new=0.5*agent_qs_tuple[0]+0.5*agent_qs_tuple[1]
#         # agent_qs=th.cat((agent_qs_tuple[0],agent_qs_new,agent_qs_tuple[1]),dim=2)
#         # First layer
#         w1 = th.abs(self.hyper_w_1(states))
#         b1 = self.hyper_b_1(states)
#         w1 = w1.view(-1, 8, self.embed_dim)
#         b1 = b1.view(-1, 1, self.embed_dim)
#         hidden = F.elu(th.bmm(agent_qs, w1) + b1)
#         # Second layer
#         w_final = th.abs(self.hyper_w_final(states))
#         w_final = w_final.view(-1, self.embed_dim, 1)
#         # State-dependent bias
#         v = self.V(states).view(-1, 1, 1)
#         # Compute final output
#         y = th.bmm(hidden, w_final) + v
#         # Reshape and return
#         q_tot = y.view(bs, -1, 1)
#         return q_tot

import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class QMixer(nn.Module):
    """QMIX mixing network.

    Combines per-agent Q-values into a joint Q_tot conditioned on the global
    state. The mixing weights are produced by state-conditioned hypernetworks
    and passed through abs(), which keeps Q_tot monotonic in every agent's
    individual Q-value.
    """

    def __init__(self, args):
        """Build the hypernetworks.

        ``args`` must provide ``n_agents``, ``state_shape`` and
        ``mixing_embed_dim``; ``hypernet_layers`` is optional (default 1) and
        ``hypernet_embed`` is required only when ``hypernet_layers == 2``.
        """
        super(QMixer, self).__init__()

        self.args = args
        self.n_agents = args.n_agents
        # Flattened size of the global state vector.
        self.state_dim = int(np.prod(args.state_shape))
        self.embed_dim = args.mixing_embed_dim

        n_hyper_layers = getattr(args, "hypernet_layers", 1)
        if n_hyper_layers == 1:
            # Single linear hypernetworks: state -> mixing weights.
            self.hyper_w_1 = nn.Linear(self.state_dim, self.embed_dim * self.n_agents)
            self.hyper_w_final = nn.Linear(self.state_dim, self.embed_dim)
        elif n_hyper_layers == 2:
            hidden = self.args.hypernet_embed
            self.hyper_w_1 = nn.Sequential(
                nn.Linear(self.state_dim, hidden),
                nn.ReLU(),
                nn.Linear(hidden, self.embed_dim * self.n_agents),
            )
            self.hyper_w_final = nn.Sequential(
                nn.Linear(self.state_dim, hidden),
                nn.ReLU(),
                nn.Linear(hidden, self.embed_dim),
            )
        elif n_hyper_layers > 2:
            raise Exception("Sorry >2 hypernet layers is not implemented!")
        else:
            raise Exception("Error setting number of hypernet layers.")

        # State-dependent bias for the hidden mixing layer.
        self.hyper_b_1 = nn.Linear(self.state_dim, self.embed_dim)

        # V(s): state-dependent scalar used instead of a bias on the output.
        self.V = nn.Sequential(
            nn.Linear(self.state_dim, self.embed_dim),
            nn.ReLU(),
            nn.Linear(self.embed_dim, 1),
        )

    def forward(self, agent_qs, states):
        """Mix per-agent Q-values into Q_tot.

        agent_qs: (batch, T, n_agents) individual Q-values.
        states:   tensor whose trailing dimensions flatten to ``state_dim``.
        Returns:  (batch, T, 1) joint Q-values.
        """
        batch = agent_qs.size(0)
        flat_states = states.reshape(-1, self.state_dim)
        qs = agent_qs.view(-1, 1, self.n_agents)

        # Hidden layer: abs() on hypernet output enforces monotonicity.
        w_hidden = th.abs(self.hyper_w_1(flat_states)).view(-1, self.n_agents, self.embed_dim)
        b_hidden = self.hyper_b_1(flat_states).view(-1, 1, self.embed_dim)
        h = F.elu(th.bmm(qs, w_hidden) + b_hidden)

        # Output layer: non-negative weights plus the state value V(s).
        w_out = th.abs(self.hyper_w_final(flat_states)).view(-1, self.embed_dim, 1)
        v = self.V(flat_states).view(-1, 1, 1)

        q_tot = (th.bmm(h, w_out) + v).view(batch, -1, 1)
        return q_tot