import torch
from torch import nn
from params import params_dict

class Embedding_Block(nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        """
        Embedding module: maps predicate ids to dense embedding vectors.

        :param vocab_size: number of predicates in the vocabulary
        :param embedding_dim: length of each embedding vector
        """
        super(Embedding_Block, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)

    def forward(self, x):
        """Look up and return the embedding for each predicate id in x."""
        return self.embedding(x)


class Implicit_Logic_Encoder(nn.Module):
    def __init__(self, embedding_dim, n_head, num_layers):
        """
        Stack of Transformer encoder layers over the embedded predicates.

        :param embedding_dim: dimension of the input embeddings (d_model)
        :param n_head: number of attention heads per layer
        :param num_layers: number of stacked self-attention layers
        """
        super(Implicit_Logic_Encoder, self).__init__()
        # NOTE(review): 'emdedding_dim' is a misspelling of 'embedding_dim';
        # kept as-is because external code may read this attribute.
        self.emdedding_dim = embedding_dim
        self.n_head = n_head
        self.num_layers = num_layers
        layer = nn.TransformerEncoderLayer(d_model=embedding_dim, nhead=n_head)
        self.encoder = layer
        self.transformerencoder = nn.TransformerEncoder(layer, num_layers=num_layers)
        # self.gru = nn.GRU(self.emdedding_dim, self.emdedding_dim, batch_first=True)

    def forward(self, x):
        """Run the input through the Transformer encoder stack."""
        # x, _ = self.gru(x)  # GRU path currently disabled
        return self.transformerencoder(x)


class Logic_Or(nn.Module):
    def __init__(self, v_vector_size, is_train=True):
        """
        Learned logical OR over pairs of vectors, implemented as a small
        MLP applied to the concatenation of the two operands.

        :param v_vector_size: dimension of each operand vector
        :param is_train: training flag (stored; not read by this module)
        """
        super(Logic_Or, self).__init__()
        self.v_vector_size = v_vector_size
        self.is_train = is_train
        width = v_vector_size * 2
        self.or_layer = nn.Sequential(
            nn.Linear(width, width),
            nn.ReLU(True),
            nn.Dropout(p=0.1),
            nn.Linear(width, v_vector_size)
        )

    def _uniform_size(self, vector1, vector2):
        """Expand the lower-rank operand so both tensors share a shape."""
        if len(vector1.size()) < len(vector2.size()):
            return vector1.expand_as(vector2), vector2
        return vector1, vector2.expand_as(vector1)

    def _logic_or(self, vector1, vector2):
        """
        Apply the learned OR: concatenate both operands along the last
        axis and project back down to v_vector_size.

        :param vector1: first operand tensor
        :param vector2: second operand tensor
        :return: combined tensor with last dimension v_vector_size
        """
        a, b = self._uniform_size(vector1, vector2)
        return self.or_layer(torch.cat((a, b), dim=-1))

    def forward(self, vector1, vector2):
        """Return the learned OR of the two operand vectors."""
        return self._logic_or(vector1, vector2)


class ISRG(nn.Module):
    def __init__(self, vocab_size, embedding_dim, n_head, num_layers, v_vector_size, is_train=True):
        """
        Full model: embeds predicate ids, then combines every non-empty
        subset of the first 9 predicate vectors with the learned logic-OR.

        :param vocab_size: number of predicates (embedding vocabulary size)
        :param embedding_dim: dimension of the embedding vectors
        :param n_head: number of attention heads in the (currently unused) encoder
        :param num_layers: number of self-attention layers
        :param v_vector_size: dimension of the logic vectors
        :param is_train: whether the model is in training mode
        """
        super(ISRG, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.n_head = n_head
        self.num_layers = num_layers
        self.v_vector_size = v_vector_size
        self.is_train = is_train
        self.Embedding = Embedding_Block(self.vocab_size, self.embedding_dim)
        self.Implicit_Logic_Encoder = Implicit_Logic_Encoder(self.embedding_dim, self.n_head, self.num_layers)
        self.Logic_Or = Logic_Or(self.v_vector_size, self.is_train)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.Linear1 = nn.Linear(params_dict['input_dim'], params_dict['hidden_dim']).to(params_dict['device'])
        self.Linear2 = nn.Linear(params_dict['hidden_dim'], params_dict['output_dim']).to(params_dict['device'])

    def forward(self, x):
        """
        :param x: LongTensor of predicate ids, shape (batch, num_predicates)
        :return: tensor of shape (batch, 2**9 - 1, embedding_dim), one row
                 per non-empty subset of the first 9 predicate embeddings
        """
        x = self.Embedding(x)
        # x = self.Implicit_Logic_Encoder(x)
        batch_s, v_num, _ = x.size()
        # NOTE(review): subset generation is hard-coded to the first 9
        # predicates even though the input provides v_num of them —
        # confirm this is intentional (using v_num would change the
        # output's second dimension).
        combinations_list = self._free_combination(9)
        # Fold each subset left-to-right through the learned OR, collect
        # the results, and stack once. The original seeded the output with
        # a random placeholder, torch.cat'ed inside the loop, and sliced
        # the placeholder off — then wrapped the result in torch.tensor(),
        # which detaches gradients and copies. Building a list and
        # stacking yields the identical values without either problem.
        outputs = []
        for num_tuple in combinations_list:
            v = x[:, num_tuple[0], :]
            for i in range(1, len(num_tuple)):
                v = self.Logic_Or(v, x[:, num_tuple[i], :])
            outputs.append(v)
        return torch.stack(outputs, dim=1)

    def _free_combination(self, num):
        """Return all non-empty subsets (as index tuples) of range(num)."""
        from itertools import combinations
        indices = list(range(num))
        # range(1, num + 1) covers every subset size. For num <= 9 this is
        # identical to the original hard-coded range(1, 10), because
        # combinations() yields nothing when k exceeds num; for num > 9 it
        # no longer silently truncates at size 9.
        return [c for k in range(1, num + 1) for c in combinations(indices, k)]



if __name__ == '__main__':
    # Smoke test: 24 samples, each with 10 predicate ids drawn from [1, 256).
    sample = torch.randint(1, 256, (24, 10))
    model = ISRG(256, 64, 4, 2, 64)
    result = model(sample)
    print(result.shape)
