class SmilesTransformer(paddle.nn.Layer):
    """Encoder-decoder Transformer for SMILES sequence-to-sequence modeling.

    Embeds source and target token ids with two (frozen) embedding tables,
    runs them through ``paddle.nn.Transformer``, and projects the decoder
    output to logits over the output vocabulary.

    Args:
        emb_dim: Embedding / model dimension (``d_model``).
        n_head: Number of attention heads.
        num_encoder_layers: Number of encoder layers.
        num_decoder_layers: Number of decoder layers.
        dim_feedforward: Hidden size of the position-wise FFN.
        encoder_lib_size: Source vocabulary size.
        output_lib_size: Target vocabulary size.
        max_len: Sequence length used to precompute the causal mask.
    """

    def __init__(self,
                 emb_dim=512,
                 n_head=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 encoder_lib_size=5538,
                 output_lib_size=389,
                 max_len=24):
        super().__init__()

        # Source-vocabulary embedding table. Frozen (trainable=False);
        # NOTE(review): no initializer is supplied here, so unless pretrained
        # weights are loaded by name ("embedding") elsewhere, this freezes a
        # randomly-initialized table — confirm the loading path.
        src_emb_attr = paddle.ParamAttr(
            name="embedding",
            trainable=False,
        )
        self.emb_scr = paddle.nn.Embedding(
            num_embeddings=encoder_lib_size,
            embedding_dim=emb_dim,
            weight_attr=src_emb_attr,
        )

        # Target-vocabulary embedding table, frozen like the source one.
        tgt_emb_attr = paddle.ParamAttr(
            name="embedding1",
            trainable=False,
        )
        self.emb_tgt = paddle.nn.Embedding(
            num_embeddings=output_lib_size,
            embedding_dim=emb_dim,
            weight_attr=tgt_emb_attr,
        )

        self.transformer = paddle.nn.Transformer(d_model=emb_dim,
                                                 nhead=n_head,
                                                 num_encoder_layers=num_encoder_layers,
                                                 num_decoder_layers=num_decoder_layers,
                                                 dim_feedforward=dim_feedforward,
                                                 dropout=0.1,
                                                 activation='relu',
                                                 attn_dropout=None,
                                                 act_dropout=None,
                                                 normalize_before=False,
                                                 weight_attr=None,
                                                 bias_attr=None,
                                                 custom_encoder=None,
                                                 custom_decoder=None)
        # Causal (upper-triangular) mask precomputed for fixed-length inputs;
        # assumes every batch has sequence length == max_len.
        self.mask = self.transformer.generate_square_subsequent_mask(max_len)

        # Final projection from model dimension to output-vocabulary logits.
        self.fc = paddle.nn.Linear(emb_dim, output_lib_size)

    def forward(self, scr, tgt):
        """Return logits of shape [batch, tgt_len, output_lib_size].

        Args:
            scr: Source token ids, int tensor of shape [batch, max_len].
            tgt: Target token ids, int tensor of shape [batch, max_len].
        """
        scr = self.emb_scr(scr)
        tgt = self.emb_tgt(tgt)
        # NOTE(review): the causal mask is passed as src_mask, tgt_mask AND
        # memory_mask. Causal masking of the encoder self-attention and of the
        # decoder-to-memory attention is unusual for seq2seq — confirm this is
        # intentional rather than only tgt_mask being causal.
        x = self.transformer(scr, tgt, self.mask, self.mask, self.mask)
        out = self.fc(x)
        return out

    def excloud_amp_layer(self):
        # Layers to keep in float32 under automatic mixed precision.
        # NOTE(review): name is a typo for "exclude_amp_layer(s)" but is kept
        # because external callers may reference it.
        return [self.emb_scr, self.emb_tgt]