from torch.autograd import Function
from nn import *

class ReverseLayerF(Function):
    """Gradient Reversal Layer (GRL).

    Acts as the identity on the forward pass; on the backward pass it
    negates the incoming gradient and scales it by ``alpha``. Commonly
    used for adversarial / domain-adaptation training (DANN-style).
    """

    @staticmethod
    def forward(ctx, x, alpha):
        """Return ``x`` unchanged, stashing ``alpha`` for backward."""
        ctx.alpha = alpha
        return x

    @staticmethod
    def backward(ctx, grad_output):
        """Reverse and scale the gradient.

        Returns one gradient per forward input: ``-alpha * grad`` for
        ``x`` and ``None`` for the non-tensor ``alpha``.
        """
        return -ctx.alpha * grad_output, None


class ClassifierModel(nn.Module):
    """Adversarial classifier head over RNN hidden states.

    Pipeline: average-pool the RNN outputs over time, project down to
    ``rnn_hidden``, reverse gradients (so the upstream encoder is trained
    adversarially), run a small MLP, and emit 2-class logits.

    ``arg`` must provide ``rnn_hidden`` (int) and ``alpha`` (gradient
    reversal strength, read at forward time so it can be annealed).
    """

    def __init__(self, arg):
        super(ClassifierModel, self).__init__()
        self.config = arg
        # Input is 2 * rnn_hidden — presumably a bidirectional RNN's
        # concatenated directions; TODO confirm against the encoder.
        self.Linear = nn.Linear(arg.rnn_hidden * 2, arg.rnn_hidden, True)
        self.MLP = MLP(
            input_size=arg.rnn_hidden,
            layer_size=arg.rnn_hidden,
            depth=1,
            activation='LeakyReLU',
            dropout=False,
        )
        # Two-class logits, no bias.
        self.output = nn.Linear(arg.rnn_hidden, 2, False)

    def forward(self, rnn_hidden, len):
        """Score pooled RNN states; returns (batch, 2) logits.

        NOTE(review): parameter ``len`` shadows the builtin; kept as-is
        so keyword callers are not broken. It is forwarded to
        ``avg_pooling`` — presumably per-sequence lengths for masked
        mean pooling; verify against that helper.
        """
        hidden = avg_pooling(rnn_hidden, len)
        # Fix: call the module, not .forward(), so nn.Module.__call__
        # runs (hooks, etc.) — calling .forward() directly bypasses them.
        hidden = self.Linear(hidden)
        # Reverse gradients flowing back into the shared encoder.
        hidden = ReverseLayerF.apply(hidden, self.config.alpha)
        mlp_hidden = self.MLP(hidden)
        score = self.output(mlp_hidden)
        return score