import torch
import torch.nn
from torch import nn
from torch.autograd import Variable


class AWNetwork(torch.nn.Module):
    """Two-branch adaptive-weighting network.

    Branch "a" consumes a 6-feature input and branch "b" a 19-feature
    input.  Each branch produces an opinion (oa / ob) and a confidence
    weight in [0, 1] (wa / wb); the final output is the weighted
    average of the two opinions.
    """

    def __init__(self, hiddena=20, hiddenb=20):
        """
        Args:
            hiddena: hidden width of the opinion head for branch "a".
            hiddenb: hidden width of the opinion head for branch "b".
        """
        super(AWNetwork, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        # Opinion head for branch "a": 6 -> hiddena -> 1.
        self.fc_oa1 = nn.Linear(6, hiddena)
        self.fc_oa2 = nn.Linear(hiddena, 1)
        # Weight head for branch "a": 6 -> 10 -> 1.
        self.fc_wa1 = nn.Linear(6, 10)
        self.fc_wa2 = nn.Linear(10, 1)
        # Opinion head for branch "b": 19 -> hiddenb -> 1.
        self.fc_ob1 = nn.Linear(19, hiddenb)
        self.fc_ob2 = nn.Linear(hiddenb, 1)
        # Weight head for branch "b": 19 -> 10 -> 1.
        self.fc_wb1 = nn.Linear(19, 10)
        self.fc_wb2 = nn.Linear(10, 1)

    def forward(self, Xa, Xb):
        """Compute per-branch opinions, weights, and their combination.

        Args:
            Xa: tensor of shape (batch, 6) for branch "a" — assumed;
                fixed by the ``nn.Linear(6, ...)`` input width.
            Xb: tensor of shape (batch, 19) for branch "b".

        Returns:
            Tuple ``(oa, wa, ob, wb, o)``: opinions, weights in
            [0, 1], and the weighted-average output ``o``.
        """
        oa = self.fc_oa1(Xa)
        oa = self.relu(oa)
        oa = torch.squeeze(self.fc_oa2(oa))

        ob = self.fc_ob1(Xb)
        ob = self.relu(ob)
        ob = torch.squeeze(self.fc_ob2(ob))

        wa = self.fc_wa1(Xa)
        wa = self.relu(wa)
        wa = self.fc_wa2(wa)
        wa = self.tanh(wa)
        # Map tanh output from [-1, 1] into a weight in [0, 1].
        wa = (1 + wa) / 2
        wa = torch.squeeze(wa)

        wb = self.fc_wb1(Xb)
        wb = self.relu(wb)
        wb = self.fc_wb2(wb)
        wb = self.tanh(wb)
        # BUG FIX: the original computed (1 + wa) / 2 here (copy-paste
        # error), silently discarding the "b" weight head's output.
        wb = (1 + wb) / 2
        wb = torch.squeeze(wb)

        # BUG FIX: the original `oa * wa + ob * wb / (wa + wb)` divided
        # only the second term due to operator precedence; the weighted
        # average needs the whole numerator over (wa + wb).
        o = (oa * wa + ob * wb) / (wa + wb)
        return oa, wa, ob, wb, o


class AWNetworkPlus(torch.nn.Module):
    """Adaptive-weighting network whose weight heads also see the opinion.

    Like ``AWNetwork``, but each weight head receives the branch input
    concatenated with that branch's opinion (hence input widths 7 and
    20 instead of 6 and 19).
    """

    def __init__(self, hiddena=20, hiddenb=20):
        """
        Args:
            hiddena: hidden width of the opinion head for branch "a".
            hiddenb: hidden width of the opinion head for branch "b".
        """
        super(AWNetworkPlus, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        # Opinion head for branch "a": 6 -> hiddena -> 1.
        self.fc_oa1 = nn.Linear(6, hiddena)
        self.fc_oa2 = nn.Linear(hiddena, 1)
        # Weight head for branch "a": (6 input features + 1 opinion) -> 10 -> 1.
        self.fc_wa1 = nn.Linear(7, 10)
        self.fc_wa2 = nn.Linear(10, 1)
        # Opinion head for branch "b": 19 -> hiddenb -> 1.
        self.fc_ob1 = nn.Linear(19, hiddenb)
        self.fc_ob2 = nn.Linear(hiddenb, 1)
        # Weight head for branch "b": (19 input features + 1 opinion) -> 10 -> 1.
        self.fc_wb1 = nn.Linear(20, 10)
        self.fc_wb2 = nn.Linear(10, 1)

    def forward(self, Xa, Xb):
        """Compute per-branch opinions, opinion-aware weights, and output.

        Args:
            Xa: tensor of shape (batch, 6) for branch "a" — assumed;
                fixed by the ``nn.Linear(6, ...)`` input width.
            Xb: tensor of shape (batch, 19) for branch "b".

        Returns:
            Tuple ``(oa, wa, ob, wb, o)``: opinions, weights in
            [0, 1], and the weighted-average output ``o``.
        """
        oa = self.fc_oa1(Xa)
        oa = self.relu(oa)
        oa = self.fc_oa2(oa)

        ob = self.fc_ob1(Xb)
        ob = self.relu(ob)
        ob = self.fc_ob2(ob)

        # Weight heads condition on both the raw input and the opinion.
        wa = self.fc_wa1(torch.cat([Xa, oa], dim=1))
        wa = self.relu(wa)
        wa = self.fc_wa2(wa)
        wa = self.tanh(wa)
        # Map tanh output from [-1, 1] into a weight in [0, 1].
        wa = (1 + wa) / 2
        wa = torch.squeeze(wa)

        wb = self.fc_wb1(torch.cat([Xb, ob], dim=1))
        wb = self.relu(wb)
        wb = self.fc_wb2(wb)
        wb = self.tanh(wb)
        # BUG FIX: the original computed (1 + wa) / 2 here (copy-paste
        # error), silently discarding the "b" weight head's output.
        wb = (1 + wb) / 2
        wb = torch.squeeze(wb)

        # Opinions are kept 2-D above so torch.cat(dim=1) works;
        # squeeze them only after the weight heads have consumed them.
        oa = torch.squeeze(oa)
        ob = torch.squeeze(ob)

        # BUG FIX: the original `oa * wa + ob * wb / (wa + wb)` divided
        # only the second term due to operator precedence; the weighted
        # average needs the whole numerator over (wa + wb).
        o = (oa * wa + ob * wb) / (wa + wb)
        return oa, wa, ob, wb, o

