import torch.nn as nn
import torch.nn.functional as F
import torch
from transformers import RobertaModel
class M3Net(nn.Module):
    """Two-modality interaction classifier.

    Fuses a ChemBERTa embedding of a SMILES string with a precomputed
    protein embedding, projects both into a shared hidden space, and
    classifies the concatenated representation with an MLP head.

    Args:
        prot_dim: dimensionality of the incoming protein embedding.
        smile_dim: hidden size of the ChemBERTa pooler output.
        hidden_dim: shared projection size for both modalities.
        output_dim: number of output classes.
        dropout: dropout probability for the classifier head.
    """

    def __init__(self, prot_dim=1024, smile_dim=768, hidden_dim=256, output_dim=2, dropout=0.1):
        super(M3Net, self).__init__()
        # Pretrained molecular encoder; forward() runs it under no_grad,
        # so it acts as a frozen feature extractor.
        self.mol_encoder = RobertaModel.from_pretrained("models/seyonec/ChemBERTa-zinc-base-v1")
        self.smile_linear = nn.Linear(smile_dim, hidden_dim)
        self.prot_linear = nn.Linear(prot_dim, hidden_dim)
        # Fuses the concatenated (smile, protein) projections back to hidden_dim.
        self.embedding = nn.Linear(hidden_dim * 2, hidden_dim)
        self.classifier = nn.Sequential(
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.BatchNorm1d(hidden_dim * 2),
            nn.ReLU(),
            # FIX: was hard-coded 0.1, silently ignoring the `dropout` arg.
            nn.Dropout(dropout),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, output_dim),
        )
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, smile_token, prot_emb):
        """Classify a (SMILES, protein) pair.

        Args:
            smile_token: tokenizer output for the SMILES string; unpacked as
                keyword arguments into the molecular encoder.
            prot_emb: protein embedding tensor of shape (batch, prot_dim)
                -- presumably precomputed upstream; verify against caller.

        Returns:
            Logits tensor of shape (batch, output_dim).
        """
        # Frozen encoder: the SMILES embedding carries no gradients.
        with torch.no_grad():
            smile_emb = self.mol_encoder(**smile_token).pooler_output
        # BUG FIX: the original re-ran the encoder here and overwrote
        # smile_emb with the full model-output object (not a tensor),
        # which breaks the Linear projection below. The pooled tensor
        # from the no_grad pass above is the intended input.
        smile_emb = self.smile_linear(smile_emb)
        prot_emb = self.prot_linear(prot_emb)
        fused = torch.cat((smile_emb, prot_emb), dim=1)
        fused = self.embedding(fused)
        # (Removed a redundant .view(batch, -1): the Linear output is
        # already 2-D.)
        return self.classifier(fused)