import torch
import torch.nn as nn   

class Bert(nn.Module):
    """Thin wrapper exposing a backbone transformer's token embeddings.

    Forwards all keyword arguments to ``base_model`` and returns its
    ``last_hidden_state`` tensor unchanged.
    """

    def __init__(self, base_model):
        super(Bert, self).__init__()
        self.base_model = base_model
        # Mirror the backbone's config so callers can inspect it directly.
        self.config = base_model.config

    def forward(self, **inputs):
        """Run the backbone; return its final-layer hidden states."""
        encoded = self.base_model(**inputs)
        return encoded.last_hidden_state
    
class Bert_FNN(nn.Module):
    """BERT backbone plus a feed-forward head classifying from the [CLS] token.

    Args:
        base_model: Pretrained transformer whose output exposes
            ``last_hidden_state`` of shape (batch, seq_len, hidden_size).
        num_classes: Number of output classes.
    """

    def __init__(self, base_model, num_classes: int):
        super(Bert_FNN, self).__init__()
        self.base_model = base_model
        self.num_classes = num_classes
        self.size = self.base_model.config.hidden_size  # 768 for bert-base
        # BUGFIX: Dropout previously came *after* the output projection,
        # which randomly zeroed class logits during training.  It now
        # regularizes the hidden features before the final projection.
        self.cls = nn.Sequential(
            nn.Linear(self.size, self.size),
            nn.Dropout(0.2),
            nn.Linear(self.size, num_classes),
        )

    def forward(self, **inputs):
        """Return (batch, num_classes) logits from the [CLS] position."""
        raw_outputs = self.base_model(**inputs)
        # Position 0 is the [CLS] token for BERT-style tokenization.
        cls_feats = raw_outputs.last_hidden_state[:, 0, :]
        return self.cls(cls_feats)


class Bert_GRU(nn.Module):
    """BERT backbone followed by a single-layer GRU and an MLP classifier."""

    def __init__(self, base_model, num_classes: int):
        super(Bert_GRU, self).__init__()
        self.base_model = base_model
        self.num_classes = num_classes
        self.size = self.base_model.config.hidden_size  # 768 for bert-base
        self.GRU = nn.GRU(
            input_size=self.size,
            hidden_size=self.size,
            num_layers=1,
            batch_first=True,
        )
        # Funnel-shaped head: hidden -> hidden -> hidden/2 -> num_classes.
        self.cls = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(self.size, self.size),
            nn.Linear(self.size, self.size // 2),
            nn.Linear(self.size // 2, self.num_classes),
        )

    def forward(self, **inputs):
        """Classify from the GRU output at the final timestep."""
        encoded = self.base_model(**inputs)
        sequence, _ = self.GRU(encoded.last_hidden_state)
        # NOTE(review): the last timestep may be a padding position when
        # sequences are right-padded — confirm against tokenizer settings.
        final_step = sequence[:, -1, :]
        return self.cls(final_step)
    
class Bert_RNN(nn.Module):
    """BERT backbone followed by a single-layer vanilla RNN and an MLP head."""

    def __init__(self, base_model, num_classes: int):
        super(Bert_RNN, self).__init__()
        self.base_model = base_model
        self.num_classes = num_classes
        self.size = self.base_model.config.hidden_size  # 768 for bert-base
        self.RNN = nn.RNN(
            input_size=self.size,
            hidden_size=self.size,
            num_layers=1,
            batch_first=True,
        )
        # Funnel-shaped head: hidden -> hidden -> hidden/2 -> num_classes.
        self.cls = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(self.size, self.size),
            nn.Linear(self.size, self.size // 2),
            nn.Linear(self.size // 2, self.num_classes),
        )

    def forward(self, **inputs):
        """Classify from the RNN output at the final timestep."""
        encoded = self.base_model(**inputs)
        sequence, _ = self.RNN(encoded.last_hidden_state)
        # NOTE(review): the last timestep may be a padding position when
        # sequences are right-padded — confirm against tokenizer settings.
        final_step = sequence[:, -1, :]
        return self.cls(final_step)
    
class Bert_LSTM(nn.Module):
    """BERT backbone followed by a single-layer LSTM and an MLP classifier."""

    def __init__(self, base_model, num_classes: int):
        super(Bert_LSTM, self).__init__()
        self.base_model = base_model
        self.num_classes = num_classes
        self.size = self.base_model.config.hidden_size  # 768 for bert-base
        self.LSTM = nn.LSTM(
            input_size=self.size,
            hidden_size=self.size,
            num_layers=1,
            batch_first=True,
        )
        # Funnel-shaped head: hidden -> hidden -> hidden/2 -> num_classes.
        self.cls = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(self.size, self.size),
            nn.Linear(self.size, self.size // 2),
            nn.Linear(self.size // 2, self.num_classes),
        )

    def forward(self, **inputs):
        """Classify from the LSTM output at the final timestep."""
        encoded = self.base_model(**inputs)
        sequence, _ = self.LSTM(encoded.last_hidden_state)
        # NOTE(review): the last timestep may be a padding position when
        # sequences are right-padded — confirm against tokenizer settings.
        final_step = sequence[:, -1, :]
        return self.cls(final_step)
    
class Bert_BiLSTM(nn.Module):
    """BERT backbone followed by a bidirectional LSTM and an MLP classifier.

    The BiLSTM output at each timestep concatenates forward and backward
    hidden states, so the head's input width is ``2 * hidden_size``.
    """

    def __init__(self, base_model, num_classes: int):
        super(Bert_BiLSTM, self).__init__()
        self.base_model = base_model
        self.num_classes = num_classes
        self.size = self.base_model.config.hidden_size  # 768 for bert-base
        self.BiLSTM = nn.LSTM(
            input_size=self.size,
            hidden_size=self.size,
            num_layers=1,
            batch_first=True,
            bidirectional=True,
        )
        # Funnel-shaped head starting from the concatenated 2*hidden width.
        self.cls = nn.Sequential(
            nn.Dropout(0.1),
            nn.Linear(self.size * 2, self.size * 2),
            nn.Linear(self.size * 2, self.size),
            nn.Linear(self.size, self.size // 2),
            nn.Linear(self.size // 2, self.num_classes),
        )

    def forward(self, **inputs):
        """Classify from the BiLSTM output at the final timestep."""
        encoded = self.base_model(**inputs)
        sequence, _ = self.BiLSTM(encoded.last_hidden_state)
        # NOTE(review): at the last timestep the backward direction has only
        # seen one token (its first step) — confirm this pooling is intended.
        final_step = sequence[:, -1, :]
        return self.cls(final_step)
    
if __name__ == '__main__':
    # Smoke test: push one training batch through Bert_GRU and print the
    # logits, predictions, and cross-entropy loss.
    from data import get_data
    from transformers import AutoModel, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("D:/hugging_face/models/bert-base-uncased")
    model = AutoModel.from_pretrained("D:/hugging_face/models/bert-base-uncased")
    train_dataloader, eval_dataloader = get_data(
        tokenizer, datasets_path="D:/datasets/imdb", train_batch_size=2
    )
    gru = Bert_GRU(model, 2)
    criterion = nn.CrossEntropyLoss()
    for batch in train_dataloader:
        # Copy so popping the labels does not mutate the loader's batch.
        inputs = dict(batch)
        labels = inputs.pop('labels')
        outputs = gru(**inputs)
        print(f'{outputs= }')
        print(f'{outputs.argmax(-1)= }')
        loss = criterion(outputs, labels)
        print(f'{loss= }')
        break  # one batch is enough for a smoke test