import pickle
import torch
import torch.nn as nn
from transformers import AutoTokenizer , AutoModel
import os
import numpy as np

def get_root_dir(path):
    """Return the prefix of *path* up to and including the "featurelib" component.

    Raises ValueError (from list.index) if "featurelib" is not a path component.
    """
    parts = path.split(os.path.sep)
    cut = parts.index("featurelib") + 1
    return os.path.sep.join(parts[:cut])

class TransformersEmbedder(nn.Module):
    """Thin nn.Module wrapper around a HuggingFace AutoModel backbone."""

    def __init__(self, transformer_model_name):
        super(TransformersEmbedder, self).__init__()
        # Only the final hidden states are needed downstream, so the full
        # per-layer hidden states are disabled.
        self.model = AutoModel.from_pretrained(
            transformer_model_name,
            output_hidden_states=False,
            return_dict=True,
        )

    def forward(self, subword_input_ids: torch.Tensor, attention_mask):
        # Delegate directly to the underlying transformer model.
        return self.model(input_ids=subword_input_ids,
                          attention_mask=attention_mask)

class BertForTextClassification(nn.Module):
    """Transformer encoder followed by dropout and a linear classification head."""

    def __init__(self, embedder_type, label2idx):
        super(BertForTextClassification, self).__init__()
        self.transformer = TransformersEmbedder(transformer_model_name=embedder_type)
        hidden_size = self.transformer.model.config.hidden_size
        # One output unit per label.
        self.fc = nn.Linear(hidden_size, len(label2idx))
        self.dropout = nn.Dropout(0.5)

    def forward(self, subword_input_ids, attention_mask):
        encoder_outputs = self.transformer(subword_input_ids, attention_mask)
        # Index 1 of the encoder output is the pooled sequence representation.
        pooled = self.dropout(encoder_outputs[1])
        return self.fc(pooled)

class SmsEnLoan0V1ClfModel:
    """Inference wrapper for the sms_en_loan0_v1 BERT text classifier.

    Loads a fine-tuned BERT checkpoint from the feature-conf directory and
    exposes `predict`, which maps raw SMS strings to one of nine label names.
    """

    def __init__(self):
        # Fix all RNG seeds so inference is reproducible across runs.
        np.random.seed(42)
        torch.manual_seed(42)
        torch.cuda.manual_seed_all(42)
        torch.backends.cudnn.deterministic = True
        # torch.set_num_threads(16)

        self.root_dir = get_root_dir(os.path.abspath("."))
        self.conf_dir = os.path.join(
            self.root_dir, "feature_conf", "sms", "un", "sms_en_loan0_v1"
        )
        self.model_conf_dir = os.path.join(self.conf_dir, "model_conf")
        self.state_dict_path = os.path.join(self.model_conf_dir, "BertCLF.m")

        # Label names in index order; label2idx is derived from idx2labels so
        # the two mappings can never drift out of sync.
        self.idx2labels = ['col', 'ovd', 'ntf', 'ad', 'loan', 'repy', 'ver', 'otr', 'apf']
        self.label2idx = {label: idx for idx, label in enumerate(self.idx2labels)}

        self.embedder_type = os.path.join(self.model_conf_dir, 'bert-pretrained')
        self.tokenizer = AutoTokenizer.from_pretrained(self.embedder_type, add_prefix_space=True, use_fast=True)
        self.model = BertForTextClassification(self.embedder_type, self.label2idx)
        self.device = 'cpu'
        # Reuse self.state_dict_path rather than re-building the same path
        # with an inline f-string (the original duplicated the path here).
        # NOTE(review): strict=False silently ignores missing/unexpected keys
        # in the checkpoint — confirm it really matches the architecture.
        self.model.load_state_dict(torch.load(self.state_dict_path, map_location=self.device), strict=False)
        self.model.eval()

    def get_dataloader(self, msg_list, pad_size=128, batch_size=128):
        """Tokenize *msg_list* into a list of batched tensor encodings.

        Each element is a BatchEncoding with `input_ids` and `attention_mask`
        tensors, padded per batch and truncated to *pad_size* tokens.
        """
        dataloader = []
        for start in range(0, len(msg_list), batch_size):
            batch_msg = msg_list[start:start + batch_size]
            encoding = self.tokenizer(batch_msg,
                                      is_split_into_words=False,
                                      padding=True,
                                      truncation=True,
                                      max_length=pad_size,
                                      return_tensors='pt')
            dataloader.append(encoding)
        return dataloader

    def predict(self, msg_list):
        """Classify each message in *msg_list*; return a list of label strings."""
        dataloader = self.get_dataloader(msg_list)
        pred_labels = []
        # Inference only: no_grad skips autograd bookkeeping, saving memory
        # and time (the original tracked gradients unnecessarily).
        with torch.no_grad():
            for batch in dataloader:
                input_ids = batch.input_ids.to(self.device)
                attention_mask = batch.attention_mask.to(self.device)
                logits = self.model(input_ids, attention_mask)
                for pred in logits.argmax(dim=1).tolist():
                    pred_labels.append(self.idx2labels[pred])
        return pred_labels