### Competitor SMS sub-category classification model

import os
import re

import numpy as np
import torch
import torch.nn as nn
from transformers import AutoModel, AutoTokenizer

def get_root_dir(path):
    """Return the prefix of *path* up to and including the "featurelib" component.

    Raises ValueError (from list.index) if "featurelib" is not a path component.
    """
    parts = path.split(os.path.sep)
    cut = parts.index("featurelib") + 1
    return os.path.sep.join(parts[:cut])


class TransformersEmbedder(nn.Module):
    """Thin wrapper that loads a HuggingFace encoder and exposes its forward pass."""

    def __init__(self, transformer_model_name):
        super(TransformersEmbedder, self).__init__()
        # Only the final hidden state is needed; ask for a ModelOutput
        # (return_dict=True) rather than a plain tuple.
        self.model = AutoModel.from_pretrained(
            transformer_model_name,
            output_hidden_states=False,
            return_dict=True,
        )

    def forward(self, subword_input_ids: torch.Tensor, attention_mask):
        """Run the wrapped transformer and return its full output object."""
        return self.model(
            input_ids=subword_input_ids, attention_mask=attention_mask
        )


class BertForTextClassification(nn.Module):
    """Transformer encoder + dropout + linear head for sentence classification."""

    def __init__(self, embedder_type, label2idx):
        super(BertForTextClassification, self).__init__()
        self.transformer = TransformersEmbedder(transformer_model_name=embedder_type)
        hidden_size = self.transformer.model.config.hidden_size
        self.fc = nn.Linear(hidden_size, len(label2idx))
        self.dropout = nn.Dropout(0.5)

    def forward(self, subword_input_ids, attention_mask):
        """Return unnormalized class logits, one row per input sequence."""
        encoded = self.transformer(subword_input_ids, attention_mask)
        # Index 1 of the encoder output is the pooled sequence representation.
        pooled = self.dropout(encoded[1])
        return self.fc(pooled)


class SmsEsLoan0V1ClfModel:
    """Competitor-SMS sub-category classifier (Spanish "es" / Mexican "mx").

    Loads an albert-tiny-spanish tokenizer/encoder plus a fine-tuned
    classification head and predicts one of eight message categories per SMS.
    """

    def __init__(self, country_type="es"):
        """Load tokenizer and weights, then place the model on the best device.

        country_type: "es" loads BertCLF.m; any other value loads BertCLF_mx.m.
        """
        # Pin every RNG so any stochastic op behaves reproducibly.
        np.random.seed(42)
        torch.manual_seed(42)
        torch.cuda.manual_seed_all(42)
        torch.backends.cudnn.deterministic = True
        torch.set_num_threads(8)
        self.root_dir = get_root_dir(os.path.abspath("."))
        self.conf_dir = os.path.join(
            self.root_dir, "feature_conf", "sms", "un", "sms_es_loan0_v1"
        )
        self.model_conf_dir = os.path.join(self.conf_dir, "model_conf")
        self.model_path = os.path.join(self.model_conf_dir, "albert-tiny-spanish")
        self.state_dict_path = os.path.join(
            self.model_conf_dir, "BertCLF.m" if country_type == "es" else "BertCLF_mx.m"
        )

        # Index position == class id emitted by the linear head.
        self.idx2labels = ["ad", "col", "ovd", "loan", "ntf", "otr", "repy", "ver"]
        self.label2idx = {label: idx for idx, label in enumerate(self.idx2labels)}

        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path, add_prefix_space=True, use_fast=True
        )
        self.model = BertForTextClassification(self.model_path, self.label2idx)
        self.device = (
            "cuda"
            if torch.cuda.is_available()
            else "mps" if torch.backends.mps.is_available() else "cpu"
        )
        # SECURITY NOTE(review): torch.load unpickles the checkpoint and can
        # execute arbitrary code; only load trusted files (or pass
        # weights_only=True if the checkpoint is a pure state dict).
        # NOTE(review): strict=False silently tolerates missing/unexpected
        # keys — confirm this is intentional and not masking a bad checkpoint.
        self.model.load_state_dict(
            torch.load(self.state_dict_path, map_location=self.device), strict=False
        )
        # BUG FIX: the module itself must live on the target device; predict()
        # moves the input tensors there, and a CPU-resident model would crash
        # on "cuda"/"mps".
        self.model.to(self.device)
        self.model.eval()

    def get_dataloader(self, msg_list, pad_size=128, batch_size=32):
        """Tokenize *msg_list* into a list of padded/truncated tensor batches.

        Returns a list of BatchEncoding objects, each covering up to
        *batch_size* messages, padded to the longest message in its batch and
        truncated to *pad_size* subword tokens. An empty input yields [].
        """
        batches = []
        for start in range(0, len(msg_list), batch_size):
            chunk = msg_list[start : start + batch_size]
            batches.append(
                self.tokenizer(
                    chunk,
                    is_split_into_words=False,
                    padding=True,
                    truncation=True,
                    max_length=pad_size,
                    return_tensors="pt",
                )
            )
        return batches

    def predict(self, msg_list):
        """Classify each message and return a list of label strings.

        Output order matches *msg_list*; an empty input yields [].
        """
        pred_labels = []
        # Inference only: disabling autograd skips graph construction and
        # reduces memory use and latency.
        with torch.no_grad():
            for batch in self.get_dataloader(msg_list):
                input_ids = batch.input_ids.to(self.device)
                attention_mask = batch.attention_mask.to(self.device)
                logits = self.model(input_ids, attention_mask)
                for class_idx in logits.argmax(dim=1).tolist():
                    pred_labels.append(self.idx2labels[class_idx])
        return pred_labels
