import logging

import torch
import torch.nn as nn
# from transformers import AutoModel
# from transformers import AutoTokenizer

# from modelscope import AutoModel
# from transformers import AutoConfig
# from transformers.models.bert.modeling_bert import BertEmbeddings

from config import STRING_MASS_MODELSCOPE

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class Embedding_T(nn.Module):
    """Token embedding layer: id lookup -> LayerNorm -> dropout.

    Hyper-parameters are read from ``config["embedding"]``:
    ``vocab_size``, ``embedding_dim``, ``pad_token_id``,
    ``layer_norm_eps``, ``hidden_dropout_prob``.
    """

    def __init__(self, config):
        super().__init__()
        embedding_config = config["embedding"]
        self.word_embeddings = nn.Embedding(
            embedding_config["vocab_size"],
            embedding_config["embedding_dim"],
            padding_idx=embedding_config["pad_token_id"],
        )
        self.LayerNorm = nn.LayerNorm(
            embedding_config["embedding_dim"],
            eps=embedding_config["layer_norm_eps"],
        )
        self.dropout = nn.Dropout(embedding_config["hidden_dropout_prob"])

    def forward(self, input_ids):
        """Embed token ids of shape (batch, seq) -> (batch, seq, embedding_dim).

        Fixes the original parameter typo ``intput_ids``; every caller in
        this file invokes the module positionally, so the rename is safe.
        """
        embeddings = self.word_embeddings(input_ids)
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class LSTM_T(nn.Module):
    """LSTM sentence encoder: embed token ids, run an LSTM, and return the
    last layer's final hidden state with shape (batch, hidden_size).
    """

    def __init__(self, config) -> None:
        super().__init__()
        logger.info("use lstm model")
        lstm_config = config["lstm"]
        # NOTE(review): the LSTM input size is hidden_size, so
        # config["embedding"]["embedding_dim"] must equal
        # config["lstm"]["hidden_size"] — confirm in config.
        self.lstm = nn.LSTM(
            lstm_config["hidden_size"],
            lstm_config["hidden_size"],
            num_layers=lstm_config["rnn_num_layers"],
            batch_first=True,
        )
        # Alternative kept from original: use a pretrained BertEmbeddings
        # config = AutoConfig.from_pretrained(config["model_dir"])
        # self.embedding = BertEmbeddings(config)
        self.embedding = Embedding_T(config)

    def forward(self, intput_ids):
        """Encode (batch, seq) token ids to (batch, hidden_size).

        Bug fix: the original returned ``h_n.squeeze()``, which (a) also
        collapses the batch dimension when batch == 1 and (b) returns every
        layer's state when rnn_num_layers > 1. ``h_n[-1]`` always yields the
        last layer's hidden state with shape (batch, hidden_size); behavior
        is unchanged for the common num_layers == 1, batch > 1 case.
        """
        x = self.embedding(intput_ids)
        _, (h_n, _) = self.lstm(x)
        return h_n[-1]

class Bert_T(nn.Module):
    """Pretrained BERT encoder; forward returns the pooled sentence vector."""

    def __init__(self, config):
        super().__init__()
        logger.info("use bert model")
        # Both hubs appear to load through transformers under the hood, so
        # the transformers dependency is required either way.
        if config.get("mass") == STRING_MASS_MODELSCOPE:
            from modelscope import AutoModel
        else:
            from transformers import AutoModel
        self.encoder = AutoModel.from_pretrained(
            config.get("model_dir"), return_dict=False
        )

    def forward(self, intput_ids):
        # With return_dict=False the encoder yields the tuple
        # (sequence_output, pooled_output); keep only the pooled vector.
        sequence_output, pooled_output = self.encoder(intput_ids)
        return pooled_output


class XModel(nn.Module):
    """Text classifier: a pluggable encoder ('lstm' | 'bert') + linear head.

    Config keys used here: ``model_dir``, ``model_type``,
    ``classify_hidden_size``, ``num_class``, and optionally ``mass``
    (selects which hub the tokenizer is loaded from).
    """

    def __init__(self, config) -> None:
        super().__init__()
        self.config = config
        self.model_dir = self.config["model_dir"]

        model_type = self.config['model_type']
        if model_type == 'lstm':
            self.encoder = LSTM_T(config)
        elif model_type == 'bert':
            self.encoder = Bert_T(config)
        else:
            # Fail fast: the original silently skipped encoder creation on an
            # unknown model_type and crashed later in forward() with an
            # opaque AttributeError.
            raise ValueError(
                f"unknown model_type: {model_type!r} (expected 'lstm' or 'bert')"
            )

        self.classify = nn.Linear(self.config['classify_hidden_size'], self.config['num_class'])
        self.loss = nn.functional.cross_entropy  # cross-entropy classification loss

    def forward(self, intput_ids, target=None):
        """Return logits of shape (batch, num_class), or the scalar
        cross-entropy loss when ``target`` labels are provided."""
        x = self.encoder(intput_ids)
        x = self.classify(x)
        if target is not None:
            return self.loss(x, target)
        return x

    def encode(self, sentences, max_length=5):
        """Tokenize ``sentences`` into a LongTensor of ids, padded/truncated
        to ``max_length``.

        Improvements over the original: the tokenizer is created once and
        cached (it used to be rebuilt on every call), and ``max_length`` is
        a keyword parameter defaulting to the original hard-coded 5, so
        existing callers are unaffected.
        """
        if getattr(self, "tokenizer", None) is None:
            if self.config.get("mass") == STRING_MASS_MODELSCOPE:
                from modelscope import AutoTokenizer
            else:
                from transformers import AutoTokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
        # NOTE(review): pad_to_max_length is deprecated in recent
        # transformers releases; padding="max_length" is the modern
        # equivalent — confirm the installed version before switching.
        tokens = self.tokenizer.encode(sentences, max_length=max_length, pad_to_max_length=True)
        return torch.LongTensor(tokens)


if __name__ == '__main__':
    # Bug fix: the original called XModel() with no arguments, but
    # XModel.__init__ requires a config dict, so the script always crashed
    # with a TypeError before doing any work. Provide a minimal demo config.
    demo_config = {
        # TODO(review): model_dir must point at a real pretrained model
        # directory (or hub id) — encode() loads its tokenizer from here.
        "model_dir": "bert-base-chinese",
        "model_type": "lstm",
        "classify_hidden_size": 8,
        "num_class": 2,
        "lstm": {"hidden_size": 8, "rnn_num_layers": 1},
        "embedding": {
            "vocab_size": 21128,
            "embedding_dim": 8,
            "pad_token_id": 0,
            "layer_norm_eps": 1e-12,
            "hidden_dropout_prob": 0.1,
        },
    }
    xmodel = XModel(demo_config)
    input_ids = xmodel.encode("我在家你在家")
    print(input_ids)
    # x = xmodel.forward(input_ids)
