import torch
from loguru import logger
from torchtext.data.utils import get_tokenizer

from config.config import cfg
from model.TextSentiment import TextClassificationModel
from utils.utils import build_vocab
from utils.utils import log_init

if __name__ == '__main__':
    # Debug script: tokenize one sample sentence, look up its vocab indices,
    # and inspect the model's EmbeddingBag output for that sample.
    log_init('test_model', log_dir=cfg.log_dir)
    tokenizer = get_tokenizer("basic_english")
    vocab = build_vocab(cfg.dataset_dir, 'train', tokenizer)
    model = TextClassificationModel(vocab_size=len(vocab),
                                    embed_dim=cfg.embed_dim,
                                    num_class=cfg.num_class).to(cfg.device)
    # BUGFIX: the original literal contained "\b", which Python parses as a
    # backspace control character (U+0008). The AG News sample text contains a
    # literal backslash before "band", so it must be escaped as "\\b".
    text = "Reuters - Short-sellers, Wall Street's dwindling\\band of ultra-cynics, are seeing green again."
    logger.debug(f'text: {text}')
    # Build the index vector for the text sample
    tokens = tokenizer(text)
    logger.debug(f'tokens: {tokens}')
    text_idx = vocab.lookup_indices(tokens)
    logger.debug(f'text_index: {text_idx}')

    # Convert text_idx to a tensor.
    # BUGFIX: move the inputs to the same device as the model — the original
    # left them on CPU, which fails when cfg.device is a CUDA device.
    text_tensor = torch.tensor(text_idx, dtype=torch.int64).to(cfg.device)
    # Single sample, so one bag starting at offset 0 (EmbeddingBag convention)
    offsets = torch.tensor([0], dtype=torch.int64).to(cfg.device)

    # Embed the sample with the model's embedding layer; no gradients needed
    # for this inspection-only script.
    with torch.no_grad():
        embedding = model.embedding(text_tensor, offsets)
    logger.debug(f'embedding: {embedding}')
    logger.debug(f'embedding shape: {embedding.shape}')
