# encoding: utf-8

import os
import json
import torch
from torch import nn
from typing import Union, List
from torch.utils.data import dataset, DataLoader
from transformers import BertTokenizer
from tqdm import tqdm

from models import EncoderRNN, DecoderRNN, Seq2Seq

# Local path to the pretrained Chinese RoBERTa-wwm-ext checkpoint.
# NOTE(review): hard-coded Windows path — consider making this configurable.
pretrained_model = r"D:\codes\nlp_about\pretrained_model\hfl_chinese-roberta-wwm-ext"

# Shared tokenizer used both for encoding in MyDataSet and decoding in predict().
tokenizer = BertTokenizer.from_pretrained(pretrained_model)


class MyDataSet(dataset.Dataset):
    """Dataset that tokenizes raw text into fixed-length (input, label) tensor pairs.

    In this autoencoding-style setup the label is the same token-id sequence
    as the input for each text.
    """

    def __init__(self, text_list: Union[str, List[str]]):
        self.data: List[torch.Tensor] = []   # encoded input sequences (length 64)
        self.label: List[torch.Tensor] = []  # encoded target sequences (same text)
        self.load_data_from_text(text_list)

    def load_data_from_text(self, text_list: Union[str, List[str]]):
        """Encode each text and append input/label tensors.

        A single string is treated as a one-element list.  The original code
        tokenized every text twice with identical arguments; we encode once
        and clone, halving the tokenization work with identical results.
        """
        if isinstance(text_list, str):
            text_list = [text_list]
        for text in text_list:
            token_ids = torch.LongTensor(
                tokenizer.encode(text, max_length=64, truncation=True, padding="max_length")
            )
            self.data.append(token_ids)
            self.label.append(token_ids.clone())  # independent copy for the label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item], self.label[item]


batch_size = 1   # one text per forward pass (interactive use)
epoch_num = 25   # NOTE(review): unused in this inference script — training leftover?

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Model hyperparameters; input_size is the tokenizer's vocabulary size so the
# decoder's output dimension matches token ids.
input_size = tokenizer.vocab_size
hidden_size = 20
n_layers = 2
emb_size = 256

encoder = EncoderRNN(input_size, hidden_size, n_layers, emb_size, dropout=0.1)
decoder = DecoderRNN(input_size, hidden_size, n_layers, emb_size, dropout=0.1)

# NOTE(review): max_len=batch_size sets max_len to 1 while inputs are padded
# to 64 tokens — this looks like a bug and may be why outputs were "not quite
# right". Confirm Seq2Seq's max_len semantics (defined in models.py).
model = Seq2Seq(encoder, decoder, device, max_len=batch_size)
model.to(device)
model.eval()  # inference mode: disables dropout in encoder/decoder


def predict(text: Union[str, List[str]]):
    """Run the seq2seq model on *text* and print the decoded prediction(s).

    Fixes vs. the original (which printed garbage — see the old "结果不太对"
    remark): the original applied softmax over dim=0 (across flattened
    positions, not the vocabulary) and then passed the raw probability rows
    to ``tokenizer.decode`` as if they were token ids.  Here we take the
    argmax over the vocabulary dimension to obtain one token id per
    position, then decode each sequence.  Softmax itself is unnecessary for
    greedy decoding since it is monotonic and preserves the argmax.
    """
    my_datasets = MyDataSet(text)
    my_dataloader = DataLoader(my_datasets, batch_size=batch_size, drop_last=False)

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for inputs, _ in my_dataloader:
            inputs = inputs.to(device).long()
            outputs = model(inputs)
            # Reshape to (batch, positions, vocab); assumes the last dim is
            # the vocabulary, as the original view(-1, input_size) implied.
            logits = outputs.view(inputs.size(0), -1, input_size)
            token_ids = logits.argmax(dim=-1)  # greedy decode: one id per position
            for seq in token_ids.cpu().tolist():
                result = tokenizer.decode(seq, skip_special_tokens=True)
                print("skuShortName:", result)
            # 结果不太对


if __name__ == '__main__':
    # Simple interactive loop: read a product name, print the model's output.
    while True:
        query = input("请输入商品名称：")
        predict(query)
