import time
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from torchtext.data.functional import to_map_style_dataset
from torchtext.transforms import VocabTransform, ToTensor
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from torchtext.data.functional import to_map_style_dataset
from torchtext.transforms import VocabTransform, ToTensor
from transformer_demo_deepseek02 import BATCH_SIZE, collate_batch, TransformerModel


# 定义文本预处理函数
def preprocess_text(text, vocab, tokenizer, max_seq_len, pad_idx=0):
    """Convert raw text into a fixed-length index tensor with a batch dim.

    Args:
        text: raw input string.
        tokenizer: callable splitting a string into a list of tokens.
        vocab: callable mapping a list of tokens to a list of int indices.
        max_seq_len: target length; output is truncated or right-padded to it.
        pad_idx: index used for padding positions (default 0; must match the
            padding index the model was trained with — TODO confirm).

    Returns:
        LongTensor of shape (1, max_seq_len).
    """
    # Tokenize and map tokens to vocabulary indices.
    indexed_tokens = vocab(tokenizer(text))
    tensor = torch.tensor(indexed_tokens, dtype=torch.long)

    if len(tensor) >= max_seq_len:
        tensor = tensor[:max_seq_len]  # truncate long sequences
    else:
        # Right-pad short sequences up to max_seq_len.
        padding = torch.full((max_seq_len - len(tensor),), pad_idx, dtype=torch.long)
        tensor = torch.cat([tensor, padding], dim=0)

    # Add the batch dimension expected by the model.
    return tensor.unsqueeze(0)  # (1, max_seq_len)


# 定义分类函数
def classify_text(model, text, vocab, tokenizer, max_seq_len):
    """Classify a single text string with the given model.

    The text is tokenized, indexed, and padded/truncated to max_seq_len,
    then fed through the model in inference mode.

    Returns:
        (predicted_class, probabilities) where predicted_class is the argmax
        class index and probabilities is a plain Python list over classes.
    """
    # Build the (1, max_seq_len) input tensor.
    input_tensor = preprocess_text(text, vocab, tokenizer, max_seq_len)
    print(f'input_tensor: {input_tensor}')

    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logits = model(input_tensor)
        probabilities = torch.softmax(logits, dim=1)
        predicted_class = probabilities.argmax(dim=1).item()

    return predicted_class, probabilities.squeeze().tolist()
# ---- Inference configuration ----
MAX_SEQ_LEN = 16
tokenizer = get_tokenizer("basic_english")

# NOTE(review): torch.load unpickles arbitrary objects — only load these
# files from a trusted source. Consider map_location='cpu' on CPU-only hosts.
model: TransformerModel = torch.load('my_first_transformer_from_deepseek_0.pth')
model.eval()  # switch to inference mode (disables dropout, etc.)
vocab = torch.load("vocab.pth")  # vocabulary saved at training time

# Sample reviews to classify.
texts = []
texts.append("The story centers around Barry McKenzie who must go to England if he wishes to claim his inheritance. "
             "Being about the grossest Aussie shearer ever to set foot outside this great Nation of ours there is something "
             "of a culture clash and much fun and games ensue. The songs of Barry McKenzie(Barry Crocker) are highlights.")
texts.append("This movie was a complete fantastic. The pacing was brilliant, and I was completely excited.")

# Classify each text and report the predicted label and class probabilities.
for text in texts:
    predicted_class, probabilities = classify_text(model, text, vocab, tokenizer, MAX_SEQ_LEN)
    # Assumes class 1 = positive, 0 = negative — TODO confirm against the
    # label mapping used during training.
    class_label = "positive" if predicted_class == 1 else "negative"
    print(f"Predicted class: {class_label}")
    print(f"Class probabilities: [negative: {probabilities[0]:.4f}, positive: {probabilities[1]:.4f}]")