import torch

from dataset.vocab import WordVocab  # make sure this is the WordVocab defined in your first code snippet
from model.bert import BERT

# --- 1. Configuration (CRITICAL: Match these to your train.py settings) ---
model_path = "bert.model.ep4"  # <---  CHANGE THIS
vocab_path = "/Users/jiangfeng/PycharmProjects/Net/BERT/bert_pytorch/dataset/vocab.txt"  # path to your vocab.txt file

# Hyperparameters:  MUST match the values used during training!
hidden_size = 256
num_layers = 8
num_attention_heads = 8
max_seq_len = 30  # CRITICAL
# vocab_size is not hard-coded here; it is derived from the loaded vocab below.

# --- 2. Device Setup ---
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# --- 3. Load Vocab ---
vocab = WordVocab.load_vocab(vocab_path)  # use the load_vocab classmethod
vocab_size = len(vocab)  # now vocab_size is known
print(f"Vocabulary size: {vocab_size}")

# --- 4. Create BERT Model Instance ---
bert_model = BERT(vocab_size, hidden=hidden_size, n_layers=num_layers, attn_heads=num_attention_heads)
bert_model.to(device)

# --- 5. Load Model State Dictionary ---
try:
    # NOTE(review): torch.load unpickles arbitrary objects. For a plain
    # state_dict, consider weights_only=True (torch >= 1.13) if the
    # checkpoint file is not fully trusted.
    bert_model.load_state_dict(torch.load(model_path, map_location=device))
    print(f"Successfully loaded model state_dict from: {model_path}")
except FileNotFoundError:
    print(f"ERROR: Model file not found at: {model_path}")
    # BUG FIX: bare exit() terminated with status 0 (success) on failure;
    # exit with a non-zero status so callers/shell scripts can detect it.
    raise SystemExit(1)
except RuntimeError as e:
    print(f"ERROR: RuntimeError while loading state_dict: {e}")
    raise SystemExit(1)
except Exception as e:
    print(f"ERROR: An unexpected error occurred: {e}")
    raise SystemExit(1)

bert_model.eval()
print("Model loaded and set to evaluation mode.")

# --- 6. (Optional but Recommended) Verify Vocabulary Size ---
try:
    loaded_vocab_size = bert_model.embedding.token.num_embeddings  # Corrected path
    print(f"Loaded model vocabulary size (num_embeddings): {loaded_vocab_size}")
    if loaded_vocab_size != vocab_size:
        print(
            f"WARNING: Loaded model vocab size ({loaded_vocab_size}) does NOT match expected vocab size ({vocab_size})!")
    else:
        print("Vocabulary size check passed.")
except AttributeError:
    print("WARNING: Could not verify vocabulary size. Double-check the attribute path.")
except Exception as e:
    print(f"ERROR during vocab size check: {e}")

# --- 7. Encoding Function ---
def encode_sentence(sentence, vocab, max_seq_len=30):
    """Convert a whitespace-tokenized sentence into padded id/segment tensors.

    Args:
        sentence: input string; tokens are assumed to be space-separated.
        vocab: vocabulary exposing ``stoi``, ``unk_index`` and ``pad_index``.
        max_seq_len: fixed output length (keep in sync with the module-level
            ``max_seq_len`` used at training time; default 30).

    Returns:
        A ``(input_ids, segment_ids)`` pair of LongTensors, each of shape
        ``(1, max_seq_len)``.
    """
    tokens = sentence.split()
    # NOTE(review): assumes "<sos>"/"<eos>" are the literal special-token
    # strings present in vocab.stoi — confirm against WordVocab.
    tokens = ["<sos>"] + tokens + ["<eos>"]
    # BUG FIX: truncate over-long inputs. Previously a sentence longer than
    # max_seq_len made padding_len negative, the pad list multiplication
    # produced an empty list, and the returned tensors exceeded max_seq_len.
    # Keep the trailing <eos> when truncating.
    if len(tokens) > max_seq_len:
        tokens = tokens[:max_seq_len - 1] + [tokens[-1]]
    input_ids = [vocab.stoi.get(token, vocab.unk_index) for token in tokens]
    segment_ids = [1] * len(input_ids)

    padding_len = max_seq_len - len(input_ids)
    input_ids += [vocab.pad_index] * padding_len
    segment_ids += [0] * padding_len

    return torch.tensor(input_ids).unsqueeze(0), torch.tensor(segment_ids).unsqueeze(0)


# --- 8. Example Usage ---

# --- 8.1  Simple Forward Pass ---
# Encode one plain sentence, move it to the target device, and run it
# through the encoder to inspect the raw output tensor.
sentence = "Without the support of Maximilian or the chance to prove themselves in the war with Gallia , it is Dahau 's last trump card in creating a new Darcsen nation ."  # Example sentence
input_tensor, segment_tensor = encode_sentence(sentence, vocab)
input_tensor = input_tensor.to(device)
segment_tensor = segment_tensor.to(device)

print("\n--- Simple Forward Pass Example ---")
print(f"Input sentence: {sentence}")
with torch.no_grad():  # inference only — no gradient tracking needed
    output = bert_model(input_tensor, segment_tensor)
    print("Output shape:", output.shape)
    print("Output (first 5 elements of first sequence, first 5 features):", output[0, :5, :5])

# --- 8.2  Masked Language Model (MLM) Prediction ---
masked_sentence = "Without the support <mask> Maximilian or the chance to prove themselves in the war with Gallia , it is Dahau 's last trump card in creating a new Darcsen nation ."  # [MASK] should be written as <mask>
masked_sentence = masked_sentence.replace("[MASK]", "<mask>")  # correction: use <mask> (no-op here — the sentence above already uses <mask>)
input_tensor, segment_tensor = encode_sentence(masked_sentence, vocab)
input_tensor = input_tensor.to(device)
segment_tensor = segment_tensor.to(device)

print("\n--- Masked Language Model (MLM) Example ---")
print(f"Masked sentence: {masked_sentence}")

with torch.no_grad():
    outputs = bert_model(input_tensor, segment_tensor)
    # NOTE(review): bert_model here is the bare encoder (built with
    # hidden=256 and no visible MLM head), so `outputs` is presumably the
    # hidden-state tensor, not vocabulary logits. The argmax below would
    # then index the hidden dimension, not the vocabulary — confirm whether
    # the trained LM head should be applied before argmax.
    predictions = outputs

    # Find the position(s) of the <mask> token in the input ids.
    mask_indices = (input_tensor == vocab.mask_index).nonzero()

    if mask_indices.nelement() > 0:
        mask_index = mask_indices[0, 1]  # sequence position of the first <mask> token
        predicted_token_logits = predictions[0, mask_index]
        predicted_token_id = torch.argmax(predicted_token_logits).item()
        predicted_word = vocab.itos[predicted_token_id]  # decode the id via itos

        print(f"Predicted word for <mask>: {predicted_word}")
    else:
        print("No <mask> token found in the input sequence.")
print("\n--- Test Complete ---")
