import os
import nltk
import numpy as np
from transformers import AutoTokenizer,AutoModelForCausalLM
from nltk.util import ngrams
from collections import Counter
import torch
import psutil

# TODO
# download tokenizer and put in readme
# Load the Qwen3-0.6B tokenizer from the local model directory.
# trust_remote_code=True allows transformers to execute the repo's custom
# tokenizer code — assumes ./model/Qwen3-0.6B exists locally (see TODO above).
tokenizer = AutoTokenizer.from_pretrained('./model/Qwen3-0.6B',trust_remote_code=True)

#####################################
########## Do not modify ###########
#####################################
# Build the corpus: one entry per whitespace-stripped line of every file
# found in the poem-db/ directory.
corpus = []
for file_name in os.listdir('poem-db'):
    #print(file_name)
    with open(os.path.join('poem-db',file_name)) as file:
        corpus += [line.strip() for line in file]

# tokenize sentences into tokens
# do not modify
# Flatten all corpus lines into a single token stream (sent_all) that the
# n-gram counters below consume.
sent_all = []
for text in corpus:
    words = tokenizer.tokenize(text, add_special_tokens=True)
    sent_all += words


###########################
# Generate Bi-Gram counter
###########################
# Counting tokens directly is equivalent to generating 1-grams and unpacking
# each 1-tuple; Counter also consumes the bigram generator lazily, so no
# intermediate list() materialization is needed.
unigrams = Counter(sent_all)
bigrams = Counter(ngrams(sent_all, 2))
V = len(unigrams)  # vocabulary size (number of distinct token types)

# tokenize THE QUESTION
the_question = "To be, or not to be: that is the question"
the_question_tokens = tokenizer.tokenize(the_question)

# TODO Q2: compute bigram PPL here
def bigram_sentence_ppl(sentence_tokens, bigram_counts=None, unigram_counts=None, vocab_size=None):
    """Compute bigram perplexity of a token sequence with Laplace (add-one) smoothing.

    Args:
        sentence_tokens: sequence of tokens to score; must contain at least
            two tokens so that at least one bigram can be formed.
        bigram_counts: Counter mapping (w1, w2) -> count. Defaults to the
            module-level `bigrams` built from the poem corpus.
        unigram_counts: Counter mapping w -> count. Defaults to the
            module-level `unigrams`.
        vocab_size: vocabulary size used in the smoothing denominator.
            Defaults to the module-level `V`.

    Returns:
        float: exp of the average negative log P(w_i | w_{i-1}) over the
        N-1 bigrams in the sentence.

    Raises:
        ValueError: if `sentence_tokens` has fewer than two tokens
            (previously this crashed with ZeroDivisionError for one token
            and silently returned 1.0 for an empty sequence).
    """
    # Backward-compatible defaults: fall back to the module-level corpus stats.
    if bigram_counts is None:
        bigram_counts = bigrams
    if unigram_counts is None:
        unigram_counts = unigrams
    if vocab_size is None:
        vocab_size = V

    N = len(sentence_tokens)
    if N < 2:
        raise ValueError("need at least 2 tokens to form a bigram")

    log_prob_sum = 0.0
    for w1, w2 in zip(sentence_tokens, sentence_tokens[1:]):
        # Laplace smoothing: unseen bigrams still get nonzero probability mass.
        prob = (bigram_counts[(w1, w2)] + 1) / (unigram_counts[w1] + vocab_size)
        log_prob_sum += -np.log(prob)

    # Perplexity = exp(mean negative log-likelihood over the N-1 bigrams).
    return float(np.exp(log_prob_sum / (N - 1)))

print("=== TOKENIZATION RESULT ===")
print(the_question_tokens)

# Compute Bigram PPL
ppl_bigram = bigram_sentence_ppl(the_question_tokens)
print("\n=== Bigram PPL ===")
print(ppl_bigram)

# TODO bonus: compute LLM PPL here
def compute_llm_ppl(text):
    """Compute perplexity using Qwen3-0.6B."""
    device = "cuda" if torch.cuda.is_available() else "cpu"

    model = AutoModelForCausalLM.from_pretrained(
        "./model/Qwen3-0.6B", 
        torch_dtype=torch.float32,
        trust_remote_code=True
    ).to(device)

    print("\n=== 当前的设备 ===")
    print("设备:", torch.device("cuda" if torch.cuda.is_available() else "cpu")) 

    process = psutil.Process(os.getpid())
    mem = process.memory_info().rss / (1024 ** 2)
    print(f"当前脚本内存占用：{mem:.2f} MB") 

    inputs = tokenizer(text, return_tensors="pt").to(device)
    input_ids = inputs["input_ids"]

    with torch.no_grad():
        outputs = model(input_ids, labels=input_ids)
        neg_log_likelihood = outputs.loss

    ppl = torch.exp(neg_log_likelihood)
    return ppl.item()


print("\n=== LLM PPL (Qwen3-0.6B) ===")
try:
    ppl_llm = compute_llm_ppl(the_question)
    print(ppl_llm)
except Exception as e:
    print("LLM PPL error:", e)