import os

import sentencepiece as spm
import numpy as np
from collections import defaultdict

import nltk
from nltk.util import ngrams
from collections import Counter

nltk.download('punkt')
nltk.download('punkt_tab')  # punkt_tab is required by newer NLTK tokenizer releases

# Toggle to print the intermediate tokens / counters below.
debug = False
###############################################
# generate Bi-Gram counter for training corpus
###############################################
corpus_text = '''I play tennis. I like tennis friends. I talk with tennis players. I play with tennis friends. I have friends who like tennis. Tennis players are very handsome. My Chinese friends play tennis every weekend. We play tennis because Chinese friends play tennis. Chinese friends like to play tennis. I play tennis with Chinese friends. Chinese friends play and watch tennis.'''
token = nltk.word_tokenize(corpus_text)

###########################
# Generate Bi-Gram counter
###########################
# ngrams(token, 1) yields 1-tuples, so w[0] unwraps each word before counting.
unigrams = Counter([w[0] for w in list(ngrams(token, 1))])
# Bigram counts keyed by (w1, w2) tuples.
bigrams = Counter(list(ngrams(token, 2)))


###########################
# generate query Bi-Gram
###########################
query_text_1 = "I play with tennis friends"
query_text_2 = "Chinese friends play tennis"
query_token = nltk.word_tokenize(query_text_1)
query_bigram = list(ngrams(query_token, 2))

# DO NOT MODIFY ABOVE

if debug:
    print(token)
    print(unigrams)
    print(bigrams)
    print(query_bigram)

###########################
# Look up each query bigram in each query text,
# compute P(w2|w1) = Bi-Counter[(bg[0], bg[1])] / Uni-Counter[bg[0]],
# then convert the sentence probability to PPL and print it.


# Function that computes the Bi-Gram perplexity (PPL) of a sentence.
def compute_bigram_ppl(query_bigrams, unigram_counts=None, bigram_counts=None):
    """Compute the add-1 (Laplace) smoothed bi-gram perplexity of a sentence.

    Each conditional probability is smoothed as
        P(w2 | w1) = (count(w1, w2) + 1) / (count(w1) + V)
    where V is the vocabulary size, and the perplexity is
        PPL = exp(-(1/N) * sum(log P_i)).

    Args:
        query_bigrams: list of (w1, w2) tuples for one sentence.
        unigram_counts: mapping word -> count; defaults to the module-level
            ``unigrams`` counter built from the training corpus.
        bigram_counts: mapping (w1, w2) -> count; defaults to the module-level
            ``bigrams`` counter.

    Returns:
        The sentence perplexity as a float.

    Raises:
        ValueError: if ``query_bigrams`` is empty (perplexity is undefined,
            and the original code would divide by zero).
    """
    if unigram_counts is None:
        unigram_counts = unigrams
    if bigram_counts is None:
        bigram_counts = bigrams

    if not query_bigrams:
        raise ValueError("query_bigrams is empty; perplexity is undefined")

    # Vocabulary size is loop-invariant, so hoist it out of the loop.
    V = len(unigram_counts)

    log_prob_sum = 0.0
    for w1, w2 in query_bigrams:
        count_w1 = unigram_counts.get(w1, 0)
        count_bigram = bigram_counts.get((w1, w2), 0)
        # Add-1 smoothing prevents unseen bigrams from yielding zero
        # probability (which would make log() blow up).
        log_prob_sum += np.log((count_bigram + 1) / (count_w1 + V))

    # PPL = exp(-(1/N) * sum(log P_i))
    return float(np.exp(-log_prob_sum / len(query_bigrams)))

# Tokenize each query sentence and build its bigram list.
query_token_1 = nltk.word_tokenize(query_text_1)
query_token_2 = nltk.word_tokenize(query_text_2)

query_bigram_1 = list(ngrams(query_token_1, 2))
query_bigram_2 = list(ngrams(query_token_2, 2))

# Score both sentences under the bigram language model.
ppl_1 = compute_bigram_ppl(query_bigram_1)
ppl_2 = compute_bigram_ppl(query_bigram_2)

# Report each query together with its perplexity.
for label, text, score in (
    ("Query 1:", query_text_1, ppl_1),
    ("Query 2:", query_text_2, ppl_2),
):
    print(label, text)
    print("Bi-Gram PPL =", score)
    print()

# The lower-perplexity sentence is the better fit to the training corpus.
if ppl_1 < ppl_2:
    print("结论：句子 1 的 PPL 更低（更符合训练语料）")
else:
    print("结论：句子 2 的 PPL 更低（更符合训练语料）")