from functools import reduce, partial

# Sentence-boundary markers.  START is a format template producing numbered
# padding tokens ("<start1>", "<start2>", ...) for positions with missing left
# context; END is the single end-of-sentence token counted after the last word.
START, END = "<start{}>", "<end>"


def gram_key(words: list, i: int, n: int) -> str:
    """Return the '_'-joined context key for position *i* in an n-gram model.

    The key names the n-1 tokens preceding position *i*; positions near
    the sentence start are padded on the left with numbered "<startK>"
    markers so every position has a full-width context.
    """
    parts = []
    if i < n - 1:
        # Pad the missing left context with <startK> markers.
        parts.extend(START.format(i + 1 + offset) for offset in range(n - i - 1))
    parts.extend(words[pos] for pos in range(max(0, i + 1 - n), i))
    return '_'.join(parts)


def ngram_counting(text: str, n: int):
    """Count n-gram continuations in *text*.

    Returns a pair ``(ngram_counts, unigrams)``:
      - ngram_counts maps a context key (see gram_key) to a dict
        {next_word: count}; the position just past the last word is
        counted as the special END token.
      - unigrams is the set of distinct lowercased tokens.
    """
    # <w_i-1, w_i-2> : {w_i: c}
    ngram_counts = {}
    # BUG FIX: previously this read the module-global `train_text` instead of
    # the `text` parameter, so the argument was silently ignored.
    words = text.lower().split()
    for i in range(len(words) + 1):
        key = gram_key(words, i, n)
        # i == len(words) is the end-of-sentence transition.
        word = words[i] if i != len(words) else END
        counts = ngram_counts.setdefault(key, {})
        counts[word] = counts.get(word, 0) + 1
    unigrams = set(words)
    return (ngram_counts, unigrams)


def p(words: list, i: int, ngram_counts: dict, v: int, n: int, laplacian_smooth: bool = True):
    """Conditional probability of the token at position *i* given its n-1 context.

    words: lowercased, tokenized sentence.
    i: position in *words*; i == len(words) stands for the END token.
    ngram_counts: counts produced by ngram_counting().
    v: vocabulary size used by Laplace (add-one) smoothing.
    n: n-gram order.
    laplacian_smooth: when False, an entirely unseen context gives 0/0
        and raises ZeroDivisionError — callers rely on catching that.
    """
    key = gram_key(words, i, n)
    word = words[i] if i < len(words) else END
    if key in ngram_counts:
        counts = ngram_counts[key]
        a = counts.get(word, 0)
        # FIX: the old `sum([v for k, v in counts.items()])` shadowed the
        # vocabulary parameter `v` inside the comprehension; summing the dict
        # values directly is equivalent and unambiguous.
        b = sum(counts.values())
    else:
        a = 0
        b = 0
    if laplacian_smooth:
        return (a + 1) / (b + v)
    if b == 0:
        # Unseen context with no smoothing: warn, then let 0/0 raise.
        print("warn: i = {}, key <{}> = 0".format(i, key))
    return a / b


def perplexity(ngram_counts, unigrams: set, test_text: str, n: int, laplacian_smooth: bool = True):
    """Perplexity of *test_text* under the counted n-gram model.

    unigrams: the training vocabulary — a set (the previous `dict`
        annotation was wrong; ngram_counting returns a set).
    V is |vocabulary| + 1, reserving one slot for the END token.
    Returns float("inf") when any probability factor is exactly 0.
    """
    words = test_text.lower().split()
    N = len(words)
    v = len(unigrams) + 1
    print("V = {}, N = {}".format(v, N))
    f = partial(p, words=words, ngram_counts=ngram_counts,
                v=v, laplacian_smooth=laplacian_smooth, n=n)
    # N + 1 factors: one per word plus the END transition.
    prod = reduce(lambda x, y: x*y, [f(i=i) for i in range(len(words) + 1)])
    if prod == 0.0:
        return float("inf")
    # NOTE(review): the exponent uses N even though N + 1 probabilities are
    # multiplied — confirm this matches the intended definition before changing.
    return prod ** (- 1.0 / N)


def sentence_prob(ngram_counts: dict, test_text: str, n: int):
    """Unsmoothed probability of *test_text*, including the END transition.

    May raise ZeroDivisionError when a context was never seen in
    training (0/0 with smoothing disabled).
    """
    tokens = test_text.lower().split()
    prob_at = partial(p, words=tokens, ngram_counts=ngram_counts,
                      v=0, laplacian_smooth=False, n=n)
    total = 1.0
    # Multiply the factor for every token position plus the END position.
    for pos in range(len(tokens) + 1):
        total *= prob_at(i=pos)
    return total


# Q2: unsmoothed bigram probability of a test sentence.
train_text = "This is the cow with the crumpled horn that tossed the dog that worried the cat that killed the rat that ate the malt that lay in the house that Jack built"
ngram_counts, unigrams = ngram_counting(train_text, 2)

test_text = "This is the rat that worried the dog that Jack built"
# With smoothing disabled, an unseen bigram context makes p() compute 0/0;
# the ZeroDivisionError is treated as "probability undefined (0/0)".
try:
    v = sentence_prob(ngram_counts, test_text, 2)
    print("Q2: sentence prob", v)
except ZeroDivisionError:
    print("Q2: sentence prob 0/0")
print("train: ngram counts => ", ngram_counts)
print("train: unigrams => ", unigrams)

# Q3: trigram perplexity WITHOUT smoothing (may print warnings / return inf
# if a factor is zero).
train_text = "This is the rat that ate the malt that lay in the house that Jack built"
test_text = "This is the house that Jack built"

ngram_counts, unigrams = ngram_counting(train_text, 3)
v = perplexity(ngram_counts, unigrams, test_text, 3, laplacian_smooth=False)
print("Q3: ", v)
print("train: ngram counts => ", ngram_counts)
print("train: unigrams => ", unigrams)

# Q4: same trigram model and test sentence as Q3, but with Laplace
# (add-one) smoothing enabled.
train_text = "This is the rat that ate the malt that lay in the house that Jack built"
ngram_counts, unigrams = ngram_counting(train_text, 3)

test_text = "This is the house that Jack built"
v = perplexity(ngram_counts, unigrams, test_text, 3, laplacian_smooth=True)
print("Q4:", v)
print("train: ngram counts => ", ngram_counts)
print("train: unigrams => ", unigrams)
