import nltk
# import german_nouns
# from german_nouns.lookup import Nouns
# nouns = Nouns()
# from nltk.stem.snowball import GermanStemmer
# stemmer = GermanStemmer("german")
import re
from collections import defaultdict
import random
import itertools

from typing import List, Tuple

import logging

with open("data/pg31537.txt", "r") as f:
    koch1 = f.read()

logging.info("read raw corpus file of %s bytes" % len(koch1))

# Strip underscores (Gutenberg emphasis markup) and the BOM, then flatten
# newlines into spaces so the sentence tokenizer sees continuous text.
koch1r = re.sub('\n', ' ', re.sub('[_\ufeff]', '', koch1))
sent_tok1: List[str] = nltk.sent_tokenize(koch1r)
wts1: List[List[str]] = [nltk.word_tokenize(sent) for sent in sent_tok1]

# Per-sentence n-grams (orders 1-4), flattened into one list per order.
# chain.from_iterable avoids the quadratic copying of sum(lists, []), and
# iterating sentences directly avoids range(len(...)) indexing.
# The elements are tuples of words, so the annotations say so (the
# originals claimed List[str]).
k1grams1: List[Tuple[str, ...]] = list(itertools.chain.from_iterable(nltk.ngrams(ws, n=1) for ws in wts1))
k1grams2: List[Tuple[str, ...]] = list(itertools.chain.from_iterable(nltk.ngrams(ws, n=2) for ws in wts1))
k1grams3: List[Tuple[str, ...]] = list(itertools.chain.from_iterable(nltk.ngrams(ws, n=3) for ws in wts1))
k1grams4: List[Tuple[str, ...]] = list(itertools.chain.from_iterable(nltk.ngrams(ws, n=4) for ws in wts1))

logging.info("n-gram lengths: %s" % ((len(k1grams1), len(k1grams2), len(k1grams3), len(k1grams4)),))

# Frequency tables per n-gram order.
k1grams1freq = nltk.FreqDist(k1grams1)
k1grams2freq = nltk.FreqDist(k1grams2)
k1grams3freq = nltk.FreqDist(k1grams3)
k1grams4freq = nltk.FreqDist(k1grams4)

# Sorted association lists mapping a context to (continuation..., count).
# s2back maps a word to its possible PREDECESSORS (used for prefix
# generation); the others map leading word(s) to following word(s).
s2back: List[Tuple[str, Tuple[str, int]]] = sorted([(w2, (w1, k)) for ((w1, w2), k) in k1grams2freq.items()])
s2 = sorted([(w1, (w2, k)) for ((w1, w2), k) in k1grams2freq.items()])
s3 = sorted([(w1, (w2, w3, k)) for ((w1, w2, w3), k) in k1grams3freq.items()])
s4 = sorted([(w1, (w2, w3, w4, k)) for ((w1, w2, w3, w4), k) in k1grams4freq.items()])
s3f2 = sorted([((w1, w2), (w3, k)) for ((w1, w2, w3), k) in k1grams3freq.items()])

def nextfrom1() -> List[str]:
    """Sample one word from the unigram frequency distribution.

    Returns:
        A one-element list, so callers can uniformly ``+=`` the result
        onto their output list (matches the other nextfrom* functions).
    """
    items = list(k1grams1freq.items())  # [((word,), count), ...]
    # random.choices does weighted sampling in one call and avoids the
    # original's edge case where random.random() could land past the last
    # manually accumulated stop (float rounding), raising IndexError.
    gram, _count = random.choices(items, weights=[k for _, k in items], k=1)[0]
    return [gram[0]]

def nextfrom2(word: str) -> List[str]:
    """Sample the next word given one preceding word (bigram model).

    Falls back to the unigram model when `word` has no known continuation.

    Returns:
        A one-element list containing the sampled next word.
    """
    # BUG FIX: the original compared stems via `stemmer`, whose import is
    # commented out at the top of the file, so every call raised NameError.
    # Compare lowercased surface forms instead (the stemmer-free variant
    # the author left in a comment, made case-insensitive).
    key = word.lower()
    conti: List[Tuple[str, int]] = [(w2, k) for (w1, (w2, k)) in s2 if w1.lower() == key]
    if not conti:
        return nextfrom1()
    # Weighted draw; replaces the manual accumulate/filter dance and the
    # bare except that papered over its IndexError edge case.
    w2, _k = random.choices(conti, weights=[k for _, k in conti], k=1)[0]
    return [w2]

def nextfrom3(words: List[str]) -> List[str]:
    """Sample the next word given up to two preceding words (trigram model).

    Backs off to the bigram model (and from there to unigrams) when the
    context is short or unseen.

    Returns:
        A one-element list containing the sampled next word.
    """
    if len(words) == 0:
        return nextfrom1()
    if len(words) == 1:
        return nextfrom2(words[0])
    # BUG FIX: the original compared the bigram key [w1, w2] against
    # words[-3:], which can never match once three or more words of
    # context exist; a two-word key must match the last TWO words.
    context = words[-2:]
    conti: List[Tuple[str, int]] = [(w3, k) for (w1, (w2, w3, k)) in s3 if [w1, w2] == context]
    if not conti:
        return nextfrom2(words[-1])
    # BUG FIX: the original indexed conti with the whole (index, cumprob)
    # tuple (missing a [0]), so the bare except fired on every draw and
    # the trigram model was effectively never used; the success path would
    # also have returned a double-wrapped [[w3]].
    w3, _k = random.choices(conti, weights=[k for _, k in conti], k=1)[0]
    return [w3]

def nextfrom4(words: List[str]) -> List[str]:
    """Sample the next word given up to three preceding words (4-gram model).

    Backs off through the trigram/bigram/unigram models when the context
    is short or unseen.

    Returns:
        A one-element list containing the sampled next word.
    """
    if len(words) == 0:
        return nextfrom1()
    elif len(words) < 3:
        return nextfrom3(words)
    # BUG FIX: the original iterated s3 (the trigram table) while
    # unpacking 4-tuples, which raises ValueError outside the try block
    # whenever three words of context are supplied; the 4-gram table is s4.
    context = words[-3:]
    conti: List[Tuple[str, int]] = [(w4, k) for (w1, (w2, w3, w4, k)) in s4 if [w1, w2, w3] == context]
    if not conti:
        return nextfrom3(words)
    # BUG FIX: same indexing/double-wrap defects as nextfrom3 — the
    # original's except branch (which also skipped straight to the bigram
    # model) fired on every draw.
    w4, _k = random.choices(conti, weights=[k for _, k in conti], k=1)[0]
    return [w4]


def prevfrom2(word: str) -> List[str]:
    """Sample a word that could PRECEDE `word` (reversed bigram model).

    Used by MyKochi.makeprefix to grow a sentence backwards from the seed
    word. Falls back to the unigram model when `word` has no known
    predecessor.

    Returns:
        A one-element list containing the sampled preceding word.
    """
    # BUG FIX: as in nextfrom2, `stemmer` is undefined (its import is
    # commented out), so the original raised NameError on every call.
    # Compare lowercased surface forms instead.
    key = word.lower()
    conti = [(w1, k) for (w2, (w1, k)) in s2back if w2.lower() == key]
    if not conti:
        return nextfrom1()
    # Weighted draw via random.choices; the original's bare except
    # (returning []) only existed to catch its own IndexError edge case.
    w1, _k = random.choices(conti, weights=[k for _, k in conti], k=1)[0]
    return [w1]


maxit = 100

class MyKochi:
    """Generates a pseudo-sentence around a seed word from the n-gram tables."""

    def makeprefix(self, word: str) -> List[str]:
        """Grow a sentence backwards from `word` and return it in reading order.

        Repeatedly samples predecessors with prevfrom2 until sentence
        punctuation ('.' or ',') is reached or `maxit` steps have passed.
        """
        outputl = [word]
        it = 0
        # BUG FIX: the original relied on `or`/`and` precedence, so the
        # `it < maxit` cap did not apply to the `len(outputl)==0` disjunct;
        # parenthesize so the loop is always bounded by maxit.
        while (len(outputl) == 0 or not re.match("[.,]", outputl[-1])) and it < maxit:
            prevw = prevfrom2(outputl[-1])
            outputl += prevw
            it += 1
        # Drop the terminating punctuation token and reverse into reading
        # order. NOTE(review): if the loop stopped via maxit instead of
        # punctuation, this drops a real word — presumably acceptable.
        return outputl[-2::-1]

    def sentence(self, first):
        """Build a full sentence containing the seed word `first`."""
        outputl = self.makeprefix(first)
        it = 0
        # Same precedence fix as in makeprefix.
        while (len(outputl) == 0 or outputl[-1] != '.') and it < maxit:
            # BUG FIX: the original passed only the last TWO words, so
            # nextfrom4 always delegated to the trigram model; give it
            # three words of context so the 4-gram table is actually used.
            nextws = nextfrom4(outputl[-3:])
            outputl += nextws
            it += 1
        # Join tokens and pull punctuation back against the preceding word.
        return re.sub(" ([,'.;])", "\\1", " ".join([w for w in outputl if w is not None]))


if __name__ == '__main__':
    import sys

    # Seed word comes from the command line; default to "Quitten".
    seed = sys.argv[1] if len(sys.argv) > 1 else "Quitten"
    generator = MyKochi()
    print(generator.sentence(seed))
