#!/usr/bin/env python

"""Text-processing utility library with helper functions for NLP tasks."""

def unique_tokens(tokens):
    """
    Create a list with unique tokens from a list with tokens.
    @param tokens (list): A list with tokens
    @return (list): A list with unique tokens, keeping first-seen order
    """
    # Keep a token only at the position of its first occurrence;
    # uses equality (not hashing), so order and duplicates behave
    # exactly like a membership-checked append loop.
    return [
        token for position, token in enumerate(tokens)
        if tokens.index(token) == position
    ]

def read_example(folder, name, extension='txt'):
    """
    Return the contents of an example file stored under the 'data' directory.
    @param folder (string): The folder of the example file
    @param name (string): The name of the example file (without extension)
    @param extension (string: 'txt'): The extension of the example file
    @return (string): The contents of the example file
    @raise OSError: If the file does not exist or cannot be read
    """
    from os.path import join
    filename = join('data', folder, name + '.' + extension)
    # A context manager guarantees the handle is closed even if read()
    # raises; the original leaked the file handle on a read error.
    with open(filename, 'r') as filedata:
        return filedata.read()

def circumstances(word, text, n=3):
    """
    Retrieve a list with all circumstances of a given word in a text.
    @param word (string): The word to find circumstances for
    @param text (string): The text
    @param n (int: 3): The n from n-gram (2 for bigrams, 3 for trigrams, ...)
    @return (list): All circumstances of the given word, grouped per sentence
    """
    target = word.lower()
    results = []
    for sentence in sentences(text):
        grams = ngrams(n, tokenize(sentence))
        # Collect every n-gram containing the target word; an n-gram is
        # added once per matching token, mirroring the original behavior.
        matched = []
        for gram in grams:
            for token in gram:
                if token.lower() == target:
                    matched.append(gram)
        if matched:
            results.append(matched)
    return results

def ngrams(n, tokens):
    """
    Retrieve one list with a n-gram (if n=2 then all bigrams, if n=3 then all trigrams, ...).
    @param n (int): The kind of n-gram (must be >= 1)
    @param tokens (list): A list with tokens that will be in the n-gram
    @return n-gram (list): The n-gram; empty if n < 1 or there are fewer than n tokens
    """
    # Guard against nonsensical sizes instead of the original's bare
    # except-driven loop (which produced empty sublists for n == 0).
    if n < 1:
        return []
    # Each window of n consecutive tokens is one n-gram; slicing makes
    # the "not enough tokens left" case fall out of the range bound
    # instead of being caught via IndexError.
    return [tokens[i:i + n] for i in range(len(tokens) - n + 1)]

def sequences(tokens):
    """
    Create a list with all n-grams (bigrams and trigrams and ...).
    @param tokens (list): A list with tokens that will be in the n-grams
    @return sequences (list): The n-grams; index n holds the n-grams
    """
    # Index 0 is a placeholder so that result[n] contains the n-grams.
    result = [[]]
    size = 1
    # Grow n until no n-gram of that size fits in the token list.
    while True:
        grams = ngrams(size, tokens)
        if not grams:
            break
        result.append(grams)
        size += 1
    return result

def tokenize(text):
    """
    Create a list of tokens from a string.
    @param text (string): The string to tokenize
    @return tokens (list): The tokens; punctuation marks are separate tokens
    """
    punctuation = ['.', ',', '!', '?', ':', ';', '(', ')', '[', ']']
    # Pad each punctuation mark with spaces so it becomes its own token.
    for character in punctuation:
        text = text.replace(character, ' ' + character + ' ')
    # split() with no argument splits on any whitespace run (spaces,
    # tabs, newlines) and discards empty pieces — the original split
    # only on ' ', which left newlines embedded inside tokens.
    return text.split()

def sentences(text, index=0):
    """
    Get all sentences from a given text.
    @param text (string or list): Text to get sentences from
    @param index (int: 0): An iteration parameter (which terminator to split on)
    @return (list): A list with all sentences, stripped of surrounding whitespace
    """
    end_of_sentence = ['.', '?', '!']
    # Explicit bounds check instead of the original bare except around
    # the index lookup; all terminators processed means we are done.
    if index >= len(end_of_sentence):
        return text
    character = end_of_sentence[index]
    if isinstance(text, str):
        # First pass: split the raw string; stripping and filtering of
        # the pieces happens on the subsequent (list) passes.
        newdata = text.split(character)
    else:
        newdata = []
        for sentence in text:
            for subsentence in sentence.split(character):
                textdata = subsentence.strip()
                if textdata:
                    newdata.append(textdata)
    # Recurse to split on the next sentence terminator.
    return sentences(newdata, index + 1)