#!/usr/bin/env python


import config
import dicts
import roman


# Exceptional words that are allowed to begin or end with punctuation:
EXCEPTION_WORDS = ("th'",)


def _sanitize_word(word):
    """Strip a word of punctuation, whitespace, etc."""

    # Lowercase the word.
    word = word.lower()

    if word == '':
        # Oops, the word is already empty.  Nothing to do.
        return word

    if word in EXCEPTION_WORDS:
        # will.i.am decided to save himself some typing by making a contraction
        # out of the word "the".  Also, he tries very hard to make our lives
        # hell.  Make an exception for just this word "th'".
        return word

    if not word[0].isalpha():
        # The naughty word is trying to begin with a non-alpha character.
        # Strip off the first character, and return the sanitized rest of the
        # word.
        return _sanitize_word(word[1:])

    if not word[-1].isalpha():
        # The naughty word is trying to end with a non-alpha character.  Strip
        # off the last character, and return the sanitized rest of the word.
        return _sanitize_word(word[:-1])

    # All tests passed.  The word must be sane.  Return it.
    return word


def _read_all_words(path_to_sonnets=config.PATH_TO_SONNETS):
    """Given a path to the sonnets, return a set of all of the unique words.

    Each whitespace-separated token of the file is lowercased and stripped
    of surrounding punctuation via _sanitize_word().  Tokens that sanitize
    down to the empty string (e.g. a lone hyphen) are discarded.
    """

    # Read the whole file as a single string, then split it into tokens at
    # any whitespace.
    with open(path_to_sonnets) as f:
        raw_words = f.read().split()

    # Sanitize every token and collect the results into a set in a single
    # pass: the set comprehension deduplicates for free, replacing the old
    # three-pass index-loop / filter / set() dance.
    words = {_sanitize_word(word) for word in raw_words}

    # A token made entirely of punctuation sanitizes to the empty string;
    # we're not interested in that "word".
    words.discard('')

    return words


def _compute_unknown_words(pron_dict, all_words):
    """Given the pron_dict as The Canonical Dict (TM), and all the words
    of the sonnets, return the words that are in the sonnets but not The
    Canonical Dict (TM). """

    # First, get all of the words in the Unilex dictionary.
    known_words = pron_dict.keys()

    # Next, make the Unilex word list a word set, so we can get all Set
    # Theoretic.
    known_words = set(known_words)

    # Finally, compute the difference between the sonnet word set and the
    # Unilex word set.
    unknown_words = all_words - known_words
    return unknown_words


def _remove_roman_numerals(unknown_words):
    """Given a set of words, remove all of the Roman numerals from the set.

    The sonnets number their headings with Roman numerals ("i", "ii", ...),
    which roman.is_roman() detects.  Returns a set, as the docstring
    promises -- the previous list comprehension silently leaked a list out
    of an otherwise set-in/set-out pipeline.
    """
    return {word for word in unknown_words if not roman.is_roman(word)}


def get_interesting_words():
    """Return the sonnet words that the pronunciation dictionary doesn't
    know, with Roman numerals (the sonnet headings) filtered out."""
    pron_dict = dicts.PronDict()
    sonnet_words = _read_all_words()
    return _remove_roman_numerals(
        _compute_unknown_words(pron_dict, sonnet_words))
