from __future__ import print_function
import codecs
import sys
import string
import collections

# my modules
import util
from program_settings import ProgramSettings

#sys.path = [r'D:\vann\dev\python'] + sys.path
#from py3eg import Console

SUCCESS, TAG_NOT_FOUND, START_TAG_FOUND = range(3)

# Read-only bundle of the markup tags used by the raw dictionary file.
# Implemented as a namedtuple instance, which gives us enum/constant-like,
# immutable attribute access.  The type name "_" is kept from the original.
_TAG_BASES = (
    "alpha_letter",      # <h1>A</h1> section header per alphabet letter
    "term",              # <h2>word</h2> dictionary headword
    "viet_usage",        # <b>vietnamese phrase</b>
    "synonym",           # (=...)
    "antonym",           # [opp. ...]
    "sino_vietnamese",   # [SV ...]
    "french",            # [Fr ...]
    "restricted",        # [R ...]
    "noun_classifier",   # [CL ...]
)
_TAG_FIELDS = " ".join(base + suffix for base in _TAG_BASES for suffix in ("_start", "_end"))
Tag = collections.namedtuple("_", _TAG_FIELDS)(
    "<h1>", "</h1>",    # alpha_letter
    "<h2>", "</h2>",    # term
    "<b>", "</b>",      # viet_usage
    "(=", ")",          # synonym
    "[opp.", "]",       # antonym
    "[SV", "]",         # sino_vietnamese
    "[Fr", "]",         # french
    "[R", "]",          # restricted
    "[CL", "]",         # noun_classifier
)


class ParseException(Exception):
    """
    Raised when the raw dictionary stream violates the expected format.
    The offending detail is available both as 'msg' (legacy attribute used
    by callers in this file) and via the standard str(exception).
    """
    def __init__(self, m):
        # Bug fix: the original never called Exception.__init__, so
        # str(ex) and ex.args were empty; keep 'msg' for existing callers.
        super(ParseException, self).__init__(m)
        self.msg = m

# -----------------------------------------------------------------------------
def get_element_value(start_tag, end_tag, stream):
    """
    Extract the text between start_tag and end_tag (e.g. <h1>, </h1>);
    both tags must be on the same line.

    Returns (rc, tag_value, remainder) where rc is one of:
      TAG_NOT_FOUND   - start_tag absent; value and remainder are empty
      START_TAG_FOUND - start_tag present, end_tag missing; value is
                        everything after start_tag, remainder is empty
      SUCCESS         - both tags present; value is the enclosed text and
                        remainder is everything after end_tag
    In all three cases the stream should be considered consumed.
    """
    open_at = stream.find(start_tag)
    if open_at == -1:
        return TAG_NOT_FOUND, "", ""

    body_at = open_at + len(start_tag)
    close_at = stream.find(end_tag, body_at)
    if close_at == -1:
        # Unterminated element: hand back the partial value.
        return START_TAG_FOUND, stream[body_at:], ""

    return SUCCESS, stream[body_at:close_at], stream[close_at + len(end_tag):]

# -----------------------------------------------------------------------------
def get_data_up_to_tag(tag, stream):
    """
    Split the stream at the first occurrence of tag.

    Returns (rc, data, remainder):
      SUCCESS       - tag found; data is everything before it and the
                      remainder starts at the tag itself
      TAG_NOT_FOUND - tag absent; data is the whole stream, remainder empty
    """
    at = stream.find(tag)
    if at == -1:
        return TAG_NOT_FOUND, stream, ""
    return SUCCESS, stream[:at], stream[at:]


# -----------------------------------------------------------------------------
# NOTE: Django environment bootstrap; must run before the model import
# below.  setup_environ was deprecated in Django 1.4 and removed later,
# so this only works on old Django versions.
from myprojects.vtdict import settings
from django.core.management import setup_environ
setup_environ(settings)

from myprojects.vtdict.maindict.models import MainWord, DefinitionPartOfSpeech, WordUsage
from django.db import models

# A part of speech is terminated by a period, e.g. "adj.", "n.", "v."
PART_OF_SPEECH_SUFFIX = "."
# Separates multiple word usages within one part-of-speech definition.
PART_OF_SPEECH_DEFINITION_PART_SEPERATOR = ";"

# -----------------------------------------------------------------------------
def process_word_and_definition(term_str, word_definition, options):
    """
    Parse one term and its raw definition string; print the parsed result
    when options.verbose is set and persist it to the Django models when
    options.save_model is set.

    Definition entry rules incorporated from RFC 2396

    word_definition = part_of_speech_definition | 2*(delimiter LWS part_of_speech_definition)
    part_of_speech_definition = [word_clarification] part_of_speech "." LWS pos_definition_part
    part_of_speech = 'adj' | 'adv' | 'conj' | 'intj' | 'n' | 'num' | 'prep' | 'pron' | 'v'
    pos_definition_part = word_usages [";" word_usages]
    word_usages = english_phrase | [english_phrase], 1*(vietnamese_phrase, english_phrase)

    english_phrase = 1*ALPHA
    vietnamese_phrase = '<b>' 1*ALPHA '</b>'
    word_clarification = 1*ALPHA

    LWS  =  [*WSP CRLF] 1*WSP ; linear whitespace
    delimiter = DIGIT 
    """

    # parse definition
    #print("------------------------------------- processing term: ", term_str, sep='')
    part_of_speech_definitions = split_part_of_speech_definitions(word_definition)

    parsed_pos_definitions = process_part_of_speech_definition(part_of_speech_definitions)

    # save to wherever
    # NOTE: 'term' is bound only under options.save_model; every later use
    # of it is guarded by the same flag, so there is no unbound reference.
    if options.save_model:
        try:
            # manual get-or-create: reuse the existing row for this word
            term = MainWord.objects.get(word=term_str)
            #print("process_word_and_definition> ", term_str, sep='')
        except models.ObjectDoesNotExist:
            term = MainWord(word=term_str)
            term.save()

    if options.verbose:
        # (term_str,) is printed as a tuple so its repr shows the raw
        # (unicode) form of the word -- TODO confirm this is intentional
        print ("[", (term_str,), "]", sep='')

    # reminder: parsed_pos_definitions is a list of DefinitionPartOfSpeechTuple
    for def_ct, definitions in enumerate(parsed_pos_definitions, start=1):
        if options.verbose:
            # number the definition only when the word has more than one
            print ("\t", ("" if len(parsed_pos_definitions) == 1 else def_ct), " ", definitions.word_clarification, " ", sep='', end='')
            for pos_ct, pos in enumerate(definitions.part_of_speech_list):
                if pos_ct == 0:
                    print (pos, ".", sep='', end='')
                else:
                    # alternate parts of speech are shown in braces
                    print ("{", pos, ".}", sep='', end='')
            print("")
        if options.save_model:
            # the model supports at most two parts of speech per definition
            definition_model= DefinitionPartOfSpeech(
                                    word_clarification = definitions.word_clarification,
                                    part_of_speech = definitions.part_of_speech_list[0],
                                    part_of_speech_alt = definitions.part_of_speech_list[1] if len(definitions.part_of_speech_list) > 1 else "",
                                    word_fk = term)
            definition_model.save()

        for usage in definitions.parsed_usages:
            if options.save_model:
                usage_model  = WordUsage(
                                    main_definition=usage.main_definition,
                                    synonym=usage.synonym,
                                    antonym=usage.antonym,
                                    sino_vietnamese=usage.sino_vietnamese,
                                    french=usage.french,
                                    restricted=usage.restricted,
                                    noun_classifier=usage.noun_classifier,
                                    vietnamese_phrase=usage.vietnamese_phrase,
                                    english_phrase=usage.english_phrase,
                                    part_of_speech_fk=definition_model)
                usage_model.save()

            if options.verbose:
                if usage.main_definition:
                    print ("\t\t", util.hexdump(usage.main_definition))

                if usage.vietnamese_phrase:
                    print ("\t\t", "[viet:", util.hexdump(usage.vietnamese_phrase),
                           "][eng:", util.hexdump(usage.english_phrase), "]", sep='', end='')
                elif usage.english_phrase:
                    print ("\t\t", util.hexdump(usage.english_phrase), end='')
                    # NOTE(review): an English phrase without a Vietnamese
                    # phrase looks unexpected; this deliberately aborts the
                    # word so the input can be inspected by hand.
                    raise ParseException("Check this out!")

                if usage.synonym:
                    print("[syn:", util.hexdump(usage.synonym), "]", sep='', end='')
                if usage.antonym:
                    print("[ant:", util.hexdump(usage.antonym), "]", sep='', end='')
                if usage.sino_vietnamese:
                    print("[SV:", util.hexdump(usage.sino_vietnamese), "]", sep='', end='')
                if usage.french:
                    print("[Fr:", util.hexdump(usage.french), "]", sep='', end='')
                if usage.restricted:
                    print("[R:", util.hexdump(usage.restricted), "]", sep='', end='')
                if usage.noun_classifier:
                    print("[CL:", util.hexdump(usage.noun_classifier), "]", sep='', end='')
                print("")

# -----------------------------------------------------------------------------
def rstrip_char(suffix, stream):
    """
    Strip every trailing character of stream that appears in suffix.

    Returns (count, stripped_stream) where count is the number of
    characters removed from the right-hand end.
    """
    count = 0
    length = len(stream)
    while count < length and stream[length - 1 - count] in suffix:
        count += 1
    return count, stream[:length - count] if count else stream

# -----------------------------------------------------------------------------
def lstrip_char(suffix, stream):
    """
    Strip every leading character of stream that appears in suffix.

    Returns (count, stripped_stream) where count is the number of
    characters removed from the front.
    """
    stripped = 0
    for ch in stream:
        if ch not in suffix:
            break
        stripped += 1
    # Bug fix: the original sliced stream[stripped-1:], which left one
    # stripped character behind whenever anything was stripped.
    return stripped, stream[stripped:]

# -----------------------------------------------------------------------------
def sanity_check(*stream):
    """
    Verify that none of the given streams still contain an indicator word
    (e.g. "SV ", "CL ") that the parser should already have consumed.

    Raises:
        ParseException on the first indicator found.
    """
    indicators = ("CL ", "SV ", "Fr ", "R ")
    for candidate in stream:
        for indicator in indicators:
            if indicator in candidate:
                raise ParseException("Possibly missed parsing of tag: '" + indicator + "' for stream: '" + candidate + "'")

# -----------------------------------------------------------------------------
def get_value_strict(start_tag, end_tag, src, remove_element=True, remove_tag=True, default_value=''):
    """
    Extract the value between the two passed-in tags.  Similar to
    'get_element_value', except that IF 'start_tag' exists, 'end_tag'
    MUST also exist in src.

    remove_element - drop the whole <tag>value</tag> element from src
                     (takes precedence over remove_tag).
    remove_tag     - keep the value in src but drop the surrounding tags.
    default_value  - substituted when the extracted value is blank.

    Returns (value, remaining_src).
    Raises ParseException when start_tag is present without end_tag.
    """
    value = ""
    remaining = src
    open_at = src.find(start_tag)
    if open_at > -1:
        close_at = src.find(end_tag, open_at + len(start_tag))
        if close_at == -1:
            raise ParseException("start-tag '" + start_tag + "' has no end-tag: '" + end_tag + "'")
        value = src[open_at + len(start_tag):close_at]
        tail = src[close_at + len(end_tag):]
        if remove_element:
            remaining = src[:open_at] + tail
        elif remove_tag:
            remaining = src[:open_at] + value + tail

    if default_value and not value.strip():
        value = default_value
    return value, remaining

# -----------------------------------------------------------------------------
def process_word_usages(word_usages):
    """
    Transform stream of 'word_usages', see BNF-like definition above, into parsed usages.
    Example of format:
        1) ao --> '<b>ao ta</b> pond with stagnant water'
        2) An Nam -> 'Vietnam: <b>dan An Nam</b> Vietnamese'
    Returns:
        list of WordUsageTuple namedtuples carrying: main_definition,
        synonym, antonym, sino_vietnamese, french, restricted,
        noun_classifier, vietnamese_phrase, english_phrase
    Raises:
        ParseException on unbalanced/duplicated tags or leftover indicator
        words that should have been parsed out.
    """

    MAIN_DEFINITION_CHAR_TO_STRIP = ",: "
    # "[" is excluded so a leading embedded tag (e.g. "[SV ...") is not
    # treated as a suspicious prefix character -- TODO confirm intent
    MAIN_DEF_PREFIX_CHAR_TO_STRIP = (string.digits + string.punctuation).replace("[", "")

    WordUsageTuple = collections.namedtuple("WordUsageTuple____",
            "main_definition "
            "synonym "
            "antonym "
            "sino_vietnamese "
            "french "
            "restricted "
            "noun_classifier "
            "vietnamese_phrase "
            "english_phrase")
    parsed_usages = []

    # Hoisted out of the loop (the original re-defined it for every usage).
    # Extracts one embedded element (synonym, antonym, ...) from 'stream'
    # into value_dictionary[value_key]; raises if the key was already set.
    def get_tag_value_raise_duplicate_or_assign(tag_start, tag_end, value_dictionary, value_key, stream, remove_element=True, default_value=''):
        value_in_tag, stream = get_value_strict(tag_start, tag_end, stream, remove_element, remove_tag=True, default_value=default_value)
        if value_dictionary[value_key] and value_in_tag:
            raise ParseException("Have not accounted for more than one '" + value_key + "' in [phrase:" + util.hexdump(stream) + "]")
        elif value_in_tag:
            value_dictionary[value_key] = value_in_tag
        return stream

    # Shared cleanup used for both the tag-less and the tagged paths:
    # trim surrounding noise and flag any suspicious leading characters.
    def clean_main_definition(main_definition):
        main_definition = main_definition.strip()
        _, main_definition = rstrip_char(MAIN_DEFINITION_CHAR_TO_STRIP, main_definition)
        stripped, _ = lstrip_char(MAIN_DEF_PREFIX_CHAR_TO_STRIP, main_definition)
        if stripped:
            raise ParseException("At least one chars in "+MAIN_DEF_PREFIX_CHAR_TO_STRIP+" were stripped from '" + main_definition + "' may want to check/fix.")
        return main_definition

    for usage in word_usages:   # reminder: usages are delimited by PART_OF_SPEECH_DEFINITION_PART_SEPERATOR (;)
        main_definition = ""

        embedded_words = {"synonym":"", "antonym":"", "sino_vietnamese":"", "french":"", "restricted":"", "noun_classifier":""}
        usage = usage.strip()

        # there may be definition before 'Tag.viet_usage_start' which forms the main part of the definition;
        # we need to strip it out. That part may be:
        # 1) empty string --> no main definition
        # 2) </b> --> error!!!
        # 3) 'valid' string --> main-def
        first_index = usage.find(Tag.viet_usage_start)

        if first_index == 0:
            pass
        else:
            if first_index > 0:
                main_definition = usage[:first_index].strip()
            else:
                main_definition = usage

            if main_definition.find(Tag.viet_usage_end) > -1:
                raise ParseException(Tag.viet_usage_end + "-tag mis-match - only end-tag exists: '" + util.hexdump(main_definition) + "'")

            embedded_words["synonym"], main_definition = get_value_strict(Tag.synonym_start, Tag.synonym_end, main_definition)
            embedded_words["antonym"], main_definition = get_value_strict(Tag.antonym_start, Tag.antonym_end, main_definition)
            embedded_words["sino_vietnamese"], main_definition = get_value_strict(Tag.sino_vietnamese_start, Tag.sino_vietnamese_end, main_definition)
            embedded_words["french"], main_definition = get_value_strict(Tag.french_start, Tag.french_end, main_definition)
            embedded_words["restricted"], main_definition = get_value_strict(Tag.restricted_start, Tag.restricted_end, main_definition, remove_element=False, remove_tag=True)
            embedded_words["noun_classifier"], main_definition = get_value_strict(Tag.noun_classifier_start, Tag.noun_classifier_end, main_definition, remove_element=False, remove_tag=True)

            if first_index > 0:
                usage = usage[first_index:] # keep the start-tag, for our loop
            else:
                # no Vietnamese usage at all: the whole usage is the main definition
                main_definition = clean_main_definition(main_definition)
                # Bug fix: the original passed *embedded_words, which unpacks
                # the dict KEYS, so the extracted values were never checked.
                sanity_check(main_definition, *embedded_words.values())
                parsed_usages.append(WordUsageTuple(
                                                    main_definition=main_definition,
                                                    synonym=embedded_words["synonym"],
                                                    antonym=embedded_words["antonym"],
                                                    sino_vietnamese=embedded_words["sino_vietnamese"],
                                                    french=embedded_words["french"],
                                                    restricted=embedded_words["restricted"],
                                                    noun_classifier=embedded_words["noun_classifier"],
                                                    english_phrase="",
                                                    vietnamese_phrase="") )
                continue

        usage_parts = usage.split(Tag.viet_usage_start)
        for part_on_start_tag in usage_parts:
            english_phrase = ""
            vietnamese_phrase = ""

            part_on_start_tag = part_on_start_tag.strip()
            if not part_on_start_tag:   # the first part will always be empty
                continue

            usage_with_all_tag_stripped = part_on_start_tag.split(Tag.viet_usage_end)
            parts_count = len(usage_with_all_tag_stripped)
            if parts_count == 2:
                vietnamese_phrase = usage_with_all_tag_stripped[0].strip()
                english_phrase = usage_with_all_tag_stripped[1].strip()

                # extract the embedded elements from the Vietnamese phrase...
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.synonym_start, Tag.synonym_end, embedded_words, "synonym", vietnamese_phrase)
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.antonym_start, Tag.antonym_end, embedded_words, "antonym", vietnamese_phrase)
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.sino_vietnamese_start, Tag.sino_vietnamese_end, embedded_words, "sino_vietnamese", vietnamese_phrase)
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.french_start, Tag.french_end, embedded_words, "french", vietnamese_phrase)
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.restricted_start, Tag.restricted_end, embedded_words, "restricted", vietnamese_phrase, remove_element=False)
                vietnamese_phrase = get_tag_value_raise_duplicate_or_assign(Tag.noun_classifier_start, Tag.noun_classifier_end, embedded_words, "noun_classifier", vietnamese_phrase, remove_element=False)
                # ... and the same for the English phrase
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.synonym_start, Tag.synonym_end, embedded_words, "synonym", english_phrase)
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.antonym_start, Tag.antonym_end, embedded_words, "antonym", english_phrase)
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.sino_vietnamese_start, Tag.sino_vietnamese_end, embedded_words, "sino_vietnamese", english_phrase)
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.french_start, Tag.french_end, embedded_words, "french", english_phrase)
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.restricted_start, Tag.restricted_end, embedded_words, "restricted", english_phrase, remove_element=False)
                english_phrase = get_tag_value_raise_duplicate_or_assign(Tag.noun_classifier_start, Tag.noun_classifier_end, embedded_words, "noun_classifier", english_phrase, remove_element=False)

            elif parts_count == 1:
                raise ParseException("No '" + Tag.viet_usage_end + "'-tag in usage: '" + util.hexdump(part_on_start_tag) + "'")
            else:
                raise ParseException("Multiple '" + Tag.viet_usage_end + "'-tag exists: '" + util.hexdump(part_on_start_tag) + "'")

            main_definition = clean_main_definition(main_definition)
            # Bug fix: *embedded_words.values(), not *embedded_words (keys).
            sanity_check(main_definition, english_phrase, vietnamese_phrase, *embedded_words.values())
            parsed_usages.append(WordUsageTuple(main_definition=main_definition,
                                                synonym=embedded_words["synonym"],
                                                antonym=embedded_words["antonym"],
                                                sino_vietnamese=embedded_words["sino_vietnamese"],
                                                french=embedded_words["french"],
                                                restricted=embedded_words["restricted"],
                                                noun_classifier=embedded_words["noun_classifier"],
                                                english_phrase=english_phrase,
                                                vietnamese_phrase=vietnamese_phrase) )
            main_definition = '' # only used once, the 1st usage
            for key in embedded_words:
                embedded_words[key] = ''
    return parsed_usages

# -----------------------------------------------------------------------------
def process_part_of_speech_definition(part_of_speech_definitions):
    """
    Parse each part-of-speech definition (e.g. 'v. grandfather is the
    father of a person's dad or mom') into a DefinitionPartOfSpeechTuple.

    Returns:
        a list of DefinitionPartOfSpeechTuple
    """
    DefinitionPartOfSpeechTuple = collections.namedtuple("DefinitionPartOfSpeechTuple", "word_clarification  part_of_speech_list  parsed_usages")

    results = []
    for pos_definition in part_of_speech_definitions:
        clarification, pos_list, definition_part = split_one_part_of_speech_definition(pos_definition)

        # usages are ';'-separated; turn them into a list of WordUsageTuple
        usages = definition_part.split(PART_OF_SPEECH_DEFINITION_PART_SEPERATOR)
        results.append(DefinitionPartOfSpeechTuple(
            word_clarification=clarification,
            part_of_speech_list=pos_list,
            parsed_usages=process_word_usages(usages)))

    return results

# -----------------------------------------------------------------------------
def split_one_part_of_speech_definition(one_pos_definition):
    """
    Given one part-of-speech definition: e.g. for 'ba', "1 adj. three", return the
    part-of-speech character(s), the definition part, and the seldom occurring
    word-clarification (word just before part-of-speech).

    Sample part-of-speech section could be:
    1) <POS> <definition>
    2) <POS> "," <POS> <definition>
    3) <clarifier> <POS> <definition>
    4) <clarifier> <POS> "," <POS> <definition>

    Returns:
        (word_clarification, part_of_speech_list, pos_definition_part)
    Raises:
        ParseException on an unknown part of speech, a missing
        part-of-speech suffix, or more than MAX_POS_SUPPORT entries.
    """
    pos_definition_part = ""
    word_clarification = ""
    part_of_speech = []
    valid_part_of_speeches = ['adj', 'adv', 'conj', 'intj', 'n', 'num', 'prep', 'pron', 'v']
    part_of_speech_separator = ", "
    MAX_POS_SUPPORT = 2

    # keep on finding part-of-speech until the first invalid.
    left_over = one_pos_definition.strip()

    while left_over:
        pos_index = left_over.find(PART_OF_SPEECH_SUFFIX)
        if pos_index > 0:
            current_POS_definition_section = left_over[pos_index+len(PART_OF_SPEECH_SUFFIX):].lstrip()
            part_of_speech_section = left_over[:pos_index].lstrip()

            if part_of_speech_section in valid_part_of_speeches:
                if len(part_of_speech) == MAX_POS_SUPPORT:
                    raise ParseException("Have NOT accounted for more than " + str(MAX_POS_SUPPORT) + " part of speech")
                pos_definition_part = current_POS_definition_section
                part_of_speech.append(part_of_speech_section)
            elif len(part_of_speech) == 0:
                # this suite better find a valid POS, otherwise MUST throw 'cause we haven't found anything yet!
                if part_of_speech_section:       # words in front of the part of speech, e.g. 'a! intj.'
                    word_clarification, _, pos = part_of_speech_section.partition(" ")
                    pos = pos.strip()
                    if pos not in valid_part_of_speeches:
                        raise ParseException("Unknown part of speech: '" + pos + "' in string '" + part_of_speech_section + "'")
                    pos_definition_part = current_POS_definition_section
                    part_of_speech.append(pos)
                else:
                    raise ParseException("Unknown part of speech. Stopped parsing '" + part_of_speech_section + "'")
            else:
                # no more valid part of speech, just get out!
                break

            # Prepare for the next loop: skip any leading separators.
            # Bug fix: the original manual scan left its index variable
            # unbound when the remainder was empty (NameError) and kept the
            # last character when the remainder was all separators.
            left_over = current_POS_definition_section.lstrip(part_of_speech_separator)
        elif len(part_of_speech) == 0:
            # Bug fix: the closing ']' was being passed into util.hexdump
            # instead of being appended to the message.
            msg = "Part of speech suffix ('" + PART_OF_SPEECH_SUFFIX + "') not found! : [str: " + util.hexdump(one_pos_definition) + "]"
            raise ParseException(msg)
        else:
            break

    return word_clarification, part_of_speech, pos_definition_part

# -----------------------------------------------------------------------------
def split_part_of_speech_definitions(word_definition):
    """
    Returns a list of sub-definitions, one per part of speech of the word.
    Multi-POS definitions are delimited by increasing numeric markers
    ("1 ", " 2 ", " 3 ", ...). See 'word_definition' in the schema above.

    E.g. for 'ba': '1 adj. three 2 n. dad father.' yields two
    sub-definitions. A definition without a leading "1 " marker is
    returned whole as a single-element list.
    """
    word_definition = word_definition.lstrip()
    part_of_speech_definition = []

    delimiter_start = "1 "
    start_delimeter_index = word_definition.find(delimiter_start)
    if start_delimeter_index > -1:
        # Check if there's anything before "1".  Bug fix: this used to be
        # computed AFTER slicing off the delimiter, so the index was applied
        # to the wrong (already shortened) string.
        text_before_delimeter = word_definition[:start_delimeter_index]
        if text_before_delimeter.strip():
            assert False, "Haven't handled text before definition delimeter 1, 2, 3, etc..."

        # 2*(DIGIT LWS part_of_speech_definition)
        word_definition = word_definition[start_delimeter_index+len(delimiter_start):]

        # word_definition now has the first delimiter removed; walk the
        # string breaking it up on the remaining delimiters 2, 3, etc.
        for delimiter in range(2, 100):
            word_definition = word_definition.strip()
            if not word_definition:
                break
            next_del = " " + str(delimiter) + " "
            next_del_index = word_definition.find(next_del)
            if next_del_index > -1:
                part_of_speech_definition.append( word_definition[:next_del_index].lstrip() )
                word_definition = word_definition[next_del_index+len(next_del):]
            else:
                # nothing left
                part_of_speech_definition.append(word_definition.lstrip())
                assert len(part_of_speech_definition) > 1, "Found 1, but not 2 for multiple definitions."
                break
    else:
        # a single (unnumbered) part_of_speech_definition
        part_of_speech_definition.append(word_definition)
    return part_of_speech_definition



# -----------------------------------------------------------------------------
def main():
    """
    Entry point: parse the raw dictionary file named on the command line
    and feed each word/definition pair to process_word_and_definition.

    The raw dictionary file has the following format:
    <h1>letter</h1> section headers with <h2>word</h2> entries whose
    definitions may continue across multiple lines.
    """

    options = ProgramSettings()
    try:
        options.parse(sys.argv)
        if options.help or not options.file:
            raise ProgramSettings.UsageError
        print ("\nProgram parameters: ", options)

        definition_entry = ""
        entry_in_progress = False
        current_word = ""
        current_word_starts_at_line = 0     # for error reporting

        my_dict = {}
        # 'with' guarantees the handle is closed (the original leaked it);
        # the unused 'printable_chars' (string.letters) was also removed.
        with codecs.open(options.file, encoding="utf-8") as dictionary_file:
            for line_num, line in enumerate(dictionary_file, start=1):
                try:
                    # strip off <h1>value</h1> and get <h2>word</h2> to start us off;
                    # 'line_processing' comes after the closing tag.
                    _, line_processing = get_value_strict(Tag.alpha_letter_start, Tag.alpha_letter_end, line)
                    line_processing = line_processing.strip()
                    while line_processing:
                        rc = None
                        if not entry_in_progress:
                            rc, current_word, line_processing = get_element_value(Tag.term_start, Tag.term_end, line_processing)
                            current_word_starts_at_line = line_num

                        if entry_in_progress or rc == SUCCESS:
                            # Gather the rest of the data belonging to this word,
                            # up to the start of the next word start-tag.
                            line_processing = line_processing.replace("\r", "")
                            line_processing = line_processing.replace("\n", " ")
                            up_to_tag_rc, in_progress_str, line_processing = get_data_up_to_tag(Tag.term_start, line_processing)
                            if up_to_tag_rc == SUCCESS:
                                # Found the next start-tag: current word's entry is complete.
                                entry_in_progress = False
                                definition_entry = "".join([definition_entry, in_progress_str])
                                my_dict[current_word] = definition_entry
                                process_word_and_definition(current_word, definition_entry, options)
                                current_word = ""
                                current_word_starts_at_line = -1
                                definition_entry = ""
                            elif up_to_tag_rc == TAG_NOT_FOUND:
                                # Definition continues on the next input line.
                                entry_in_progress = True
                                definition_entry = "".join([definition_entry, in_progress_str])
                                assert not line_processing # shouldn't be anything left, read new line.
                            else:
                                assert False, "get_data_up_to_tag - returned unexpected result"
                        elif rc != SUCCESS:
                            if options.verbose:
                                print("[line:", line_num, "] SKIPPED [data:", util.hexdump(line), "]", sep='')
                        else:
                            # defensive: unreachable, the first branch handles rc == SUCCESS
                            assert False, "What to do here?!"

                except ParseException as ex:
                    print ("\nException caught:\n\n[msg:", util.hexdump(ex.msg), "][current-word:", util.hexdump(current_word), "][definition:",
                           util.hexdump(definition_entry), "][file:", options.file, "][line:", current_word_starts_at_line, "]", sep='')
                    break
            else:
                # Loop finished without break: flush the final in-progress entry.
                try:
                    if entry_in_progress:
                        entry_in_progress = False
                        assert current_word
                        my_dict[current_word] = definition_entry
                        process_word_and_definition(current_word, definition_entry, options)
                        current_word = ""
                        definition_entry = ""
                except ParseException as ex:
                    print ("\nException caught:\n\n[msg:", util.hexdump(ex.msg), "][current-word:", util.hexdump(current_word), "][definition:",
                           util.hexdump(definition_entry), "][file:", options.file, "][line:", current_word_starts_at_line, "]", sep='')

    except ProgramSettings.UsageError:
        options.usage(sys.argv)

main()
