'''
A program that uses the MarkovManger to generate random text based on input
from text files.

Created on Mar 23, 2011

@author: matt
'''

from heavenlyfodder.util.text import TextAtom
from heavenlyfodder.markov.builder import MarkovManager
  
import re

def sanitize_text(raw_text):
    """
    Clean up raw input text for downstream tokenization.

    Strips leading/trailing whitespace, replaces some special characters
    with word equivalents (' & ' -> ' AND ', '@' -> 'AT', 'St.' -> 'Saint'),
    drops punctuation we don't want ('?', ',', '!', '\\x85'), turns hyphens
    into spaces, collapses blank lines, and pads remaining newlines with
    spaces so they survive word-splitting as standalone tokens.

    :param raw_text: the text to clean (str)
    :return: the sanitized text (str)

    NOTE: this is not optimized for runtime performance; it makes one pass
    through the text per substitution.
    """

    clean_text = raw_text.strip()

    # (pattern, replacement) pairs, applied in order.  Order matters: blank
    # lines are collapsed before the surviving newlines are padded with
    # spaces.
    substitutions = [
        (r' & ', ' AND '),
        (r'@', 'AT'),
        (r'-', ' '),
        (r'\?', ''),
        (r'\x85', ''),          # stray "next line" control character
        (r'St\.', 'Saint'),
        (r',', ''),
        (r'!', ''),
        (r'\n *\n', '\n'),      # collapse blank lines
        # Temporary way of handling newlines: isolate them as tokens
        (r'\n', ' \n '),
    ]

    for pattern, replacement in substitutions:
        clean_text = re.sub(pattern, replacement, clean_text)

    return clean_text

def load_one_atom_per_line(filename, input_text_atoms):
    """
    Creates TextAtoms from the contents of the specified file, with one
    TextAtom per line; new TextAtoms are appended to the list specified as
    input_text_atoms.

    :param filename: path of the text file to read
    :param input_text_atoms: list that new TextAtom objects are appended to
        (mutated in place)
    """

    # 'with' guarantees the file is closed even if sanitize_text or the
    # TextAtom constructor raises.
    with open(filename) as input_file:
        # For the source_spec, we'll use the filename followed by a ':',
        # then the 1-based line number.
        for line_num, line in enumerate(input_file, start=1):
            source_spec = filename + ':' + str(line_num)

            # Do any necessary replacement on the text we read:
            line = sanitize_text(line)

            # Create the TextAtom for this line and add it to the list
            input_text_atoms.append(TextAtom(line, source_spec))

def load_one_atom_per_file(filename, input_text_atoms):
    """
    Load the entire contents of the specified file into a single TextAtom;
    the new TextAtom is appended to the input_text_atoms list.

    :param filename: path of the text file to read; also used as the
        TextAtom's source_spec
    :param input_text_atoms: list that the new TextAtom is appended to
        (mutated in place)
    """

    # 'with' guarantees the file is closed even if sanitize_text or the
    # TextAtom constructor raises.
    with open(filename) as input_file:
        # Read the entire contents of the file
        text = input_file.read()

    # Sanitize the input text (replace 'St.' with 'Saint', etc.)
    text = sanitize_text(text)

    # Create a TextAtom to represent the file contents and add it to the
    # end of the list
    input_text_atoms.append(TextAtom(text, filename))
    
    
if __name__ == '__main__':
        
    import argparse
    parser = argparse.ArgumentParser(description='Create some goofy text')
    parser.add_argument('--depth', '-d', type=int, dest='depth',
                        help='''The number of words or letters that must overlap
                                in order for two sequences to be joined''')
    parser.add_argument('--input-files', '-f', dest='input_filenames', metavar='FILE',
                        nargs='+', help="""The text files to be used as the basis
                                           for the generated text""")
    parser.add_argument('--num-groups', '-n', type=int, dest='num_groups',
                        default='10000',
                        help="""The number of word/letter groups that the program
                                will generate before exiting""")

    parser.add_argument('--split-mode', '-s', dest='split_mode',
                        help="""Whether to generate one long run, or multiple
                                independent runs.""",
                        choices=['long', 'short'],
                        default='short')
    
    parser.add_argument('--output-format' '-o', dest='output_format',
                        help="""The formatting to be applied to the output""",
                        choices=['title', 'prose'],
                        default='prose')
    
    parser.add_argument('--textual-units', '-u', dest='textual_mode',
                        help="""Whether to use whole words or letters as the"""
                             """basic textual unit""",
                        choices=['words', 'letters'],
                        default='words')
    
    args = parser.parse_args()
    
    # TODO: instead of having if/then/else clauses strewn throughout the code,
    # pass the user-specified string to a factory that returns a Formatter
    # object (or something like that) 
        
    input_text_atoms = []
    
    #
    # Load the contents of each file
    #
    
    for filename in args.input_filenames:
        if args.split_mode.lower() == 'short':
            load_one_atom_per_line(filename, input_text_atoms)
        else:
            load_one_atom_per_file(filename, input_text_atoms)
    
    # determine whether to use words or letters
    if args.textual_mode.lower() == 'letters':
        split_fcn = TextAtom.split_by_length
        delimiter = ''
    else:
        split_fcn = TextAtom.split_on_words
        delimiter = ' '
    
    mark_man = MarkovManager()        
    mark_man.load_text(input_text_atoms, args.depth, delimiter, split_fcn)
            
#    random_text_group = mark_man.generate_text(delimiter, args.num_groups)
    text_group_list = mark_man.generate_text(delimiter, args.num_groups)

    # TODO: Iterate through the list of TextGroup objects that will be returned
#    print random_text_group
    for text_group in text_group_list:
        print text_group, '\n'
