#!/usr/bin/python 
#===========================================================================#
# csv2conf -- Copyright 2011 by Martin Ponweiser
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------#

# Python target versions: 2.5-2.7 (3?)

import csv
import string # excel_x()
import re
import os.path # pathnames 
import os # makedirs()
from optparse import OptionParser # deprecated in favor of argparse, however I want to support old Python versions.
import logging
import glob # find files in auto/

## Markup keywords: a CSV row whose first cell contains
## MARKUP_PREFIX + <keyword> switches the parser into that markup mode.
MARKUP_PREFIX = "csv2conf_"
MARKUP_FILE_NAME = "file_name"
MARKUP_FILE_FOLDER = "file_folder"
MARKUP_OUTPUT_MODE = "output_mode"
MARKUP_OUTPUT_BACKUP = "output_backup"
MARKUP_FILE_FORMAT = "file_format"
MARKUP_FORMAT_DEFAULT = "format_default"
MARKUP_COLUMN = "column"

## Validation whitelists / patterns for the values allowed in markup rows.
COLUMNS_VALID = ['identifier','value','format','comment','order']
MODES_VALID = ['rewrite','folder_parents_create','merge','append']
FILE_FORMATS_VALID = ['linebreaks_win','linebreaks_unix']
## key=value (or key:value) format options, e.g. "sep=:"
FORMAT_OPTIONS_VALID = re.compile(r"\W*(sep|comment_prefix|comment_mode|value_lookup_file)([=:])(.+)",flags=re.IGNORECASE)
## bare flags without a value
FORMAT_FLAGS_VALID = re.compile(r"\W*(output_id_only|format_default)",flags=re.IGNORECASE)
COMMENT_MODES_VALID = ['insert_before_line','append_to_value']
BACKUPS_VALID = ['yes']

## subfolder scanned for input CSVs when --mode-auto is active
AUTO_FOLDERNAME = 'auto/'

def dict_invert(ddict):
    """Invert a mapping: {key: value} -> {value: [key, ...]}.

    Values need not be unique; all keys sharing a value are collected
    in a list.  Uses .items() instead of the Python-2-only .iteritems()
    so the file's Python 3 goal (see header) is not blocked; behavior
    on Python 2 is identical.
    # http://code.activestate.com/recipes/252143-invert-a-dictionary-one-liner/
    """
    inverted = {}
    for kkey, vvalue in ddict.items():
        inverted.setdefault(vvalue, []).append(kkey)
    return inverted

def dict_invert_list(ddict):
    """Invert a mapping of lists: {key: [item, ...]} -> {item: [key, ...]}.

    Each item of each value list becomes a key of the result, mapped to
    the list of original keys it appeared under.  Uses .items() instead
    of the Python-2-only .iteritems() (same behavior on Python 2, also
    works on Python 3).
    """
    inverted = {}
    for kkey, llist in ddict.items():
        for iitem in llist:
            inverted.setdefault(iitem, []).append(kkey)
    return inverted

def excel_x(iindex):
    """Return the Excel-style column label for 1-based column *iindex*.

    1 -> 'A', 26 -> 'Z', 27 -> 'AA', 52 -> 'AZ', 702 -> 'ZZ', ...

    BUGFIX: the previous transcription of the Cookbook recipe (2nd ed.,
    Recipe 18.2) lacked the -1 adjustment of bijective base-26 and
    returned e.g. 'BZ' for 52 and 'AAZ' for 702.
    """
    if iindex < 1:
        raise ValueError("column index must be >= 1, got %r" % iindex)
    result = []
    while iindex > 0:
        # bijective base 26: shift to 0-based before each divmod step
        iindex, rrem = divmod(iindex - 1, 26)
        result.append(string.ascii_uppercase[rrem])
    result.reverse()
    return ''.join(result)

def format_parse(format_string):
    """Parse a comma-separated format cell into an options dict.

    Recognized tokens:
      * key=value (or key:value) options matching FORMAT_OPTIONS_VALID
        (sep, comment_prefix, comment_mode, value_lookup_file); a later
        occurrence of the same key overwrites an earlier one,
      * bare flags matching FORMAT_FLAGS_VALID (stored with value ""),
      * otherwise: the FIRST unrecognized token becomes the separator
        ('sep'); further unrecognized tokens are ignored.
    """
    parsed = {}
    tokens = list(csv.reader([format_string]))[0]
    for token in tokens:
        option = FORMAT_OPTIONS_VALID.match(token)
        if option:
            parsed[option.group(1).lower()] = option.group(3)
            continue
        flag = FORMAT_FLAGS_VALID.match(token)
        if flag:
            parsed[flag.group(1).lower()] = ""
        elif 'sep' not in parsed:
            # take the token itself as the separator
            parsed['sep'] = token
    return parsed

def int_or_str(sstring):
    """Return *sstring* converted to int when possible, else unchanged."""
    try:
        converted = int(sstring)
    except ValueError:
        return sstring
    return converted

def output_buffer_sort(buffer_dict, column_index, logger):
    """Sort each file's buffered rows in place by their 'order' cell.

    Rows whose order cell can be read as an int sort numerically, other
    non-empty cells sort as strings, and empty cells keep their relative
    position at the end (the sort is stable).  Files whose buffer is
    empty or that have no 'order' column are left untouched.
    """
    def order_key(buffer_row):
        cell = buffer_row['order'][0]
        if cell == "":
            # push blanks to the back; stable sort preserves their order
            return 'zzzzzzzzzzz'
        # prefer numeric ordering when the cell parses as an int
        try:
            return int(cell)
        except ValueError:
            return cell

    for ffilename in buffer_dict:
        ## skip empty buffers -- a buffer is created even for filenames
        ## that are later ignored
        if buffer_dict[ffilename] and 'order' in column_index[ffilename]:
            logger.info("%s: Sorting." % ffilename)
            buffer_dict[ffilename].sort(key=order_key)

def output_buffer_format(buffer_dict, format_default, logger):
    """Render buffered cell rows into writable line parts.

    buffer_dict: {'filename': [{'column': [cell, col_no, row_no], ...}, ...]}
        (the triples are built in reader_process: value, 1-based column,
        1-based CSV row).
    format_default: {'filename': {option: value, ...}} per-file defaults.
    Returns {'filename': [[identifier, separator, value], ...]}.
    Exits the program on an unknown comment_mode.
    """
    buffer_processed = {}
    for ffilename in buffer_dict:
        if buffer_dict[ffilename] == []:
            ## BUGFIX: was 'break', which silently skipped every remaining
            ## filename as soon as one empty buffer was encountered; empty
            ## buffers are normal (created even for later-ignored files).
            continue
        logger.info("%s: Format output buffer." % ffilename)
        buffer_processed[ffilename] = []
        for buffer_row in buffer_dict[ffilename]:
            ## every cell triple carries the CSV row number at index 2;
            ## grab it from an arbitrary entry for the log messages
            for iitem in buffer_row:
                row_number = buffer_row[iitem][2]
                break
            ## here comes the program's core function, extend as required
            ## TODO: if just comment (and order), put out.
            if 'identifier' not in buffer_row or buffer_row['identifier'][0] == '':
                logger.warn("%s Row %4i: Identifier cell missing. Skipping filename in this row." % (ffilename,
                    row_number))
                continue
            if 'format' not in buffer_row or buffer_row['format'][0] == '':
                logger.warn("%s Row %4i: Format missing. Using default format dict (might be empty, too)." % (ffilename,
                    row_number))
                ## BUGFIX: .copy() -- aliasing format_default here let the
                ## fallback assignments below (comment_prefix/comment_mode/sep)
                ## leak into the shared per-file defaults
                format_local_dict = format_default[ffilename].copy()
            else:
                format_local_dict = format_parse(buffer_row['format'][0])
            ## if defaults as base, update the global dictionary in a local copy
            # 'format_default' only applies for formats that do already have non-format_default content
            if 'format_default' in format_default[ffilename] or 'format_default' in format_local_dict:
                temp_dict = format_default[ffilename].copy()
                temp_dict.update(format_local_dict)
                format_local_dict = temp_dict

            if 'comment' in buffer_row:
                local_comment = buffer_row['comment'][0]
                if 'comment_prefix' not in format_local_dict:
                    logger.warn("%s Row %4i: Comment prefix missing. Using default '# '." % (ffilename,
                        row_number))
                    format_local_dict['comment_prefix'] = '# '
                if 'comment_mode' not in format_local_dict:
                    logger.warn("%s Row %4i: Comment mode missing. Using default 'insert_before_line'." % (ffilename,
                        row_number))
                    format_local_dict['comment_mode'] = 'insert_before_line'
                if format_local_dict['comment_mode'] not in COMMENT_MODES_VALID:
                    logger.error("%s Row %4i: Comment mode unknown: %s. Exiting." % (ffilename,
                        row_number, format_local_dict['comment_mode']))
                    exit(1)

            if 'sep' not in format_local_dict:
                logger.warn("%s Row %4i: Separator not specified. assuming default separator=' '." % (ffilename,
                    row_number))
                format_local_dict['sep'] = ' '

            # checks have passed, now assign cell values
            local_identifier = buffer_row['identifier'][0]
            local_separator = format_local_dict['sep']
            if 'value' not in buffer_row:
                local_value = ''
            else:
                local_value = buffer_row['value'][0]

            if 'output_id_only' in format_local_dict:
                ## override anything before
                local_separator = ''
                local_value = ''
            if 'comment' in buffer_row:
                if format_local_dict['comment_mode'] == 'insert_before_line':
                    buffer_processed[ffilename].append([format_local_dict['comment_prefix'] + local_comment, "", ""])
                    buffer_processed[ffilename].append([local_identifier, local_separator, local_value])
                if format_local_dict['comment_mode'] == 'append_to_value':
                    local_value = local_value + " " + format_local_dict['comment_prefix'] + local_comment
                    buffer_processed[ffilename].append([local_identifier, local_separator, local_value])
            else:
                buffer_processed[ffilename].append([local_identifier, local_separator, local_value])
    return(buffer_processed)


def output_buffer_write(bbuffer, outputmode_dict, folder_dict, backupmode_dict, format_dict, options, logger):
    """Write the formatted buffers to their target files.

    bbuffer: {'filename': [[identifier, separator, value], ...]} from
    output_buffer_format().  The other dicts hold per-filename settings
    gathered from the markup rows; missing entries fall back to defaults
    ('rewrite' mode, unix linebreaks).  Exits on unrecoverable folder
    creation errors.
    """
    for ffilename in bbuffer:
        if ffilename in outputmode_dict:
            logger.debug("%s: outputmode_dict setting: %s" % (ffilename, outputmode_dict[ffilename]))
        else:
            logger.warn("%s: output_mode not specified. Assuming default: 'rewrite'." % ffilename)
            outputmode_dict[ffilename] = ['rewrite']

        filepath = ffilename
        if ffilename in folder_dict:
            if folder_dict[ffilename] != os.path.dirname(ffilename):
                logger.warn("%s %s setting: Overriding to: %s" % (ffilename, MARKUP_FILE_FOLDER, folder_dict[ffilename]))
                filepath = os.path.join(folder_dict[ffilename], os.path.basename(ffilename))

        if options.mode_auto:
            filepath = os.path.join(AUTO_FOLDERNAME, filepath)
            logger.debug("%s: Auto mode: Setting filepath to: %s" % (ffilename, filepath))

        folder = os.path.dirname(filepath)
        ## Folder creation is always attempted.  (BUGFIX: the old code tried
        ## to force this by writing a bogus 'folder_parents_create' key into
        ## outputmode_dict -- keyed by filename -- which never worked and
        ## only polluted the dict.)
        if folder != "" and not os.path.isdir(folder):
            logger.warn("%s: %s Trying to create folder: %s." % (ffilename, MARKUP_FILE_FOLDER, folder))
            try:
                os.makedirs(folder)
            except OSError:  # was a bare except: narrow to filesystem errors
                logger.error("%s: %s Error creating folder: %s. Exiting." % (ffilename, MARKUP_FILE_FOLDER,
                    folder))
                exit(1)

        if ffilename in backupmode_dict:
            if backupmode_dict[ffilename] == 'yes':
                logger.warn("%s: Backup mode setting: %s. Not yet implemented." % (ffilename,
                    backupmode_dict[ffilename]))

        if 'rewrite' not in outputmode_dict[ffilename]:
            logger.warn("%s: Output mode setting: %s. Not yet implemented. Setting default: 'rewrite'" % (ffilename,
                outputmode_dict[ffilename]))
            ## TODO careful, this is a list that could later contain other items
            outputmode_dict[ffilename] = ['rewrite']

        if 'rewrite' in outputmode_dict[ffilename]:
            logger.info("%s: Buffer will be written out." % filepath)
            ## BUGFIX: format_dict has no entry when the CSV had no file_format
            ## markup row -- direct indexing raised KeyError
            file_formats = format_dict.get(ffilename, [])
            ## Python universal linebreaks do not work for writing, so pick
            ## the linebreak explicitly; default is unix ('\n')
            linebreak = '\n'
            if 'linebreaks_win' in file_formats:
                linebreak = '\r\n'
            if 'linebreaks_mac' in file_formats:
                linebreak = '\r'
            output_file = open(filepath, 'wb')
            try:
                for sublist in bbuffer[ffilename]:
                    output_file.write(''.join(sublist) + linebreak)
                    logger.debug(''.join(sublist))
            finally:
                ## BUGFIX: the file handle was never closed
                output_file.close()
            logger.info("%s: Buffer written out." % filepath)
    

def reader_process(reader, options, logger):
    """Consume a csv.reader and drive the whole conversion.

    Walks the rows once.  A markup row (first cell contains a
    MARKUP_PREFIX keyword) reconfigures the per-filename state; any
    other row is buffered as data under the currently assigned columns.
    Whenever a new file_name markup row (or the end of input) is
    reached, the data buffered so far is sorted, formatted and written
    out.  Exits on structurally broken input.
    """
    filename_index = {} ## 'filename':[column index_a, column index_b,...]
    ## 'filename':{'columnname':column_index,...}, all following are similar dicts
    column_index = {}
    output_buffer = {}
    output_mode = {}
    file_folder = {}
    backup_mode = {}
    file_format = {}
    format_default = {}

    excel_y = 0
    for row_current in reader:
        ## can't use reader.line_num because it also counts linebreaks within cells
        excel_y += 1
        if excel_y == 1:
            first_row_cells = len(row_current)
            if first_row_cells <= 2:
                logger.error("Row %4i: I need at least one markup column and one identifier column. Check file contents!" % excel_y)
                exit(1)

        if MARKUP_PREFIX + MARKUP_FILE_NAME in row_current[0]:
            filename_index = {}
            if column_index != {}:
                ## cleanup from previous filename markup: flush the buffers
                output_buffer_sort(output_buffer, column_index, logger)
                buf = output_buffer_format(output_buffer, format_default, logger)
                output_buffer_write(buf, output_mode, file_folder, backup_mode, file_format, options, logger)
                column_index = {}

            ## read in new markup
            row_current_dict = dict(enumerate(row_current[1:], start=1))
            # filenames are still single strings, therefore tokenize them to lists
            for iindex in row_current_dict:
                ## list(...)[0] somewhat hacky wrapper for csv.reader
                row_current_dict[iindex] = list(csv.reader([row_current_dict[iindex]], skipinitialspace=True))[0]
                row_current_dict[iindex] = [iitem.strip() for iitem in row_current_dict[iindex]]
            filename_index = dict_invert_list(row_current_dict)

            ## blank filename keys will not be needed, kick them out
            if '' in filename_index:
                del filename_index['']

            for ffilename in filename_index:
                if os.path.basename(ffilename) == '':
                    logger.error("%s, Row %4i: Filename invalid: %s. Exiting." % (ffilename, excel_y, ffilename))
                    exit(1)
                file_folder[ffilename] = os.path.dirname(ffilename)
                ## prepare output buffer dict
                output_buffer[ffilename] = []
                format_default[ffilename] = {}
            ## also possible: filename_index still remains {}, for example to close the output section

        elif MARKUP_PREFIX + MARKUP_COLUMN in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." % (excel_y,  MARKUP_COLUMN))
                continue
            else:
                ## BUGFIX: was 'column_index == {}' (a no-op comparison);
                ## a fresh column markup row is meant to reset assignments
                column_index = {}
                for ffilename in filename_index:
                    columns_ffilename = dict([(x, row_current[x].strip()) for x in filename_index[ffilename]])
                    columns_ffilename_index = dict_invert(columns_ffilename)
                    column_index[ffilename] = {}
                    if 'identifier' not in columns_ffilename_index:
                        logger.warn("%s, Row %4i, %s: 'identifier' not found. Skipping filename." % (ffilename, excel_y,  MARKUP_COLUMN))
                        del column_index[ffilename]
                        continue
                    for ccolumn in columns_ffilename_index:
                        if ccolumn not in COLUMNS_VALID:
                            logger.warn("%s, Row %4i, %s: Invalid column name: %s. Skipping." % (ffilename, excel_y,  MARKUP_COLUMN, ccolumn))
                            continue
                        elif len(columns_ffilename_index[ccolumn]) >= 2:
                            logger.error("%s, Row %4i, %s: Multiple column assignments: %s. Exiting." % (ffilename, excel_y, MARKUP_COLUMN, columns_ffilename_index[ccolumn]))
                            exit(1)
                        else:
                            column_index[ffilename][ccolumn] = columns_ffilename_index[ccolumn][0]

        elif MARKUP_PREFIX + MARKUP_FILE_FOLDER in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." % (excel_y,  MARKUP_FILE_FOLDER))
                continue
            else:
                for ffilename in filename_index:
                    columns_ffilename = dict([(x, row_current[x].strip()) for x in filename_index[ffilename]])
                    path_index = dict_invert(columns_ffilename)
                    if '' in path_index:
                        del path_index['']
                    if len(path_index) >= 2:
                        logger.error("Row %4i, %s: Settings ambiguous. Exiting." % (excel_y,  MARKUP_FILE_FOLDER))
                        exit(1)
                    if path_index == {}:
                        continue
                    ## even with multiple identical cells, take the first one
                    path_string = row_current[path_index[list(path_index)[0]][0]]
                    file_folder[ffilename] = path_string

        elif MARKUP_PREFIX + MARKUP_OUTPUT_MODE in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." % (excel_y,  MARKUP_OUTPUT_MODE))
                continue
            else:
                for ffilename in filename_index:
                    # these are the columns that were assigned to a filename
                    columns_ffilename = dict([(x, row_current[x].strip()) for x in filename_index[ffilename]])
                    mode_index = dict_invert(columns_ffilename)
                    if '' in mode_index:
                        del mode_index['']
                    if len(mode_index) >= 2:
                        logger.error("Row %4i, %s: Settings ambiguous. Exiting." % (excel_y,  MARKUP_OUTPUT_MODE))
                        exit(1)
                    if mode_index == {}:
                        continue
                    # even if there are multiple identical cells, just take the first
                    mode_string = row_current[mode_index[list(mode_index)[0]][0]]
                    # still a single string, therefore tokenize to list
                    mode_list = list(csv.reader([mode_string]))[0]
                    ## simplified from set(MODES_VALID).union(...) - set(MODES_VALID)
                    modes_invalid = set(mode_list) - set(MODES_VALID)
                    if modes_invalid != set():
                        logger.warn("Row %4i, %s: Mode(s) unknown: %s. Skipping." % (excel_y,  MARKUP_OUTPUT_MODE, modes_invalid))
                    else:
                        output_mode[ffilename] = mode_list

        elif MARKUP_PREFIX + MARKUP_OUTPUT_BACKUP in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." % (excel_y, MARKUP_OUTPUT_BACKUP))
                continue
            else:
                for ffilename in filename_index:
                    columns_ffilename = dict([(x, row_current[x].strip()) for x in filename_index[ffilename]])
                    backup_index = dict_invert(columns_ffilename)
                    if '' in backup_index:
                        del backup_index['']
                    if len(backup_index) >= 2:
                        logger.error("Row %4i, %s: Settings ambiguous. Exiting." % (excel_y,  MARKUP_OUTPUT_BACKUP))
                        exit(1)
                    if backup_index == {}:
                        continue
                    # even if there are multiple identical cells, just take the first
                    backup_string = row_current[backup_index[list(backup_index)[0]][0]]
                    if backup_string not in BACKUPS_VALID:
                        logger.warn("Row %4i, %s: Mode(s) unknown: %s. Skipping." % (excel_y,  MARKUP_OUTPUT_BACKUP, backup_string))
                    else:
                        backup_mode[ffilename] = backup_string

        elif MARKUP_PREFIX + MARKUP_FILE_FORMAT in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." % (excel_y, MARKUP_FILE_FORMAT))
                continue
            else:
                for ffilename in filename_index:
                    file_format[ffilename] = []
                    columns_ffilename = dict([(x, row_current[x].strip()) for x in filename_index[ffilename]])
                    format_index = dict_invert(columns_ffilename)
                    if '' in format_index:
                        del format_index['']
                    if len(format_index) >= 2:
                        logger.error("Row %4i, %s: Settings ambiguous. Exiting." \
                               % (excel_y,  MARKUP_FILE_FORMAT))
                        exit(1)
                    if format_index == {}:
                        continue
                    # even if there are multiple identical cells, just take the first
                    format_string = row_current[format_index[list(format_index)[0]][0]]
                    ## this also weeds out mutually exclusive 'linebreaks_dos,linebreaks_unix' assignments
                    if format_string not in FILE_FORMATS_VALID:
                        logger.warn("Row %4i, %s: Format unknown: %s. Skipping." % (excel_y,  MARKUP_FILE_FORMAT, format_string))
                    else:
                        file_format[ffilename] = [ format_string ]

        elif MARKUP_PREFIX + MARKUP_FORMAT_DEFAULT in row_current[0]:
            if filename_index == {}:
                logger.warn("Row %4i, %s: No filenames specified yet, skipping row." \
                        % (excel_y, MARKUP_FORMAT_DEFAULT))
                continue
            else:
                for ffilename in filename_index:
                    ## NOTE(review): cells are deliberately NOT .strip()ped here,
                    ## presumably because a format may define whitespace as sep --
                    ## confirm before changing
                    columns_ffilename = dict([(x, row_current[x]) for x in filename_index[ffilename]])
                    format_index = dict_invert(columns_ffilename)
                    if '' in format_index:
                        del format_index['']
                    if len(format_index) >= 2:
                        logger.error("Row %4i, %s: Settings ambiguous. Exiting." \
                               % (excel_y,  MARKUP_FORMAT_DEFAULT))
                        exit(1)
                    elif format_index != {}:
                        format_string = row_current[format_index[list(format_index)[0]][0]]
                        format_default[ffilename] = format_parse(format_string)

        elif column_index != {}:
            ## in current row no markup has been found, therefore we take this row as core data
            for ffilename in column_index:
                buffer_row = {}
                for ccolumn in column_index[ffilename]:
                    # perhaps remove this check because there are no checks for markup either.
                    if column_index[ffilename][ccolumn] + 1 > len(row_current):
                        logger.error("%s Row %4i, %s: Output data: cells missing. \
                                Check CSV format and line breaks within cells. Exiting." \
                               % (ffilename, excel_y,  MARKUP_FORMAT_DEFAULT))
                        exit(1)
                    ## stored triple: [cell value, 1-based column, 1-based row]
                    buffer_row[ccolumn] = [ row_current[column_index[ffilename][ccolumn]],
                            column_index[ffilename][ccolumn] + 1,
                            excel_y ]
                output_buffer[ffilename].append(buffer_row)
    logger.info('All CSV rows read in!')
    if column_index != {}:
        ## csv2conf_filename are still in effect, therefore write out before exit
        output_buffer_sort(output_buffer, column_index, logger)
        buf = output_buffer_format(output_buffer, format_default, logger)
        output_buffer_write(buf, output_mode, file_folder, backup_mode, file_format, options, logger)

def logger_setup(options):
    """Create and configure the 'csv2conf' logger.

    stdout always gets INFO/ERROR/CRITICAL, never DEBUG, and WARNING
    only when options.stdout_warnings_show is set.  If
    options.log_filename is non-empty, everything (including DEBUG and
    WARNING) is additionally appended to that file, starting with a
    timestamped 'Script started' banner.
    """
    logger = logging.getLogger('csv2conf')
    logger.setLevel(logging.DEBUG)

    ## stdout filtering, see
    ## http://docs.python.org/howto/logging-cookbook.html#logging-cookbook
    ## DEBUG: never, WARNING: option, INFO/ERROR/CRITICAL: always
    class FilterStdout(logging.Filter):
        def __init__(self, warnings_show):
            self.warnings_show = warnings_show

        def filter(self, record):
            ## http://docs.python.org/library/logging.html#logrecord-attributes
            if record.levelno == logging.DEBUG:
                return False
            return not (record.levelno == logging.WARNING and not self.warnings_show)

    stdout_handler = logging.StreamHandler()
    stdout_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
    stdout_handler.addFilter(FilterStdout(options.stdout_warnings_show))
    logger.addHandler(stdout_handler)

    if options.log_filename:
        file_handler = logging.FileHandler(options.log_filename)
        ## the start banner gets a timestamped format, everything after it
        ## the compact level+message format
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        logger.addHandler(file_handler)
        logger.info('============== Script started. ==============')
        file_handler.setFormatter(logging.Formatter('%(levelname)-8s %(message)s'))
    return logger

def main():
    """Parse command-line options and feed each input CSV to reader_process."""
    parser = OptionParser(usage="usage: %prog [options] file1.csv file2.csv ...")
    parser.add_option("-d", "--csv-dialect",
                      dest="csv_dialect",
                      default="excel",
                      help="How to handle CSV field separators and quotes. Valid are: %s. " % \
                      ', '.join(csv.list_dialects()) + '[default: %default]')
    parser.add_option("-w", "--warnings-show",
                      dest="stdout_warnings_show",
                      action="store_true",
                      default=False,
                      help="show warnings on stdout. [default:no warnings]")
    parser.add_option("-l", "--log-filename",
                      dest="log_filename",
                      default="",
                      help="If provided, append all logging (including DEBUG and WARNING) to this file.")
    parser.add_option("-a", "--mode-auto",
                      dest="mode_auto",
                      action="store_true",
                      default=False,
                      help="Process all .CSVs in folder 'auto' and write out there.")
    (options, args) = parser.parse_args()

    logger = logger_setup(options)

    if not args:
        logger.info("------------ No arguments (CSV files) specified. ------------")
    if options.mode_auto:
        logger.info("------------ Auto mode: looking for files in 'auto'. ------------")
        ## which file extensions to scan for each supported dialect;
        ## any other dialect finds nothing in auto mode
        extensions_by_dialect = {'excel': ('csv', 'CSV'), 'excel-tab': ('txt', 'TXT')}
        auto_filenames = []
        for extension in extensions_by_dialect.get(options.csv_dialect, ()):
            auto_filenames += glob.glob(AUTO_FOLDERNAME + '*.' + extension)
        args = args + auto_filenames

    for csv_filename in args:
        ## NOTE(review): 'Urb' mixes universal-newline and binary flags;
        ## Python-2-only (Python 3 rejects this mode string) -- confirm
        ## before porting
        with open(csv_filename, 'Urb') as csv_file:
            logger.info("------------ %s: Begin ------------" % csv_filename)
            reader_process(csv.reader(csv_file, dialect=options.csv_dialect), options, logger)
            logger.info("------------ %s: End ------------" % csv_filename)


if __name__ == '__main__':
    main()
