# TODO: Docstring left in to explain what this module does, but it needs
#       extensive modification to fit the Betelgeuse project.
''' BFRNet (Blood Flow Response Network) is software,
    developed in Python and Cython, that builds and
    optimizes an artificial neural network (ANN) for
    predicting blood flow response in individuals who
    are confined to a wheelchair due to spinal injuries.
    Current optimizers include genetic algorithm (GA)
    and particle swarm optimization (PSO). The current
    ANN generator creates a feedforward multilayer
    perceptron architecture with a single hidden layer,
    which can be created for either classification or
    regression learning.

    BFRNet accepts attribute-relation (.arff) files as
    input, and outputs comma-separated value (.csv)
    spreadsheets. ANN can be trained using "test with
    training set" or "test with testing set" options.
    If the "test with testing set" option is chosen,
    BFRNet can create a series of testing/training sets
    from a single, master attribute-relation file.

    BFRNet can be configured with a multitude of
    options, using a configuration file and command line
    flags. Command line flags are as follows:

    -i <training_data>
    -c <master_training_set> <output_directory>

    The -i option is used to specify a training data file
    or directory. It has the same effect as the
    "training_data" option in the "General" section of the
    configuration file. The -c option is used to generate
    a series of testing/training attribute-relation files,
    given a single master file. master_training_set is the
    path to the master attribute-relation file, and
    output_directory is the path of the directory to output
    the testing/training files. If the directory does not
    exist, it will be created. If files are already present,
    BFRNet will not overwrite them (currently, this results
    in an error).
    
    Copyright (C) 2013 Jerrad Michael Genson

    This program is free software: you can redistribute it and/or modify
    it under the terms of the BSD 3-Clause License as published by
    the Open Source Initiative.
    
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    BSD 3-Clause License for more details.
    
    You should have received a copy of the BSD 3-Clause License
    along with this program.  If not, see 
    <https://betelgeuse-ml.googlecode.com/hg/LICENSE>        
    '''

# Python built-in modules
import configparser
import cProfile
import errno
import itertools
import os
import sys
import time

# Third-party modules
import numpy as np

# Betelgeuse modules
from lib import arff
from lib import csv
from lib.ml import optimize
# NOTE(review): 'ann' is used throughout run_experiment but was never
# imported; assumed to live in lib.ml alongside ga/pso -- confirm path.
from lib.ml import ann
from lib.ml import ga
from lib.ml import pso

# Module header
__author__ = 'Jerrad Genson'
__contact__ = 'jerradgenson@gmail.com'
__copyright__ = 'Copyright 2013 Jerrad Michael Genson'
__license__ = 'BSD 3-Clause'

# Global constants
# Path to the configuration file.
CONFIG_PATH = 'config.txt'

# Default values for certain configuration options.
DEFAULT_LAMBDA = 0.2
DEFAULT_LFACTOR = 2
DEFAULT_VMAX = 10

# The multiplier to use for computing starting population size.
# Formula: size = state_length * POP_MULTIPLIER
POP_MULTIPLIER = 10


def get_config_options(config_path, command_line=True):
    ''' Retrieve and validate options from the configuration file.
        @return A dictionary of config options.
        '''

    # Set up the configuration parser.
    config = configparser.ConfigParser()
    success = config.read(config_path)
    error = ' '.join(["Error in", config_path, "in section '{0}':"])
    options = {}
    if not success:
        err = '{0} is missing or corrupt.'.format(config_path)
        raise IOError(err)


    ### General section ###

    # training_data option
    training_data = config.get('General', 'training_data')
    options['training_data'] = training_data
    if not os.path.exists(training_data):
        # The file or directory does not exist.
        error += 'training_data specifies the file or directory containing '
        error += 'Weka .arff files with which to train ANN. If given a file, '
        error += 'program will test with training data. If given a directory, '
        error += 'program will test with testing sets.'
        error += 'Training files are named train0.arff, train1.arff, etc. '
        error += 'Testing files are named test0.arff, test1.arff, etc. Files '
        error += 'must be numbered in sequence.'
        error = error.format('General')
        raise IOError(error)

    # experiments option
    try:
        # Get number of experiments to run per data set.
        experiments = int(config.get('General', 'experiments'))
        options['experiments'] = experiments

    except (TypeError, ValueError):
        # Experiments must be an integer.
        error += 'experiments specifies the number of experiments to '
        error += 'run per data set. It must be an integer.'
        error = error.format('General')
        raise ValueError(error)

    # iterations option
    try:
        # Get number of iterations to run the optimizer.
        iterations = int(config.get('General', 'iterations'))
        options['iterations'] = iterations

    except (TypeError, ValueError):
        # Iterations must be an integer.
        error += 'iterations specifies the number of iterations to run '
        error += 'the optimizer. It must be an integer.'
        error = error.format('General')
        raise ValueError(error)

    # verbosity option
    verbosity = config.get('General', 'verbosity').lower()
    if verbosity in ('verbose', 'normal', 'quiet'):
        options['verbosity'] = verbosity

    else:
        # Verbosity not a correct value.
        error += 'verbosity must be either verbose, normal, or quiet.'
        error = error.format('General')
        raise ValueError(error)


    ### ANN section ###

    # mode option
    mode = config.get('ANN', 'mode').strip().lower()
    options['mode'] = mode
    if mode != 'classify' and mode != 'predict' and mode != 'auto':
        # Mode must be either classify or predict.
        error += "mode must be either 'classify', 'predict', or auto."
        error = error.format('ANN')
        raise ValueError

    # hidden_nodes option
    try:
        # Get number of hidden nodes in ANN.
        hidden_nodes = config.get('ANN', 'hidden_nodes').lower()
        if hidden_nodes == 'default':
            hidden_nodes = DEFAULT_HIDDEN

        hidden_nodes = int(hidden_nodes)
        assert (hidden_nodes >= 0)
        options['hidden_nodes'] = hidden_nodes

    except TypeError:
        # Hidden nodes must be an integer.
        error += 'hidden_nodes must be an integer value.'
        error = error.format('ANN')
        raise TypeError(error)

    except AssertionError:
        # Hidden nodes must be >= 0.
        error += 'hidden_nodes must be >= 0.'
        error = error.format('ANN')
        raise AssertionError(error)

    # lambda option
    try:
        # Get lambda value for ANN activation function.
        lmbda = config.get('ANN', 'lambda').lower()
        if lmbda == 'default':
            lmbda = DEFAULT_LAMBDA

        lmbda = float(lmbda)
        assert (lmbda > 0)
        options['lmbda'] = lmbda

    except TypeError:
        # Lambda must be a number.
        error += 'lambda must be a number.'
        error = error.format('ANN')
        raise TypeError(error)

    except AssertionError:
        # Lambda must be > 0.
        error += 'lambda must be > 0.'
        error = error.format('ANN')
        raise AssertionError


    ### GA/PSO section ###

    # ga_rate/pso_rate options
    try:
        # Get GA and PSO ratios
        ga_rate = int(config.get('GA', 'ga_rate'))
        pso_rate = int(config.get('PSO', 'pso_rate'))
        assert (ga_rate >= 0)
        assert (pso_rate >= 0)
        options['ga_rate'] = ga_rate
        options['pso_rate'] = pso_rate

    except TypeError:
        # GA and PSO ratios must be integers.
        error += 'PSO and GA ratios must be integer values.'
        error = error.format('GA or PSO')
        raise TypeError(error)

    except AssertionError:
        # GA and PSO ratios must be >= 0.
        error += 'PSO and GA ratios must be >= 0.'
        error = error.format('GA or PSO')
        raise AssertionError(error)

    # mut_size option
    try:
        mut_size = config.get('GA', 'mut_size').lower().strip()
        if mut_size == 'default':
            mut_size = ga.MIN_MUTATION_RATE

        else:
            mut_size = float(mut_size)
            assert (mut_size >= 0)
            assert (mut_size <= 1)

        options['mut_size'] = mut_size

    except (ValueError, AssertionError):
        error += 'mut_size must be a real number >= 0 and <= 1.'
        error = error.format('GA')
        raise ValueError(error)

    # min_mutation option
    try:
        min_mutation = config.get('GA', 'min_mutation').lower().strip()
        if min_mutation == 'default':
            min_mutation = ga.MIN_MUTATION_RATE

        else:
            min_mutation = float(min_mutation)
            assert (min_mutation >= 0)
            assert (min_mutation <= 1)

        options['min_mutation'] = min_mutation

    except (ValueError, AssertionError):
        error += 'min_mutation must be a real number >= 0 and <= 1.'
        error = error.format('GA')
        raise ValueError(error)

    # max_mut_decrease option
    try:
        max_mut_decrease = config.get('GA', 'max_mut_decrease').lower().strip()
        if max_mut_decrease == 'default':
            max_mut_decrease = ga.MAX_MUTATION_RATE

        else:
            max_mut_decrease = float(max_mut_decrease)
            assert (max_mut_decrease >= 0)
            assert (max_mut_decrease <= 1)

        options['max_mut_decrease'] = max_mut_decrease

    except (ValueError, AssertionError):
        error += 'max_mut_decrease must be a real number >= 0 and <= 1.'
        error = error.format('GA')
        raise ValueError(error)

    # adaptation_rate option
    try:
        adaptation_rate = config.get('GA', 'adaptation_rate').lower().strip()
        if adaptation_rate == 'default':
            adaptation_rate = ga.ADAPTATION_RATE

        else:
            adaptation_rate = int(adaptation_rate)
            assert (adaptation_rate >= 0)

        options['adaptation_rate'] = adaptation_rate

    except (ValueError, AssertionError):
        error += 'adaptation_rate must be an integer >= 0.'
        error = error.format('GA')
        raise ValueError(error)

    # max_pop_decrease option
    try:
        max_pop_decrease = config.get('GA', 'max_pop_decrease').lower().strip()
        if max_pop_decrease == 'default':
            max_pop_decrease = ga.MAX_POPULATION_DECREASE

        else:
            max_pop_decrease = float(max_pop_decrease)
            assert (max_pop_decrease >= 0)
            assert (max_pop_decrease <= 1)

        options['max_pop_decrease'] = max_pop_decrease

    except (ValueError, AssertionError):
        error += 'max_pop_decrease must be a real number >= 0 and <= 1.'
        error = error.format('GA')
        raise ValueError(error)

    # min_pop_size option
    try:
        min_pop_size = config.get('GA', 'min_pop_size').lower().strip()
        if min_pop_size == 'default':
            min_pop_size = ga.MIN_POPULATION_SIZE

        else:
            min_pop_size = int(min_pop_size)
            assert (min_pop_size >= 0)

        options['min_pop_size'] = min_pop_size

    except (ValueError, AssertionError):
        error += 'min_pop_size must be an integer >= 0.'
        error = error.format('GA')
        raise ValueError(error)

    # velocity_max option
    try:
        vmax = config.get('PSO', 'velocity_max').lower().strip()
        if vmax == 'default':
            options['vmax'] = DEFAULT_VMAX

        else:
            assert (float(vmax) > 0)
            options['vmax'] = float(vmax)

    except (TypeError, AssertionError):
        error += 'velcoity_max must be a real number > 0.'
        error = error.format('PSO')
        raise ValueError(error)

    # learning_factor option
    try:
        learning_factor = config.get('PSO', 'learning_factor').lower().strip()
        if learning_factor == 'default':
            options['learning_factor'] = DEFAULT_LFACTOR

        else:
            assert (float(learning_factor) > 0)
            options['learning_factor'] = float(learning_factor)

    except (TypeError, AssertionError):
        error += 'learning_factor must be a real number > 0.'
        error = error.format('PSO')
        raise ValueError(error)

    return options


def get_cl_options():
    '''Get options from the command line.
       @return A dictionary of CL options.
       '''

    # TODO: This section, while it does work, needs to be rewritten to use
    #       argparse.
    options = {}
    try:
        index = sys.argv.index('-i')
        options['training_data'] = sys.argv[index + 1]

    except ValueError:
        pass

    try:
        index = sys.argv.index('-c')
        options['create_sets'] = sys.argv[index + 1]

    except ValueError:
        options['create_sets'] = False

    return options


def get_options(config_path, command_line=True):
    ''' Retrieve and validate options from the command line and configuration file.
        @param config_path Path to the configuration file.
        @param command_line Set True to parse command line options.
        @return A dictionary of program options.
        '''

    # Get options from the configuration file.
    options = get_config_options(config_path)
    # Get options from the command line.
    options.update(get_cl_options())
    return options


def run_experiment(training_data, testing_data, mode, seed=None):
    ''' Run an experiment training ANN using GA.
        @param training_data A matrix of attribute data for training ANN.
        @param testing_data A matrix of attribute data for testing ANN.
        @param mode Either predict or classify.
        @option seed A starting chromosome from which all others are derived.
        @return The fittest solution found, training accuracy, and testing accuracy.
        '''

    options = get_options(CONFIG_PATH)
    hidden_nodes = options['hidden_nodes']
    lmbda = options['lmbda']
    vmax = options['vmax']
    learning_factor = options['learning_factor']
    ga_rate = options['ga_rate']
    pso_rate = options['pso_rate']
    iterations = options['iterations']

    # Create fitness function.
    fitness = ann.fitness_factory(training_data, testing_data, mode,
                                  hidden_nodes, lmbda)

    # Configure adaptor.
    adapt = ga.create_adaptor(fitness,
                              options['max_mut_decrease'],
                              options['min_mutation'],
                              options['max_pop_decrease'],
                              options['min_pop_size'],
                              adaptation_rate=options['adaptation_rate'])

    # Create ANN optimizer.
    crossover = ga.create_point_crossover()
    mutate = ga.create_generic_mutate(bounds=(-ann.BOUND, ann.BOUND))
    ga_optimize = ga.create_optimizer(fitness, mutate, crossover,
                                      adapt=adapt,
                                      mutation_rate=options['mut_size'])

    pso_optimize = pso.create_optimizer(fitness, bounds=(-options['vmax'],
                                                         options['vmax']))

    optimize = optimize.combine_optimizers((ga_optimize, pso_optimize),
                                           (options['ga_rate'], options['pso_rate']))

    # Create initial particle and particle swarm.
    if mode == 'predict':
        net_size = ann.INPUT_NODES * hidden_nodes + hidden_nodes + 1

    else:
        net_size = (ann.INPUT_NODES * hidden_nodes +
                    (hidden_nodes + 1) * ann.OUTPUT_NODES)

    particle = pso.Particle([net_size])
    particle.data = np.zeros([net_size]).data
    swarm = optimize.create_states(particle, len(particle) * POP_MULTIPLIER, mutate)

    # Run the optimizer on the particle swarm.
    optswarm = optimize.conditional_optimize(swarm, optimize, options['iterations'])

    best_particle = optimize.find_best(optswarm, fitness)[0]

    # Test the accuracy of ANN against the testing data set.
    if mode == 'classify':
        testing_accuracy = ann.classification_accuracy(testing_data,
                                                       best_particle,
                                                       hidden_nodes,
                                                       lmbda)

    elif mode == 'predict':
        testing_accuracy = ann.prediction_accuracy(testing_data,
                                                   best_particle,
                                                   hidden_nodes,
                                                   lmbda)

    return testing_accuracy


def create_sets(input_path, output_path):
    ''' Create testing/training attribute-relation files.
        @param input_path Path to the master attribute-relation file.
        @param output_path Path to the directory to put testing/training sets.
                           If the directory does not exist, it will be created.
                           Preexisting files will not be overwritten.
        @return None
        '''

    # Read in master attribute-relation file.
    with open(input_path, 'r') as input_fd:
        master_relation = input_fd.read()

    # Create testing/training data if it does not exist.
    try:
        os.mkdir(output_path)

    except OSError as ose:
        if errno.errorcode[ose[0]] == 'EEXIST':
            # Directory already exists.
            pass

        else:
            raise ose

    data_sets = arff.generate_sets(master_relation)
    for count, data_set in enumerate(data_sets):
        # Construct path to sets.
        directory = os.path.dirname(output_path)
        training_name = 'train{0}.arff'.format(count)
        testing_name = 'test{0}.arff'.format(count)
        training_path = os.path.join(directory, training_name)
        testing_path = os.path.join(directory, testing_name)
        # Do not overwrite existing files.
        if os.path.exists(training_path) or os.path.exists(testing_path):
            error = 'Testing/training sets already exist.'
            raise IOError(error)

        else:
            with open(training_path, 'w') as output_fd:
                output_fd.write(data_set[0])

            with open(testing_path, 'w') as output_fd:
                output_fd.write(data_set[1])


def main():
    '''Main program function.'''

    # Get configuration options.
    options = get_options(CONFIG_PATH)
    training_data = options['training_data']
    experiments = options['experiments']
    verbosity = options['verbosity']
    mode = options['mode']

    if options['create_sets']:
        # Create testing/training sets, then exit.
        create_sets(training_data, options['create_sets'])
        return

    spreadsheet = None
    # Perform experiments on each data set.
    for data_set, none in enumerate(itertools.repeat(None)):
        if os.path.isdir(training_data):
            # Test with testing data.
            train_file = os.path.join(training_data, 'train{0}.arff'.format(data_set))
            test_file = os.path.join(training_data, 'test{0}.arff'.format(data_set))
            testwtrain = False
            if verbosity != 'quiet':
                # Print path to training data file.
                print('Training data set: {0}'.format(train_file))

            try:
                train_data = arff.parse(train_file)
                test_data = arff.parse(test_file)

            except IOError:
                # No more data sets to train with.
                break

        else:
            # Test with training data.
            print('Testing with training set.')
            train_data = arff.parse(training_data)
            test_data = train_data
            testwtrain = True

        if mode == 'auto':
            # TODO: Although this always works with the Blood Perfusion Model,
            #       it will not always work in general. It needs updated.
            # Automatically detect which ANN mode to use.
            if train_data.attr_types[-1] == 'numeric':
                mode = 'predict'

            else:
                mode = 'classify'

        if not spreadsheet:
            # Create a spreadsheet to store experiment results.
            if mode == 'classify':
                # Classification format
                spreadsheet = csv.ClassifySpreadsheet(train_data.name,
                                                      train_data.attributes +
                                                      ['model output'])

            elif mode == 'predict':
                # Regression learning format
                spreadsheet = csv.PredictSpreadsheet(train_data.name,
                                                     train_data.attributes +
                                                     ['model output'])

            else:
                # TODO: Is this branch even reachable?
                # Generic format
                spreadsheet = csv.CSVSpreadsheet(train_data.name,
                                                 train_data.attributes +
                                                 ['model output'])

        best = None
        for experiment in range(experiments):
            if verbosity != 'quiet' and experiments > 1:
                # Print experiment.
                print('Experiment: {0}'.format(experiment + 1))

            result = run_experiment(train_data, test_data, mode)
            if mode == 'predict':
                if not best or result[0] < best[0]:
                    best = result

            elif mode == 'classify':
                if not best or result[0] > best[0]:
                    best = result

            else:
                best = result

        for index, relation in enumerate(test_data):
            # Create a new row in spreadsheet for best results.
            spreadsheet.add_row()
            spreadsheet.row.extend(relation.row)
            spreadsheet.row.append(str(best[1][index]))

            if verbosity == 'verbose':
                # Print spreadsheet so far.
                print(spreadsheet)

        if testwtrain:
            break

    abstractname = spreadsheet.name + '{0}.csv'
    filename = abstractname.format('')
    counter = 1
    while os.path.exists(filename):
        # File exists. Append a number.
        counter += 1
        filename = abstractname.format(counter)

    with open(filename, 'w') as output_fd:
        output_fd.write(str(spreadsheet))

    if verbosity == 'verbose':
        print(spreadsheet)


if __name__ == '__main__':
    if '--profile' in sys.argv:
        print('Running profiler...')
        cProfile.run('main()')

    else:
        start = time.time()
        main()
        end = time.time()
        print('Program completed execution in {0} seconds.'.format(end - start))
