#!/bin/env python2
# -*- coding: utf-8 -*-
import sys, subprocess, re
from numpy import arange
from ConfigParser import ConfigParser
from os import path, unlink
from itertools import product 
from multiprocessing import Pool
from utils import dict_to_str, augmented_statistics, AttrDict
from plotting import plot_values
from collections import defaultdict
from alignment import AlignmentModel
from munteanu import MunteanuModel
from yasa import yasa_evaluation, yasa_evaluation_with_reference

def run_experiment(params):
    """Run one experiment section over the grid of its varying parameters.

    For every combination in the Cartesian product of the init/train/test
    parameter lists, runs the evaluation (`run`) and appends one result line
    to ``<params.path>/<params.name>``.  When exactly one parameter varies
    and ``params.plot`` is set, a 2D plot of the results is also produced.

    :param params: AttrDict of experiment settings; values that are iterable
        (and not strings) are treated as parameter ranges to sweep over.
    """
    params.setdefault(plot=False, iterations=1)

    output_filename = path.join(params.path, params.name)
    # Start from a clean output file; only ignore "file not found"-style
    # OS errors instead of swallowing every exception.
    try:
        unlink(output_filename)
    except OSError:
        pass

    def write(txt):
        # Re-open in append mode on every call so partial results survive
        # a crash mid-sweep.
        with open(output_filename, 'a') as output_file:
            output_file.write(txt + '\n')

    # Parameter names grouped by the pipeline stage they affect.
    init_vars = ['lexicon_threshold', 'lexicon_max', 'giza_model_path', 'giza_model_type']
    train_vars = ['n_positive', 'n_negative', 'train_corpus', 'filtering', 'min_overlap', 'length_ratio']
    test_vars = ['n_test', 'threshold', 'test_corpus', 'noise_level', 'evaluation_method']

    def param_product(keys):
        # Expand each scalar value into a one-element list, then take the
        # Cartesian product so every combination gets its own AttrDict.
        pars = [params[v] for v in keys]
        return [AttrDict(zip(keys, values))
                for values in product(*[list(x) if hasattr(x, '__iter__') else [x]
                                        for x in pars])]

    init_param_list = param_product(init_vars)
    train_param_list = param_product(train_vars)
    test_param_list = param_product(test_vars)

    variables = []  # Names of the parameters that actually vary (>1 value)
    for k, v in params.items():
        if k in init_vars or k in train_vars or k in test_vars:
            if hasattr(v, '__iter__') and len(v) > 1:
                variables.append(k)
    # 2D plotting only makes sense with exactly one varying parameter.
    params.plot = params.plot and len(variables) == 1

    Y = defaultdict(list)
    X = []

    for init_params, train_params, test_params in product(init_param_list, train_param_list, test_param_list):
        # Later setdefault calls never overwrite earlier ones, so precedence
        # is: test > init > train > global params.
        pars = AttrDict(**test_params)
        pars.setdefault(**init_params)
        pars.setdefault(**train_params)
        pars.setdefault(**params)

        # Header line identifying this combination; shorten path-like values
        # to their basename for readability.
        header_params = AttrDict([(k, pars[k]) for k in variables])
        for k in header_params:
            if k.endswith('corpus') or k.endswith('path'):
                header_params[k] = path.basename(header_params[k])
        write('#' + dict_to_str(header_params))

        res = run(pars, variables)
        write(dict_to_str(res))

        if params.plot:
            x = pars[variables[0]]
            if isinstance(x, str):
                x = path.basename(x)
            X.append(x)
            for label in params.plot_y_labels:
                Y[label].append(res[label])

    if params.plot:
        plot_values(X, Y, variables[0], path.join(params.path, params.name))

def run(params, variables):
    """Evaluate the configured model(s) on the test corpus.

    Runs ``params.iterations`` evaluation rounds and returns a defaultdict
    mapping each requested statistic name to its value averaged over the
    rounds.  Yasa statistics are suffixed with ``_yasa``.
    """
    params.setdefault(noise_level=None)

    corpus = params.test_corpus
    if params.with_reference and params.noise_level is not None:
        # Reference evaluations read the noise-specific corpus variant.
        corpus = '{}.{}'.format(corpus, params.noise_level)

    n_runs = params.iterations
    totals = defaultdict(float)
    for _ in range(n_runs):
        scores = dict()

        if params.statistics:
            aligner = AlignmentModel(
                giza_model_path=params.giza_model_path,
                params=params)

            model = MunteanuModel(
                alignment_model=aligner,
                training_corpus=params.train_corpus,
                params=params)

            if params.with_reference:
                evaluate = model.evaluation_with_reference
            else:
                evaluate = model.evaluation
            stats = augmented_statistics(evaluate(corpus=corpus, params=params))

            for name in params.statistics:
                scores[name] = stats[name]

        if params.yasa_statistics:
            if params.with_reference:
                evaluate = yasa_evaluation_with_reference
            else:
                evaluate = yasa_evaluation
            stats = evaluate(corpus=corpus, yasa_path=params.yasa_path, params=params)

            for name in params.yasa_statistics:
                scores[name + '_yasa'] = stats[name]

        # Accumulate each round's contribution to the running average.
        for key, value in scores.items():
            totals[key] += value / float(n_runs)

    return totals

if __name__ == '__main__':
    # Entry point: read the experiment config file named on the command
    # line, build one parameter set per section, and run them in parallel.
    cfgparser = ConfigParser()
    try:
        cfgparser.read(sys.argv[1])
    except IndexError:
        sys.exit('Usage: %s CONFIG_FILE' % sys.argv[0])
    except Exception as e:
        sys.exit(e)

    param_list = []

    for exp in cfgparser.sections():
        params = AttrDict(name=exp)

        items = cfgparser.items(exp)
        output_directory = dict(items)['path']
        # Archive the effective configuration next to the experiment output
        # so each run is reproducible.
        with open(path.join(output_directory, exp + '.cfg'), 'w') as output_file:
            output_file.write('[DEFAULT]\n')
            for k, v in items:
                output_file.write('{} = {}\n'.format(k, v))
                # SECURITY: eval() executes arbitrary code from the config
                # file -- only run configuration files you trust.  Values
                # that fail to evaluate are kept as raw strings.
                try:
                    params[k] = eval(v)
                except Exception:
                    params[k] = v
        param_list.append(params)

    # One worker process per experiment; close and join the pool so worker
    # processes are cleaned up even if an experiment raises.
    pool = Pool()
    try:
        pool.map(run_experiment, param_list)
    finally:
        pool.close()
        pool.join()
