#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
import argparse 
from time import time
from utils import summary, AttrDict
from collections import defaultdict
from alignment import AlignmentModel
from munteanu import MunteanuModel

def average_dicts(dicts):
    """Key-wise average of a sequence of numeric-valued dicts.

    A key missing from some of the dicts contributes 0 for those dicts,
    i.e. the divisor is always ``len(dicts)``, not the number of dicts
    containing the key.

    Parameters
    ----------
    dicts : sequence of dict
        Dicts mapping hashable keys to numeric values.

    Returns
    -------
    collections.defaultdict(float)
        Each key mapped to the mean of its values across all dicts.
        An empty input sequence yields an empty dict.
    """
    res = defaultdict(float)
    n = len(dicts)
    # Accumulate first, divide once per key at the end: avoids one
    # division (and one len() lookup) per (dict, key) pair and reduces
    # accumulated floating-point error.
    for d in dicts:
        for k, v in d.items():
            res[k] += float(v)
    for k in res:
        res[k] /= n
    return res

def interactive_mode(args):
    """Placeholder for an interactive session entry point.

    Not implemented: the body is a no-op and the function is not called
    anywhere in this file.  TODO(review): implement or remove.
    """
    pass

def run_munteanu(args):
    """Train a Munteanu-style parallel-sentence model and run it.

    Driven entirely by the CLI flags in ``args``:

    * ``--evaluation``: train and evaluate ``args.iterations`` times
      (each iteration rebuilds the alignment model and retrains), then
      print the averaged statistics via ``summary``.
    * ``--output-corpus``: train a (fresh) model and write extracted
      parallel sentence pairs to ``args.output_corpus``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (see the parser in ``__main__``).
    """
    params = AttrDict(args.__dict__)

    if args.evaluation:
        res = []
        for _ in range(args.iterations):
            t = time()
            alignment_model = AlignmentModel(args.giza_model, params)
            print('Loading: {:.2f} seconds'.format(time() - t))
            t = time()
            model = MunteanuModel(alignment_model, args.train_corpus, params)
            print('Training: {:.2f} seconds'.format(time() - t))

            # With an explicit alignment reference, evaluate against it;
            # otherwise the input corpus is assumed to be a bitext.
            if args.reference is not None:
                res.append(model.evaluation_with_reference(args.input_corpus, args.reference))
            else:
                res.append(model.evaluation(args.input_corpus))

        statistics = average_dicts(res)
        summary(statistics)

    if args.output_corpus is not None:
        # Rebuild the models here: the evaluation branch may not have
        # run, and its models are local to the loop above.
        alignment_model = AlignmentModel(args.giza_model, params)
        model = MunteanuModel(alignment_model, args.train_corpus, params)
        # Writes the extracted pairs to args.output_corpus; the return
        # value is not needed here.
        model.extract_pairs_from_corpus(args.input_corpus, args.output_corpus)

if __name__ == '__main__':
    # Example of usage:
    #   ./main.py ~/cluster/data/wikipedia.train ~/cluster/data/wikipedia.test \
    #       ~/cluster/model/europarl_100k -n 400 -l 400
    #
    # argument_default=SUPPRESS: options not given on the command line
    # (and without an explicit default=) are simply absent from the
    # Namespace, so the models can fall back to their own defaults.
    parser = argparse.ArgumentParser(description='Extraction of parallel sentences from a bilingual corpus.',
            argument_default=argparse.SUPPRESS)

    # Positional arguments.
    parser.add_argument('train_corpus', help='training corpus')
    parser.add_argument('input_corpus', help='input corpus')
    parser.add_argument('giza_model', help='giza model')
    parser.add_argument('src_lang', help='source language (en, fr, etc.)')
    parser.add_argument('trg_lang', help='target language (en, fr, etc.)')

    # Training / extraction parameters.
    parser.add_argument('-i', '--iterations', type=int, help='number of iterations', default=1)
    parser.add_argument('-n', '--train', type=int, help='training size', dest='n_positive')
    parser.add_argument('-t', '--threshold', type=float, help='classifier threshold')
    parser.add_argument('-o', '--output-corpus', help='output corpus', dest='output_corpus', default=None)

    parser.add_argument('-a', '--train-neg', type=int, help='training size (negative)', dest='n_negative')
    # store_false: passing -b/--no-filter DISABLES candidate filtering
    # (the previous help text said "enable", which was wrong).
    parser.add_argument('-b', '--no-filter', dest='filtering', help='disable candidate filtering', action='store_false')
    parser.add_argument('-c', '--giza', help='type of model used', dest='giza_model_type')

    # Evaluation parameters.
    parser.add_argument('-e', '--evaluation', action='store_true', default=False)
    parser.add_argument('-l', '--test', type=int, help='test size', dest='n_test')
    parser.add_argument('-f', '--noise-level', type=int, help='noise level', dest='noise_level')
    parser.add_argument('-g', '--reference', help='location of the alignment reference, if the input is not a bitext', default=None)
    parser.add_argument('-k', '--evaluation-method',
            help='evaluation method (default, one_one, sentence_level)', dest='evaluation_method')

    args = parser.parse_args()

    run_munteanu(args)