#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
from itertools import izip
from utils import read_articles, write_article, uopen, AttrDict, open_corpus, filenames
from random import shuffle
from yasa import run_yasa
from nltk.classify.maxent import MaxentClassifier

def build_training_corpus(input_corpus, pos_corpus, neg_corpus, munt_corpus, params=AttrDict()):
    """
    Use MUNT's output to select documents that are likely to be parallel:
    those with the highest ratio of MUNT-aligned sentences.

    ``input_corpus``: article corpus (with <article> tags) to label.
    ``munt_corpus``: MUNT-aligned version of the corpus (no <article> tags).
    ``pos_corpus``/``neg_corpus``: output corpora for positive/negative examples.
    """
    params.setdefault(n_positive=100, n_negative=300, threshold=0.7, src_lang='fr', trg_lang='en')

    def count_aligned(lang):
        # Set of sentences that MUNT managed to align on this language side.
        with uopen(munt_corpus + '.' + lang) as munt_file:
            aligned = set([line.strip() for line in munt_file])

        counts = dict()
        for article_id, text in read_articles(input_corpus + '.' + lang):
            sentences = text.split('\n')
            n_aligned = sum(1 for sentence in sentences if sentence in aligned)
            counts[article_id] = (n_aligned, len(sentences))
        return counts

    fr_counts = count_aligned(params.src_lang)
    en_counts = count_aligned(params.trg_lang)

    # Score = fraction of sentences (both sides pooled) that MUNT aligned.
    scored = []
    for article_id in fr_counts:
        fr_count, fr_length = fr_counts[article_id]
        en_count, en_length = en_counts[article_id]
        ratio = (fr_count + en_count) / float(fr_length + en_length)
        scored.append((ratio, article_id))

    shuffle(scored)

    pos = [article_id for ratio, article_id in scored if ratio >= params.threshold]
    neg = [article_id for ratio, article_id in scored if ratio < params.threshold]

    pos = set(pos[:params.n_positive])
    neg = set(neg[-params.n_negative:])

    src_filename, trg_filename = filenames(input_corpus, params)
    src_articles = read_articles(src_filename)
    trg_articles = read_articles(trg_filename)

    with open_corpus(pos_corpus, params, 'w') as (src_pos, trg_pos), \
         open_corpus(neg_corpus, params, 'w') as (src_neg, trg_neg):

        for (article_id, src_txt), (_, trg_txt) in izip(src_articles, trg_articles):
            if article_id in pos:
                write_article(src_txt, article_id, src_pos)
                write_article(trg_txt, article_id, trg_pos)
            elif article_id in neg:
                write_article(src_txt, article_id, src_neg)
                write_article(trg_txt, article_id, trg_neg)

"""

Parallel document extractor
---------------------------

Resources: corpus + MUNT-aligned corpus

"""

def parallel_article_features(src_article, trg_article, yasa_path, munt=None):
    """
    Compute classification features for a pair of documents: length-based
    features and features based on YASA (and optionally MUNT) alignment.

    ``src_article``/``trg_article``: (index, text) pairs, one sentence per line.
    ``yasa_path``: path to the YASA binary, forwarded to run_yasa.
    ``munt``: optional dict mapping article index -> number of MUNT-aligned
              sentence pairs (see get_munt_alignments).

    Returns a dict {feature_index: feature_value} usable by MaxentClassifier.
    """
    src_index, src_txt = src_article
    trg_index, trg_txt = trg_article

    src_sentences = src_txt.split('\n')
    trg_sentences = trg_txt.split('\n')
    src_sentences_tok = [sentence.split(' ') for sentence in src_sentences]
    trg_sentences_tok = [sentence.split(' ') for sentence in trg_sentences]
    total = float(len(src_sentences) + len(trg_sentences))

    f = []
    # Sentence-count difference and ratio.
    f += [len(src_sentences) - float(len(trg_sentences))]
    f += [len(src_sentences) / float(len(trg_sentences))]

    # Word-count difference and ratio.
    src_word_count = sum(len(sentence) for sentence in src_sentences_tok)
    trg_word_count = sum(len(sentence) for sentence in trg_sentences_tok)
    f += [src_word_count - float(trg_word_count)]
    f += [src_word_count / float(trg_word_count)]

    yasa_alignments = run_yasa(src_txt, trg_txt, yasa_path) # Tuples of tuples

    null_alignments = sum(1 for x, y in yasa_alignments if not x or not y)
    # Proportion of sentences which are not aligned.
    f += [null_alignments / total]
    # Proportion of alignments which are null alignments (1.0 when YASA
    # produced no alignments at all, to avoid dividing by zero).
    f += [1.0 if not yasa_alignments else null_alignments / float(len(yasa_alignments))]

    if munt is not None:
        index = int(src_index)
        # Articles absent from the MUNT output count as zero aligned pairs.
        munt_alignments = munt[index] if index in munt else 0
        f += [2 * munt_alignments / total]
        f += [munt_alignments]

    return dict(enumerate(f))

def get_munt_alignments(input_filename):
    """
    Return {article index: number of aligned sentence pairs} for each
    article pair in the MUNT output.

    ``input_filename``: source or target side of the MUNT-aligned article
    corpus with <article> tags (e.g. wiki.munt.articles.fr). Each article's
    text holds one aligned sentence per line.
    """
    # read_articles yields (index, text) lazily; no need to materialize it.
    return dict((int(x), len(txt.split('\n'))) for x, txt in read_articles(input_filename))

def parallel_articles(input_corpus, output_corpus, pos_corpus, neg_corpus, munt_filename, yasa_path, params=AttrDict()):
    """
    Train a maxent classifier on the positive/negative corpora, then score
    each article pair of ``input_corpus`` and keep the pairs above threshold.

    ``input_corpus'': article corpus (with <article> tags) from which to extract parallel documents
    ``munt_filename'': path of the source or target file of the MUNT aligned article corpus (with the <article> tags)
    """
    params.setdefault(n_positive=100, n_negative=300, threshold=0.7, src_lang='fr', trg_lang='en')
    munt = get_munt_alignments(munt_filename)
    training_data = features_from_corpus(pos_corpus, neg_corpus, yasa_path, params, munt)

    model = MaxentClassifier.train(training_data, trace=0)

    src_filename, trg_filename = filenames(input_corpus, params)
    article_pairs = izip(read_articles(src_filename), read_articles(trg_filename))

    with open_corpus(output_corpus, params, 'w') as (src_out, trg_out), \
         uopen(output_corpus + '.scores', 'w') as score_out, \
         uopen(output_corpus + '.feats', 'w') as feat_out:

        for src_article, trg_article in article_pairs:
            index, src_txt = src_article
            _, trg_txt = trg_article

            feats = parallel_article_features(src_article, trg_article, yasa_path, munt=munt)
            # Dump the feature vector, ordered by feature index.
            feat_line = ','.join(str(value) for key, value in sorted(feats.items()))
            write_article(feat_line + '\n', index, feat_out)

            score = model.prob_classify(feats).prob(True)
            if score >= params.threshold:
                write_article(src_txt, index, src_out)
                write_article(trg_txt, index, trg_out)
            write_article(str(score) + '\n', index, score_out)

def features_from_corpus(pos_corpus, neg_corpus, yasa_path, params, munt=None):
    """
    Build labelled training examples from the positive and negative corpora.

    Returns a shuffled list of (feature dict, bool) pairs, limited to
    ``params.n_positive`` positive and ``params.n_negative`` negative examples.
    """
    src_pos_corpus, trg_pos_corpus = filenames(pos_corpus, params)
    src_neg_corpus, trg_neg_corpus = filenames(neg_corpus, params)

    def labelled(src_filename, trg_filename, label):
        # Pair up both sides of the corpus and attach the class label.
        pairs = izip(read_articles(src_filename), read_articles(trg_filename))
        return [(pair, label) for pair in pairs]

    pos_articles = labelled(src_pos_corpus, trg_pos_corpus, True)
    neg_articles = labelled(src_neg_corpus, trg_neg_corpus, False)
    shuffle(pos_articles)
    shuffle(neg_articles)

    articles = pos_articles[:params.n_positive] + neg_articles[:params.n_negative]
    shuffle(articles)
    return [(parallel_article_features(src_article, trg_article, yasa_path, munt=munt), cls)
            for (src_article, trg_article), cls in articles]
   
def evaluation(pos_corpus, neg_corpus, munt_filename, yasa_path, params=AttrDict(), folds=10):
    params.setdefault(n_positive=100, n_negative=300, threshold=0.7, src_lang='fr', trg_lang='en')
    munt = get_munt_alignments(munt_filename)
    features = features_from_corpus(pos_corpus, neg_corpus, yasa_path, params, munt)
   
    """ Cross-validation """
    n_correct, n_true, n_predicted = 0, 0, 0
    test_size = len(features) // folds

    for i in range(folds):
        test_set = features[:test_size]
        train_set = features[test_size:]
        features = train_set + test_set
        model = MaxentClassifier.train(train_set, trace=0)
        test_feats, test_classes = zip(*test_set)
        scores = [model.prob_classify(feat).prob(True) for feat in test_feats]
        for score, cls in izip(scores, test_classes):
            n_true += int(cls)
            n_predicted += int(score >= params.threshold)
            n_correct += int(cls and score >= params.threshold)

    print 'Correct: {}, True: {}, Predicted: {}'.format(n_correct, n_true, n_predicted)
    print 'Precision: {:.3f}, recall: {:.3f}'.format(n_correct / float(n_predicted), n_correct / float(n_true))

def main_run():
    help_msg = """
    Run the parallel document extraction method on a corpus.

    usage: {} run INPUT_CORPUS OUTPUT_CORPUS POS_CORPUS NEG_CORPUS MUNT_ARTICLE_FILE YASA_BIN THRESHOLD N_POS N_NEG

    INPUT_CORPUS: article corpus from which to extract document pairs (with <article> tags).
    OUTPUT_CORPUS: output corpus in which to write the document pairs classified as parallel.
    POS_CORPUS: corpus containing the positive training examples.
    NEG_CORPUS: corpus containing the negative training examples.
    MUNT_ARTICLE_FILE: source or target file of the MUNT-aligned corpus, with the <article> tags.
    YASA_BIN: path of the YASA binary.
    THRESHOLD: score threshold to decide whether two documents are parallel or not.
    N_POS: number of positive examples to pick.
    N_NEG: number of negative examples to pick.
    """

    try:
        input_corpus, output_corpus, pos_corpus, neg_corpus, munt_filename, yasa_path, threshold, n_pos, n_neg = sys.argv[2:]
    except ValueError:
        # Wrong argument count: show usage instead of exiting silently.
        sys.exit(help_msg.format(sys.argv[0]))

    params = AttrDict(threshold=float(threshold), n_positive=int(n_pos), n_negative=int(n_neg))
    parallel_articles(input_corpus, output_corpus, pos_corpus, neg_corpus, munt_filename, yasa_path, params)

def main_build():
    help_msg = """
    Program to build training data for the parallel document selection system.
    Positive and negative examples are picked according to the MUNT alignment score.

    usage: {} build INPUT_CORPUS POS_CORPUS NEG_CORPUS MUNT_CORPUS THRESHOLD N_POS N_NEG 

    INPUT_CORPUS: article corpus from which to extract document pairs (with <article> tags).
    MUNT_CORPUS: MUNT-aligned version of this corpus (without the <article> tags).
    POS_CORPUS: output corpus in which to write the positive examples.
    NEG_CORPUS: output corpus in which to write the negative examples.
    THRESHOLD: score threshold to decide whether two documents are parallel or not.
    N_POS: number of positive examples to pick.
    N_NEG: number of negative examples to pick.
    """

    try:
        input_corpus, pos_corpus, neg_corpus, munt_corpus, threshold, n_pos, n_neg = sys.argv[2:]
    except ValueError:
        # Only catch the unpacking error; a bare except would also swallow
        # SystemExit and KeyboardInterrupt.
        sys.exit(help_msg.format(sys.argv[0]))

    params = AttrDict(threshold=float(threshold), n_positive=int(n_pos), n_negative=int(n_neg))
    build_training_corpus(input_corpus, pos_corpus, neg_corpus, munt_corpus, params)

def main_eval():
    help_msg = """
    Test the parallel document selection system by cross-validation, with the training data.

    usage: {} eval POS_CORPUS NEG_CORPUS MUNT_ARTICLE_FILE YASA_BIN THRESHOLD N_POS N_NEG 

    POS_CORPUS: output corpus in which to write the positive examples.
    NEG_CORPUS: output corpus in which to write the negative examples.
    MUNT_ARTICLE_FILE: source or target file of the MUNT-aligned corpus, with the <article> tags.
    YASA_BIN: path of the YASA binary.
    THRESHOLD: score threshold to decide whether two documents are parallel or not.
    N_POS: number of positive examples to pick.
    N_NEG: number of negative examples to pick.
    """

    try:
        pos_corpus, neg_corpus, munt_filename, yasa_path, threshold, n_pos, n_neg = sys.argv[2:]
    except ValueError:
        # Only catch the unpacking error; a bare except would also swallow
        # SystemExit and KeyboardInterrupt.
        sys.exit(help_msg.format(sys.argv[0]))

    params = AttrDict(threshold=float(threshold), n_positive=int(n_pos), n_negative=int(n_neg))
    evaluation(pos_corpus, neg_corpus, munt_filename, yasa_path, params)

if __name__ == '__main__':
    help_msg = """
    usage: {} ACTION ...

    ACTION: build, eval, run
    'build' builds a training corpus for parallel document extraction
    'eval' does a cross-validation on a training corpus
    'run' runs the parallel document extraction method on a corpus
    """
    try:
        action = sys.argv[1]
    except IndexError:
        sys.exit(help_msg.format(sys.argv[0]))

    if action == 'run':
        main_run()
    elif action == 'build':
        main_build()
    elif action == 'eval':
        main_eval()
    else:
        # Unknown action: print usage instead of silently doing nothing.
        sys.exit(help_msg.format(sys.argv[0]))
