import re
import tempfile
from itertools import product
from subprocess import Popen, PIPE

from munteanu import goutte_noise
from utils import AttrDict, stats, article_corpus_apply

def run_yasa(src_txt, trg_txt, yasa_path):
    """Sentence-align two texts with the external YASA aligner.

    src_txt / trg_txt are newline-separated sentences; yasa_path is the
    path to the YASA executable.  Returns a list of (left, right) pairs,
    each side a tuple of 0-based sentence indices.

    NOTE(review): YASA's exit status is not checked -- a failed run
    silently yields an empty alignment list.
    """
    with tempfile.NamedTemporaryFile('w+b') as src_file, \
         tempfile.NamedTemporaryFile('w+b') as trg_file:

        # Binary mode with an explicit UTF-8 encode: the original wrote
        # bytes into a text-mode file, which raises TypeError on Python 3.
        src_file.write(src_txt.encode('utf-8'))
        src_file.flush()  # make the content visible to the subprocess
        trg_file.write(trg_txt.encode('utf-8'))
        trg_file.flush()

        p = Popen([yasa_path, '-i', 'o', '-o', 'a', src_file.name, trg_file.name], stdout=PIPE)
        # communicate() returns bytes; decode before regex matching.
        yasa_output = p.communicate()[0].decode('utf-8')

        alignments = []
        for link in re.findall(r'xtargets="(.*?)"', yasa_output):
            left, right = link.split(';')
            # YASA emits 1-based sentence indices; convert to 0-based.
            left = tuple(int(i) - 1 for i in left.split())
            right = tuple(int(i) - 1 for i in right.split())
            alignments.append((left, right))

        return alignments

def yasa_evaluation(corpus, yasa_path, params):
    """Noise the corpus (Goutte et al. protocol), align it with YASA and
    score the result against the known 1-1 reference links."""
    src_sentences, trg_sentences, gold_links = goutte_noise(corpus, params)
    # The gold standard is pairs of single indices; lift each into the
    # ((left,), (right,)) tuple shape produced by run_yasa.
    gold = {((i,), (j,)) for i, j in gold_links}
    hypothesis = run_yasa('\n'.join(src_sentences), '\n'.join(trg_sentences), yasa_path)
    return reference_comparison(hypothesis, gold, params)

def yasa_evaluation_with_reference(corpus, reference_filename, yasa_path, params):
    """Align a corpus with YASA and score it against an external reference
    file (one "left indices;right indices" alignment per line)."""
    # NOTE(review): read_corpus is neither defined nor imported in this
    # file -- confirm it is supplied by the surrounding package.
    src_sentences, trg_sentences = read_corpus(corpus, params)
    hypothesis = run_yasa('\n'.join(src_sentences), '\n'.join(trg_sentences), yasa_path)

    gold = set()
    with open(reference_filename) as align_file:
        for line in align_file:
            left, right = (tuple(int(tok) for tok in half.split())
                           for half in line.split(';'))
            # Drop insertions/deletions: links where one side is empty.
            if left and right:
                gold.add((left, right))

    return reference_comparison(hypothesis, gold, params)

def reference_comparison(alignments, reference, params):
    """Compare hypothesis alignments to a reference set and return
    recall / precision / F1 as an AttrDict.

    params.evaluation_method selects the granularity:
      - 'sentence_level': expand each many-to-many link into individual
        (src, trg) sentence pairs before comparing;
      - 'one_one': keep only strict 1-1 links on both sides;
      - anything else: compare whole links as-is.
    """
    if params.evaluation_method == 'sentence_level':
        # Fix: `product` was used here without ever being imported,
        # raising NameError on this path (now imported from itertools).
        alignments = set(x for l, r in alignments for x in product(l, r))
        reference = set(x for l, r in reference for x in product(l, r))
    elif params.evaluation_method == 'one_one':
        alignments = set((l, r) for l, r in alignments if len(l) == len(r) == 1)
        reference = set((l, r) for l, r in reference if len(l) == len(r) == 1)
    else:
        alignments = set(alignments)

    correct = float(len(alignments.intersection(reference)))

    recall, precision, f1 = stats(correct, len(reference), len(alignments))
    return AttrDict(recall=recall, precision=precision, f1=f1)

def yasa_on_articles(input_corpus, output_corpus, params, yasa_path):
    """Run YASA article-by-article over a corpus, writing the aligned
    bitext for each article via article_corpus_apply."""
    def function(src_txt, trg_txt):
        # Align one article and rebuild it as parallel line-aligned text.
        alignments = run_yasa(src_txt, trg_txt, yasa_path)
        sentence_pairs = alignments_to_bitext(src_txt, trg_txt, alignments)
        if not sentence_pairs:
            # Fix: zip(*[]) raises ValueError on unpacking when no link
            # survives filtering; such an article yields an empty bitext.
            return '', ''
        src_lines, trg_lines = zip(*sentence_pairs)
        return '\n'.join(src_lines), '\n'.join(trg_lines)

    article_corpus_apply(input_corpus, output_corpus, params, function)

def alignments_to_bitext(src_txt, trg_txt, alignments):
    """Materialize index alignments as (src, trg) sentence-pair strings.

    src_txt / trg_txt are newline-separated sentences; alignments is a
    list of (left, right) tuples of 0-based line indices.  Many-to-many
    links are joined with spaces; links with an empty side are dropped.
    """
    src_sentences = src_txt.split('\n')
    trg_sentences = trg_txt.split('\n')

    pairs = []
    for src_idx, trg_idx in alignments:
        if not src_idx or not trg_idx:
            continue  # insertion/deletion: nothing to pair up
        pairs.append((
            ' '.join(src_sentences[i] for i in src_idx),
            ' '.join(trg_sentences[j] for j in trg_idx),
        ))

    return pairs
