# -*- coding: utf-8 -*-
from __future__ import division
import nltk
from alignment import AlignmentModel
from utils import AttrDict, read_corpus, open_corpus
from nltk.classify.maxent import MaxentClassifier
from itertools import product
from random import shuffle, sample
from time import time

class MunteanuModel():
    """
    Parallel sentence extraction model of [Munteanu & Marcu, 2005]:
    a word-overlap candidate filter followed by a maximum-entropy classifier.

    ``alignment_model'' word alignment model: provides ``align()'' and the
        ``src_trg_lexicon''/``trg_src_lexicon'' lexicons used by the filter and the features.
    ``training_corpus'' parallel corpus used to train the classifier.
    ``params'' is a dictionary (AttrDict) containing the parameters:
        ``params.n_positive'' (default: 800) number of positive sentence pairs used in the classifier training.
        ``params.n_negative'' (default: same value as n_positive) number of negative (non-parallel) sentence pairs.
        ``params.threshold'' (default: 0.8) probability threshold (in [0, 1]) used at the output of the classifier,
            used to decide whether a sentence pair is parallel.
        ``params.src_lang'' (default: 'fr') file extension for the source side of the corpus (usually source language code).
        ``params.trg_lang'' (default: 'en') file extension for the target side of the corpus (usually target language code).
        ``params.filtering'' (default: True) whether or not to perform candidate filtering, using the word-overlap filter.
        ``params.min_overlap'' (default: 0.5) minimum overlap parameter of the filter:
            ratio of source words that need a translation on the target side.
        ``params.length_ratio'' (default: 2) maximum length ratio parameter of the filter.

        ``params.n_test'' (default: 400) number of lines to take from each side of the input corpus for the evaluation.
        ``params.noise_level'' (default: 0) percentage of noise to introduce artificially in the input corpus [0, 100[.
    """
    def __init__(self, alignment_model, training_corpus, params):
        self.alignment_model = alignment_model
        self.params = AttrDict(**params)
        self.params.setdefault(n_positive=800, n_test=400, filtering=True, min_overlap=0.5, length_ratio=2,
                threshold=0.8, noise_level=0, evaluation_method='default',
                src_lang='fr', trg_lang='en', by_index=False, keep_score=False)
        # n_negative defaults to n_positive, which may itself come from ``params``.
        self.params.setdefault(n_negative=self.params.n_positive)

        self.train(training_corpus)

    def features(self, src_string, trg_string):
        """Align the given sentence pair and return its classifier feature dict."""
        return features(self.alignment_model, self.alignment_model.align(src_string, trg_string))

    def word_overlap(self, src_string, trg_string):
        """
        Perform a candidate filtering over the given pair of sentences (return True if the pair passes the test).
        This is the word overlap filter defined by [Munteanu & Marcu, 2005].
        The word overlap ratio is checked in only one direction (checking in the other direction reduces the recall).
        """
        if not self.params.filtering:
            return True

        src_words = src_string.split()
        trg_words = trg_string.split()

        src_len = len(src_words)
        trg_len = len(trg_words)

        # An empty side can never pass the overlap test.
        if src_len == 0 or trg_len == 0:
            return False

        # Symmetric length ratio: reject pairs whose lengths differ too much.
        ratio = max(src_len / trg_len, trg_len / src_len)
        if ratio > self.params.length_ratio:
            return False

        trg_voc = set(trg_words)
        lexicon = self.alignment_model.src_trg_lexicon
        # Count source words having at least one lexicon translation on the target side.
        src_overlap = sum(1 for src_word in src_words
                if any(trg_word in trg_voc for trg_word in lexicon[src_word]))

        if src_overlap / src_len < self.params.min_overlap:
            return False

        return True

    def train(self, corpus):
        """Train the maximum-entropy classifier on positive (parallel) and negative (non-parallel) pairs."""
        # Read a maximum of 10**5 lines, in case of really big files.
        src_lines, trg_lines = read_corpus(corpus, self.params, 10**5)

        # Indices of parallel pairs: the corpus is parallel, so (i, i) is a positive example.
        lines = range(self.params.n_positive)
        positive = zip(lines, lines)

        # Shuffled cartesian product: not truly random pairs, but cheap to enumerate lazily.
        l1 = range(len(src_lines))
        l2 = l1[:]
        shuffle(l1)
        shuffle(l2)

        pairs = product(l1, l2)

        # Indices of non-parallel pairs satisfying the word overlap filter:
        # negative examples must pass the same filter as real candidates.
        negative = []

        while len(negative) < self.params.n_negative:
            i, j = next(pairs)

            if i != j and self.word_overlap(src_lines[i], trg_lines[j]):
                negative.append((i, j))

        data = [(self.features(src_lines[i], trg_lines[j]), True) for i, j in positive]
        data += [(self.features(src_lines[i], trg_lines[j]), False) for i, j in negative]
        shuffle(data)

        self.classifier = MaxentClassifier.train(data, trace=0)

    def overwrite_params(self, params):
        """Overwrite model parameters with the given (possibly partial) mapping."""
        for k, v in params.items():
            self.params[k] = v

    def evaluation_with_reference(self, corpus, reference_filename, params=None):
        # ``params=None'' instead of a mutable default instance (shared across calls).
        self.overwrite_params(params or {})
        return evaluation_with_reference(self, corpus, reference_filename)

    def evaluation(self, corpus, params=None):
        self.overwrite_params(params or {})
        return evaluation_noise(self, corpus)

    def extract_pairs_from_corpus(self, corpus, output_corpus=None, params=None):
        """Extract parallel pairs from ``corpus''; optionally write them to ``output_corpus''."""
        self.overwrite_params(params or {})
        alignments = extract_pairs_from_corpus(self, corpus)
        if output_corpus is not None:
            with open_corpus(output_corpus, self.params, 'w') as (src_file, trg_file):
                for src_line, trg_line in alignments:
                    src_file.write(src_line + '\n')
                    trg_file.write(trg_line + '\n')
        return alignments

    def extract_pairs(self, src_lines, trg_lines):
        return extract_pairs(self, src_lines, trg_lines)

    def is_parallel(self, src_string, trg_string):
        """Return True if the classifier judges the (filtered) pair to be parallel."""
        # Bug fix: ``word_overlap'' is a method of this class, not a module-level
        # function — the original unqualified call raised NameError at runtime.
        if not self.word_overlap(src_string, trg_string):
            return False

        f = self.features(src_string, trg_string)
        return self.classifier.prob_classify(f).prob(True) >= self.params.threshold

# TODO: feature selection
def features(alignment_model, alignment):
    """
    Return the features of the given alignment. Features are computed for both directions of the alignment.
    It comprises features specific to the alignment itself: such as the number of connected words,
    but also the score of each alignment, and length related features, and lexicon related features.
    Features are returned as a dict mapping feature index -> value (the format expected by the classifier).
    """
    src_len = alignment.src_len
    trg_len = alignment.trg_len
    src_feats = alignment_features(alignment.src_alignment, src_len)
    trg_feats = alignment_features(alignment.trg_alignment, trg_len)
    # Flatten the per-direction feature dicts in key order: deterministic feature
    # indices (dict.values() order is an implementation detail), and explicit lists
    # instead of the Python-2-only list-returning .values().
    f = [src_feats[k] for k in sorted(src_feats)]
    f += [trg_feats[k] for k in sorted(trg_feats)]

    # log probability of the alignment (Smith)
    f += [alignment.src_score]
    f += [alignment.trg_score]

    # length ratio (Munteanu)
    f += [1.0 * src_len / trg_len]
    # length difference (Munteanu)
    f += [abs(src_len - trg_len)]
    # lengths of the sentences (Munteanu)
    f += [src_len, trg_len]

    # percentage of source words that have a translation on the target side (Munteanu)
    src_voc = set(alignment.src_words)
    trg_voc = set(alignment.trg_words)
    cf = sum(1 for src_word in alignment.src_words
            if any(trg_word in trg_voc for trg_word in alignment_model.src_trg_lexicon[src_word]))
    f += [1.0 * cf / src_len]
    # percentage of target words that have a translation on the source side (Munteanu)
    ce = sum(1 for trg_word in alignment.trg_words
            if any(src_word in src_voc for src_word in alignment_model.trg_src_lexicon[trg_word]))
    f += [1.0 * ce / trg_len]
    # TODO, sentence length feature, relative position (Smith)
    # Relative position: must train on a non-parallel corpus?

    return dict(enumerate(f))

def alignment_features(a, src_len):
    """
    Compute the features of alignment array ``a''.
    ``a[j]'' is the source position (1-based) aligned with target word ``j'', or 0 if unconnected.
    ``src_len'' is the length of the source side of the alignment (must be > 0).
    These features are taken from [Smith et al., 2010] and [Munteanu & Marcu, 2005].
    """
    f = dict()
    trg_len = len(a)

    # Fertility (number of aligned target words) of each connected source position,
    # computed in a single pass instead of the quadratic a.count(i) per position.
    fertility = {}
    for x in a:
        if x != 0:
            fertility[x] = fertility.get(x, 0) + 1

    n_connected = len(fertility)
    f[1] = n_connected # number of connected words (Smith)
    f[2] = src_len - n_connected # number of unconnected words (Munteanu, Smith)
    f[3] = 1.0 * n_connected / src_len # percentage of connected words (Smith)
    f[4] = 1.0 * (src_len - n_connected) / src_len # percentage of unconnected words (Munteanu, Smith)

    # top 3 fertilities (Munteanu), padded with 1s when fewer than 3 connected words
    fertilities = sorted(fertility.values(), reverse=True)
    f[5], f[6], f[7] = (fertilities + [1] * 3)[:3]

    # percentage of words with fertility 1, 2, and 3+ (Smith)
    f[8] = 1.0 * sum(1 for c in fertilities if c == 1) / src_len
    f[9] = 1.0 * sum(1 for c in fertilities if c == 2) / src_len
    f[10] = 1.0 * sum(1 for c in fertilities if c >= 3) / src_len

    # Sentinels -1 and trg_len bound the spans at both ends of the sentence.
    longest_span = lambda l: max((y - x - 1) for x, y in zip(l, l[1:]))
    # longest unconnected substring (Smith)
    f[11] = longest_span([-1] + [i for i, x in enumerate(a) if x != 0] + [trg_len])
    # longest connected substring (Smith)
    f[12] = longest_span([-1] + [i for i, x in enumerate(a) if x == 0] + [trg_len])

    return f

def extract_pairs_from_corpus(model, corpus):
    """Read the corpus described by ``model.params'' and extract parallel sentence pairs from it."""
    src, trg = read_corpus(corpus, model.params)
    return extract_pairs(model, src, trg)

def extract_pairs(model, src_lines, trg_lines):
    """
    Extract parallel sentence pairs from the cartesian product of ``src_lines'' and ``trg_lines''.
    Candidates are first filtered (word-overlap filter), then scored by the classifier;
    pairs are selected greedily by decreasing score, each sentence being used at most once.
    Returns sentence pairs, or index pairs if ``model.params.by_index'' is set;
    each tuple additionally carries the score when ``model.params.keep_score'' is set.
    """
    candidates = [(i, j)
            for i in range(len(src_lines))
            for j in range(len(trg_lines))
            if model.word_overlap(src_lines[i], trg_lines[j])]

    scored = []
    for k, (i, j) in enumerate(candidates):
        feats = model.features(src_lines[i], trg_lines[j])
        scored.append((model.classifier.prob_classify(feats).prob(True), k))

    # Greedy selection: best-scoring candidates first.
    scored.sort(reverse=True)

    by_sentence = []
    by_index = []
    used_src = set()
    used_trg = set()
    for prob, k in scored:
        i, j = candidates[k]
        if prob < model.params.threshold:
            # Scores are sorted in decreasing order: nothing below can follow.
            break
        if i in used_src or j in used_trg:
            continue

        used_src.add(i)
        used_trg.add(j)
        extra = (prob,) if model.params.keep_score else tuple()
        by_sentence.append((src_lines[i], trg_lines[j]) + extra)
        by_index.append((i, j) + extra)

    return by_index if model.params.by_index else by_sentence

def evaluation_noise(model, corpus):
    """
    Evaluation on a bitext, with [Goutte, 2012] noise generation method.
    """
    src, trg, reference = goutte_noise(corpus, model.params)
    return evaluation(model, src, trg, reference)

def evaluation_with_reference(model, corpus, reference_filename):
    """
    Evaluation of Munteanu's method using an alignment reference.
    ``corpus.en'', ``corpus.fr'' are the English and French texts, with one sentence per line.
    ``reference_filename'' is the reference alignment file, with one alignment per line, under the format:

        27 29;32 33 35
    
    where French sentences number 27 and 29 (starting at 0) are aligned with English sentences number 32, 33 and 35.

    Three evaluation methods are available: default, sentence_level, and one_one.
        'default' is an evaluation at the alignment level: all alignments from the reference are counted,
            except the 0-k alignments.
        'one_one' counts only the 1-1 alignments. As Munteanu is unable to produce k-n alignment with k > 1,
            this evaluation method is likely to yield a better recall. It is not very realistic though.
        'sentence_level' converts all k-n alignments into k*n 1-1 alignments.

    'default' and 'sentence_level' are the measures described by [Langlais, 1998] (FA and FS).

    All sentence pairs are taken, regardless of the value of the parameter ``params.n_test``.
    """
    src_lines, trg_lines = read_corpus(corpus, model.params)
    with open(reference_filename) as align_file:
        reference = set()
        reference_one_one = set()
        reference_sentence_level = set()
        for l in align_file:
            # Each line holds two ';'-separated index lists: source side then target side.
            left, right = [tuple(map(int, x.split())) for x in l.split(';')]

            if len(left) == len(right) == 1:
                # 1-1 alignment: stored as a plain (i, j) index pair.
                reference.add((left[0], right[0]))
                reference_one_one.add((left[0], right[0]))
            elif left and right:
                # k-n alignment (both sides non-empty); 0-k alignments are skipped.
                reference.add((left, right))

            # Sentence-level reference: every (i, j) combination of the k-n alignment.
            reference_sentence_level = reference_sentence_level.union(set(product(left, right)))
    
    if model.params.evaluation_method == 'sentence_level':
        reference = reference_sentence_level
    elif model.params.evaluation_method == 'one_one':
        reference = reference_one_one

    return evaluation(model, src_lines, trg_lines, reference) 

def evaluation(model, src_lines, trg_lines, reference):
    """
    Evaluate the model against an alignment reference and return counting statistics.
    ``reference'' is the alignment reference.
    It is a set of tuples (i, j), indices of the source and target sentence of each alignment.
    Returned statistics include predicted/correct pair counts before ('_prior') and after
    the greedy one-to-one assignment, plus filter and classifier timings.
    """
    statistics = AttrDict()

    t = time()
    # Candidate pairs passing the word-overlap filter (all pairs when filtering is off).
    pairs = [(i, j) for i, j in product(range(len(src_lines)), range(len(trg_lines)))
            if model.word_overlap(src_lines[i], trg_lines[j])]

    if model.params.filtering:
        statistics.n_predicted_filter = len(pairs)
        statistics.n_correct_filter = len(set(pairs).intersection(reference))
        statistics.filter_time = time() - t

    t = time()
    feats = [model.features(src_lines[i], trg_lines[j]) for i, j in pairs]

    # (probability of being parallel, candidate index) for each surviving pair.
    alignments = [(model.classifier.prob_classify(f).prob(True), k) for k, f in enumerate(feats)]
    statistics.classifier_time = time() - t
   
    statistics.n_true = len(reference) 
    statistics.n_predicted_prior = 0
    statistics.n_correct_prior = 0
    statistics.n_predicted = 0
    statistics.n_correct = 0
    
    # Greedy algorithm: best-scoring pairs first, each sentence used at most once.
    alignments.sort(reverse=True)

    seen_src = set()
    seen_trg = set()
    for s, k in alignments:
        i, j = pairs[k]
        if s < model.params.threshold:
            continue

        # '_prior' counts: every above-threshold pair, before the one-to-one constraint.
        statistics.n_predicted_prior += 1
        statistics.n_correct_prior += int((i, j) in reference)

        if i not in seen_src and j not in seen_trg:
            seen_src.add(i)
            seen_trg.add(j)
            statistics.n_predicted += 1
            statistics.n_correct += int((i, j) in reference)

    return statistics

def goutte_noise(corpus, params):
    """
    Apply Goutte noise on a bitext.
    ``params.n_test'': number of sentence pairs taken at the beginning of the file.
    ``params.noise_level'': percentage of these sentence pairs, whose target side is replaced by another sentence.
    The sentence pairs are taken at random.

    Replacement sentences are taken from the rest of the corpus target side.
    There should be at least ``n_test'' lines in the source file,
    and ``n_test * (noise_level/100 + 1)'' lines in the target file.
    """
    n = params.n_test
    n_noisy = params.noise_level * n // 100
    # Read n_noisy extra target lines to serve as replacement (noise) sentences.
    src_sentences, trg_sentences = read_corpus(corpus, params, n, n + n_noisy)
    trg_sentences, replacements = trg_sentences[:n], trg_sentences[n:]

    # Random positions whose target side gets corrupted.
    corrupted = set(sample(range(n), n_noisy))
    for idx, replacement in zip(corrupted, replacements):
        trg_sentences[idx] = replacement

    # The reference keeps only the untouched (still parallel) pairs.
    reference = set((i, i) for i in range(len(trg_sentences)) if i not in corrupted)
    return src_sentences, trg_sentences, reference

def lamraoui_noise(corpus, params):
    """
    Apply Lamraoui noise on a bitext: remove ``params.noise_level'' percent of the
    source sentences, chosen at random inside each chunk of 10 sentences.
    Noise level steps of 10, in the range [0, 90].
    Returns (noisy source sentences, target sentences, reference), where the
    reference maps each kept noisy-side index to its original position.
    """
    size = params.n_test
    noise_level = params.noise_level
    src_sentences, trg_sentences = read_corpus(corpus, params, size)
    # (Removed leftover debug print of the parameter types.)

    noisy_sentences = []
    reference = set()
    for i in range(1 + size // 10):
        # Last chunk may hold fewer than 10 sentences (possibly 0).
        step = min(10, size - i * 10)
        # Chunk-local indices of the sentences to drop.
        indices = set(sample(range(step), noise_level * step // 100))

        for j in range(step):
            k = i * 10 + j
            if j not in indices:
                noisy_sentences.append(src_sentences[k])
                # len(reference) is the index of the kept sentence in noisy_sentences.
                reference.add((len(reference), k))

    return noisy_sentences, trg_sentences, reference

