# -*- coding: utf-8 -*-
from __future__ import division
import os, re, codecs, time, sys
import nltk
from itertools import izip, islice
from random import sample, shuffle
from contextlib import contextmanager

class AttrDict(dict):
    """
    A dict subclass whose entries are also reachable as attributes.

    >>> d = AttrDict(x=1, y=2)
    >>> d.x
    1
    >>> d.x = 5   # equivalent to d['x'] = 5
    """
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Aliasing the attribute dict to the mapping itself makes attribute
        # access and item access hit the same underlying storage.
        self.__dict__ = self

    def setdefault(self, *args, **kwargs):
        """
        Insert default values for the given keyword arguments.

        Keys already present are left untouched; missing keys are inserted.
        Mutates the dictionary in place and returns a reference to self:

        >>> d = AttrDict(x=1, y=2)
        >>> d.setdefault(y=3, z=3) == {'x': 1, 'y': 2, 'z': 3}
        True

        The positional form keeps the classic dict.setdefault contract:

        >>> AttrDict(x=1, y=2).setdefault('z', 4)
        4
        >>> AttrDict(x=1, y=2).setdefault('y', 3)
        2
        """
        if args:  # positional form: defer to the plain dict behaviour
            return super(AttrDict, self).setdefault(*args)
        for key in kwargs:
            super(AttrDict, self).setdefault(key, kwargs[key])
        return self

def dict_to_str(d):
    """Render a dict as 'KEY=value' pairs; float values get 4 decimal places."""
    parts = []
    for key, value in d.items():
        template = '{}={:.4f}' if isinstance(value, float) else '{}={}'
        parts.append(template.format(key.upper(), value))
    return ', '.join(parts)

def uopen(filename, mode='r'):
    """Open ``filename`` as a UTF-8 encoded unicode stream."""
    return codecs.open(filename, mode, encoding='utf-8')

def count_lines(filename):
    """Return the number of lines in ``filename``."""
    total = 0
    with open(filename) as stream:
        for _ in stream:
            total += 1
    return total

def delete_file(filename):
    """
    Remove ``filename``, silently ignoring OS-level failures (missing file,
    permissions). Bug fix: the original bare ``except:`` also swallowed
    unrelated errors such as a bad argument type or KeyboardInterrupt;
    only OSError is a legitimate "best effort" outcome here.
    """
    try:
        os.unlink(filename)
    except OSError:
        pass

def word_tokenize(string):
    """Split ``string`` into word tokens using NLTK's default word tokenizer."""
    return nltk.word_tokenize(string)

def sent_tokenize(txt):
    """Split ``txt`` into sentences using NLTK's default sentence tokenizer."""
    return nltk.sent_tokenize(txt)

"""
Corpus processing functions: a corpus consists of two files:
    corpus.src_lang (source side of the corpus), and corpus.trg_lang (target side of the corpus),
    where ``src_lang'' and ``trg_lang'' are attributes of the ``params'' dictionary.
"""
# Default language pair (French -> English) used by all corpus helpers below.
default_params = AttrDict(src_lang='fr', trg_lang='en')

def filenames(corpus, params=default_params):
    """Return the (source, target) file names for ``corpus``."""
    suffixes = (params['src_lang'], params['trg_lang'])
    return tuple('{}.{}'.format(corpus, suffix) for suffix in suffixes)

@contextmanager
def open_corpus(corpus, params=default_params, mode='r'):
    """Context manager yielding the (source, target) files of ``corpus``."""
    src_name, trg_name = filenames(corpus, params)
    with uopen(src_name, mode) as src_file:
        with uopen(trg_name, mode) as trg_file:
            yield src_file, trg_file

def read_corpus(corpus, params=default_params, src_size=None, trg_size=None):
    """
    Read both sides of ``corpus`` into memory, with surrounding whitespace
    stripped from every line.

    ``src_size``/``trg_size`` cap the number of lines kept on each side;
    when only ``src_size`` is given, the same cap applies to both sides.
    Returns a (src_lines, trg_lines) pair of lists.
    """
    if src_size is not None and trg_size is None:
        trg_size = src_size

    with open_corpus(corpus, params) as (src_file, trg_file):
        stripped_src = (line.strip() for line in src_file)
        stripped_trg = (line.strip() for line in trg_file)
        if src_size is None and trg_size is None:
            return list(stripped_src), list(stripped_trg)
        return (list(islice(stripped_src, src_size)),
                list(islice(stripped_trg, trg_size)))

def tokenize_and_clean_corpus(input_corpus, output_corpus, params=default_params, min_len=5, max_len=80):
    """
    Word-tokenize every sentence pair of ``input_corpus`` and keep only the
    pairs whose two sides both contain between ``min_len`` and ``max_len``
    tokens (inclusive). Tokens are re-joined with single spaces on output.
    """
    with open_corpus(input_corpus, params) as (src_in, trg_in), \
         open_corpus(output_corpus, params, 'w') as (src_out, trg_out):

        for src_line, trg_line in izip(src_in, trg_in):
            src_tokens = word_tokenize(src_line)
            trg_tokens = word_tokenize(trg_line)

            src_ok = min_len <= len(src_tokens) <= max_len
            trg_ok = min_len <= len(trg_tokens) <= max_len
            if src_ok and trg_ok:
                src_out.write(' '.join(src_tokens) + '\n')
                trg_out.write(' '.join(trg_tokens) + '\n')

def eliminate_duplicates(input_corpus, output_corpus, params=default_params):
    """
    Remove duplicate line pairs from the given corpus. The order is not preserved.
    The files must be small enough to hold entirely in memory.
    Example of usage: python -c "import utils; utils.eliminate_duplicates('corpus', 'corpus.nodup')"
    """
    src_lines, trg_lines = read_corpus(input_corpus, params)
    lines = list(set(zip(src_lines, trg_lines)))
    shuffle(lines)
    # Bug fix: zip(*[]) yields nothing, so unpacking raised ValueError on an
    # empty corpus; write empty output files instead.
    src_lines, trg_lines = zip(*lines) if lines else ((), ())

    with open_corpus(output_corpus, params, 'w') as (src_output_file, trg_output_file):
        # NOTE(review): output has no trailing newline, unlike the other
        # corpus writers in this module — confirm downstream readers cope.
        src_output_file.write('\n'.join(src_lines))
        trg_output_file.write('\n'.join(trg_lines))

def sample_corpus(input_corpus, output_corpus, size, params=default_params, rand=True, start=0):
    """ Reduce the size of a parallel corpus (in number of sentences). """
    # With rand=True, ``size`` random line pairs are kept; otherwise a slice
    # of the corpus starting at ``start`` is kept (negative ``start`` counts
    # from the end of the file).
    with open_corpus(input_corpus, params) as (src_input_file, trg_input_file), \
         open_corpus(output_corpus, params, 'w') as (src_output_file, trg_output_file):

        # First pass over the source side just to count the lines.
        total_size = sum(1 for _ in src_input_file)
        src_input_file.seek(0)

        if rand:
            lines = sample(range(total_size), size)
        else:
            # NOTE(review): range(first, size) yields size - first lines (not
            # ``size`` lines) when start > 0, and the ``+ 1`` in the negative
            # branch looks off by one — confirm the intended semantics.
            lines = range(start if start >= 0 else start + total_size + 1, size)

        lines = set(lines)  # O(1) membership tests in the copy loop below

        # Second pass: copy only the selected pairs, keeping both sides aligned.
        for i, pair in enumerate(izip(src_input_file, trg_input_file)):
            src_line, trg_line = pair
            if i in lines:
                src_output_file.write(src_line)
                trg_output_file.write(trg_line)

"""
Munteanu utility functions to compute and print statistics (recall, precision, etc.)
"""

def augmented_statistics(statistics):
    """
    Compute performance measures such as: recall, precision and F-measure
    and add them to statistics.

    ``statistics`` must provide ``n_correct``, ``n_true``, ``n_predicted``,
    their ``*_prior`` counterparts and ``classifier_time``; the ``filter_*``
    and ``filter_time`` entries are optional.
    """
    res = AttrDict(**statistics)

    res.recall, res.precision, res.f1 = stats(res.n_correct, res.n_true, res.n_predicted)

    res.recall_prior, res.precision_prior, res.f1_prior = stats(
            res.n_correct_prior, res.n_true, res.n_predicted_prior)

    # Total evaluation time; the filtering step is optional.
    res.time = res.classifier_time
    if 'filter_time' in res:
        res.time += res.filter_time

    # Bug fix: the original condition was
    #   'n_correct_filter' and 'n_predicted_filter' in res
    # where the first operand is a truthy string constant, so only the second
    # membership test was actually evaluated.
    if 'n_correct_filter' in res and 'n_predicted_filter' in res:
        res.recall_filter, res.precision_filter, res.f1_filter = stats(
                res.n_correct_filter, res.n_true, res.n_predicted_filter)

        # NOTE(review): dividing by n_true**2 looks suspicious — a drop ratio
        # would normally divide by a plain count, not its square. Confirm.
        res.filter_drop = 1 - res.n_predicted_filter / res.n_true**2

    return res

def from_statistics(statistics):
    """ Return recall, precision and F-measure from statistics. """
    augmented = augmented_statistics(statistics)
    return (augmented.recall, augmented.precision, augmented.f1)

def stats(correct, true, predicted):
    """
    Recall, precision and F1 computed from the counts of correctly
    predicted, true, and predicted values.

    Degenerate cases: a zero denominator yields a measure of 1.0, and
    F1 is 0.0 when recall and precision are both zero.
    """
    if true:
        recall = correct / true
    else:
        recall = 1.0
    if predicted:
        precision = correct / predicted
    else:
        precision = 1.0
    denominator = recall + precision
    f1 = 2 * recall * precision / denominator if denominator else 0.0
    return recall, precision, f1

def summary(statistics):
    """ Display a nice looking summary of statistics. """
    # Derive recall/precision/F1 (and total time) from the raw counts first.
    statistics = augmented_statistics(statistics)
    print('Correct: {}, predicted: {}, true: {}'.format(statistics.n_correct,
        statistics.n_predicted, statistics.n_true))
    # \033[36m ... \033[0m renders the measures in cyan on ANSI terminals.
    print('\033[36mPrecision: %.3f, recall: %.3f, f1: %.3f\033[0m'
            % (statistics.precision, statistics.recall, statistics.f1))
    print('Evaluation: %.2f seconds' % statistics.time)

"""
Articles processing: several documents are stored in the same file, separated by <article> tags.
e.g.,
<article id="168">
Brazil
Brazil is the fifth largest country in the world , and third largest in the Americas , with a total area of , including of water .
...
</article>
<article id="169">
...

A comparable corpus consists of two such files. Articles belonging to a pair have the same id, and same position in the file.
"""

def read_articles(filename):
    """
    Iterate over the articles contained in ``filename``.

    Yields (index, article) tuples, where ``index`` is the value of the
    opening tag's ``id`` attribute (None when absent) and ``article`` is the
    article text, one stripped input line per output line.
    """
    opening_tag = re.compile(r'<article( id="(.*?)")?( \w+=.*?)*>')
    with uopen(filename) as input_file:
        body, index = None, None
        for raw_line in input_file:
            line = raw_line.strip()
            match = opening_tag.match(line)
            if match:
                # New article: reset the accumulator and remember its id.
                body = ''
                index = match.group(2)
            elif line == '</article>' and body is not None:
                yield index, body
                body = None
            elif body is not None:
                body += line + '\n'

def clean_article_tags(input_filename, output_filename):
    """
    Copy the input file to the output file, dropping <article ...> and
    </article> tag lines.
    Same as: grep -ve "^</\?article" < INPUT_FILE > OUTPUT_FILE
    """
    with open(input_filename) as input_file, open(output_filename, 'w') as output_file:
        kept = (line for line in input_file
                if not line.startswith(('<article', '</article')))
        output_file.writelines(kept)

def write_article(article, index, output_file, **kargs):
    """
    Append ``article`` to ``output_file`` wrapped in <article> tags.

    Keyword arguments become extra attributes of the opening tag, after the
    mandatory ``id`` attribute.
    """
    attributes = ''
    for prop, value in kargs.items():
        attributes += ' {}="{}"'.format(prop, value)
    output_file.write('<article id="{}"{}>\n'.format(index, attributes))
    output_file.write(article)
    output_file.write('</article>\n')

def segment_article_corpus(input_corpus, output_corpus, total_articles, params=default_params, nb_segments=8):
    """
    Segment a corpus of articles into several corpora (for parallel processing).

    Article indices are shuffled, then partitioned into roughly equal
    segments; segment i is written to '<output_corpus>.<i>.<lang>' for both
    languages of the pair.
    """
    indices = list(range(total_articles))  # list() so shuffle also works on Python 3
    shuffle(indices)

    # Bug fix: with ``from __future__ import division`` the original
    # ``total_articles / nb_segments`` produced a float, which made the slice
    # bounds below (and range()) fail; floor division is what is intended.
    segment_size = 1 + total_articles // nb_segments
    segments = [set(indices[i * segment_size: (i + 1) * segment_size])
            for i in range(1 + total_articles // segment_size)]

    def segment_file(lang):
        # One output file per segment, e.g. corpus.0.en, corpus.1.en, ...
        files = [uopen('{}.{}.{}'.format(output_corpus, i, lang), 'w')
                for i in range(len(segments))]
        try:
            for index, article in read_articles('{}.{}'.format(input_corpus, lang)):
                # Route each article to the segment that owns its index.
                for s, f in izip(segments, files):
                    if int(index) in s:
                        write_article(article, index, f)
        finally:
            # Close the files even if reading or writing fails midway.
            for f in files:
                f.close()

    segment_file(params.src_lang)
    segment_file(params.trg_lang)

def article_corpus_apply(input_corpus, output_corpus, params, function):
    """
    Apply ``function`` to each aligned article pair of ``input_corpus`` and
    write the transformed pairs to ``output_corpus`` (same ids, same order).
    """
    src_input_filename, trg_input_filename = filenames(input_corpus, params)
    article_pairs = izip(read_articles(src_input_filename),
                         read_articles(trg_input_filename))

    with open_corpus(output_corpus, params, 'w') as (src_output_file, trg_output_file):
        for (index, src_txt), (_, trg_txt) in article_pairs:
            src_result, trg_result = function(src_txt, trg_txt)
            write_article(src_result + '\n', index, src_output_file)
            write_article(trg_result + '\n', index, trg_output_file)

