#!/usr/bin/env python
# Cross-Language Information Retrieval System
# Implements a simple CLIR system on the EUROPARL documents contained in the 
# NLTK corpora.
#
# David Coles (239715), 2010

import codecs
import logging
from optparse import OptionParser
import os
import sys

# Import desired corpora
import nltk.corpus.europarl_raw as europarl_raw

from query import BM25, snippet
from index import SimpleIndex

DEFAULT_ENCODING = 'utf-8'
SUPPORTED_LANGS = ["english", "french"]
SNIPPET_SIZE = 500
TOPN = 10

# Ignore warnings below error
logging.basicConfig(level=logging.error)


def get_chapters(corpus, fileid):
    """ Given a europarl_raw corpus (such as english or french) and a corpus
    fileid, returns the chapters as a list of strings.

    Each chapter is flattened to a single string: words of a sentence are
    joined with spaces and sentences are joined with newlines.
    """
    return ['\n'.join(' '.join(sentence) for sentence in chapter)
            for chapter in corpus.chapters(fileid)]


def load_indexes(regenerate=False):
    """ Load the Indexes for supported languages. Will use a cached index
    unless one does not exist or regenerate flag is set.
    """
    indexes = {}
    for lang in SUPPORTED_LANGS:
        cache_path = "%s.dat" % lang
        if os.path.exists(cache_path) and not regenerate:
            # Deserialize the previously-built index from disk
            with open(cache_path) as cache_file:
                indexes[lang] = SimpleIndex(cache_file)
            continue
        # No usable cache -- build the index afresh and persist it
        sys.stderr.write("Generating index for %s\n" % lang)
        with open(cache_path, 'w') as cache_file:
            corpus = europarl_raw.__dict__[lang]
            indexes[lang] = generate_index(corpus, cache_file)

    return indexes


def generate_index(corpus, file_obj):
    """ Given a europarl_raw corpus (such as english or french), generate an
    index and save a serialization to a file-like object provided.
    """
    # The Europarl corpus in NLTK contains approximately 10 days procedings
    # for each language. To make this interesting for indexing we'll index the
    # chapters for each day giving us ~80 "documents".
    # http://code.google.com/p/nltk/issues/detail?id=415

    # Every (fileid, chapter-number) pair becomes one indexed document
    index = SimpleIndex()
    for fileid in corpus.fileids():
        for chapterid, text in enumerate(get_chapters(corpus, fileid)):
            index.add_document((fileid, chapterid), text)

    # Persist the serialized index alongside returning it
    index.save(file_obj)
    return index


def do_query(query, indexes, lang):
    """ Runs a search query for a particular language and returns the ranked
    results as a list of (score, lang, document_id) tuples.
    """
    # Rank documents in this language's index with the BM25 model
    ranker = BM25(indexes[lang])
    hits = ranker.search(query)

    # Tag each (score, doc_id) hit with its language
    ranked = []
    for score, doc_id in hits:
        ranked.append((score, lang, doc_id))
    return ranked


def load_dictionaries(source_lang=None):
    """ Loads up all the required bilingual dictionaries for translating a
    query out of its source language.

    source_lang -- the language the query is written in. Defaults to the
                   module-level ``query_lang`` set by the ``__main__``
                   driver (kept for backward compatibility with the
                   original zero-argument call).

    Returns a dict mapping target language -> bilingual dictionary.
    Raises an IOError if one of the required dictionaries could not be found.
    """
    if source_lang is None:
        # Backward-compatible fallback on the global set in __main__
        source_lang = query_lang

    dictionaries = {}
    for lang in SUPPORTED_LANGS:
        if lang == source_lang:
            # No dictionary needed for the query's own language
            continue
        # Dictionary files are named e.g. "english-french.dict"
        filename = "%s-%s.dict"%(source_lang, lang)
        try:
            with codecs.open(filename, 'r', DEFAULT_ENCODING) as f:
                dictionaries[lang] = load_dictionary(f)
        except IOError:
            # Re-raise with a clearer message naming the missing file
            raise IOError(
                    "Could not find required dictionary: %s"%filename)

    return dictionaries


def load_dictionary(file_obj):
    """ Loads up a bilingual dictionary from a file-like object.
    Expects file to be of the format `WORD: MOT1; MOT2; MOT3; ...`.

    Ignores multi-word translations.

    Returns a dictionary mapping each (whitespace-stripped) headword to a
    set of single-word translation strings.
    """
    dictionary = {}

    for line in file_obj:
        l = line.strip()
        try:
            # Split on the FIRST ':' only, so translations that themselves
            # contain a colon are parsed rather than discarded
            e, fs = l.split(':', 1)
        except ValueError:
            # Make the reader slightly robust: skip unparseable lines
            logging.warning("Could not parse dictionary line: %s", l)
            continue

        # Collect up the set of translations
        translations = set()
        for phrase in fs.split(';'):
            words = phrase.split()
            # Only want 1-1 word translations
            if len(words) == 1:
                translations.add(words[0])

        # Strip the headword so "WORD : MOT" keys the same as "WORD: MOT"
        dictionary[e.strip()] = translations

    return dictionary


def query_translation(query, dictionary):
    """ Translates a query term-by-term using a bilingual dictionary.

    Terms with no entry in the dictionary contribute nothing to the
    translated query.
    """
    # Simple unbalanced query: every known translation of every term
    translated = []
    for term in query:
        for word in dictionary.get(term, []):
            translated.append(word)

    return translated


def generate_snippet(corpus, document_id, query, size=SNIPPET_SIZE, index=None):
    """ Generates a snippet for a particular document given a query.

    corpus      -- europarl_raw corpus the document belongs to
    document_id -- (fileid, chapter) pair identifying the document
    query       -- iterable of query terms to locate in the document
    size        -- maximum length of the returned snippet
    index       -- index used to look up term offsets; defaults to the
                   module-level ``indexes[lang]`` set by the ``__main__``
                   driver (kept for backward compatibility)

    BUG FIX: the original body ignored its ``document_id`` and ``query``
    parameters and read the module globals ``document`` and
    ``queries[lang]`` instead, which only worked when called from
    ``__main__``. The parameters are now used directly.
    """
    if index is None:
        # NOTE(review): falls back on globals created in the __main__
        # block; prefer passing the index explicitly
        index = indexes[lang]

    fileid, chapter = document_id
    text = get_chapters(corpus, fileid)[chapter]

    # Gather the in-document offsets of every distinct query term
    offsets = []
    for term in set(query):
        offsets += index[term].get(document_id, [])

    # Stick together snippets, remove newlines and limit to size
    snip = '... ...'.join(snippet(text, offsets))
    snip = snip.replace('\n', '  ')

    return snip[:size]


# Main Function: parse arguments, build/load indexes, translate the query
# into every other supported language, and print the top-ranked results.
if __name__ == "__main__":
    # Get command line options
    parser = OptionParser()
    parser.add_option("-i", "--index", action="store_true", 
            dest="regenerate_index", help="Force regenerate indexes")
    parser.add_option("-e", "--encoding",
            dest="output_encoding",
            default=DEFAULT_ENCODING,
            help="Set the output encoding (defaults to utf-8)")

    (options, args) = parser.parse_args()

    # Wrap stdout in a StreamWriter to ensure we can always write unicode
    # chars; unencodable characters are replaced rather than raising
    try:
        output_writer = codecs.getwriter(options.output_encoding)
        sys.stdout = output_writer(sys.stdout, 'replace')
    except LookupError, e:
        # Unknown encoding name supplied via -e
        print >> sys.stderr, "Error: %s"%str(e)
        sys.exit(1)

    # Read Query: first positional argument is the query language, the
    # remaining arguments are the query terms
    if len(args) < 1:
        print >> sys.stderr, (
                "Error: Must provide query language as first argument")
        sys.exit(1)
    elif len(args) <2:
        print >> sys.stderr, "Error: Must provide query on command line"
        sys.exit(1)

    # NOTE: query_lang is a module-level global that load_dictionaries()
    # also reads by name
    query_lang = args[0].lower()
    query = args[1:]

    if query_lang not in SUPPORTED_LANGS:
        print >> sys.stderr, "Error: Language '%s' is not supported."%(
                query_lang)
        print >> sys.stderr, "Supported languages: %s"%(
                ', '.join(SUPPORTED_LANGS))
        sys.exit(1)

    print "Searching for '%s' (%s)"%(' '.join(query), query_lang)
    print

    # Load indexes (forcing a rebuild when -i was given); `indexes` is a
    # module-level global that generate_snippet() also reads by name
    indexes = load_indexes(options.regenerate_index)

    # Load dictionaries for translating the query into the other languages
    try:
        dictionaries = load_dictionaries()
    except IOError, e:
        print >> sys.stderr, "Error: %s"%e
        sys.exit(2)

    # Search each language: the query is used verbatim for its own
    # language and translated word-by-word for every other one.
    # `queries` maps language -> query terms used for that language.
    results = []
    queries = {}
    for lang in SUPPORTED_LANGS:
        if lang == query_lang:
            # Normal IR
            queries[lang] = query
            results += do_query(query, indexes, lang)
        else:
            # Cross Language IR
            queries[lang] = query_translation(query, dictionaries[lang])
            results += do_query(queries[lang], indexes, lang)


    # Print out queries
    print "Queries"
    print "======="
    for lang in SUPPORTED_LANGS:
        print "%s: %s"%(lang.upper(), ' '.join(queries[lang]))
    print

    # Find top TOPN results; tuples compare score-first since score is the
    # first element of each (score, lang, document_id) tuple
    print "Results"
    print "======="
    results.sort(reverse=True)
    top = results[:TOPN]
    for i in range(len(top)):
        # NOTE: `lang` and `document` are module-level names that
        # generate_snippet() may read by name
        score, lang, document = top[i]
        # Use fileid, chapter for name
        name = "%s:%d"%(document[0], document[1])
        corpus = europarl_raw.__dict__[lang]

        # Stop if score hits 0 (remaining results match no query term)
        if score <= 0:
            break

        # Print result
        print "%d - %s (Score: %.2f)"%(i+1, name, score)
        print generate_snippet(corpus, document, queries[lang])
        print