#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from subprocess import Popen, PIPE, call

help_msg = """
Script to segment a text file into sentences and words.
The file may contain <article> tags (article corpus), the tags will remain untouched.

Usage:
    {prog} OPENNLP_BIN SENT_MODEL TOK_MODEL < INPUT_FILE > OUTPUT_FILE
Example of usage:
    {prog} ~/opennlp/bin/opennlp ~/opennlp/models/en-sent.bin ~/opennlp/models/en-token.bin < wiki.en > wiki.tok.en

The script uses OpenNLP for sentence segmentation, and moses' tokenizer.perl for word tokenization.

OPENNLP_BIN: path of the OpenNLP binary
SENT_MODEL: path of the sentence model used with OpenNLP
TOK_MODEL: path of the word tokenization model used with OpenNLP
INPUT_FILE: text file to tokenize, encoded in UTF-8.
"""

# sed scripts (extended regex, used with `sed -re`) that undo the damage the
# tokenizer does to <article id="..."> / </article> tags: they collapse any
# whitespace/entity-escaped variant back to the canonical tag form.
# Raw strings: the previous non-raw literals relied on invalid escape
# sequences like "\s" (a SyntaxWarning in modern Python); the runtime byte
# values are unchanged.
regex1 = r's/^(&lt;|<)\s*article\s+id\s*=\s*("|&quot;)\s*([0-9]+)\s*("|&quot;)\s*(&gt;|>)/<article id="\3">/'
regex2 = r's/^(&lt;|<)\s*\/\s*article\s*(&gt;|>)/<\/article>/'

def opennlp_tokenize():
    """
    Tokenize stdin into sentences and words using OpenNLP only.

    Pipeline: OpenNLP SentenceDetector -> sed (drop empty lines) ->
    OpenNLP TokenizerME -> two sed passes that restore <article> tags
    mangled by the tokenizer.  Reads from this process's stdin, writes
    the result to stdout.

    Expects sys.argv[1:] to be exactly: OPENNLP_BIN SENT_MODEL TOK_MODEL.
    Exits with the help message on a wrong argument count.
    """
    try:
        opennlp_bin, sent_model, tok_model = sys.argv[1:]
    except ValueError:  # wrong number of command-line arguments
        sys.exit(help_msg.format(prog=sys.argv[0]))

    cmd0 = [opennlp_bin, 'SentenceDetector', sent_model]
    cmd1 = ['sed', "/^$/d"]  # remove the blank lines SentenceDetector emits
    cmd2 = [opennlp_bin, 'TokenizerME', tok_model]
    # Fixes the tokenization of the article tags.
    cmd3 = ['sed', '-re', regex1]
    cmd4 = ['sed', '-re', regex2]

    p0 = Popen(cmd0, stdout=PIPE)
    p1 = Popen(cmd1, stdin=p0.stdout, stdout=PIPE)
    p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE)
    p3 = Popen(cmd3, stdin=p2.stdout, stdout=PIPE)
    p4 = Popen(cmd4, stdin=p3.stdout, stdout=sys.stdout)

    # Close the parent's copies of the intermediate pipe ends so upstream
    # stages receive SIGPIPE if a downstream stage exits early.
    for p in (p0, p1, p2, p3):
        p.stdout.close()
    # Wait for the last stage so the script does not exit mid-pipeline.
    p4.wait()

def moses_tokenize():
    """
    Word tokenization using Moses' tokenizer.perl instead.

    Pipeline: OpenNLP SentenceDetector -> sed (drop empty lines) ->
    Moses tokenizer.perl (8 threads) -> two sed passes that restore
    <article> tags mangled by the tokenizer.  Reads from this process's
    stdin, writes the result to stdout.

    Expects sys.argv[1:] to be exactly:
    OPENNLP_BIN SENT_MODEL TOKENIZER_BIN LANG.
    Exits with a usage message on a wrong argument count.
    """
    try:
        opennlp_bin, opennlp_model, tokenizer_bin, lang = sys.argv[1:]
    except ValueError:  # wrong number of command-line arguments
        sys.exit('Usage: {} OPENNLP_BIN SENT_MODEL TOKENIZER_BIN LANG < INPUT_FILE'.format(sys.argv[0]))

    cmd0 = [opennlp_bin, 'SentenceDetector', opennlp_model]
    cmd1 = ['sed', "/^$/d"]  # remove the blank lines SentenceDetector emits
    cmd2 = [tokenizer_bin, '-l', lang, '-threads', '8']
    # Fixes the tokenization of the article tags.
    cmd3 = ['sed', '-re', regex1]
    cmd4 = ['sed', '-re', regex2]

    p0 = Popen(cmd0, stdout=PIPE)
    p1 = Popen(cmd1, stdin=p0.stdout, stdout=PIPE)
    p2 = Popen(cmd2, stdin=p1.stdout, stdout=PIPE)
    p3 = Popen(cmd3, stdin=p2.stdout, stdout=PIPE)
    p4 = Popen(cmd4, stdin=p3.stdout, stdout=sys.stdout)

    # Close the parent's copies of the intermediate pipe ends so upstream
    # stages receive SIGPIPE if a downstream stage exits early.
    for p in (p0, p1, p2, p3):
        p.stdout.close()
    # Wait for the last stage so the script does not exit mid-pipeline.
    p4.wait()

# Entry point: the default mode uses OpenNLP for both sentence segmentation
# and word tokenization; switch the call to moses_tokenize() to use Moses'
# tokenizer.perl for words instead (different CLI arguments — see its usage).
if __name__ == '__main__':
    opennlp_tokenize()

