# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based natural language processing tools for Estonian.
# Copyright (C) 2013 University of Tartu
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from pandas import Series, DataFrame, ExcelFile, ExcelWriter
from nltk.stem.snowball import EnglishStemmer, SpanishStemmer
from itertools import izip
from pdict import PersistentDict as PyCorpus
from treetagger import TreeTagger
from paths import *
import pandas
import nltk
import codecs
import re
import sys
import os
import json
import string
import subprocess
import tempfile
import cPickle
import cStringIO
import numpy as np
import xlrd
import math

from patnlp import *
from wordnet import Wordnet

################################################################################
# Helper functions for working with standard PyCorpus instances.
################################################################################

def doc_sentences_positions(doc):
    '''Generator for tuples (sent_start, sent_end).

    Walks the boolean `start` series of doc: every True entry after index 0
    closes the previous sentence and opens a new one; the final sentence
    always runs to the end of the document.  An empty document still yields
    a single empty span (0, 0), matching the original behaviour.
    '''
    starts = list(doc.start)
    first = 0
    for i in range(1, len(starts)):
        # A truthy flag marks the first token of a new sentence.
        if starts[i]:
            yield (first, i)
            first = i
    yield (first, len(starts))
    
def doc_plain_sentences(doc):
    '''Retrieve a list of plain text sentences from doc.

    Each sentence is the space-joined run of tokens between consecutive
    sentence boundaries reported by doc_sentences_positions.
    '''
    tokens = list(doc.word)
    return [' '.join(tokens[begin:stop])
            for begin, stop in doc_sentences_positions(doc)]

def doc_plain_text(doc):
    '''Render doc as plain text, one sentence per line.'''
    sentences = doc_plain_sentences(doc)
    return '\n'.join(sentences)

################################################################################
# Parsing plain text corpora
################################################################################

def sentence_splitter(lineiter):
    '''From a generator of lines, create a generator of sentences.

    Uses NLTK's pre-trained English punkt model to detect sentence
    boundaries within each input line.
    '''
    detector = nltk.data.load('tokenizers/punkt/english.pickle')
    for raw_line in lineiter:
        for sent in detector.tokenize(raw_line.strip()):
            yield sent

def line_iterator(file_obj):
    '''Given a file object, create a line generator.

    Repeatedly calls readline() and stops at the empty string (EOF),
    exactly like the original explicit while-loop.
    '''
    return iter(file_obj.readline, '')

def parse_plain_doc(path):
    '''Parse a plain text corpus from a file.

    The file is decoded as UTF-8 (a leading BOM, if present, is skipped).
    Returns the DataFrame produced by parse_plain_doc_from_stream.
    '''
    f = codecs.open(path, 'rb', 'utf-8-sig')
    try:
        return parse_plain_doc_from_stream(f)
    finally:
        # Close the handle even when parsing raises (original leaked it).
        f.close()

def parse_plain_doc_from_string(s):
    '''Parse a plain text corpus given as a unicode string.'''
    byte_stream = cStringIO.StringIO(s.encode('utf-8'))
    reader = codecs.getreader('utf-8')(byte_stream)
    return parse_plain_doc_from_stream(reader)

def parse_plain_doc_from_stream(stream):
    '''Parse a plain text corpus from a stream.

    Produces a DataFrame with a `word` series plus boolean `start`/`end`
    series flagging the first and last token of each sentence.
    '''
    tokenizer = nltk.PunktWordTokenizer()
    words, starts, ends = [], [], []
    for sentence in sentence_splitter(line_iterator(stream)):
        tokens = tokenizer.tokenize(sentence)
        words.extend(tokens)
        if tokens:
            # First token opens the sentence, last token closes it.
            starts += [True] + [False] * (len(tokens) - 1)
            ends += [False] * (len(tokens) - 1) + [True]
    return DataFrame({'word': words, 'start': starts, 'end': ends})

def parse_plain_corpus(plainpath, corpuspath):
    '''Parse a plain text file into a PyCorpus at corpuspath.

    Documents in the input file are separated by blank lines; the first
    line of each document is its title (used as the corpus key) and the
    remaining lines are its contents.
    '''
    corpus = PyCorpus(corpuspath)
    f = codecs.open(plainpath, 'rb', 'utf-8-sig')
    try:
        data = f.read()
    finally:
        # Original left the file handle open.
        f.close()
    # NOTE: the original pattern was 's*?' (a literal, lazily-matched "s");
    # '\s*?' was almost certainly intended.  Both match the empty string
    # lazily, so the split behaviour is unchanged.
    docs = re.split(r'\s*?\r?\n\r?\n', data)
    data = None
    corpus.autocommit(False)
    for doc in docs:
        lines = re.split(r'\r?\n', doc.strip())
        title = lines[0].strip()
        contents = '\n'.join(lines[1:]).strip()
        text_stream = cStringIO.StringIO(contents.encode('utf-8'))
        utf8_stream = codecs.getreader('utf-8')(text_stream)
        corpus[title] = parse_plain_doc_from_stream(utf8_stream)
    corpus.commit()
    corpus.close()

################################################################################
# Parsing Wikipedia corpora created with annotated_wikiextractor.py
################################################################################

def parse_wiki_doc_from_stream(stream):
    '''Parse an annotated Wikipedia document from a stream.

    Text is split into sentences with NLTK punkt; ``[[target|anchor]]`` and
    ``[[target]]`` markup is unwrapped so that every token of the anchor
    text carries the link target in the `link` series (plain tokens get '').
    Returns a DataFrame with word/start/end/link series.
    '''
    ss = sentence_splitter(line_iterator(stream))
    words, starts, ends, links = [], [], [], []
    word_tokenizer = nltk.PunktWordTokenizer()
    for s in ss:
        # parse links
        lastpos = 0
        wws = []
        for m in re.finditer('\[\[[^\]]*\]\]', s):
            pos = (m.start(0), m.end(0))
            mtext = m.group(0)
            # add everything before the link as plain (unlinked) tokens
            ws = word_tokenizer.tokenize(s[lastpos:pos[0]])
            wws.extend(ws)
            links.extend(['']*len(ws))
            # process the link; without a pipe, the anchor text doubles
            # as the link target
            word = mtext[2:-2]
            link = word
            if '|' in mtext:
                # [[target|anchor]]: target before the pipe, anchor after
                idx = mtext.index('|')
                link = mtext[2:idx]
                word = mtext[idx+1:-2]
            ws = word_tokenizer.tokenize(word)
            wws.extend(ws)
            links.extend([link]*len(ws))
            lastpos = pos[1]
        # add the last chunk after the final link
        ws = word_tokenizer.tokenize(s[lastpos:])
        wws.extend(ws)
        links.extend(['']*len(ws))
        words.extend(wws)
        if len(wws) >= 1:
            # first token starts the sentence, last one ends it
            starts.append(True)
            starts.extend([False]*(len(wws) - 1))
            ends.extend([False]*(len(wws) - 1))
            ends.append(True)
    # all four series must stay aligned token-for-token
    assert (len(words) == len(starts))
    assert (len(words) == len(ends))
    assert (len(words) == len(links))
    return DataFrame({'word': words,
                      'start': starts,
                      'end': ends,
                      'link': links})

def import_wikipedia(path, corpus_path):
    '''Import documents extracted by annotated_wikiextractor.py.

    path        - directory containing the extracted documents, one JSON
                  object per line per file (subdirectories are skipped).
    corpus_path - path of the PyCorpus used to store the parsed corpus.

    Returns the committed (still open) corpus, as the original did.
    '''
    corpus = PyCorpus(corpus_path)
    corpus.autocommit(False)

    for fname in os.listdir(path):
        newpath = os.path.join(path, fname)
        if os.path.isdir(newpath):
            continue
        f = codecs.open(newpath, 'rb', 'utf-8-sig')
        try:
            for line in iter(f.readline, ''):
                docdata = json.loads(line)
                doc_id = unicode(docdata['title'])
                text_stream = cStringIO.StringIO(docdata['text'].encode('utf-8'))
                utf8_stream = codecs.getreader('utf-8')(text_stream)
                corpus[doc_id] = parse_wiki_doc_from_stream(utf8_stream)
        finally:
            # Original leaked every file handle; release it even on errors.
            f.close()

    corpus.commit()

    return corpus

################################################################################
# Parsing t3mesta corpora
################################################################################

class T3Data(object):
    '''Parser for t3mesta output with optional ner type column.

    Holds the closed sets of morphological attribute values recognised by
    the get_t3_* helper functions below.
    '''

    # Single-letter part-of-speech codes emitted by t3mesta.
    word_types = set(['S', 'V', 'C', 'H', 'K', 'D', 'N', 'Z', 'U', 'J', 'Y',
                      'I', 'P', 'A', 'O', 'X', 'G'])
    # Estonian case abbreviations (nominative, genitive, partitive, ...).
    cases      = set(['n', 'g', 'p', 'ill', 'in', 'el', 'all', 'ad', 'abl',
                      'tr', 'ter', 'es', 'ab', 'kom', 'adt'])
    # Verb inflection endings; duplicates in the literal ('sid', 'ksime')
    # are harmless since this is a set.
    verb_types = set(['n', 'd', 'b',    # mina teen, sina teed, tema teeb
                  'me', 'te', 'vad',    # meie teeme, teie teete, nemad teevad
                  'sin', 'sid', 's',    # mina tegin, sina tegid, tema tegi
                  'sime', 'site', 'sid',# meie tegime, teie tegite, nemad tegid
                  'ma', 'mas', 'mast',  # tegema, tegemas, tegemast
                  'ti', 'taks', 'takse',# midagi tehti, midagi tehtaks, tehakse
                  'o', 'ge',  'gem',    # tee seda, tehke seda, tehkem seda
                  'vat', 'maks', 'tagu',# olevat, selgitamaks, langetagu
                  'nuks', 'tavat',      # laulnuks, kahtlustatavat
                  'nuksin', 'tav',      # laulnuksin, arvutatav
                  'nuksid', 'v',        # laulnuksid, seonduma,
                  'nuksime', 'nuvat',   # saanuksime, põletanuvat?
                  'tuks',               # koormatuks
                  'tud', 'nud', 'da',   # tehtud laps, jooksnud mees, teha
                  'des', 'ks', 'ksin',  # tehes midagi, teeks midagi, teeksin
                  'ksite', 'ksime',     # teeksite midagi, teeksime midagi
                  'ksid', 'gu', 'mata', # teeksid midagi, tehku, tegemata
                  'ksime', 'tama', 'ta' # teeksime midagi, langetama puid, saada
                  ])

def parse_t3_doc(path):
    '''Parse a t3mesta document from a UTF-8 encoded file at path.'''
    f = codecs.open(path, 'rb', 'utf-8')
    try:
        return parse_t3_doc_from_stream(f)
    finally:
        # Close the handle even when parsing raises (original leaked it).
        f.close()

def parse_t3_doc_from_stream(stream):
    '''Parse a t3mesta document from stream.

    Each word tuple is transposed into eight parallel columns; boolean
    start/end series mark the sentence boundaries.
    '''
    # Column order matches the word tuples produced by read_t3_word:
    # (orig, lemma, wtype, case, plur, vtype, negation, label)
    columns = [[] for _ in range(8)]
    starts = []; ends = []
    for sentence in read_t3_sentences(stream):
        series = zip(*sentence)
        for column, values in zip(columns, series):
            column.extend(values)
        n = len(series[0])
        ends.extend([False] * (n - 1))
        if n > 0:
            starts.append(True)
            ends.append(True)
        starts.extend([False] * (n - 1))
    names = ['word', 'lemma', 'wtype', 'case', 'plur', 'vtype',
             'negation', 'ne_type']
    frame = dict(zip(names, columns))
    frame['start'] = starts
    frame['end'] = ends
    return DataFrame(frame)

def parse_t3_doc_from_string(string):
    '''Parse a t3mesta document given as a unicode string.'''
    byte_stream = cStringIO.StringIO(string.encode("utf-8"))
    return parse_t3_doc_from_stream(codecs.getreader('utf-8')(byte_stream))

def read_t3_sentences(stream):
    '''Read all sentences from stream until it is exhausted.'''
    # read_t3_sentence returns [] once the stream is exhausted, which is
    # exactly the sentinel value iter() stops on (compared with ==).
    return list(iter(lambda: read_t3_sentence(stream), []))

def read_t3_sentence(stream):
    '''Read words from stream until an empty line or EOF ends the sentence.'''
    words = []
    while True:
        token = read_t3_word(stream)
        if token is None:
            # Empty line / EOF: the sentence is complete (possibly empty).
            break
        words.append(token)
    return words

def read_t3_word(stream):
    '''Read a single analysed word from stream.

    Returns None on an empty line (sentence boundary or EOF), otherwise
    the tuple (orig, lemma, wtype, case, plur, vtype, negation, label).
    Raises Exception when a line has fewer than three tab-separated fields.
    '''
    line = stream.readline().strip()
    if not line:
        return None

    # t3mesta attributes are separated by tabs
    tokens = [tok.strip() for tok in line.split('\t')]
    if len(tokens) < 3:
        raise Exception('Could not read line {0}'.format(line))

    # morphological attribute list from the third column
    attributes = get_t3_attributes(tokens[2])

    # basic features
    orig = tokens[0]
    lemma = fix_t3_lemma(tokens[1])
    wtype = get_t3_word_type(attributes)
    # the optional fourth column carries the NER label
    label = get_t3_label(tokens[3]) if len(tokens) == 4 else ''

    # verb-specific vs nominal features
    case = plur = negation = vtype = None
    if wtype == 'V':
        negation = get_t3_negation(attributes)
        vtype = get_t3_verb_type(attributes)
        if vtype is None and negation is False:
            warn = codecs.getwriter('utf-8')(sys.stderr)
            warn.write(u'WARNING: Invalid verb {0}\n'.format(line))
    else:
        case = get_t3_case(attributes)
        plur = get_t3_plurality(attributes)

    return (orig, lemma, wtype, case, plur, vtype, negation, label)

def fix_t3_lemma(lemma):
    '''Keep only the canonical form of the lemma, i.e. remove all the
       additional notation.

    Lowercases the lemma, strips '_' and '=' markers and truncates at the
    first '+' (unless '+' is the leading character).  Single-character
    lemmata (e.g. punctuation) are only lowercased.
    '''
    lemma = lemma.strip().lower()
    if len(lemma) > 1:
        lemma = lemma.replace('_', '').replace('=', '')
        # str.find returns -1 when '+' is absent, so the single idx > 0
        # check replaces the original's try/except around str.index plus
        # its unreachable idx != -1 test, with identical behaviour.
        idx = lemma.find('+')
        if idx > 0:
            lemma = lemma[:idx]
    return lemma

def get_t3_attributes(attributes):
    '''Get the list of attributes from t3mesta format for the word.

    Pipe and underscore characters (used in the pfe program) are removed
    from each whitespace-separated attribute automatically.
    '''
    return [chunk.strip().replace('|', '').replace('_', '')
            for chunk in attributes.split()]

def get_t3_word_type(attributes):
    '''Extract the word type (part-of-speech code) from given attributes.

    Returns the first attribute that is a known word type, or None.
    '''
    return next((attr for attr in attributes
                 if attr in T3Data.word_types), None)

def get_t3_case(attributes):
    '''Extract the grammatical case from given attributes.

    Returns the first attribute that is a known case, or None.
    '''
    return next((attr for attr in attributes
                 if attr in T3Data.cases), None)

def get_t3_plurality(attributes):
    '''Get the plurality ('sg' or 'pl') from given attributes, or None.'''
    for attr in attributes:
        if attr in ('sg', 'pl'):
            return attr
    return None

def get_t3_verb_type(attributes):
    '''Get the verb type (inflectional ending code) from given attributes.

    Returns the first attribute that is a known verb type, or None.
    '''
    return next((attr for attr in attributes
                 if attr in T3Data.verb_types), None)

def get_t3_negation(attributes):
    '''Return True when the attributes contain the 'neg' marker.'''
    return 'neg' in attributes

def get_t3_label(label):
    '''Get the annotated label from given attributes.

    Currently a pass-through; BIO prefixes ('B-', 'I-') are preserved.
    '''
    return label

def as_t3doc(doc):
    '''Convert any document to t3 document. Uses only words and start end
       series from the input document.

    Pipes the document's plain text through the external `t3mesta`
    morphological analyser (sentences delimited by a '**********' marker),
    reshapes its output into the tab-separated format expected by
    parse_t3_doc_from_stream, and returns the resulting DataFrame.
    '''
    SEP = '\n**********\n'
    # analyze the code with t3mesta
    f = tempfile.TemporaryFile()
    g = codecs.getwriter('utf-8')(f, 'strict')
    first = True
    for word, start, end in izip(doc['word'], doc['start'], doc['end']):
        # emit the sentence separator before every sentence but the first
        if start and not first:
            f.write(SEP)
        first = False
        g.write(word + ' ')
    g.flush()
    f.flush()
    f.seek(0)
    p = subprocess.Popen(['t3mesta', '-cio', 'utf8', '-Y', '+1'],
                         stdin=f,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    out = unicode(out, 'utf-8')
    f.close()
    # convert and separate sentences
    f = tempfile.TemporaryFile()
    g = codecs.getwriter('utf-8')(f, 'strict')
    for line in out.strip().split('\n'):
        print line  # NOTE(review): debug output left in; consider removing
        if line.startswith('**********'):
            # sentence boundary marker -> blank line for the t3 parser
            g.write('\n')
            continue
        parts = line.split()
        if len(parts) >= 3:
            word, lemma = parts[:2]
            attrs = u' '.join(parts[2:])
            # keep only the analysis between the first two '//' markers
            i = attrs.find('//')
            j = attrs.find('//', i+2)
            attrs = attrs[i:j]
            attrs = string.replace(attrs.strip(), '/', '')
            attrs = string.replace(attrs, '_', '')
            attrs = string.replace(attrs, ',', '')
            g.write(u'{0}\t{1}\t{2}\t\n'.format(word, lemma, attrs))
    g.flush(); f.flush()
    # now parse it with t3mesta parser
    f.seek(0)
    doc = parse_t3_doc_from_stream(codecs.getreader('utf-8')(f, 'strict'))
    f.close()
    return doc

def as_t3corpus(orig_path, t3_path):
    '''Convert a corpus at orig_path to t3mesta corpus to t3_path.

    Keys already present in the destination are skipped, so an interrupted
    conversion can be resumed.
    '''
    source = PyCorpus(orig_path)
    target = PyCorpus(t3_path)
    target.autocommit(False)

    existing = set(target.keys())
    for doc_id in source.keys():
        if doc_id not in existing:
            target[doc_id] = as_t3doc(source[doc_id])

    target.commit()

    source.close()
    target.close()

def parse_cnll_chunk(stream):
    '''Parse a two newline separated set of content with title.

    Returns a (title, contents) tuple, or None at end of input or when the
    chunk contains no content lines.
    '''
    title = stream.readline()
    if title == '':
        return None
    rest = []
    line = stream.readline()
    while line != '':
        if len(line.strip()) == 0:
            break
        # Strip the trailing newline before joining: the original kept it
        # and re-joined with '\n', inserting a blank line after every
        # content line, which made the downstream t3 parser treat every
        # word as its own one-word sentence.
        rest.append(line.rstrip('\r\n'))
        line = stream.readline()
    if len(rest) > 0:
        # Strip the title too so corpus keys carry no trailing newline.
        return title.strip(), '\n'.join(rest)

def parse_cnll_corpus(conll_path, t3_path):
    '''Parse a special cnll format corpus, where documents are separated
       with two newlines and also contain a title.

    Each chunk's title becomes the corpus key; its body is parsed as a
    t3mesta document.
    '''
    f = codecs.open(conll_path, 'rb', 'utf-8')
    corpus = PyCorpus(t3_path)
    corpus.autocommit(False)
    try:
        chunk = parse_cnll_chunk(f)
        while chunk is not None:
            title, body = chunk
            corpus[title] = parse_t3_doc_from_string(body)
            chunk = parse_cnll_chunk(f)
        corpus.commit()
    finally:
        # The original never closed the input handle; always release
        # both resources, even when parsing raises.
        f.close()
        corpus.close()


################################################################################
# Creating POS-tagged corpora for various other languages.
################################################################################

def as_eng_postagged_doc(doc):
    '''POS-tag and stem an English document in place.

    Uses nltk's default tagger for the `pos` series and the snowball
    EnglishStemmer for the `lemma` series (note: stemming, not true
    lemmatisation).  Mutates and returns doc.
    '''
    # Materialise the word list once instead of twice.
    words   = list(doc.word)
    tags    = [t for _, t in nltk.pos_tag(words)]
    stemmer = EnglishStemmer()
    lemmata = [stemmer.stem(w) for w in words]
    # Align explicitly on doc's index: a bare Series aligns on the default
    # RangeIndex and would scramble / NaN rows for a non-default index.
    doc['pos']   = Series(tags, index=doc.index)
    doc['lemma'] = Series(lemmata, index=doc.index)
    return doc

def as_eng_postagged_corpus(orig_path, eng_path):
    '''Uses nltk default tagger.

    Tags every document of the corpus at orig_path and stores the results
    in a new corpus at eng_path.
    '''
    assert (orig_path != eng_path)
    source = PyCorpus(orig_path)
    target = PyCorpus(eng_path)
    target.autocommit(False)
    for doc_id in source.keys():
        target[doc_id] = as_eng_postagged_doc(source[doc_id])
    target.commit()
    source.close()
    target.close()

def as_treetagger_doc(doc, encoding='latin-1', language='english'):
    '''Use treetagger for tagging the documents. Note that
    encoding `utf-8` is specified as `utf8` (different from Python notation).

    Returns a new DataFrame with start/end/word/pos/lemma series.
    '''
    tg = TreeTagger(encoding=encoding, language=language)

    # Hoisted out of the loop: the original re-materialised the full word
    # list for every sentence, which is quadratic in document length.
    all_words = list(doc.word)
    starts, ends, words, tags, lemmata = [], [], [], [], []
    for start, end in doc_sentences_positions(doc):
        txt = ' '.join(all_words[start:end])
        output = tg.tag(txt)
        # treetagger rows are (word, pos, lemma) triples
        words.extend([t[0] for t in output])
        tags.extend([t[1] for t in output])
        lemmata.extend([fix_t3_lemma(t[2]) for t in output])
        starts.append(True)
        starts.extend([False]*(len(output)-1))
        ends.extend([False]*(len(output)-1))
        ends.append(True)
    return DataFrame({'start': starts, 'end': ends, 'word': words,
                     'pos': tags, 'lemma': lemmata})

def as_treetagger_corpus(orig_path, dest_path, encoding='latin-1', language='english'):
    '''Tag every document of the corpus at orig_path with treetagger and
    store the results at dest_path.  Documents treetagger chokes on
    (IndexError) are skipped with a message on stderr.
    '''
    assert (orig_path != dest_path)
    orig = PyCorpus(orig_path)
    dest = PyCorpus(dest_path)
    dest.autocommit(False)
    for doc_id in orig.keys():
        try:
            dest[doc_id] = as_treetagger_doc(orig[doc_id], encoding=encoding, language=language)
        # 'as' syntax (valid since 2.6) replaces the removed-in-3.x
        # 'except IndexError, e' form.
        except IndexError as e:
            sys.stderr.write(str(e) + '\n')
    dest.commit()
    orig.close()
    dest.close()


################################################################################
# Load/save/import/export/convert documents/corpora
# Ordinarily, the corpora are stored as shelves of DataFrames
################################################################################

def save_doc(path, doc):
    '''Save a single document (DataFrame) as a UTF-8 encoded CSV file.'''
    # 'with' guarantees the handle is closed even if to_csv raises.
    with open(path, 'w') as f:
        doc.to_csv(f, encoding='utf-8')

def load_doc(path):
    '''Load a UTF-8 encoded CSV file (see save_doc) into a DataFrame.'''
    frame = pandas.read_csv(path, encoding='utf-8')
    return frame

def corpus_to_excel(corpus_path, excel_path):
    '''NB! Make sure to use .xls file extension for Excel files.

    Writes every document of the corpus to its own sheet, keyed by the
    corpus key.
    '''
    source = PyCorpus(corpus_path)
    writer = ExcelWriter(excel_path)
    for doc_id in source:
        source[doc_id].to_excel(writer, sheet_name=doc_id)
    writer.save()
    source.close()

def excel_to_corpus(excel_path, corpus_path):
    '''NB! Make sure to use .xls file extension for Excel files.

    Reads sheets named '0', '1', ... until a missing sheet raises an
    XLRDError, restoring each sheet as a DataFrame under the same numeric
    string key.
    '''
    corpus = PyCorpus(corpus_path)
    excel  = ExcelFile(excel_path)
    # as we do not know the number of sheets, we parse all of them
    # until we obtain a error
    idx = 0
    while True:
        try:
            df = excel.parse(str(idx))
            # recreate some information that was modified when exporting to xls
            new_df = dict()
            for col in df.columns:
                data = []
                for v in df[col]:
                    # NaN cells round-trip back to None
                    if type(v) == float and math.isnan(v):
                        data.append(None)
                    # NOTE(review): mapping 0/1 back to booleans is lossy for
                    # genuinely numeric 0/1 cells — presumably safe because
                    # exported corpora use them only as start/end flags;
                    # verify against corpus_to_excel callers.
                    elif v == 0:
                        data.append(False)
                    elif v == 1:
                        data.append(True)
                    else:
                        data.append(v)
                new_df[col] = Series(data)
            corpus[str(idx)] = DataFrame(new_df)
        except xlrd.biffh.XLRDError:
            # first missing sheet index terminates the scan
            break
        idx += 1
    corpus.close()
