# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based natural language processing tools for Estonian.
# Copyright (C) 2013 University of Tartu
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Some code is copy/pasted from CrfSuite examples

################################################################################
# a list of functions for basic feature extractions
################################################################################

from wordnet import Wordnet
from pandas import DataFrame, Series
from patnlp import StringVector, Document
from scipy import sparse
from patnlp import *
from rule import *
from rule import conjunction_dataframe, conjunction_covers
from itertools import izip
import types
import os
import sys
import codecs
# NOTE: numpy is imported explicitly; previously the `np` name used in
# HrAprioriMiner.fit relied on a star import to be in scope.
import numpy as np

def get_identity(token):
    '''Return the token itself, unchanged (identity feature).'''
    return token

def get_shape(token):
    '''Return the "shape" of a token: uppercase letters map to U, lowercase
       to L, digits to D, and punctuation characters to a class
       representative; any other character passes through unchanged.
       '''
    # punctuation characters collapsed to a single class representative
    punct_class = {
        '.': '.', ',': '.',
        ';': ';', ':': ';', '?': ';', '!': ';',
        '+': '-', '-': '-', '*': '-', '/': '-', '=': '-', '|': '-', '_': '-',
        '(': '(', '{': '(', '[': '(', '<': '(',
        ')': ')', '}': ')', ']': ')', '>': ')',
    }
    parts = []
    for ch in token:
        if ch.isupper():
            parts.append('U')
        elif ch.islower():
            parts.append('L')
        elif ch.isdigit():
            parts.append('D')
        else:
            parts.append(punct_class.get(ch, ch))
    return u''.join(parts)

def get_2d(token):
    '''True when the token is exactly a two-digit number.'''
    return token.isdigit() and len(token) == 2

def get_4d(token):
    '''True when the token is exactly a four-digit number (e.g. a year).'''
    return token.isdigit() and len(token) == 4

def get_da(token):
    '''True when the token mixes digits and letters and contains nothing else.'''
    has_digit = any(c.isdigit() for c in token)
    has_alpha = any(c.isalpha() for c in token)
    only_alnum = all(c.isdigit() or c.isalpha() for c in token)
    return only_alnum and has_digit and has_alpha

def get_all_other(token):
    '''True when the token contains no alphanumeric characters at all.'''
    return not any(c.isalnum() for c in token)

def get_capperiod(token):
    '''True for a two-character token of an uppercase letter plus a period,
       e.g. an initial like "J.".'''
    return len(token) == 2 and token[1] == '.' and token[0].isupper()

def contains_upper(token):
    '''True when at least one character of the token is uppercase.'''
    return any(ch.isupper() for ch in token)

def all_upper(token):
    '''True when the token is fully uppercase (delegates to str.isupper).'''
    return token.isupper()

def starts_upper(token):
    '''True when the first character is uppercase; False for an empty token.'''
    first = token[:1]
    return first.isupper()

def contains_lower(token):
    '''True when at least one character of the token is lowercase.'''
    return any(ch.islower() for ch in token)

def contains_alpha(token):
    '''True when at least one character of the token is alphabetical.'''
    return any(ch.isalpha() for ch in token)

def contains_digit(token):
    '''True when at least one character of the token is a digit.'''
    return any(ch.isdigit() for ch in token)

def contains_symbol(token):
    '''True when at least one character of the token is not alphanumeric.'''
    return any(not ch.isalnum() for ch in token)

def get_prefix(token, n):
    '''Return the first `n` characters of the lowercased token.'''
    lowered = token.lower()
    return lowered[:n]

def get_suffix(token, n):
    '''Return the last `n` characters of the lowercased token.'''
    lowered = token.lower()
    return lowered[-n:]

def get_ngrams(token, k=2):
    '''Return all character k-grams of the lowercased token, in order.
       An empty list is returned when the token is shorter than `k`.'''
    lowered = token.lower()
    count = len(lowered) - k + 1
    return [lowered[start:start + k] for start in range(count)]

# PEP 8 (E731): use def instead of assigning a lambda to a name, so the
# functions get proper names in tracebacks and can carry docstrings.
def get_bigrams(token):
    '''Return the lowercased character bigrams of the token.'''
    return get_ngrams(token, 2)

def get_trigrams(token):
    '''Return the lowercased character trigrams of the token.'''
    return get_ngrams(token, 3)

def get_most_frequent(tokens):
    '''Return the most frequent token in `tokens`, or None when empty.
    Ties are broken in favour of the token that reached the winning
    count first.

    The original implementation only incremented a token's count while
    that count was still >= the running maximum, so tokens that fell
    behind early were never counted again (e.g. ['a','a','b','b','b']
    wrongly returned 'a').
    '''
    counts = {}
    best, best_count = None, 0
    for t in tokens:
        counts[t] = counts.get(t, 0) + 1
        if counts[t] > best_count:
            best_count = counts[t]
            best = t
    return best

def get_true(token):
    '''Constant feature: always True, regardless of the token.'''
    return True

def get_false(token):
    '''Constant feature: always False, regardless of the token.'''
    return False

class WordnetFeatureExtractor(object):
    '''Class for using Wordnet as external feature source.'''

    def __init__(self, path):
        # the wordnet data itself is loaded lazily via load()
        self.wordnet = Wordnet()
        self.path = path

    def load(self):
        '''Load the wordnet data from the configured path.'''
        self.wordnet.load(self.path)

    def get_relation_extractor(self, relation_name):
        '''Return a function mapping a lemma to its value(s) for the given relation.'''
        def extract(lemma):
            return self.wordnet.get_value(lemma, relation_name)
        return extract

    def get_synonym_extractor(self):
        '''Return a function mapping (lemma, wtype) to the wordnet synonyms.'''
        def extract(lemma, wtype):
            return self.wordnet.get_synonyms([lemma], [wtype])
        return extract

class WordListFeatureExtractor(object):
    '''Primitive wordlist based feature extractor.

    Reads a UTF-8 file of tab-separated `token<TAB>value` lines and maps
    tokens to their values; malformed lines are skipped.
    '''

    def __init__(self, path):
        # The original parsed the wrong variable: it split `line` into `ss`,
        # then immediately overwrote `line` and indexed `line` instead of
        # `ss` (so `len(line) == 2` tested the STRING length), and assigned
        # via `self[...]` which raises TypeError. It also leaked the file
        # handle. This version actually populates self.d and closes the file.
        self.d = {}
        f = codecs.open(path, 'rb', 'utf-8')
        try:
            for line in f:
                ss = line.split('\t')
                if len(ss) == 2:
                    self.d[ss[0].strip()] = ss[1].strip()
        finally:
            f.close()

    def contains(self, token):
        '''Return True when the token is present in the wordlist.'''
        return token in self.d

    def extract(self, token):
        '''Return the value for the token, or None when it is absent.'''
        if self.contains(token):
            return self.d[token]

################################################################################
# some higher level functions for feature extraction
# these emit a list of (feature name, offset, value) tuples for each token,
# which are in turn stored in a list.
################################################################################

def extract_local(doc, series_name, feat_name, fun=get_identity, radius=0):
    '''Extract local features from a series in a document.

    `series_name` - the series such as `word`
    `feat_name`   - the feature name
    `fun`         - the function to extract the feature from a token in the series
    `radius`      - the context radius to extract the features from

    Returns a list with one entry per token; each entry is a deduplicated
    list of (feature name, offset, value) tuples. `fun` is allowed to emit
    a list of features.
    '''
    toks = doc[series_name]
    n = len(toks)
    features = []
    for idx in range(n):
        tok_features = []
        for o in range(-radius, radius+1):
            # skip context positions that fall outside the document
            if idx+o < 0 or idx+o >= n:
                continue
            v = fun(toks[idx+o])
            # `list` instead of the deprecated Python-2-only types.ListType
            # (they are the same object in Python 2, so behavior is unchanged)
            if isinstance(v, list):
                for e in v:
                    tok_features.append((feat_name, o, e))
            else:
                tok_features.append((feat_name, o, v))
        # deduplicate; note that set() does not preserve insertion order
        features.append(list(set(tok_features)))
    return features

def get_local_extractors(series_names, radius=0):
    '''Given the series names of a document, create all identity extractors
    with given radius.'''
    # `name=name` binds the current series name as a default argument so the
    # lambdas do not all late-bind to the last name in the loop.
    return [
        (lambda doc, name=name: extract_local(doc, name, name, get_identity, radius))
        for name in series_names
    ]

def extract_global(doc, src_series, dst_series, feat_name, src_fun, filter_fun, dst_fun, dst_offset, aggr_fun):
    '''Extract global features in a document.

    `src_series` - the source series name, where the same values get same features.
    `dst_series` - the series name, where we extract the features for src values.
    `feat_name`  - the resulting feature name.
    `src_fun`    - function to extract the source value.
    `filter_fun` - given an extracted source value; the value is used only if
                   `filter_fun` returns true.
    `dst_fun`    - the function to extract the destination value.
    `dst_offset` - the offset of the destination value relative to the source.
    `aggr_fun`   - aggregate function that is given the (deduplicated) list of
                   collected values and decides the real output.

    The src_fun, dst_fun, aggr_fun are allowed to emit a list of features.
    Returns one list of (feat_name, 0, value) tuples per token.
    '''
    src_toks = doc[src_series]
    dst_toks = doc[dst_series]
    globs = {}
    n = len(src_toks)
    src_vals = []
    for i in range(n):
        src_v = src_fun(src_toks[i])
        src_vals.append(src_v)
        if not filter_fun(src_v):
            continue
        # destination value stays None when the offset falls off the document
        dst_v = None
        if 0 <= i + dst_offset < n:
            dst_v = dst_fun(dst_toks[i + dst_offset])
        l = globs.setdefault(src_v, [])
        # `list` instead of the deprecated Python-2-only types.ListType
        if isinstance(dst_v, list):
            l.extend(dst_v)
        else:
            l.append(dst_v)
    features = []
    for src_v in src_vals:
        v = aggr_fun(list(set(globs.get(src_v, []))))
        if isinstance(v, list):
            features.append([(feat_name, 0, e) for e in v])
        else:
            features.append([(feat_name, 0, v)])
    return features

def create_basic_extractors(radius=1):
    '''Create the standard word-level local feature extractors with the
    given context radius. Returns a list of callables, each taking a
    document and returning per-token (name, offset, value) tuples.'''
    exs = []
    # sentence start/sentence end features
    exs.append(lambda doc: extract_local(doc, 'start', 'start', get_identity, radius))
    exs.append(lambda doc: extract_local(doc, 'end', 'end', get_identity, radius))
    # shape and other descriptive features
    exs.append(lambda doc: extract_local(doc, 'word', 'shape', get_shape, radius))
    exs.append(lambda doc: extract_local(doc, 'word', '2d', get_2d, radius))
    # fixed feature-name typo: was '4f' for the four-digit feature
    exs.append(lambda doc: extract_local(doc, 'word', '4d', get_4d, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'da', get_da, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'other', get_all_other, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'capperiod', get_capperiod, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'contupper', contains_upper, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'contlower', contains_lower, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'contalpha', contains_alpha, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'contdigit', contains_digit, radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'contsymbol', contains_symbol, radius))
    # NOTE(review): the feature is labelled 'isupper' but uses starts_upper —
    # kept as-is since trained models may depend on the existing name.
    exs.append(lambda doc: extract_local(doc, 'word', 'isupper', starts_upper, radius))
    # prefixes / suffixes / ngrams
    exs.append(lambda doc: extract_local(doc, 'word', 'p1', lambda tok: get_prefix(tok, 1), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'p2', lambda tok: get_prefix(tok, 2), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'p3', lambda tok: get_prefix(tok, 3), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'p4', lambda tok: get_prefix(tok, 4), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 's1', lambda tok: get_suffix(tok, 1), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 's2', lambda tok: get_suffix(tok, 2), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 's3', lambda tok: get_suffix(tok, 3), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 's4', lambda tok: get_suffix(tok, 4), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'bigram', lambda tok: get_ngrams(tok, 2), radius))
    exs.append(lambda doc: extract_local(doc, 'word', 'trigram', lambda tok: get_ngrams(tok, 3), radius))
    return exs

def create_lemma_extractors(radius=1):
    '''Create the lemma-based local feature extractors with the given radius.'''
    # NOTE: the global prev/next lemma extractors (via extract_global) were
    # disabled in the original version and remain so here.
    return [
        lambda doc: extract_local(doc, 'lemma', 'lemma', get_identity, radius),
        lambda doc: extract_local(doc, 'lemma', 'lemshape', get_shape, radius),
        lambda doc: extract_local(doc, 'lemma', 'lems1', lambda tok: get_suffix(tok, 1), radius),
        lambda doc: extract_local(doc, 'lemma', 'lems2', lambda tok: get_suffix(tok, 2), radius),
        lambda doc: extract_local(doc, 'lemma', 'lems3', lambda tok: get_suffix(tok, 3), radius),
        lambda doc: extract_local(doc, 'lemma', 'lems4', lambda tok: get_suffix(tok, 4), radius),
    ]

def create_morph_extractors(cols, radius=1, glob_context_radius=1):
    '''Create identity extractors for the given morphological columns.

    `cols` - the series names to extract.
    `glob_context_radius` is kept for interface compatibility (it was only
    used by the disabled global extractors).
    '''
    exs = []
    for col in cols:
        # BUG FIX: bind `col` as a default argument. The original closure
        # late-bound the loop variable, so every extractor used the LAST
        # column (cf. the correct `name=name` idiom in get_local_extractors).
        exs.append(lambda doc, col=col: extract_local(doc, col, col, get_identity, radius))
    return exs


class TupleFeatureExtractor(object):
    '''Class to combine all feature extractors.
       Transforms the given documents to lists of tuple lists.'''

    def __init__(self, *args, **kwargs):
        # each extractor maps a document to a per-token list of feature tuples
        self.extractors = args

    def fit(self, X, y=None):
        pass

    def transform(self, doc):
        '''Expects doc to be a pandas dataframe object (uses doc.shape[0]).
        Returns one concatenated list of feature tuples per token.'''
        num_tokens = doc.shape[0]
        outputs = [extractor(doc) for extractor in self.extractors]
        rows = []
        for token_idx in range(num_tokens):
            row = []
            for output in outputs:
                row.extend(output[token_idx])
            rows.append(row)
        return rows

class TupleStringFeatureExtractor(TupleFeatureExtractor):
    '''Class that converts tuples to strings, so that each string represents a single document and
       can be easily processed by TfIdf vector space builders.'''

    def __init__(self, *args, **kwargs):
        # when `nooffset` is set, the offset component is omitted from features
        self._nooffset = kwargs.get('nooffset', False)
        TupleFeatureExtractor.__init__(self, *args)

    def _tos(self, doc):
        '''Render each token's feature tuples as "name[offset]=value" strings
        (or "name=value" when offsets are suppressed).'''
        tuples = TupleFeatureExtractor.transform(self, doc)
        if self._nooffset:
            return [[u'{0}={1}'.format(t[0], t[2]) for t in word] for word in tuples]
        return [[u'{0}[{1}]={2}'.format(t[0], t[1], t[2]) for t in word] for word in tuples]

    def fit(self, doc, y=None):
        '''X is a corpus.'''
        return self

    def transform(self, doc):
        return self._tos(doc)

class DocumentFeatureExtractor(TupleFeatureExtractor):
    '''Transforms the given documents to C++ Document style in form
       feature_name[offset]=value.'''

    def __init__(self, *args, **kwargs):
        TupleFeatureExtractor.__init__(self, *args, **kwargs)
        # when `nooffset` is set, the offset component is omitted from features
        self._nooffset = kwargs.get('nooffset', False)

    def transform(self, doc):
        '''Return a Document of per-token StringVectors with UTF-8 encoded features.'''
        tuples = TupleFeatureExtractor.transform(self, doc)
        if self._nooffset:
            fmt = lambda e: u'{0}={1}'.format(e[0], e[2])
        else:
            fmt = lambda e: u'{0}[{1}]={2}'.format(e[0], e[1], e[2])
        words = [StringVector([fmt(e).encode('utf-8') for e in row]) for row in tuples]
        return Document(words)

class DataFrameFeatureExtractor(TupleFeatureExtractor):
    '''Transforms the given documents to pandas DataFrame objects.'''

    def __init__(self, *args):
        TupleFeatureExtractor.__init__(self, *args)

    def _get_key(self, feat, offset):
        # column name combines the feature name and its context offset
        return u'{0}[{1}]'.format(feat, offset)

    def transform(self, doc):
        '''Return a DataFrame with one column per (feature, offset) pair;
        cells hold the extracted value or None when absent for a token.'''
        num_tokens = doc.shape[0]
        tuples = TupleFeatureExtractor.transform(self, doc)
        # one column per (feature, offset) pair, initially all missing
        columns = frozenset(self._get_key(t[0], t[1]) for row in tuples for t in row)
        data = dict((name, Series([None] * num_tokens)) for name in columns)
        # fill in the extracted values token by token
        for idx, row in enumerate(tuples):
            for feat, offset, value in row:
                data[self._get_key(feat, offset)][idx] = value
        return DataFrame(data)

class BinaryDataFrameFeatureExtractor(DataFrameFeatureExtractor):
    '''Transforms given documents to binary feature dataframes.
       Each feature/value combination is transformed to a new column and
       values become 0/1 features.'''

    def __init__(self, *args):
        DataFrameFeatureExtractor.__init__(self, *args)

    def transform(self, doc):
        '''Return a sparse DataFrame of "col=value" indicator columns.'''
        num_tokens = doc.shape[0]
        df = DataFrameFeatureExtractor.transform(self, doc)
        binary = {}
        for col in df.columns:
            # one 0/1 indicator series per distinct value of this column
            indicators = dict((value, Series([0] * num_tokens)) for value in frozenset(df[col]))
            for idx, value in enumerate(df[col]):
                indicators[value][idx] = 1
            for value in indicators:
                binary[u'{0}={1}'.format(col, value)] = indicators[value]
        # NOTE(review): DataFrame.to_sparse was removed in pandas 1.0; this
        # matches the pandas version this Python 2 code targets.
        return DataFrame(binary).to_sparse(fill_value=0)

class CorpusFeatureExtractor(object):
    '''Takes a featureextractor and stores the values per document
       in a dictionary.'''

    def __init__(self, extractor):
        self._extractor = extractor

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        '''Apply the wrapped extractor to every document; return {doc_id: result}.'''
        transform = self._extractor.transform
        return dict((doc_id, transform(X[doc_id])) for doc_id in X.keys())

class HrAprioriMiner(object):
    '''Class for mining frequent patterns with apriori
       and using these as features.'''

    def __init__(self, **kwargs):
        self._radius = kwargs.get('radius', 2)
        # minimum relative frequency for a pattern to be kept
        # ("treshold" spelling kept for keyword-argument compatibility)
        self._treshold = kwargs.get('treshold', 0.05)
        self._size_limit = kwargs.get('size_limit', 2)
        self._significance_treshold = kwargs.get('significance_treshold', 0.05)
        # optional background corpus/cover used for significance filtering in fit()
        self._background_corpus = kwargs.get('background', None)
        self._background_cov = kwargs.get('background_cover', None)
        self._frequent = None

    def get_frequent(self):
        '''Return the fitted frequent patterns as a ConjunctionVector,
        or None when fit() has not been called yet.'''
        # `is not None` instead of `!= None` (PEP 8)
        if self._frequent is not None:
            return ConjunctionVector([Conjunction([Rule(o, v) for o, v in c]) for c in self._frequent])

    def fit(self, corp, true_cover):
        '''Mine frequent rule conjunctions from `corp`; when a background
        corpus was supplied, keep only patterns whose recall is significant
        against it. Returns self.'''
        rule_covers = basic_rule_covers(corp, self._radius)
        initial = ConjunctionVector([Conjunction([rule]) for rule in rule_covers.keys()])
        frequent = hr_apriori(initial, rule_covers, true_cover, self._treshold, self._size_limit, self._radius)
        # perform significance filtering using background corpus
        if self._background_corpus is not None:
            sys.stderr.write('Computing significance\n')
            covers = conjunction_covers(frequent, rule_covers)
            backrulecovers = basic_rule_covers(self._background_corpus, self._radius)
            docmetrics = conjunction_docmetrics(frequent, backrulecovers, self._background_cov)
            metmat = metric_matrix(docmetrics, self._background_corpus.keys(), 'recall')
            significant = ConjunctionVector()
            for idx, (conj, cover) in enumerate(izip(frequent, covers)):
                recall = cover.metrics(true_cover).recall()
                # empirical p-value: fraction of background documents whose
                # recall is at least as high as this pattern's recall
                pvalue = np.sum(recall <= metmat[idx,:]) / float(metmat.shape[1])
                if pvalue <= self._significance_treshold:
                    significant.append(conj)
            sys.stderr.write('{0} of {1} patterns are significant\n'.format(significant.size(), frequent.size()))
            frequent = significant
            # release the (possibly large) background data once filtering is done
            self._background_corpus = None
            self._background_cov = None
        self._frequent = list(frequent)
        return self

    def transform(self, corp):
        '''Return {doc_id: conjunction feature dataframe} for each document in `corp`.'''
        frequent = self.get_frequent()
        rule_covers = basic_rule_covers(corp, self._radius)
        covers = conjunction_covers(frequent, rule_covers)
        res = {}
        for doc_id in corp.keys():
            res[doc_id] = conjunction_dataframe(frequent, covers, doc_id)
        return res

