# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based natural language processing tools for Estonian.
# Copyright (C) 2013 University of Tartu
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from pdict import PersistentDict as PyCorpus
from sklearn import metrics
from random import sample
from features import *
from paths import *
import crfsuite
import sys
import tempfile
import numpy as np

class Trainer(crfsuite.Trainer):
    """CRF trainer that forwards the library's progress output to stderr."""

    def message(self, text):
        # Called back by crfsuite during training; route everything to stderr
        # so training logs do not mix with regular program output.
        sys.stderr.write(text)

class Tagger(crfsuite.Tagger):
    """CRF tagger that forwards the library's diagnostic output to stderr."""

    def message(self, text):
        # Callback invoked by crfsuite; keep diagnostics on stderr.
        sys.stderr.write(text)

class BasicNer(object):
    '''Simple language agnostic NER that uses very basic
       features. Requires `start`, `end` and `word` fields to
       be available with the label column specified by the user.

       Keyword arguments:
       src_col    -- column holding gold labels used for training/testing
                     (default 'ne_type').
       target_col -- column the tagger writes predicted labels to
                     (default 'target_col').
       prob_col   -- column for per-token marginal probabilities
                     (default 'prob_col').
    '''

    def __init__(self, **kwargs):
        self._src_col    = kwargs.get('src_col', 'ne_type')
        self._target_col = kwargs.get('target_col', 'target_col')
        self._prob_col   = kwargs.get('prob_col', 'prob_col')
        self._kwargs     = kwargs
        self._extractors = []
        # Serialized CRF model bytes; set by fit()/load(), None until then.
        self._model      = None

        self._create_basic_extractors()

    def src_col(self, col=None):
        '''Return the gold-label column name, or set it when `col` is given.'''
        if col is None:
            return self._src_col
        else:
            self._src_col = col

    def target_col(self, col=None):
        '''Return the prediction column name, or set it when `col` is given.'''
        if col is None:
            return self._target_col
        else:
            self._target_col = col

    def prob_col(self, col=None):
        '''Return the probability column name, or set it when `col` is given.'''
        if col is None:
            return self._prob_col
        else:
            self._prob_col = col

    def _create_basic_extractors(self):
        # Language-agnostic default feature extractors (from `features`).
        self._extractors.extend(create_basic_extractors())

    def add_extractor(self, ex):
        '''Add a custom feature extractor. Must be called before fit().'''
        if self._model is not None:
            raise Exception('Model is already fitted! Cannot add more extractors!.')
        self._extractors.append(ex)


    def _get_doc_xs(self, doc, fe):
        '''Build a crfsuite ItemSequence of encoded "name[pos]=value" attributes
           for one document, using feature extractor `fe`.'''
        xseq = crfsuite.ItemSequence()
        for ts in fe.transform(doc):
            item = crfsuite.Item()
            for t in ts:
                item.append(crfsuite.Attribute(u'{0}[{1}]={2}'.format(t[0], t[1], t[2]).encode('utf-8')))
            xseq.append(item)
        return xseq

    def _get_doc_ys(self, doc):
        '''Return the document's gold labels as a UTF-8 encoded StringList.'''
        return crfsuite.StringList([unicode(l).encode('utf-8') for l in doc[self.src_col()]])

    def fit(self, X, y=None):
        '''Fit a basic CRF model. Expects `word`, `start`, `end` be present.
        X - a dictionary of documents.
        '''
        fe = TupleFeatureExtractor(*self._extractors)
        trainer = Trainer()
        for doc_id in X:
            doc  = X[doc_id]
            xseq = self._get_doc_xs(doc, fe)
            yseq = self._get_doc_ys(doc)
            trainer.append(xseq, yseq, 0)

        trainer.select('lbfgs', 'crf1d')
        # crfsuite writes the model to the temp file's path; read it back into
        # memory so the model survives the file's deletion. The `with` block
        # guarantees cleanup even if training raises.
        with tempfile.NamedTemporaryFile() as f:
            trainer.train(f.name, -1)
            f.flush()
            f.seek(0)
            self._model = f.read()

    def transform(self, X):   # shape and other descriptive features
        '''Transform the given documents. Warning: automatically commits the
           the data to the corpus.'''
        autocommit = X.autocommit()
        X.autocommit(False)
        fe = TupleFeatureExtractor(*self._extractors)
        # load the in-memory model into the tagger via a temp file
        tagger = Tagger()
        with tempfile.NamedTemporaryFile() as f:
            f.write(self._model)
            f.flush()
            tagger.open(f.name)
            # tag all documents
            for doc_id in X:
                doc = X[doc_id]
                tagger.set(self._get_doc_xs(doc, fe))
                yseq = tagger.viterbi()
                doc[self.target_col()] = yseq
                if self.prob_col() is not None:
                    doc[self.prob_col()] = [tagger.marginal(t, i) for i, t in enumerate(yseq)]
                X[doc_id] = doc
        X.commit()
        X.autocommit(autocommit)
        return X

    def test(self, X, f=sys.stdout):
        '''Tag the documents in X and report precision/recall/F1 against the
           gold labels, writing the report to stream `f`. "O" vs "O" pairs are
           excluded so scores reflect entity tokens only.
           Returns (precision, recall, fscore).
        '''
        self.transform(X)
        ts, ys, ps = [], [], []
        for doc_id in X.keys():
            ts.extend(X[doc_id][self.src_col()])
            ys.extend(X[doc_id][self.target_col()])
            ps.extend(X[doc_id][self.prob_col()])
        assert (len(ts) == len(ys))

        # Sort for a deterministic label -> index mapping (and therefore a
        # reproducible confusion matrix ordering between runs).
        labels = sorted(set(ts) | set(ys))
        oidx = labels.index('O')
        xs = [labels.index(t) for t in ts]
        ys = [labels.index(y) for y in ys]
        # Drop positions where both gold and prediction are the "O" tag.
        pairs = [(x, y) for x, y in zip(xs, ys) if x != oidx or y != oidx]
        xs, ys = zip(*pairs)
        precision = metrics.precision_score(xs, ys)
        recall    = metrics.recall_score(xs, ys)
        fscore    = metrics.f1_score(xs, ys)
        f.write('Precision: {0}\n'.format(precision))
        f.write('   Recall: {0}\n'.format(recall))
        f.write(' F1-score: {0}\n'.format(fscore))
        f.write('{0}\n'.format(labels))
        f.write('{0}\n'.format(metrics.confusion_matrix(xs, ys)))

        return precision, recall, fscore

    def save(self, path):
        '''Write the serialized model to `path`.'''
        with open(path, 'wb') as f:
            f.write(self._model)

    def load(self, path):
        '''Load a serialized model from `path`.'''
        with open(path, 'rb') as f:
            self._model = f.read()

class LemmaNer(BasicNer):
    '''NER that can make use of a lemma and use dictionaries.

       Keyword arguments (in addition to BasicNer's):
       wordlist_path -- optional path used to build a WordListFeatureExtractor,
                        stored on `self.wordlist`.
    '''

    def __init__(self, **kwargs):
        BasicNer.__init__(self, **kwargs)
        self.wordlist = None
        if 'wordlist_path' in kwargs:
            # NOTE(review): the extractor is only stored here and never added
            # to self._extractors -- confirm whether it should participate in
            # feature extraction.
            self.wordlist = WordListFeatureExtractor(kwargs['wordlist_path'])
        self._create_lemma_extractors()

    def _create_lemma_extractors(self):
        # Mirrors BasicNer._create_basic_extractors. Previously this method did
        # not exist -- __init__ called it anyway (AttributeError) and then also
        # extended the extractor list directly, which would have added the
        # extractors twice had the call succeeded.
        self._extractors.extend(create_lemma_extractors())

class EstNer(LemmaNer):
    '''Convenient class to make use of Estonian NER tagging.

       Keyword arguments (in addition to LemmaNer's):
       wordnet_path          -- optional; only its truthiness is recorded.
       global_context_radius -- context window radius for morphological
                                features (default 1).
    '''

    def __init__(self, **kwargs):
        LemmaNer.__init__(self, **kwargs)
        # Estonian morphological analysis columns used for features.
        self._cols = ['case', 'wtype', 'vtype', 'plur']
        # NOTE(review): bool() discards the actual path, keeping only whether
        # one was given -- confirm the path itself is not needed downstream.
        self._wordnet_path = bool(kwargs.get('wordnet_path', None))
        self._global_context_radius = kwargs.get('global_context_radius', 1)
        # create_morph_extractors returns a collection of extractors (cf. the
        # other create_*_extractors helpers, whose results are extend()ed);
        # the original append() nested that collection inside the list.
        self._extractors.extend(create_morph_extractors(self._cols, self._global_context_radius))
            
