# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based natural language processing tools for Estonian.
# Copyright (C) 2013 University of Tartu
# 
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# 
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys
import os
import numpy as np

from cover import regex_cover
from features import *
from paths import save_obj, load_obj
from pdict import PersistentDict as PyCorpus
from patnlp import *

from sklearn.decomposition import PCA
from sklearn.svm import OneClassSVM

class BaseAI(object):
    '''Base class for authorship identification.

    Keyword arguments:
        radius -- context radius used by the pattern miner (default 1).
        threshold -- frequency threshold for pattern mining (default 0.05).
            The historical misspelling ``treshold`` is still accepted for
            backward compatibility; the correct spelling wins if both given.
        significance_threshold -- significance threshold (default 0.05).
            The misspelling ``significance_treshold`` is also accepted.
        reference_dir -- directory holding the preprocessed reference
            (background) corpus (default 'reference').
    '''

    def __init__(self, **kwargs):
        self._radius = kwargs.get('radius', 1)
        # Accept both the correctly-spelled and the historical misspelled
        # kwarg names; the correct spelling takes precedence when both
        # are supplied.
        self._tresh = kwargs.get('threshold',
                                 kwargs.get('treshold', 0.05))
        self._sigtresh = kwargs.get('significance_threshold',
                                    kwargs.get('significance_treshold', 0.05))
        self._reference_dir = kwargs.get('reference_dir', 'reference')
        # Keep the raw kwargs around for introspection/debugging.
        self._kwargs = kwargs

    def _get_fe(self):
        '''Get a standard feature extractor for the problem.

        Returns a list of per-document extractor callables: local
        lemma/case/wtype/vtype features plus word-shape indicators
        (starts-upper, all-upper, contains-digit, contains-symbol).
        '''
        exs = get_local_extractors(['lemma', 'case', 'wtype', 'vtype'], 0)
        exs.append(lambda doc: extract_local(doc, 'word', 'su', starts_upper, 0))
        exs.append(lambda doc: extract_local(doc, 'word', 'au', all_upper, 0))
        exs.append(lambda doc: extract_local(doc, 'word', 'cd', contains_digit, 0))
        exs.append(lambda doc: extract_local(doc, 'word', 'cs', contains_symbol, 0))
        return exs

    def _get_covers(self, pycorp):
        '''Extract positive covers for various data.

        Covers select tokens by morphological word type (Estonian tags:
        S=substantive, V=verb, A=adjective, Z=punctuation) plus an
        "everything" cover matching any word.
        '''
        return {'substantive': regex_cover(pycorp, 'wtype', 'S'),
                'verb': regex_cover(pycorp, 'wtype', 'V'),
                'adjective': regex_cover(pycorp, 'wtype', 'A'),
                'punctuation': regex_cover(pycorp, 'wtype', 'Z'),
                'everything': regex_cover(pycorp, 'word', '.')}

    def make_reference(self, reference_dir):
        '''Preprocess a reference corpus and store it in a directory.

        Reads `reference.pycorp` from reference_dir and writes back
        `reference.corp` (feature-extracted corpus) and `covers.obj`
        (serialized covers).
        '''
        print('Preparing reference corpus.')
        pycorp = PyCorpus(os.path.join(reference_dir, 'reference.pycorp'), readonly=True)
        fe = CorpusFeatureExtractor(DocumentFeatureExtractor(*self._get_fe(), nooffset=True))
        corp = Corpus(fe.transform(pycorp))
        print('Storing preprocessed reference corpus.')
        write_corpus_to_file(os.path.join(reference_dir, 'reference.corp'), corp)
        print('Obtaining covers')
        covers = self._get_covers(pycorp)
        # Convert covers to a plain-dict bit-vector form so they can be
        # pickled to disk.
        for key in covers:
            covers[key] = dict(as_bv_from_oc(covers[key].map()))
        print('Storing covers.')
        save_obj(covers, os.path.join(reference_dir, 'covers.obj'))

    def _load_reference(self, reference_dir):
        '''Load a previously preprocessed reference corpus with its covers.

        Returns a (corpus, covers) pair; covers are rehydrated from the
        serialized bit-vector form back into OrderedCover objects.
        '''
        corp = read_corpus_from_file(os.path.join(reference_dir, 'reference.corp'))
        covers = load_obj(os.path.join(reference_dir, 'covers.obj'))
        for key in covers:
            covers[key] = OrderedCover(as_oc(StrMapBv(covers[key])))
        return corp, covers

    def _mine_everything(self, corp, cover, background, background_cover):
        '''Mine all frequent patterns.

        Fits one HrAprioriMiner per cover key against the matching
        background cover and returns the fitted miners keyed by cover
        name.
        '''
        miners = {}
        for key in cover:
            sys.stderr.write(u'\nMining {0} patterns\n'.format(key).encode('utf-8'))
            # NOTE: 'treshold'/'significance_treshold' below are the
            # keyword names HrAprioriMiner actually declares.
            miner = HrAprioriMiner(radius=self._radius,
                                   treshold=self._tresh,
                                   significance_treshold=self._sigtresh,
                                   size_limit = 2,
                                   background = background,
                                   background_cover = background_cover[key])
            miner.fit(corp, cover[key])
            miners[key] = miner
        return miners

    def _correlation_matrices(self, miners, corp):
        '''Build a Matthews-correlation matrix per cover key from the
        frequent patterns of each fitted miner.'''
        rule_covers = basic_rule_covers(corp, self._radius)
        corr_matrices = {}
        for key in miners:
            sig_covers = conjunction_covers(miners[key].get_frequent(), rule_covers)
            matmat = matthews_matrices(sig_covers, list(sorted(corp.keys())), True)
            corrmat = conj_corr_matrix(matmat)
            corr_matrices[key] = corrmat
        return corr_matrices

    def _flatten_matrices(self, corr_matrices):
        '''Stack the per-key matrices (in sorted-key order for a stable
        column layout) into one matrix, replacing NaNs with 0.'''
        corr_matrices = [corr_matrices[key] for key in sorted(corr_matrices.keys())]
        corr_matrix = np.hstack(corr_matrices)
        corr_matrix[np.isnan(corr_matrix)] = 0
        return corr_matrix

def show_plot(X, n):
    '''Scatter-plot rows of X: the first n rows in blue, the rest in red.'''
    # Imported lazily so the module loads without a display backend.
    import matplotlib.pyplot as plt
    figure, axes = plt.subplots()
    head, tail = X[:n], X[n:]
    axes.plot(head[:, 0], head[:, 1], 'o', color='blue')
    axes.plot(tail[:, 0], tail[:, 1], 'o', color='red')
    figure.show()

class OneClassAI(BaseAI):
    '''One-class authorship identification.

    Mines frequent patterns from a training corpus against a reference
    (background) corpus, flattens the resulting per-cover correlation
    matrices into a feature matrix and fits a OneClassSVM on it.
    '''

    def __init__(self, **kwargs):
        BaseAI.__init__(self, **kwargs)
        self._miners = []
        # PCA instance kept for experimentation; the dimensionality
        # reduction step is currently disabled in fit()/predict().
        self._pca = PCA(n_components=2)
        self._svm = OneClassSVM()
        self._corr_matrix = None          # feature matrix from last fit()
        self._predict_corr_matrix = None  # feature matrix from last predict()

    def fit(self, pycorp, y=None):
        '''Fit the one-class model on corpus pycorp.

        y is ignored; it exists only for scikit-learn API compatibility.
        Returns self.
        '''
        sys.stderr.write('TRAIN: Preprocessing corpus.\n')
        fe = CorpusFeatureExtractor(DocumentFeatureExtractor(*self._get_fe(), nooffset=True))
        corp = Corpus(fe.transform(pycorp))
        covers = self._get_covers(pycorp)

        back_corp, back_covers = self._load_reference(self._reference_dir)
        self._miners = self._mine_everything(corp, covers, back_corp, back_covers)

        sys.stderr.write('TRAIN: Computing correlation matrices\n')
        corr_matrices = self._correlation_matrices(self._miners, corp)
        corr_matrix = self._flatten_matrices(corr_matrices)
        # Free the per-key matrices before fitting to keep peak memory down.
        del corr_matrices

        # NOTE: a PCA step used to run here (self._pca.fit_transform) but
        # is intentionally disabled; the SVM trains on the raw matrix.
        sys.stderr.write('TRAIN: Fitting OneClassSVM\n')
        self._svm.fit(corr_matrix)
        self._corr_matrix = corr_matrix
        return self

    def predict(self, pycorp):
        '''Predict with the fitted one-class model on corpus pycorp.

        Returns the OneClassSVM predictions (+1 inlier / -1 outlier per
        row of the flattened correlation matrix).
        '''
        sys.stderr.write('TEST: Preprocessing corpus.\n')
        fe = CorpusFeatureExtractor(DocumentFeatureExtractor(*self._get_fe(), nooffset=True))
        corp = Corpus(fe.transform(pycorp))

        sys.stderr.write('TEST: Computing correlation matrices\n')
        corr_matrices = self._correlation_matrices(self._miners, corp)
        corr_matrix = self._flatten_matrices(corr_matrices)
        del corr_matrices

        # NOTE: the matching PCA transform is disabled, mirroring fit().
        sys.stderr.write('TEST: Predicting with OneClassSVM\n')
        self._predict_corr_matrix = corr_matrix
        return self._svm.predict(corr_matrix)

