# -*- coding: utf-8 -*-


# cmap2owl -- Helper application to convert from concept maps to OWL ontologies
# Copyright (c) 2008-2013  Rodrigo Rizzi Starr
#  
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#  
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#  
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


'''
Saves and loads classifiers from a file

@author: Rodrigo Rizzi Starr
@copyright: Copyright © 2008-2013 Rodrigo Rizzi Starr
@license: MIT License
@contact: rodrigo.starr@gmail.com
'''


import pickle
import re


# Project imports
from log import log, statLog
log.debug('Loaded...')  # import-time breadcrumb: confirms this module was loaded


class Classifiers:
    '''Manages classifiers. A classifier is a feature that tells if a given
    phrase means a special kind of relation. This class loads and saves them.
    '''

    def __init__(self, db = None,
                       filename = 'classifiers_en.pickle',
                       cmap = None):
        '''
        @param db: Prolog knowledge-base wrapper used for queries/assertions
        @param filename: pickle file where the classifier sets are persisted
        @param cmap: concept map whose linking phrases will be classified
        '''
        self.db = db
        self.filename = filename
        self.cmap = cmap

        # Maps classifier name -> set of phrase patterns (regex fragments)
        self.classifiers = {}
        # Names of the classifiers currently in use
        self.activeClassifiers = set()

    def load(self):
        'Load the classifier information from a file'
        with open(self.filename, 'rb') as fille:
            self.classifiers = pickle.load(fille)

    def save(self):
        '''Merge the phrases currently known by the knowledge base into the
        classifier dictionary, then persist it to a file.
        '''

        # Fill the classifier dictionary, according to the current language
        classifiers = self.classifiers

        for classifier in self.activeClassifiers:
            try:
                classifierSet = classifiers[classifier]
            except KeyError:
                classifierSet = set()

            # Ask the KB for every phrase asserted under this classifier
            result = self.db.query(classifier + '(X)')
            for answer in result:
                value = unicode(answer['X'])

                # BUGFIX: the old loop added the value whenever ANY existing
                # pattern failed to match it, and never added anything to an
                # empty set.  The intent (mirrored by the match test in
                # fillKB) is to add the value only when NO pattern covers it.
                alreadyCovered = any(
                    re.match('^%s$' % element, value,
                             re.IGNORECASE | re.UNICODE) is not None
                    for element in classifierSet)
                if not alreadyCovered:
                    classifierSet.add(value)
            classifiers[classifier] = classifierSet

        self.classifiers = classifiers
        self._realSave()

    def _realSave(self):
        '''Do the pickling'''

        with open(self.filename, 'wb') as fille:
            pickle.dump(self.classifiers, fille)

    def useClassifiers(self, classifiers):
        '''Adds one or more classifiers to the list of active classifiers

        @param classifiers: a single classifier name or an iterable of names
        '''

        if isinstance(classifiers, basestring):
            classifiers = [classifiers]

        self.activeClassifiers.update(classifiers)

    def fillKB(self):
        '''Assert the classifiers in the knowledge base'''

        # Fill the classifier dictionary, according to the current language.
        # (The old try/except KeyError here was dead code: an attribute
        # access never raises KeyError.)
        classifiers = self.classifiers

        # Fetch every linking-phrase label in the cmap
        labels = [lp.label for lp in self.cmap.getLinkingPhrases()]

        # This will be used so we do not assert the same information twice
        assertedFacts = set()

        for classifier in self.activeClassifiers:
            self.db.dynamic(classifier + '/1')
            classifierRelation = classifier.replace('Phrases', 'Relation')
            self.db.assertz("%s('')" % classifierRelation)

            try:
                classifierSet = classifiers[classifier]
            except KeyError:
                classifierSet = set()

            classifierMatched = False
            for element in classifierSet:
                for label in labels:
                    if re.match('^%s$' % element, label,
                                re.IGNORECASE | re.UNICODE) is not None:
                        fact = "%s('%s')" % (classifier, label)
                        if fact not in assertedFacts:
                            classifierMatched = True
                            assertedFacts.add(fact)
                            statLog.info(fact)
                            self.db.assertz(fact)

            # Generate rules of the form 'hasPartRelation(R) :-
            # hasPartPhrases(L), label(R, L).' for every classifier type that
            # is instantiated.  classifierRelation was already computed above.
            if classifierMatched:
                rule = '%s(R) :- %s(L), label(R, L)' % (classifierRelation,
                                                        classifier)
                self.db.assertz(rule)
        

def generateEnglish():
    '''Generates a base dictionary for english'''
    ddb = Prolog()

    cl = Classifiers(ddb, 'classifiers_en.pickle')
    cl.useClassifiers([
                       'hasPartPhrases', 'partOfPhrases',          # Composition
                       'enumerationPhrases', "definitionPhrases",  # Enumeration
                       'functionalRelationPhrases',                # Functional
                       'specializationPhrases', 'individualizationPhrases', 
                       'generalizationPhrases', 'classificationPhrases',
                       ])

    # (classifier, phrases) pairs, asserted in this exact order into the KB
    groups = [
        ('hasPartPhrases', [                       # Composition
            'composed of', 'is composed of', 'are composed of',
            'composed by', 'is composed by', 'are composed by',
            'has part', 'has parts', 'is made of',
            'is formed by', 'formed by',
        ]),
        ('partOfPhrases', [
            'part of', 'are part of', 'used to make',
            'are used to make', 'forms', 'form',
        ]),
        ('enumerationPhrases', [                   # Enumeration
            'is one of', 'one of', 'any of',
            'who are any of', 'any of the following',
        ]),
        ('definitionPhrases', [
            'define a', 'define', 'any of',
        ]),
        ('functionalRelationPhrases', [            # Functional
            'has', 'have',
        ]),
        ('generalizationPhrases', [
            'are a kind of', 'is a kind of', 'is a',
            'is any', 'refers to any',
        ]),
        ('classificationPhrases', [
            'is a', 'instance of',
        ]),
        ('specializationPhrases', [
            'can be', 'like', 'for example', 'e.g.',
        ]),
        ('individualizationPhrases', [
            'for example', 'e.g.', 'e. g.', 'such as', 'like',
        ]),
    ]

    # Assert every fact and persist the resulting classifier dictionary
    for name, values in groups:
        for value in values:
            ddb.assertz("%s('%s')" % (name, value))

    cl.save()

    
def generatePortuguese():
    '''Generates a base dictionary for portuguese'''
    ddb = Prolog()

    cl = Classifiers(ddb, 'classifiers_pt.pickle')
    cl.useClassifiers([
                       'hasPartPhrases', 'partOfPhrases',          # Composition
                       'enumerationPhrases', "definitionPhrases",  # Enumeration
                       'functionalRelationPhrases',                # Functional
                       'specializationPhrases', 'individualizationPhrases', 
                       'generalizationPhrases', 'classificationPhrases',
                       ])

    # (classifier, phrases) pairs, asserted in this exact order into the KB
    groups = [
        ('hasPartPhrases', [                       # Composition
            u'composto por', u'é composto por', u'são compostos por',
            u'composto de', u'é composto de', u'são compostos de',
            u'tem como parte', u'tem como partes',
            u'têm como parte', u'têm como partes',
            u'é feito de', u'são feitos de',
            u'formado por', u'formados por',
            u'é formado por', u'são formados por',
            u'contem', u'contêm', u'contendo',
        ]),
        ('partOfPhrases', [
            u'parte de', u'é parte de', u'são parte de',
            u'usado para fazer', u'é usado para fazer',
            u'são usados para fazer',
            u'formam', u'forma', u'formando',
        ]),
        ('enumerationPhrases', [                   # Enumeration
            u'um dos', u'um dos seguintes',
            u'é um dos', u'é um dos seguintes',
            u'qualquer um dos', u'qualquer um dos seguintes',
            u'é qualquer um dos', u'é qualquer um dos seguintes',
        ]),
        ('definitionPhrases', [
            u'define', u'define um',
        ]),
        ('functionalRelationPhrases', [            # Functional
            u'tem', u'têm',
        ]),
        ('generalizationPhrases', [
            u'um', u'é', u'são',
            u'é um', u'é um tipo', u'é um tipo de',
            u'são um', u'são um tipo', u'são um tipo de',
            u'é uma forma', u'é uma forma de',
            u'são uma forma', u'são uma forma de',
            u'é qualquer', u'são qualquer',
            u'se refere a qualquer', u'se referem a qualquer',
            u'refere-se a qualquer', u'referem-se a qualquer',
        ]),
        ('classificationPhrases', [
            u'é um', u'instância de',
        ]),
        ('specializationPhrases', [
            u'pode ser', u'podem ser', u'como',
            u'por exemplo', u'como por exemplo',
            u'e.g.', u'e. g.',
        ]),
        ('individualizationPhrases', [
            u'por exemplo', u'como por exemplo',
            u'e.g.', u'e. g.',
            u'como', u'tal como', u'tais como',
        ]),
    ]

    # Assert every fact and persist the resulting classifier dictionary
    for name, values in groups:
        for value in values:
            ddb.assertz(u"%s('%s')" % (name, value))

    cl.save()

    
if __name__ == '__main__':
    # Sets a starting point for the classifiers db.  To (re)generate the
    # pickled base dictionaries, uncomment the three lines below and run
    # this module directly (requires the project's prologWrapper module):
#    from prologWrapper import Prolog
#    generateEnglish()
#    generatePortuguese()
    pass
    
