#!/usr/bin/env python
# -*- coding: utf-8 -*-


# cmap2owl -- Helper application to convert from concept maps to OWL ontologies
# Copyright (c) 2008-2013  Rodrigo Rizzi Starr
#  
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#  
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#  
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


'''
Helper scripts used while analysing phrases from the corpus

@author: Rodrigo Rizzi Starr
@copyright: Copyright © 2008-2013 Rodrigo Rizzi Starr
@license: MIT License
@contact: rodrigo.starr@gmail.com
'''


import os
import re
import glob
import nltk
import collections
import cPickle
from operator import itemgetter

from CXLReader import CXLReader
from phrase import Phrase
from linguisticRules import ParticipleRelation, \
                            ConjunctionClasses, DisjunctionClasses, \
                            DerivedClass, AdjectivePhrase


if __name__ == '__main__':
    # Glob pattern covering every .cxl concept map in the acquisition corpus.
    basePath = r'/home/rrs/01 - Doutorado/05 - MP ITA/02 - Aquisicao/*/*.cxl'

    allPhrases = []
    files = sorted(glob.glob(basePath))
    files.append(r'/home/rrs/01 - Doutorado/05 - MP ITA/01 - Codigo/src/test/test conceptRules.cxl')
    lps = []               # labels of every linking phrase found in the maps
    tokens = set()         # unique lower-cased tokens from linking-phrase labels
    stemmedTokens = set()  # stems of those tokens
    tokenizer = nltk.tokenize.WordPunctTokenizer()
    # NOTE(review): PortugueseStemmer's first positional parameter is
    # ignore_stopwords, not a language name; passing 'portuguese' is truthy,
    # so stopwords are being ignored -- confirm that is the intent.
    pst = nltk.stem.PortugueseStemmer('portuguese')

    # Parse each concept map, collect its phrases, and gather linking-phrase
    # labels/tokens along the way.
    for f in files:
        reader = CXLReader(f)
        reader.parse()
        graph = reader.graph
        graph.generatePhrases()
        allPhrases.extend(graph.phrases)

        for lp in graph.getLinkingPhrases():
            lps.append(lp.label)
            tokens.update(tokenizer.tokenize(lp.label.lower()))

    for token in tokens:
        stemmedTokens.add(pst.stem(token))
    ## for phrase in allPhrases:
    ##     print(phrase.getPhrase())

    # Load the pre-trained trigram POS tagger and tag every phrase.
    with open('tagger_pt.pickle', 'rb') as f:
        tritagger = cPickle.load(f)
    print('\n\n\nTagged phrases\n')
    for phrase in allPhrases:
        phrase.tag(tritagger)
        print(' '.join('%s/%s' % pair for pair in phrase.getTagged()))

    # Apply every linguistic rule to each phrase and print one line per
    # phrase: the tagged text padded to a fixed width, followed by one
    # tab-separated column per rule result (blank column if no match).
    print('\n\n\n\n\n'*5 + 'Regras\n')
    rules = [ParticipleRelation(), DerivedClass(), ConjunctionClasses(),
             DisjunctionClasses(), AdjectivePhrase()]
    for phrase in allPhrases:
        results = [rule.apply(phrase) for rule in rules]

        text = ' '.join('%s/%s' % pair for pair in phrase.getTagged())
        # BUGFIX: str.ljust returns a new string (str is immutable); the
        # original discarded the result, so the columns were never aligned.
        text = text.ljust(230)
        for result in results:
            text += ('\t' + str(result)) if len(result) > 0 else '\t'
        print(text)

    # Rule sketch
    # A more detailed class/subclass relationship
    print('\n\n\nNoun + PCP\n')
    # Compile once instead of re-matching the pattern string on every phrase.
    prepPcp = re.compile(r'([^/]*)/PREP[^ ]* +([^/]*)/PCP', re.UNICODE)
    for phrase in allPhrases:
        string = ' '.join('%s/%s' % pair for pair in phrase.getTagged())
        if prepPcp.match(string):
            print(string)

    ## # Find properties from a PCP + Preposition
    ## print('\n\n\nPCP\n')
    ## for phrase in allPhrases:
    ##     string = ' '.join('%s/%s' % pair for pair in phrase.getTagged())
    ##     if re.match(r'.* +([^/]*)/PCP +([^/]*)/PREP', string, re.UNICODE): print(string)
