#!/usr/bin/env python
# -*- coding: utf-8 -*-


# cmap2owl -- Helper application to convert from concept maps to OWL ontologies
# Copyright (c) 2008-2013  Rodrigo Rizzi Starr
#  
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#  
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#  
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


'''
Script to generate a POS tagger for portuguese-BR that is good for the cmap2owl
application. It is based on a corpus available in NLTK


@author: Rodrigo Rizzi Starr
@copyright: Copyright © 2008-2013 Rodrigo Rizzi Starr
@license: MIT License
@contact: rodrigo.starr@gmail.com
'''


'''
Word classes (from NLTK portuguese corpus). The most important are: N, NPROP, V, ADJ, PCP, NUM, ADV, VAUX

for ln,tag in sorted(((len(ls), tag) for tag, ls in words.iteritems()), reverse=True):
    print tag.ljust(13) + ' ' + str(ln)
N             20624
NPROP         19856
V             12927
ADJ           8340 
PCP           5050 
NUM           2646    <- May need some cleaning (numbers and words mixed)
V|+           1403 
ADV           1177 
N|EST         1076    <- Contractions
N|AP          596     <- Apostos (inúteis)
VAUX          542  
N|TEL         454  
N|DAT         230  
PREP          221  
PROADJ        187  
PROSUB        172  
PREP|+        153  
IN            152     <- Interjeições
N|HOR         139  
N|DAD         131  
KS            119  
PDEN          109  
ADJ|EST       96   
KC            79   
PROPESS       61   
ART           35   
VAUX|+        27   
PRO-KS-REL    24   
PRO-KS        21   
KC|[          12      <- Disjunctions
ADV-KS        12   
ADJ|+         11   
PREP|[        10      <- Disjunctions
KC|]          9    
CUR           9    
ADV-KS-REL    9    
PROP          7    
ADV|EST       7    
NPROP|+       6    
ADV|HOR       6    
KS|[          5       <- Disjunctions
ADV|]         4       <- Disjunctions
,             4    
ART|EST       3    
V|EST         2    
PROPESS|+     2    
PROADJ|+      2    
PREP|]        2       <- Disjunctions
PREP|EST      2    
PREP|+]       2       <- Disjunctions
NUM|TEL       2    
NPRO          2       <- Error ('Congresso', 'Folha')
KS|]          2       <- Disjunctions
KC|EST        2    
KC|+          2    
ART|+         2    
ADV|[         2       <- Disjunctions
'             2    
`             1    
[             1    
V|!           1    
VAUX|!        1    
PROPESS|EST   1    
PREP|         1    
PDEN|EST      1    
IN|EST        1    
ADV|+         1    
?             1    
=             1    
;             1    
:             1    
/             1    
...           1    
.             1    
-             1    
))            1    
)             1    
((            1    
(             1    
$             1    
"             1    
!             1    




for ln,tag in sorted(((len(ls), tag) for tag, ls in wordCounts.iteritems()), reverse=True):
    print tag.ljust(13) + ' ' + str(ln)
(Total: 1170095)
N             236462
ART           151891
NPROP         114318
PREP          104364
V             98056
PREP|+        78274
,             68494
ADJ           53372
ADV           30653
KC            28262
PCP           23092
"             21069
PROADJ        20919
NUM           18110
VAUX          17832
PROPESS       14718
KS            14320
PRO-KS-REL    11347
PROSUB        8598
)             7741
(             7713
PDEN          6786
:             6736
N|AP          4350
-             3117
N|EST         2807
CUR           2706
V|+           2373
PRO-KS        2150
?             1510
;             1376
!             919
N|HOR         913
ADV-KS-REL    898
N|TEL         866
'             510
IN            415
ADV-KS        392
N|DAT         285
N|DAD         255
ADJ|EST       174
KC|[          143
KC|]          133
VAUX|+        99
/             95
...           84
.             82
$             60
NPROP|+       35
[             23
PREP|[        20
ADV|[         18
=             14
ADV|]         13
ADJ|+         11
))            10
((            10
PREP|]        9
ADV|HOR       9
ADV|EST       9
ADV|+         8
PROP          7
PREP|+]       7
ART|+         7
V|EST         5
KS|[          5
KS|]          4
ART|EST       4
`             3
PREP|EST      3
NPRO          3
PROPESS|EST   2
PROPESS|+     2
PROADJ|+      2
PREP|         2
PDEN|EST      2
NUM|TEL       2
KC|EST        2
KC|+          2
V|!           1
VAUX|!        1
IN|EST        1
'''


import os
import sys
import nltk
import collections
import cPickle
from nltk.corpus import mac_morpho
from nltk.tag import DefaultTagger, RegexpTagger, UnigramTagger, BigramTagger, TrigramTagger

from portugueseNLP import PortugueseRegexpTagger

class RizziMorpho(object):
    '''Wrapper around the NLTK mac_morpho tagged corpus that rejoins split
       contractions and collapses tag variants.

       mac_morpho tags contractions as two tokens (e.g. de/PREP|+ a/ART);
       this class merges them back into the contracted surface form (da),
       retagged as PREP|ART, so the trained tagger learns contractions
       (they are not always obvious, e.g. nos -> em + os vs. nos/PRON).
       It also retags NPROP* as plain N and KC* variants as plain KC.
    '''

    # Preposition prefixes that contract with any following article form
    # by simple concatenation (de + a -> da, em + os -> nos, ...).
    _PREFIX_MAP = {
        u'de': u'd', u'De': u'D',
        u'em': u'n', u'Em': u'N',
        u'por': u'pel', u'Por': u'Pel',
    }

    # Irregular contractions that are only valid for specific articles.
    _IRREGULAR_MAP = {
        u'para': {u'o': u'pro', u'a': u'pra', u'os': u'pros', u'as': u'pras'},
        u'Para': {u'o': u'Pro', u'a': u'Pra', u'os': u'Pros', u'as': u'Pras'},
        u'a': {u'o': u'ao', u'a': u'à', u'os': u'aos', u'as': u'às'},
        u'A': {u'o': u'Ao', u'a': u'À', u'os': u'Aos', u'as': u'Às'},
    }

    def __init__(self):
        '''Load mac_morpho, rejoin split contractions and normalize tags.'''
        self._tagged_sents = []
        count = 0
        for sent in mac_morpho.tagged_sents():
            sent, localCount = self.joinContractions(sent)
            self._tagged_sents.append(self.replaceNprop(sent))
            count += localCount
        print('Contracted %d pairs' % count)
        # Flat view of every (word, tag) pair across the corrected corpus.
        self._tagged_words = [word for sent in self._tagged_sents
                              for word in sent]

    def tagged_sents(self):
        '''Return the corrected corpus as a list of tagged sentences.'''
        return self._tagged_sents

    def tagged_words(self):
        '''Return the corrected corpus as a flat list of (word, tag) pairs.'''
        return self._tagged_words

    def replaceNprop(self, sent):
        '''Return sent with NPROP* tags collapsed to N and KC* to KC.'''
        replacedSent = []
        for word, tag in sent:
            if tag.startswith(u'NPROP'):
                tag = u'N'
            elif tag.startswith(u'KC'):
                tag = u'KC'
            replacedSent.append((word, tag))

        return replacedSent

    def joinContractions(self, sent):
        '''Search for split contractions (a PREP|+ token immediately
           followed by an ART token) and join each pair back into a
           single retagged token.

           Returns (contractedSent, contractedCount).
        '''
        contractedSent = []
        contractedCount = 0
        i = 0
        while i < len(sent):
            if (i + 1 < len(sent) and sent[i][1] == u'PREP|+'
                    and sent[i + 1][1] == u'ART'):
                (found, word) = self.getJointWord(sent[i], sent[i + 1])
                if found:
                    contractedSent.append(word)
                    contractedCount += 1
                    i += 2
                    continue
            # Not a (known) contraction: keep the token unchanged.
            contractedSent.append(sent[i])
            i += 1

        return (contractedSent, contractedCount)

    def getJointWord(self, w1, w2):
        '''Build the contracted surface form of preposition w1 + article w2.

           w1 and w2 are (word, tag) pairs. Returns
           (found, (word, u'PREP|ART')) where word is None when the pair
           is not a known contraction (a warning is printed in that case).
        '''
        firstWord = w1[0]
        secondWord = w2[0]

        word = None
        if firstWord in self._PREFIX_MAP:
            word = self._PREFIX_MAP[firstWord] + secondWord
        elif firstWord in self._IRREGULAR_MAP:
            word = self._IRREGULAR_MAP[firstWord].get(secondWord)

        found = word is not None
        if not found:
            print('Unknown word pair: %s %s' %
                  (firstWord, secondWord))

        return (found, (word, u'PREP|ART'))

    

def printClass(words, klass, min=100):
    '''Tally and print the most frequent word endings for one tag class.

       words is a mapping tag -> iterable of words; klass selects the tag
       to inspect. The ending is the last 3 characters of each word (4 for
       words ending in 's', so plural endings stay comparable to their
       singular counterparts); words no longer than the ending are skipped.
       Only endings seen at least `min` times are reported.

       Returns the tally as a list of (count, ending) pairs, most frequent
       first.

       NOTE: the parameter name `min` shadows the builtin; it is kept for
       backward compatibility with existing keyword callers.
    '''
    klassDict = collections.defaultdict(int)
    for word in words[klass]:
        if len(word) < 1:
            # Corpus artifact: empty token -- report it and move on.
            print(klass, word)
        else:
            length = 4 if word[-1] == 's' else 3
            if len(word) > length:
                klassDict[word[-length:]] += 1

    # .items() instead of the Python2-only .iteritems() so the function
    # works under both Python 2 and 3.
    klassTally = sorted([(tally, end) for end, tally in klassDict.items()
                         if tally >= min], reverse=True)
    print('Terminações de %s' % klass)
    for tally, end in klassTally:
        print('%6s\t%d' % (end, tally))
    print('')

    return klassTally


def loadTagger(picklePath='tagger_pt.pickle'):
    '''Unpickle and return a previously trained tagger.

       picklePath defaults to the file written by createTagger().
    '''
    with open(picklePath, 'rb') as f:
        return cPickle.load(f)


def createTagger(corpus, picklePath='tagger_pt.pickle'):
    '''Train a trigram backoff tagger on `corpus` and pickle it.

       The backoff chain is Trigram -> Bigram -> Unigram ->
       PortugueseRegexpTagger -> DefaultTagger('N').  `corpus` must
       provide tagged_sents() (e.g. a RizziMorpho instance).

       The trained tagger is dumped to picklePath (protocol -1, i.e. the
       highest available) and also returned.
    '''
    # If nothing else matches, guess noun: N is by far the most frequent
    # open class in mac_morpho (see the statistics in the module docstring).
    dtagger = DefaultTagger('N')

    # Regex fallback rules (participles, adjectives, numbers, verb
    # endings, ...) are supplied by PortugueseRegexpTagger itself; the
    # unused local pattern list formerly defined here has been removed.
    retagger = PortugueseRegexpTagger(backoff=dtagger)

    # Hoist the training data: tagged_sents() was called once per tagger.
    train = corpus.tagged_sents()
    unitagger = UnigramTagger(train, backoff=retagger)
    bitagger = BigramTagger(train, backoff=unitagger)
    tritagger = TrigramTagger(train, backoff=bitagger)

    # This manual correction is due to the problems of learning because we
    # confound NPROP with N in the training set
    tritagger._context_to_tag[((u',', u'N'), u'&')] = u'KC'

    with open(picklePath, 'wb') as out:
        cPickle.dump(tritagger, out, -1)

    return tritagger


if __name__ == '__main__':

    # Build the contraction-corrected corpus, then train and pickle the
    # tagger on it.
    corpus = RizziMorpho()

    createTagger(corpus)

    # Group the corpus by tag twice in one pass: distinct word forms
    # (sets) and every occurrence (lists).  Nothing is printed here, so
    # merging the two original loops leaves the output unchanged.
    words = collections.defaultdict(set)
    wordCounts = collections.defaultdict(list)
    for token, tag in corpus.tagged_words():
        words[tag].add(token)
        wordCounts[tag].append(token)

    # Ending tallies over distinct word forms.
    adverbios = printClass(words, 'ADV', 5)
    participio = printClass(words, 'PCP', 100)
    adjetivos = printClass(words, 'ADJ', 15)
    verbos = printClass(words, 'V', 10)
    substantivos = printClass(words, 'N', 50)

    print('\n\n************** Absolute word counts ***************\n\n')

    # Same report, weighted by how often each word actually occurs.
    adverbios = printClass(wordCounts, 'ADV', 5)
    participio = printClass(wordCounts, 'PCP', 100)
    adjetivos = printClass(wordCounts, 'ADJ', 15)
    verbos = printClass(wordCounts, 'V', 10)
    substantivos = printClass(wordCounts, 'N', 50)