import re
import sys

import nltk
from nltk.corpus import stopwords

from ttsct_src import TokenMatcher, Sentence
# Restored: Segment, getConcepts and Sectioner are referenced below and were
# NameErrors while these imports stayed commented out.
from ttsct_src.Segment import Segment
from ttsct_src.SCTPreprocessors.DescriptionToConcept import getConcepts
from textSection import Sectioner

# NOTE(review): module-level side effect -- this prints on import; consider a
# logger. Parenthesized single-argument form prints identically in Python 2
# and is also valid Python 3.
print("Tokenized by Paragraph or Sentence")
        
class Preprocessor:
    """Turn free-text documents into nltk.FreqDist word bags.

    Pipeline pieces: sectioning (textSection.Sectioner), SNOMED-CT concept
    extraction (ttsct_src), optional bi/tri-gram expansion, stopword
    filtering and stemming.

    NOTE(review): several calls target the legacy NLTK 2.x FreqDist API
    (``inc``, ``sorted``) -- confirm the installed NLTK version before use.
    """

    def __init__(self, tokenizerName, stemmerName, additionalStopwordFile,
                 stopwordsLanguage, insFac=None, sectionKeys=None):
        """Build the preprocessor.

        tokenizerName          -- kept for interface compatibility (unused here).
        stemmerName            -- name of an nltk stemmer class, e.g. 'PorterStemmer'.
        additionalStopwordFile -- path to a whitespace-separated stopword file.
        stopwordsLanguage      -- language key for nltk.corpus.stopwords.
        insFac                 -- factory exposing xmlConfigurator (sub-text type).
        sectionKeys            -- optional section keys; one Sectioner per key.
        """
        self.stopwordsList = stopwords.words(stopwordsLanguage)
        # 'with' fixes the file-handle leak of the original bare open().read();
        # plain 'r' suffices because split() breaks on any whitespace anyway.
        with open(additionalStopwordFile, 'r') as swFile:
            self.stopwordsList.extend(swFile.read().lower().split())
        self.lexicalWordSet = ['MOBY', 'UMLS', 'SNOMED']
        self.stemmer = getattr(nltk, stemmerName)()
        self.insFac = insFac
        self.subTextSectioner = Sectioner('None')
        self.sectioners = []
        if sectionKeys is not None:
            for k in sectionKeys:
                self.sectioners.append(Sectioner(k))

    def tokenize(self, dataNode, separateSection=False, needConceptId=False,
                 replaceStrWithId=False, keepIdOnly=False, gram_N=1):
        """Tokenize dataNode.textStr into an nltk.FreqDist of words.

        <TODO>
            tokenize Name, Terminologies, Abbreviation at first,
        </TODO>
        then low-case and tokenize other words

        separateSection -- tokenize per titled sub-section, tagging each word
                           with the section tag ("word-TAG").
        needConceptId   -- also extract SNOMED concept ids via ttsct_src.
        replaceStrWithId / keepIdOnly -- forwarded to the concept-id step.
        gram_N          -- 1 = unigrams only, 2 adds bigrams, 3 adds trigrams.

        Side effect: rewrites dataNode.textStr (sectioned / concept-replaced).
        """
        dataNode.textStr = self.getSections(dataNode.textStr)
        text = dataNode.textStr

        if separateSection:
            secBagDict = {}
            sections = []
            for title in self.subTextSectioner.getTitleNames(text):
                # BUGFIX: secWords used to persist across titles AND was
                # re-assigned for every sub-section, so only the last
                # sub-section's words were ever tagged and bagged.
                secWords = []
                for ts in self.subTextSectioner.getSectionsByTitle(dataNode.textStr, title):
                    conceptIdList = []
                    if needConceptId:
                        (conceptIdList, _, ts) = self.sectionConceptId(
                            ts, dataNode, replaceStrWithId, keepIdOnly)
                    secWords.extend(re.findall(r"[a-z]+", ts.lower()))
                    secWords.extend(self._extraGrams(ts, gram_N))
                    secWords.extend(conceptIdList)
                    # BUGFIX: extend(ts) appended the string char-by-char.
                    sections.append(ts)
                tag = self.subTextSectioner.getTitleTag(title)
                secBagDict[tag] = ["%s-%s" % (w, tag) for w in secWords]
            words = []
            for taggedWords in secBagDict.values():
                words.extend(taggedWords)
            dataNode.textStr = '\n'.join(sections)
            return nltk.FreqDist(words)
        else:
            # BUGFIX: conceptIdList and noises were undefined (NameError)
            # whenever needConceptId was False.
            conceptIdList = []
            noises = []
            if needConceptId:
                (conceptIdList, noises, text) = self.addConceptId(
                    dataNode, replaceStrWithId, keepIdOnly)
            words = re.findall(r"[a-z]+", text.lower())  # alphabetic tokens only
            for noise in noises:
                try:
                    words.remove(noise.lower())  # drops first occurrence only
                except ValueError:
                    sys.stderr.write(str(noises) + ' >>>> ')
                    sys.stderr.write(noise + '\n')
            words.extend(self._extraGrams(text, gram_N))
            words.extend(conceptIdList)
            return nltk.FreqDist(words)

    def _extraGrams(self, text, gram_N):
        """Collect bi-/tri-grams over each configured sub-text of *text*.

        Deduplicates the identical loops that lived in both branches of
        tokenize(). Returns [] immediately for gram_N < 2, avoiding a
        needless (and insFac-dependent) sub-text pass.
        """
        grams = []
        if gram_N < 2:
            return grams
        subTextType = self.insFac.xmlConfigurator.getSubTextType()
        getSubTexts = self.getSubTextsGetter(subTextType)
        for subText in getSubTexts(text):
            partialWords = re.findall(r"[a-z]+", subText.lower())
            grams.extend(self.bigram(partialWords))
            if gram_N >= 3:
                grams.extend(self.trigram(partialWords))
        return grams

    def getSubTextsGetter(self, subTextType):
        """Map a configuration string to the sectioner method yielding sub-texts.

        Raises ValueError for an unknown type (previously an opaque
        UnboundLocalError on the return).
        """
        getters = {
            'Paragraphs': self.subTextSectioner.getParagraphs,
            'Sentences': self.subTextSectioner.getSentences,
            'FullText': self.subTextSectioner.getFullText,
        }
        try:
            return getters[subTextType]
        except KeyError:
            raise ValueError("unknown subTextType: %r" % subTextType)

    def _conceptIds(self, textStr, dataNode, replaceStrWithId, keepIdOnly):
        """Shared body of sectionConceptId/addConceptId (was duplicated).

        Matches SNOMED terms line by line, returning
        (conceptIdList, noises, possibly-rewritten textStr).
        """
        cidList = []
        conceptDict = nltk.FreqDist()
        concepts = []
        noises = []
        for line in textStr.split('\n'):
            sentence = Sentence.Sentence(line)
            sentence = TokenMatcher.matchToken(sentence)
            for t in sentence.getSegmentByType(Segment.SCT_TERM):
                concept = t.match.expression
                concepts.append(concept)
                conceptDict[concept] = len(concept.split())  # index concepts by their words count
                cidList.extend(getConcepts(t.match.ids))

        if replaceStrWithId:
            correctWords = re.findall(r"[a-zA-Z]+", textStr)
            # Legacy NLTK FreqDist.sorted(): decreasing count order, i.e.
            # replace the longest (most-word) concepts first.
            for c in conceptDict.sorted():
                textStr = textStr.replace(str(c), ' ')
            # Words left behind by partial replacements are "noises".
            for word in re.findall(r"[a-zA-Z]+", textStr):
                if word not in correctWords:
                    noises.append(word)
            print("<replaced_concept> %s:%s </replaced_concept>" % (dataNode.reqID, str(concepts)))
            if noises:
                print("<noises> %s:%s </noises>" % (dataNode.reqID, str(noises)))
        elif keepIdOnly:
            textStr = ' '
        return (cidList, noises, textStr)

    def sectionConceptId(self, secText, dataNode, replaceStrWithId, keepIdOnly):
        """Extract concept ids for one section; does NOT touch dataNode.textStr."""
        return self._conceptIds(secText, dataNode, replaceStrWithId, keepIdOnly)

    def addConceptId(self, dataNode, replaceStrWithId, keepIdOnly):
        """Extract concept ids for the whole document, rewriting dataNode.textStr."""
        (cidList, noises, newText) = self._conceptIds(
            dataNode.textStr, dataNode, replaceStrWithId, keepIdOnly)
        dataNode.textStr = newText
        return (cidList, noises, dataNode.textStr)

    def getSections(self, textStr):
        """Return the configured sections of textStr joined by spaces,
        or textStr unchanged when no sectioners are configured."""
        if not self.sectioners:
            return textStr
        secStrs = []
        for sec in self.sectioners:
            secStrs.extend(sec.getSectionList(textStr))
        return ' '.join(secStrs)

    def bigram(self, wordList):
        """Return adjacent word pairs joined by '_'.

        BUGFIX: the old guard ``len(wordList) > 2`` silently dropped the
        single bigram of a two-word list.
        """
        return ["%s_%s" % (wordList[i], wordList[i + 1])
                for i in range(len(wordList) - 1)]

    def trigram(self, wordList):
        """Return adjacent word triples joined by '_' ([] if fewer than 3 words)."""
        return ["%s_%s_%s" % (wordList[i], wordList[i + 1], wordList[i + 2])
                for i in range(len(wordList) - 2)]

    def removeStopwords(self, wordFreqDist):
        """Split wordFreqDist into (kept, stopped) distributions, in place.

        A trailing "-TAG" suffix (added by section tokenization) is stripped
        before the stopword test; as in the original, the stopped
        distribution is keyed by the untagged word (legacy FreqDist.inc API).
        """
        stoppedWordFreqDist = nltk.FreqDist()
        for word in list(wordFreqDist.keys()):  # copy: deleting while iterating
            idx = word.rfind('-')
            untaggedWord = word[:idx] if idx != -1 else word
            if untaggedWord in self.stopwordsList:
                stoppedWordFreqDist.inc(untaggedWord, wordFreqDist[word])
                del wordFreqDist[word]
        return (wordFreqDist, stoppedWordFreqDist)

    def lexicalVerify(self, wordFreqDist):
        """Placeholder lexical validation -- waiting for lexical dictionaries.

        Currently only inserts a dummy 'invalidWord' entry so callers see
        the expected (valid, invalid) two-tuple shape.
        """
        invalidWordFreqDist = nltk.FreqDist()
        invalidWordFreqDist.inc("invalidWord", 999)  # TODO: delete once real check lands
        # TODO: once dictionaries exist, move every word absent from
        # self.lexicalWordSet into invalidWordFreqDist.
        return (wordFreqDist, invalidWordFreqDist)

    def stem(self, wordFreqDist, scale='All'):
        """Mutate wordFreqDist in place via stem<scale> ('All'/'Verbs'/'Nouns'/'None')."""
        getattr(self, 'stem' + scale)(wordFreqDist)

    def stemAll(self, wordFreqDist):
        """Stem every word, merging counts onto the stemmed form.

        BUGFIX: the original unconditionally did
        ``wordFreqDist[stemmedWord] += wordFreqDist[word]`` and so DOUBLED
        the count whenever stemming left the word unchanged.
        """
        for word in list(wordFreqDist.keys()):  # copy: mutated during loop
            stemmedWord = self.stemmer.stem(word)
            if stemmedWord != word:
                wordFreqDist[stemmedWord] += wordFreqDist[word]
                del wordFreqDist[word]

    def stemVerbs(self, wordFreqDist):
        """Stem only the words isVerb() accepts (currently: all -- see TODO)."""
        for word in list(wordFreqDist.keys()):
            if not self.isVerb(word):
                continue
            stemmedWord = self.stemmer.stem(word)
            if stemmedWord != word:  # BUGFIX: no self-doubling when unchanged
                wordFreqDist[stemmedWord] += wordFreqDist[word]
                del wordFreqDist[word]

    ## <TODO>
    def isVerb(self, word):
        """Judge whether *word* is a verb (stub: accepts everything).

        TODO: return ``word in POS-VerbSet`` once a tagger is wired in.
        """
        return True
    ## </TODO>

    def stemNouns(self, wordFreqDist):
        """Noun-only stemming -- not implemented yet (deliberate no-op)."""
        pass

    def stemNone(self, wordFreqDist):
        """Dispatch target for scale='None': deliberately does nothing."""
        pass