'''
Created on Sep 1, 2012

@author: LongDT
'''
import csv 
import nltk
import re
import commands
import pickle
from numpy.oldnumeric.arrayfns import reverse
from nltk.stem.wordnet import WordNetLemmatizer

class UnigramExtract(object):
    """Builds unigram feature lists and unigram-count feature vectors from
    pre-tokenized text files and the project's train/test CSV exports.

    Notes (review):
    - Paths such as "../../data/..." are relative to the working directory,
      so these methods must run from the expected script location.
    - self.lmtzr is created but never used by any method in this class.
    """

    def __init__(self, trainfileName):
        # Training CSV consumed by createPOSTrainfile().
        self.trainfileName = trainfileName
        self.lmtzr = WordNetLemmatizer()

    # ------------------------------------------------------------------
    # internal helpers (extracted from duplicated method bodies)
    # ------------------------------------------------------------------
    def _countUnigrams(self, inputfileName):
        """Return a {token: frequency} dict over a space-separated file."""
        unigram = {}
        with open(inputfileName) as src:
            for line in src:
                for token in line.strip().split(" "):
                    # dict.get() replaces the old has_key()/else counting.
                    unigram[token] = unigram.get(token, 0) + 1
        return unigram

    def _loadStopWords(self, stopFileName):
        """Return the upper-cased stop-word set read from stopFileName."""
        with open(stopFileName) as src:
            return set(line.strip().upper() for line in src)

    def _writeTopUnigrams(self, unigram, exceptWord, outputFileName,
                          fraction, stripPOS):
        """Write the most frequent unigrams to outputFileName, one per line.

        Only tokens ranked strictly below fraction * vocabulary-size are
        considered; stop words and tokens whose (POS-stripped, if stripPOS)
        form does not start with [a-zA-Z0-9.,] are skipped.
        """
        pattern = re.compile('[a-zA-Z0-9.,]')
        threshold = fraction * len(unigram)
        with open(outputFileName, 'w') as out:
            rank = 0
            for token in sorted(unigram, key=unigram.get, reverse=True):
                rank += 1
                if rank >= threshold:
                    break
                if token.upper() in exceptWord:
                    continue
                # For "word_POS" tokens the filter applies to the word part.
                candidate = token.split('_')[0] if stripPOS else token
                if pattern.match(candidate):
                    out.write(token + "\n")

    def _maskDigitTokens(self, sourceText):
        """Tokenize sourceText, replacing every token that contains a digit
        with the placeholder "100"."""
        masked = []
        for word in nltk.word_tokenize(sourceText):
            if re.search(r'\d+', word) is not None:
                word = "100"
            masked.append(word)
        return masked

    def _readFeatureHeading(self, featureFileName):
        """Return the ordered feature list, one stripped string per line."""
        with open(featureFileName) as src:
            return [line.strip() for line in src]

    # ------------------------------------------------------------------
    # public API
    # ------------------------------------------------------------------
    def createFeatureList(self, inputfileName, outputFileName=None,
                          stopFileName="../../data/unigramTrainPOSStopWords.txt"):
        """Write the top 3% most frequent word_POS unigrams of inputfileName.

        BUG fix: the original wrote to self.outputFile, which __init__ never
        sets (AttributeError unless a caller assigned the attribute
        externally).  The destination is now a parameter; when omitted the
        old self.outputFile fallback is kept for backward compatibility.
        """
        if outputFileName is None:
            outputFileName = self.outputFile
        unigram = self._countUnigrams(inputfileName)
        stopWords = self._loadStopWords(stopFileName)
        self._writeTopUnigrams(unigram, stopWords, outputFileName,
                               0.03, stripPOS=True)

    def createFeatureListLematized(self, inputfileName, outputFileName,
                                   stopFileName="../../data/unigramTrainStopWords.txt"):
        """Write the top 20% most frequent unigrams of inputfileName,
        skipping stop words and tokens not starting with [a-zA-Z0-9.,]."""
        unigram = self._countUnigrams(inputfileName)
        stopWords = self._loadStopWords(stopFileName)
        self._writeTopUnigrams(unigram, stopWords, outputFileName,
                               0.2, stripPOS=False)

    def createPOSTrainfile(self, outputFile):
        """Pickle {DocID_SentID: medpost-tagged text} to outputFile, built
        from every 6th data row of self.trainfileName (header skipped)."""
        resultMap = {}
        with open(self.trainfileName, 'rb') as src:
            count = 0
            for line in csv.reader(src, delimiter=','):
                count += 1
                # Skip the header row, then take every 6th data row.
                if count > 1 and (count - 1) % 6 == 0:
                    key = line[2] + "_" + line[3]   # DocID_SentID
                    text = "".join(w + " " for w in self._maskDigitTokens(line[4]))
                    # SECURITY: text is interpolated into a shell command;
                    # quotes/backticks in the data can inject commands.
                    # Consider subprocess with a list argv instead.
                    resultMap[key] = commands.getoutput(
                        'echo "' + text + '" |  ../../lib/medpost/medpost -text')
        with open(outputFile, 'w') as resultFile:
            resultFile.write
            pickle.dump(resultMap, resultFile)

    def createLematizedTrainfile(self, outputFile):
        """Write one digit-masked, space-joined token line per 6th data row
        of ../../data/train.csv (header skipped).

        NOTE(review): reads the hard-coded train.csv path rather than
        self.trainfileName -- kept as-is to preserve behavior.
        """
        with open(outputFile, 'w') as resultFile:
            with open("../../data/train.csv", 'rb') as src:
                count = 0
                for line in csv.reader(src, delimiter=','):
                    count += 1
                    if count > 1 and (count - 1) % 6 == 0:
                        words = self._maskDigitTokens(line[4])
                        resultFile.write("".join(w + " " for w in words) + '\n')

    def createLematizedTestfile(self, outputFile):
        """Write one digit-masked, space-joined token line per 6th data row
        of ../../data/test.csv (header skipped; text is in column 3)."""
        with open(outputFile, 'w') as resultFile:
            with open("../../data/test.csv", 'rb') as src:
                count = 0
                for line in csv.reader(src, delimiter=','):
                    count += 1
                    if count > 1 and (count - 1) % 6 == 0:
                        words = self._maskDigitTokens(line[3])
                        resultFile.write("".join(w + " " for w in words) + '\n')

    def createPOSTestfile(self, outputFile):
        """Write one line of word_POS tokens (nltk.pos_tag over digit-masked
        tokens) per 6th data row of ../../data/test.csv (header skipped)."""
        with open(outputFile, 'w') as resultFile:
            with open("../../data/test.csv", 'rb') as src:
                count = 0
                for line in csv.reader(src, delimiter=','):
                    count += 1
                    if count > 1 and (count - 1) % 6 == 0:
                        tagged = nltk.pos_tag(self._maskDigitTokens(line[3]))
                        row = "".join(w + '_' + p + " " for (w, p) in tagged)
                        resultFile.write(row + '\n')

    def convertSentenceToUnigramFeatures(self, sentence, featureFileName):
        """Return the substring count of each feature in `sentence` as a
        list of strings, in featureFileName order."""
        return [str(sentence.count(item))
                for item in self._readFeatureHeading(featureFileName)]

    def createFeatures(self, inputfileName, featureFileName, outFileName):
        """For each line of inputfileName write a comma-terminated row of
        substring counts, one column per feature in featureFileName.

        Counts are plain substring counts over the raw line (including its
        trailing newline), matching the original semantics.
        """
        featureHeading = self._readFeatureHeading(featureFileName)
        with open(outFileName, "w") as out:
            with open(inputfileName) as src:
                for line in src:
                    row = "".join(str(line.count(item)) + ","
                                  for item in featureHeading)
                    out.write(row + "\n")