import os
import csv
import TweetParse

#!/usr/bin/env python
# NOTE(review): this shebang sits below the imports, so the OS never sees it;
# it belongs on the very first line of the file.

# One counter slot per POMS vector component, in the fixed order used throughout
# this module (composed, anxious, elated, depressed, energetic, tired, agreeable,
# hostile, confident, unsure, clearheaded, confused).  Slot 12 (the 13th) counts
# tweets with no identifiable mood.  An earlier dict-based design is kept below
# for reference:
#vectorComponentCounter = dict(composed = 0, anxious = 0, elated = 0, depressed = 0, energetic = 0, tired = 0, agreeable = 0, hostile = 0, confident = 0, unsure = 0, clearheaded = 0, confused = 0)

vectorComponentCounter = [0,0,0,0,0,0,0,0,0,0,0,0,0]

#Analysis configuration and bookkeeping.  "NONE" marks a path the user has not
#configured yet (see prgSetup()).
tweetDirectory = "NONE"    # directory holding the raw tweet .txt dumps
parsedTwitterData = []     # tweet texts extracted by gatherData()
totalTweets = 0            # number of tweets fed through analyzeText()
tweetsAnalyzed = 0         # tweets for which findMaxLocations() produced buckets
wordLibraryDir = "NONE"    # path to the SPOMS word-library CSV
rawLibCounter = [0,0,0,0,0,0,0,0,0,0,0,0,0]  # dict sizes before languageCorrection()

#Core SPOMS words -- the official reference lists queried per word by
#analyzeText().  Each list is filled from one column of the library CSV in
#loadWordLibrary().
compCoreWords = []
anxCoreWords = []
elatCoreWords = []
depCoreWords = []
enerCoreWords = []
tireCoreWords = []
agreCoreWords = []
hostCoreWords = []
confiCoreWords = []
unsCoreWords = []
cleaCoreWords = []
confuCoreWords = []

#Word-association dictionaries (word -> count), one per vector component.
#loadWordLibrary() seeds them with zero counts; analyzeText() bumps a word's
#count whenever its tweet is assigned to that component.  nonPOMSWords collects
#words from tweets whose dominant bucket was "no identifiable mood".

compWords = {}
anxWords = {}
elatWords = {}
depWords = {}
enerWords = {}
tireWords = {}
agreWords = {}
hostWords = {}
confiWords = {}
unsWords = {}
cleaWords = {}
confuWords = {}
nonPOMSWords = {}

#loadWordLibrary() reads the library CSV, one column per mood component.
def loadWordLibrary():
    global wordLibraryDir
    
    global compCoreWords 
    global anxCoreWords 
    global elatCoreWords 
    global depCoreWords 
    global enerCoreWords 
    global tireCoreWords 
    global agreCoreWords 
    global hostCoreWords 
    global confiCoreWords 
    global unsCoreWords 
    global cleaCoreWords 
    global confuCoreWords
    
    global compWords 
    global anxWords 
    global elatWords 
    global depWords 
    global enerWords 
    global tireWords 
    global agreWords 
    global hostWords 
    global confiWords 
    global unsWords 
    global cleaWords 
    global confuWords  
    
    
    libraryRead = csv.reader(open(wordLibraryDir, 'rU'), delimiter=',', dialect='excel')
    rowCount = 0
    for eachRow in libraryRead:
        eachLine = ", ".join(eachRow)
        eachLine = eachLine.strip()
        eachLine = eachLine.lower()
        #print "Reading Line: " + eachLine
        moodData = eachLine.split(",")
        
        for i in range(0,12):
            moodData[i] = moodData[i].strip()
            if i == 0:
                compCoreWords.append(moodData[i])
                compWords[moodData[i]] = 0
            elif i == 1:
                anxCoreWords.append(moodData[i])
                anxWords[moodData[i]] = 0
            elif i == 2:
                elatCoreWords.append(moodData[i])
                elatWords[moodData[i]] = 0
            elif i == 3:
                depCoreWords.append(moodData[i])
                depWords[moodData[i]] = 0
            elif i == 4:
                enerCoreWords.append(moodData[i])
                enerWords[moodData[i]] = 0
            elif i == 5:
                tireCoreWords.append(moodData[i])
                tireWords[moodData[i]] = 0
            elif i == 6:
                agreCoreWords.append(moodData[i])
                agreWords[moodData[i]] = 0
            elif i == 7:
                hostCoreWords.append(moodData[i])
                hostWords[moodData[i]] = 0
            elif i == 8:
                confiCoreWords.append(moodData[i])
                confiWords[moodData[i]] = 0
            elif i == 9:
                unsCoreWords.append(moodData[i])
                unsWords[moodData[i]] = 0
            elif i == 10:
                cleaCoreWords.append(moodData[i])
                cleaWords[moodData[i]] = 0
            else:
                confuCoreWords.append(moodData[i])
                confuWords[moodData[i]] = 0
    del compWords["null"]
    del anxWords["null"]
    del elatWords["null"]
    del depWords["null"]
    del enerWords["null"]
    del tireWords["null"]
    del agreWords["null"]
    del hostWords["null"]
    del confiWords["null"]
    del unsWords["null"]
    del cleaWords["null"]
    del confuWords["null"]
    print "Word Library has been successfully loaded. :)"
    
def readWordLibrary():
    global compWords 
    global anxWords 
    global elatWords 
    global depWords 
    global enerWords 
    global tireWords 
    global agreWords 
    global hostWords 
    global confiWords 
    global unsWords 
    global cleaWords 
    global confuWords
    global nonPOMSWords
    
    tempTuple = []
    compWrds = [] 
    anxWrds = []
    elatWrds = []
    depWrds = []
    enerWrds = []
    tireWrds = []
    agreWrds = []
    hostWrds = []
    confiWrds = []
    unsWrds = []
    cleaWrds = []
    confuWrds = []
    nonPOMSWrds = []
    
    for eachKey in compWords.keys():
        tempTuple = []
        tempTuple.append(compWords[eachKey])
        tempTuple.append(eachKey)
        compWrds.append(tempTuple)
    for eachKey in anxWords.keys():
        tempTuple = []
        tempTuple.append(anxWords[eachKey])
        tempTuple.append(eachKey)
        anxWrds.append(tempTuple)
    for eachKey in elatWords.keys():
        tempTuple = []
        tempTuple.append(elatWords[eachKey])
        tempTuple.append(eachKey)
        elatWrds.append(tempTuple)
    for eachKey in depWords.keys():
        tempTuple = []
        tempTuple.append(depWords[eachKey])
        tempTuple.append(eachKey)
        depWrds.append(tempTuple)
    for eachKey in enerWords.keys():
        tempTuple = []
        tempTuple.append(enerWords[eachKey])
        tempTuple.append(eachKey)
        enerWrds.append(tempTuple)
    for eachKey in tireWords.keys():
        tempTuple = []
        tempTuple.append(tireWords[eachKey])
        tempTuple.append(eachKey)
        tireWrds.append(tempTuple)
    for eachKey in agreWords.keys():
        tempTuple = []
        tempTuple.append(agreWords[eachKey])
        tempTuple.append(eachKey)
        agreWrds.append(tempTuple)
    for eachKey in hostWords.keys():
        tempTuple = []
        tempTuple.append(hostWords[eachKey])
        tempTuple.append(eachKey)
        hostWrds.append(tempTuple)
    for eachKey in confiWords.keys():
        tempTuple = []
        tempTuple.append(confiWords[eachKey])
        tempTuple.append(eachKey)
        confiWrds.append(tempTuple)
    for eachKey in unsWords.keys():
        tempTuple = []
        tempTuple.append(unsWords[eachKey])
        tempTuple.append(eachKey)
        unsWrds.append(tempTuple)
    for eachKey in cleaWords.keys():
        tempTuple = []
        tempTuple.append(cleaWords[eachKey])
        tempTuple.append(eachKey)
        cleaWrds.append(tempTuple)
    for eachKey in confuWords.keys():
        tempTuple = []
        tempTuple.append(confuWords[eachKey])
        tempTuple.append(eachKey)
        confuWrds.append(tempTuple)
    for eachKey in nonPOMSWords.keys():
        tempTuple = []
        tempTuple.append(nonPOMSWords[eachKey])
        tempTuple.append(eachKey)
        nonPOMSWrds.append(tempTuple)
    
    compWrds.sort() 
    anxWrds.sort()
    elatWrds.sort()
    depWrds.sort()
    enerWrds.sort()
    tireWrds.sort()
    agreWrds.sort()
    hostWrds.sort()
    confiWrds.sort()
    unsWrds.sort()
    cleaWrds.sort()
    confuWrds.sort()
    nonPOMSWrds.sort()
    
    topAssociations = raw_input("Enter Number of Top Associations to Show:")
    tpAs = int(topAssociations) + 1
    
    print "Top " + str(tpAs) + " Word-Mood Associations"
    
    print "Composed:"
    for i in range(len(compWrds)-1,len(compWrds)-tpAs,-1):
        print compWrds[i]
    print ""
    print "Anxious:"
    for i in range(len(anxWrds)-1,len(anxWrds)-tpAs,-1):
        print anxWrds[i]
    print ""
    print "Elated:"
    for i in range(len(elatWrds)-1,len(elatWrds)-tpAs,-1):
        print elatWrds[i]
    print ""
    print "Depressed:"
    for i in range(len(depWrds)-1,len(depWrds)-tpAs,-1):
        print depWrds[i]
    print ""
    print "Energetic:"
    for i in range(len(enerWrds)-1,len(enerWrds)-tpAs,-1):
        print enerWrds[i]
    print ""
    print "Tired:"
    for i in range(len(tireWrds)-1,len(tireWrds)-tpAs,-1):
        print tireWrds[i]
    print ""
    print "Agreeable:"
    for i in range(len(agreWrds)-1,len(agreWrds)-tpAs,-1):
        print agreWrds[i]
    print "Hostile:"
    for i in range(len(hostWrds)-1,len(hostWrds)-tpAs,-1):
        print hostWrds[i]
    print ""
    print "Confident:"
    for i in range(len(confiWrds)-1,len(confiWrds)-tpAs,-1):
        print confiWrds[i]
    print ""
    print "Unsure:"
    for i in range(len(unsWrds)-1,len(unsWrds)-tpAs,-1):
        print unsWrds[i]
    print ""
    print "Clearheaded:"
    for i in range(len(cleaWrds)-1,len(cleaWrds)-tpAs,-1):
        print cleaWrds[i]
    print ""
    print "Confused:"
    for i in range(len(confuWrds)-1,len(confuWrds)-tpAs,-1):
        print confuWrds[i]
    print "No Identifiable Mood:"
    for i in range(len(nonPOMSWrds)-1,len(nonPOMSWrds)-tpAs,-1):
        print nonPOMSWrds[i]
    print ""
    print "END OF WORD-MOOD ASSOCIATIONS"

def findMaxLocations(moodStats):
    """Return the indices of the mood components tied for the highest count.

    moodStats is a 13-slot list of word tallies; slots 0-11 are the POMS
    vector components and slot 12 is the no-identifiable-mood bucket.
    Slot 12 never competes for the maximum: it is appended to the result
    exactly once whenever its tally is non-zero.  The input list is left
    unmodified (the original zeroed slot 12 in place).
    """
    wordNoise = moodStats[12]
    searchRegion = max(moodStats[0:12])
    locations = []

    # BUG FIX: when every mood slot was 0 but slot 12 was not, the original
    # matched all thirteen zero slots and then appended 12 a second time,
    # inflating every vector counter.  Only report real winners.
    if searchRegion > 0:
        for indexPosition in range(0, 12):
            if moodStats[indexPosition] == searchRegion:
                locations.append(indexPosition)

    if wordNoise > 0:
        locations.append(12)

    return locations

def analyzeText(tweetData):
    global totalTweets
    global tweetsAnalyzed
    global vectorComponentCounter
    
    global compCoreWords 
    global anxCoreWords 
    global elatCoreWords 
    global depCoreWords 
    global enerCoreWords 
    global tireCoreWords 
    global agreCoreWords 
    global hostCoreWords 
    global confiCoreWords 
    global unsCoreWords 
    global cleaCoreWords 
    global confuCoreWords
    
    global compWords 
    global anxWords 
    global elatWords 
    global depWords 
    global enerWords 
    global tireWords 
    global agreWords 
    global hostWords 
    global confiWords 
    global unsWords 
    global cleaWords 
    global confuWords  
    global nonPOMSWords
    
    totalTweets = totalTweets + 1
    tweetData = tweetData.lower()
    twitterMessage = tweetData.split(" ")
    maxLocations = []
    print "Processing Tweet #" + str(totalTweets)
    librarySize = len(compWords) + len(anxWords) + len(elatWords) + len(depWords) + len(enerWords) + len(tireWords) + len(agreWords) + len(hostWords) + len(confiWords) + len(unsWords) + len(cleaWords) + len(confuWords)
    print "Word Library Size: " + str(librarySize)
    moodFilter = [0,0,0,0,0,0,0,0,0,0,0,0,0]
    
    #Words in the Twitter message are queried in each of the dictionaries.
    
    for eachWord in twitterMessage:
        eachWord = eachWord.strip()
        #Hyperlink Filter
        if (eachWord[0:5] == "http:" or eachWord[0:4] == "www."):
            eachWord = ""
        #print "Analyzing Word: " + eachWord
        if (eachWord in compCoreWords) == True:
            moodFilter[0] = moodFilter[0] + 1
        if (eachWord in anxCoreWords) == True:
            moodFilter[1] = moodFilter[1] + 1
        if (eachWord in elatCoreWords) == True:
            moodFilter[2] = moodFilter[2] + 1
        if (eachWord in depCoreWords) == True:
            moodFilter[3] = moodFilter[3] + 1
        if (eachWord in enerCoreWords) == True:
            moodFilter[4] = moodFilter[4] + 1
        if (eachWord in tireCoreWords) == True:
            moodFilter[5] = moodFilter[5] + 1
        if (eachWord in agreCoreWords) == True:
            moodFilter[6] = moodFilter[6] + 1
        if (eachWord in hostCoreWords) == True:
            moodFilter[7] = moodFilter[7] + 1
        if (eachWord in confiCoreWords) == True:
            moodFilter[8] = moodFilter[8] + 1
        if (eachWord in unsCoreWords) == True:
            moodFilter[9] = moodFilter[9] + 1
        if (eachWord in cleaCoreWords) == True:
            moodFilter[10] = moodFilter[10] + 1
        if (eachWord in confuCoreWords) == True:
            moodFilter[11] = moodFilter[11] + 1
        if (eachWord not in compCoreWords and eachWord not in anxCoreWords and eachWord not in elatCoreWords and eachWord not in depCoreWords and eachWord not in enerCoreWords and eachWord not in tireCoreWords and eachWord not in agreCoreWords and eachWord not in hostCoreWords and eachWord not in confiCoreWords and eachWord not in unsCoreWords and eachWord not in cleaCoreWords and eachWord not in confuCoreWords) == True:
            moodFilter[12] = moodFilter[12] + 1
    
    print moodFilter
    if max(moodFilter) > 0:
        maxLocations = findMaxLocations(moodFilter)
        tweetsAnalyzed = tweetsAnalyzed + 1
          
    for eachLocation in maxLocations:
        associatedGroup = eachLocation
        vectorComponentCounter[associatedGroup] += 1
        if associatedGroup == 0:
            for eachWord in twitterMessage:
                compWords[eachWord] = 1 + compWords.get(eachWord,0)
        elif associatedGroup == 1:
            for eachWord in twitterMessage:
                anxWords[eachWord] = 1 + anxWords.get(eachWord,0)
        elif associatedGroup == 2:
            for eachWord in twitterMessage:
                elatWords[eachWord] = 1 + elatWords.get(eachWord,0)
        elif associatedGroup == 3:
            for eachWord in twitterMessage:
                depWords[eachWord] = 1 + depWords.get(eachWord,0)
        elif associatedGroup == 4:
            for eachWord in twitterMessage:
                enerWords[eachWord] = 1 + enerWords.get(eachWord,0)
        elif associatedGroup == 5:
            for eachWord in twitterMessage:
                tireWords[eachWord] = 1 + tireWords.get(eachWord,0)
        elif associatedGroup == 6:
            for eachWord in twitterMessage:
                agreWords[eachWord] = 1 + agreWords.get(eachWord,0)
        elif associatedGroup == 7:
            for eachWord in twitterMessage:
                hostWords[eachWord] = 1 + hostWords.get(eachWord,0)
        elif associatedGroup == 8:
            for eachWord in twitterMessage:
                confiWords[eachWord] = 1 + confiWords.get(eachWord,0)
        elif associatedGroup == 9:
            for eachWord in twitterMessage:
                unsWords[eachWord] = 1 + unsWords.get(eachWord,0)
        elif associatedGroup == 10:
            for eachWord in twitterMessage:
                cleaWords[eachWord] = 1 + cleaWords.get(eachWord,0)
        elif associatedGroup == 11:
            for eachWord in twitterMessage:
                confuWords[eachWord] = 1 + confuWords.get(eachWord,0)
        elif associatedGroup == 12:
            for eachWord in twitterMessage:
                nonPOMSWords[eachWord] = 1 + nonPOMSWords.get(eachWord,0)

def languageCorrection(mainComponent, *comparisonComponents):
    """Subtract competing association strength from mainComponent, in place.

    For every word in mainComponent, the largest score that word holds in
    any of the comparison dictionaries is subtracted from its score here,
    so words common to many moods lose their weight.  Generalized from the
    original fixed twelve comparison parameters to any number; existing
    callers, which pass twelve dictionaries positionally, are unaffected.
    Scores may go negative; removeNegativeAssociations() prunes those.
    """
    for eachKey in mainComponent:
        competing = [other.get(eachKey, 0) for other in comparisonComponents]
        if competing:
            mainComponent[eachKey] -= max(competing)

def proportionalize():
    """Convert every raw word-association count into a proportion.

    Each count is divided by the size of its own dictionary so that
    differently sized dictionaries become comparable before the
    cross-component correction step.  Dictionaries are modified in place.
    """
    allDicts = [compWords, anxWords, elatWords, depWords, enerWords,
                tireWords, agreWords, hostWords, confiWords, unsWords,
                cleaWords, confuWords, nonPOMSWords]
    for wordDict in allDicts:
        size = float(len(wordDict))
        # An empty dictionary has nothing to scale (and would divide by 0).
        if size == 0:
            continue
        for eachKey in wordDict:
            wordDict[eachKey] = float(wordDict[eachKey]) / size

def cleanDictionary(dictionary, deletionStack):
    """Remove every key listed in deletionStack from dictionary, in place.

    All keys are expected to be present; a missing key raises KeyError,
    exactly as before.
    """
    for doomedKey in deletionStack:
        del dictionary[doomedKey]

def removeNegativeAssociations():
    """Prune words whose corrected association score is zero or below.

    Applies the same sweep to all thirteen association dictionaries,
    replacing the original thirteen copy-pasted loops.  Keys are collected
    first and then deleted via cleanDictionary(), so no dictionary is
    modified while being iterated.
    """
    allDicts = (compWords, anxWords, elatWords, depWords, enerWords,
                tireWords, agreWords, hostWords, confiWords, unsWords,
                cleaWords, confuWords, nonPOMSWords)
    for wordDict in allDicts:
        deletionPile = [eachKey for eachKey in wordDict
                        if wordDict[eachKey] <= 0]
        cleanDictionary(wordDict, deletionPile)

def gatherData():
    global tweetDirectory
    global parsedTwitterData
    tweetFiles = []
    if (tweetDirectory == "NONE"):
        print "The program has not been set-up for textual analysis."
        print "Please go to PROGRAM SET-UP and then re-try textual analysis."
    else:
        print "Attemping to gather data. . ."
        os.chdir(tweetDirectory)
        for files in os.listdir("."):
            if files.endswith(".txt"):
                tweetFiles.append(files)
        print "Files Indexed!"
        os.system("""echo "" > parsedTweets.txt""")
        os.system("""egrep -o '"text":"[^"]*"' *.txt >> parsedTweets.txt""")
        print "Data Gathered!"
        
        rawFileData = open("parsedTweets.txt", 'r')
        formattedFile = rawFileData.read()
        
        for eachIndexedFile in tweetFiles:
            formattedFile = formattedFile.replace(eachIndexedFile + """:"text":""", "")
        
        formattedFile = formattedFile.replace('"', "")
        parsedTwitterData = formattedFile.split('\n')
        iterativeAnalysis()
        
        
    
    main()

def iterativeAnalysis():
    global compWords 
    global anxWords 
    global elatWords 
    global depWords 
    global enerWords 
    global tireWords 
    global agreWords 
    global hostWords 
    global confiWords 
    global unsWords 
    global cleaWords 
    global confuWords
    global nonPOMSWords
    print "Processing Directory: " + tweetDirectory
    global parsedTwitterData
    
    for eachTweet in parsedTwitterData:
        analyzeText(eachTweet)
    
    print "Recording Raw Dictionary Data. . ."
    
    rawLibCounter[0] = len(compWords)
    rawLibCounter[1] = len(anxWords)
    rawLibCounter[2] = len(elatWords)
    rawLibCounter[3] = len(depWords)
    rawLibCounter[4] = len(enerWords)
    rawLibCounter[5] = len(tireWords)
    rawLibCounter[6] = len(agreWords)
    rawLibCounter[7] = len(hostWords)
    rawLibCounter[8] = len(confiWords)
    rawLibCounter[9] = len(unsWords)
    rawLibCounter[10] = len(cleaWords)
    rawLibCounter[11] = len(confuWords)
    rawLibCounter[12] = len(nonPOMSWords)
    print "Raw Dictionary Data Recored!"
    
    print "Proportionalizing Dictionary Entries. . ."
    proportionalize()
    print "Dictionary Entries Proportionalized!"
    
    
    print "Correcting Dictionaries. . ."
    languageCorrection(compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "7.7% Corrected"
    languageCorrection(anxWords, compWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "15.4% Corrected"
    languageCorrection(elatWords, compWords, anxWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "23.1% Corrected"
    languageCorrection(depWords, compWords, anxWords, elatWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "30.8% Corrected"
    languageCorrection(enerWords, compWords, anxWords, elatWords, depWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "38.5% Corrected"
    languageCorrection(tireWords, compWords, anxWords, elatWords, depWords, enerWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "46.2% Corrected"
    languageCorrection(agreWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, hostWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "53.8% Corrected"
    languageCorrection(hostWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, confiWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "61.5% Corrected"
    languageCorrection(confiWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, unsWords, cleaWords, confuWords, nonPOMSWords)
    print "69.2% Corrected"
    languageCorrection(unsWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, cleaWords, confuWords, nonPOMSWords)
    print "76.9% Corrected"
    languageCorrection(cleaWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, confuWords, nonPOMSWords)
    print "84.6% Corrected"
    languageCorrection(confuWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, nonPOMSWords)
    print "92.3% Corrected"
    languageCorrection(nonPOMSWords, compWords, anxWords, elatWords, depWords, enerWords, tireWords, agreWords, hostWords, confiWords, unsWords, cleaWords, confuWords)
    print "100.0% Corrected"
    
    print "Cleaning Up Data. . ."
    removeNegativeAssociations()
    print "Data Cleaned!"
    
    print "Twitter Data Analyzed! :)"
    
def mood(positiveEmotion, negativeEmotion, score):
    """Map a signed vector score onto an emotion label.

    A positive score yields positiveEmotion, a negative score yields
    negativeEmotion, and zero yields "Neutral".
    """
    if score == 0:
        return "Neutral"
    return positiveEmotion if score > 0 else negativeEmotion

def statistics():
    global vectorComponentCounter
    #global tweetsAnalyzed
    #global totalTweets
    
    global compWords 
    global anxWords 
    global elatWords 
    global depWords 
    global enerWords 
    global tireWords 
    global agreWords 
    global hostWords 
    global confiWords 
    global unsWords 
    global cleaWords 
    global confuWords
    
    compAnx = vectorComponentCounter[0] - vectorComponentCounter[1]
    elatDep = vectorComponentCounter[2] - vectorComponentCounter[3]
    enerTire = vectorComponentCounter[4] - vectorComponentCounter[5]
    agreHost = vectorComponentCounter[6] - vectorComponentCounter[7]
    confiUns = vectorComponentCounter[8] - vectorComponentCounter[9]
    cleaConfu = vectorComponentCounter[10] - vectorComponentCounter[11]
    #percentAnalyzed = tweetsAnalzyed / totalTweets * 100.0
    
    print "-----------------------STATISTICS-----------------------"
    print "|     MOOD VECTOR    | # |  EMOTION  |"
    print "----------------------------------------------------"
    print "|  Composed-Anxious  |" + str(compAnx) + "|" + mood("Composed", "Anxious", compAnx) + "|"
    print "|  Elated-Depressed  |" + str(elatDep) + "|" + mood("Elated", "Depressed", elatDep) + "|"
    print "|  Energetic-Tired   |" + str(enerTire) + "|" + mood("Energetic", "Tired", enerTire) + "|"
    print "|  Agreeable-Hostile |" + str(agreHost) + "|" + mood("Agreeable", "Hostile", agreHost) + "|"
    print "|  Confident-Unsure  |" + str(confiUns) + "|" + mood("Confident", "Unsure", confiUns) + "|"
    print "|Clearheaded-Confused|" + str(cleaConfu) + "|" + mood("Clearheaded", "Confused", cleaConfu) + "|"
    print "|No Identifiable Mood|" + str(vectorComponentCounter[12]) + "| Not Applicable |"
    
    print "---------VECTOR BREAKDOWN------------"
    print "|     MOOD VECTOR    |      #       |"
    print "----------------------------------------"
    print "|      COMPOSED      |" + str(vectorComponentCounter[0]) + "|"
    print "|      ANXIOUS       |" + str(vectorComponentCounter[1]) + "|"
    print "|      ELATED        |" + str(vectorComponentCounter[2]) + "|"
    print "|      DEPRESSED     |" + str(vectorComponentCounter[3]) + "|"
    print "|      ENERGETIC     |" + str(vectorComponentCounter[4]) + "|"
    print "|      TIRED         |" + str(vectorComponentCounter[5]) + "|"
    print "|      AGREEABLE     |" + str(vectorComponentCounter[6]) + "|"
    print "|      HOSTILE       |" + str(vectorComponentCounter[7]) + "|"
    print "|      CONFIDENT     |" + str(vectorComponentCounter[8]) + "|"
    print "|      UNSURE        |" + str(vectorComponentCounter[9]) + "|"
    print "|      CLEARHEADED   |" + str(vectorComponentCounter[10]) + "|"
    print "|      CONFUSED      |" + str(vectorComponentCounter[11]) + "|"
    print "|NO IDENTIFIABLE MOOD|" + str(vectorComponentCounter[12]) + "|"
    
    print "--# OF WORD-MOOD ASSOCIATIONS PER VECTOR COMPONENT--"
    print "|   COMPONENT  |  BEFORE CORRECTION  | AFTER CORRECTION |"
    print "-----------------------------------"
    print "|   Composed   |" + str(rawLibCounter[0]) + "|" + str(len(compWords)) + "|"
    print "|   Anxious    |" + str(rawLibCounter[1]) + "|" +  str(len(anxWords)) + "|"
    print "|   Elated     |" + str(rawLibCounter[2]) + "|" +  str(len(elatWords)) + "|"
    print "|   Depressed  |" + str(rawLibCounter[3]) + "|" +  str(len(depWords)) + "|"
    print "|   Energetic  |" + str(rawLibCounter[4]) + "|" +  str(len(enerWords)) + "|"
    print "|   Tired      |" + str(rawLibCounter[5]) + "|" +  str(len(tireWords)) + "|"
    print "|   Agreeable  |" + str(rawLibCounter[6]) + "|" +  str(len(agreWords)) + "|"
    print "|   Hostile    |" + str(rawLibCounter[7]) + "|" +  str(len(hostWords)) + "|"
    print "|   Confident  |" + str(rawLibCounter[8]) + "|" +  str(len(confiWords)) + "|"
    print "|   Unsure     |" + str(rawLibCounter[9]) + "|" +  str(len(unsWords)) + "|"
    print "| Clearheaded  |" + str(rawLibCounter[10]) + "|" +  str(len(cleaWords)) + "|"
    print "|   Confused   |" + str(rawLibCounter[11]) + "|" +  str(len(confuWords)) + "|"
    print "| No ID-d Mood |" + str(rawLibCounter[12]) + "|" + str(len(nonPOMSWords)) + "|"
    
    #print "Total Tweets Processed: " + str(totalTweets)
    #print "Total Tweets Analyzed in SPOMS: " + str(tweetsAnalyzed)
    #print "Percentage of Tweets Succesfully Analyzed: " + str(percentAnalyzed) + "%"
    
def totals():
    global tweetsAnalyzed
    global totalTweets
    print "Total Tweets: " + str(totalTweets)
    print "Tweets Analyzed: " + str(tweetsAnalyzed)

def main():
    global compWords
    print "SPOMS Twitter Analyzer - By A.J. Gaba - McGill University"
    userChoice = 0
    while (userChoice != 5):
        print "1. Analyze Twitter Dataset"
        print "2. Show Statistics"
        print "3. Program Setup"
        print "4. Word-Mood Associations"
        print "5. Leave SPOMS Analyzer"
        userInput = raw_input("Choice: ")
        userChoice = int(userInput)
        
        if (userChoice == 1):
            gatherData()
        elif (userChoice == 2):
            statistics()
        elif (userChoice == 3):
            prgSetup()
        elif (userChoice == 4):
            readWordLibrary()
        elif (userChoice == 10):
            analyzeText("I am very confused about what to call Prince.")
        elif (userChoice == 5):
            break
        
        
    print ""

def prgSetup():
    global tweetDirectory
    global wordLibraryDir
    
    print "--------------------PROGRAM SETUP------------------------"
    setupOptionOne = raw_input("Would you like to modify the Twitter data source? (Y/N): ")
    
    if (setupOptionOne == "Y" or setupOptionOne == "y"):
        print "*Twitter Data Source*"
        print "Current Twitter Data Source From Disk: " + tweetDirectory 
        tweetDirectory = raw_input("Enter Tweet Directory: ")
    
    setupOptionTwo = raw_input("Would you like to modify the location of the SPOMS word library? (Y/N): ")
    
    if (setupOptionTwo == "Y" or setupOptionTwo == "y"):
        print "*SPOMS Word Library*"
        print "Current Word Library Source From Disk: " + wordLibraryDir
        wordLibraryDir = raw_input("Enter Location of POMS Word Library: ")
        loadWordLibrary()

# Start the interactive menu only when run as a script, so the module can
# be imported (e.g. for testing) without launching the UI.
if __name__ == "__main__":
    main()
