#!/usr/bin/python 
#
# (originally entered at https://gist.github.com/1035399)
#
# License: GPLv3
#
# To download the AFINN word list do:
# wget http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/6010/zip/imm6010.zip
# unzip imm6010.zip
#
# Note that for pedagogic reasons there is a UNICODE/UTF-8 error in the code.

import codecs
import math
import operator
import re
import sys

from topia.termextract import tag
from topia.termextract import extract


# AFINN-111 is as of June 2011 the most recent version of AFINN
filenameAFINN = 'AFINN-111.txt'

# Build the word -> integer valence mapping from the tab-separated
# "word<TAB>score" list.  Decode the file as UTF-8 up front so that the
# multi-byte entries in AFINN-111 become unicode keys and match the
# (unicode) tweet words at lookup time — reading raw bytes here was the
# deliberate UNICODE/UTF-8 bug mentioned in the header.  The `with`
# block also guarantees the file handle is closed.
with codecs.open(filenameAFINN, 'r', 'utf-8') as afinn_file:
    afinn = dict((word, int(score))
                 for word, score in (line.strip().split('\t')
                                     for line in afinn_file))

# Word splitter pattern (any run of non-word characters)
pattern_split = re.compile(r"\W+")


def sentimentAnalysis(text):
    """
    Return a float sentiment score for the input text.

    Positive values indicate positive valence, negative values indicate
    negative valence.  Each word's AFINN score is summed and the total
    is normalised by sqrt(N) — a compromise between dividing by N and
    applying no normalisation at all.
    """
    words = pattern_split.split(text.lower())
    # A list comprehension rather than map(): on Python 3 a map object
    # is always truthy and has no len(), which would break the logic
    # below.  A list behaves identically on both versions.
    sentiments = [afinn.get(word, 0) for word in words]
    if sentiments:
        sentiment_score = float(sum(sentiments)) / math.sqrt(len(sentiments))
    else:
        sentiment_score = 0
    return sentiment_score


def buzzExtraction(tweet_text, category_term):
    """
    Extract buzz terms from tweet text.

    Returns at most the 10 extracted terms with the highest occurrence
    count (descending), excluding terms shorter than 3 characters and
    any term equal to category_term (case-insensitive).
    """
    extractor = extract.TermExtractor()
    all_buzzes = extractor(tweet_text)

    # Filter into a new list.  The original called remove() while
    # iterating the same list, which skips the element immediately
    # following each removal and so leaves unwanted entries behind.
    category_lower = category_term.lower()
    kept_buzzes = [buzz for buzz in all_buzzes
                   if len(buzz[0]) >= 3 and buzz[0].lower() != category_lower]

    # Sort by occurrence count (tuple index 1) and keep the top 10.
    return sorted(kept_buzzes, key=operator.itemgetter(1), reverse=True)[:10]


def significantWordCounter(text):
    """
    Return the number of word tokens in *text*.

    NOTE(review): despite the name, this counts EVERY token, not only
    those with a non-zero AFINN sentiment score — the original built the
    sentiment list and returned its length, which always equals the raw
    token count.  Confirm whether counting only scored words was the
    actual intent before changing the behaviour.
    """
    # The dict lookups in the original were dead work (only the length
    # was used), and len(map(...)) raises TypeError on Python 3, so
    # simply count the split tokens directly — behaviour is identical.
    words = pattern_split.split(text.lower())
    return len(words)


if __name__ == '__main__':
    # Example: score recent tweets for a handful of test queries via
    # the (unauthenticated, long-deprecated) Twitter search API.
    import simplejson
    import urllib

    queries = ["Very Positive", "Positive", "neutral", "negative",
               "very negative"]
    for query in queries:
        # quote_plus() percent-/plus-encodes the query; the original
        # interpolated raw spaces ("Very Positive") into the URL, which
        # produces an invalid request URI.
        url = ("http://search.twitter.com/search.json?rpp=100&q="
               + urllib.quote_plus(query))
        json = simplejson.load(urllib.urlopen(url))
        sentiments = [sentimentAnalysis(tweet['text'])
                      for tweet in json['results']]
        if sentiments:
            score = sum(sentiments) / math.sqrt(len(sentiments))
        else:
            # No results: avoid ZeroDivisionError from sqrt(0).
            score = 0.0
        print("%6.2f %s" % (score, query))