'''
Created on May 25, 2011

@author: Sergiy Kulyk
'''

from twitter.api import Twitter, TwitterError
from twitter.oauth import OAuth, read_token_file
from log import Log

import nltk

import codecs

class Keyword:
    '''
    Determine a keyword/category for a Twitter user.

    The keyword is the most frequent noun in the user's recent tweets.
    Each result is computed once and cached both in memory (self.tuple_map)
    and in the file given to the constructor, so repeated requests for the
    same user are served from the cache. Instantiate once and reuse it for
    many users.
    '''


    def __init__(self, file_name):
        '''
        Load previously computed keywords from file_name, if it exists.

        @param file_name path of the persistent keyword cache; one line
               per user, formatted as "<uid> <score> <keyword>".

        Also sets up the ban list of substrings that are removed from the
        tweet text before analysis -- non-words and common words that only
        create noise (mentions, URLs, etc.).
        '''
        self.file_name = file_name
        self.ban_words = ['@', '%', '"', 'http', 'tweet', 'link']
        self.tuple_map = {}
        try:
            with codecs.open(file_name, mode='r', encoding='utf-8') as f:
                lines = f.readlines()
            for line in lines:
                parts = line.split(" ")
                if len(parts) < 3:
                    continue  # tolerate malformed lines instead of crashing
                # Keys are always strings; the score is stored as int so
                # file-loaded tuples match the ones get_keyword() computes.
                # rstrip drops the newline readlines() leaves on the keyword.
                self.tuple_map[parts[0]] = (int(parts[1]), parts[2].rstrip('\n'))
        except (IOError, ValueError):
            # NOTE: the original 'except IOError, ValueError:' (old Py2 form)
            # caught only IOError and bound it to the name ValueError; the
            # tuple form catches both, as intended.
            self.tuple_map = {}

    def add_banwords(self, banwords):
        '''
        Append a list of ban words to the internal noise-filter list.

        @param banwords iterable of substrings to strip from tweet text.
        '''
        self.ban_words += banwords


    def get_keyword(self, twitter, uid):
        '''
        Return a keyword that represents the main category of interest of a
        Twitter user.

        @param twitter initialized Twitter() object.
        @param uid Twitter user ID (int or str).
        @return (score, keyword) tuple, where 'score' is how many times the
                keyword was seen in the recent tweets; None when the timeline
                could not be fetched or contained no usable nouns.
        '''
        # Normalize the key: entries loaded from the cache file are always
        # strings, so an int uid must be converted or the cache never hits.
        key = str(uid)
        if key in self.tuple_map:
            return self.tuple_map[key]

        try:
            recent = twitter.statuses.user_timeline(user_id=key, count=200)
        except TwitterError as twitter_error:
            Log.println("Error " + str(twitter_error.e.code) + " in keywords.")
            return None

        # Make one big text, and remove all noise substrings.
        twit_text = '\n'.join(line['text'] for line in recent)
        for word in self.ban_words:
            twit_text = twit_text.replace(word, '')

        # Build the noun ranking as [(25, noun1), (10, noun2), ...].
        # The stopword list is hoisted into a set once: one O(1) lookup per
        # token instead of scanning the whole list for every token.
        stop_words = set(nltk.corpus.stopwords.words('english'))
        tokenized = [w.strip() for w in nltk.tokenize.word_tokenize(twit_text)
                     if w.strip() not in stop_words]
        nouns = [token[0] for token in nltk.tag.pos_tag(tokenized) if token[1] == 'NN']
        nouns_ranked = sorted([(nouns.count(word), word) for word in set(nouns)],
                              reverse=True)

        if not nouns_ranked:
            # Empty timeline, or every token was filtered out: nothing to
            # rank, and indexing nouns_ranked[0] would raise IndexError.
            return None

        # Cache the winner in memory and append it to the persistent file.
        self.tuple_map[key] = nouns_ranked[0]
        with codecs.open(self.file_name, mode='a', encoding='utf-8') as f:
            f.write(key + ' ' + str(nouns_ranked[0][0]) + ' ' + nouns_ranked[0][1] + '\n')

        return nouns_ranked[0]
        