#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nltk.corpus import stopwords
import sys
import json

# Word list loaded from the filter file by init_common_words(); consulted in
# main() to drop overly frequent words before emitting counts.
common_words = []

def get_text(obj):
    """Extract texts and their retweet weights from a decoded tweet object.

    ``obj['status']`` selects which companion texts accompany ``obj['text']``:
    1 -> quoted text, 2 -> retweeted text (its retweet count is also
    subtracted from the primary weight), 3 -> both, anything else -> none.

    Returns ``[text, num]``: parallel lists where ``num[i]`` is the count
    associated with ``text[i]``.
    """
    status = obj['status']
    text = [obj['text']]
    # Primary weight counts the tweet itself plus its retweets.
    num = [1 + obj['retweet']]
    if status == 1:
        text.append(obj['quoted_quoted_text'])
        num.append(obj['quoted_retweet'])
    elif status == 2:
        text.append(obj['retweeted_text'])
        num.append(obj['retweeted_retweet'])
        # The retweeted original's count is carried separately, so it is
        # removed from the primary weight — presumably to avoid double
        # counting (NOTE(review): confirm against the producer of 'status').
        num[0] -= obj['retweeted_retweet']
    elif status == 3:
        text.append(obj['retweeted_text'])
        text.append(obj['quoted_quoted_text'])
        num.append(obj['retweeted_retweet'])
        num.append(obj['quoted_retweet'])
    # Any other status: no companion text. (The original `text += ''` was a
    # no-op — iterating an empty string appends nothing — and was removed.)
    return [text, num]

def init_common_words(input_file='/home/ivy/git/Social/Social/resource/filter.txt'):
    """Load the common-word filter list into the module-level ``common_words``.

    input_file: path to a newline-delimited word list. Defaults to the
    original hard-coded location so existing callers are unaffected.

    Rebuilds (rather than appends to) ``common_words`` so repeated calls do
    not accumulate duplicates. Also returns the loaded list for convenience.
    """
    global common_words
    # Open read-only: the original's 'r+' requested write access for no reason.
    with open(input_file, 'r') as f:
        common_words = [line.strip() for line in f]
    return common_words

def main():
    """Read tweet JSON objects from stdin, one per line, and emit
    ``word\\tcount`` pairs for every word that survives the filters
    (NLTK stopwords, the common-word list, an ad-hoc noise list, digits,
    and single-character tokens)."""
    #http://www.wordfrequency.info/top5000.asp
    #awk '{if($3!="n" || $4>50000) print $2 }' 5000.txt > filter.txt
    global common_words
    init_common_words()

    # Sets give O(1) membership tests in the per-word hot loop; the
    # originals were lists, making each filter check O(n).
    stop = set(stopwords.words('english'))
    skip = set(common_words)
    operators = {'http', 'co', 'rt', 'https', 'via', 'amp', 're', 'fuck', 'shit'}

    for line in sys.stdin:
        obj = json.loads(line)
        text, num = get_text(obj)
        for i, t in enumerate(text):
            for word in t.split():
                w = word.lower()  # lowercase once, not once per filter
                if (w not in stop
                        and w not in skip
                        and w not in operators
                        and not word.isdigit()
                        and len(word) > 1):
                    # Single parenthesized argument: identical output under
                    # both Python 2's print statement and Python 3's function.
                    print('%s\t%s' % (w, num[i]))

# Entry point: only run when executed as a script (e.g. as a streaming
# mapper), not when imported.
if __name__ == '__main__':
    main()
    
