'''
content.py
Nicholas Swartzendruber

This file contains a class with two methods: one to find topics
based on a related keyword, and one to find topics
for a general crowd. 

Parts of getTargetContent are largely inspired by
http://www.slideshare.net/shanbady/nltk-natural-language-processing-in-python
'''
from nltk.corpus import wordnet as wn
from log import *
import simplejson
import urllib2
import re


class Content:
  def __init__(self):
    '''
    Constructor.

    Loads the banned-word list from banned_words_file and records the
    name of the subreddit list file for later use by getTargetContent.
    '''
    #The minimum WordNet (Wu-Palmer) similarity needed between a
    #keyword and a subreddit topic for the two to be considered a match.
    self.min_sim = 0.50

    self.subreddit_file = 'subreddits.txt'
    banned_words_file = 'banned_words.txt'

    #Load the banned words, one per line.  Open read-only: the old
    #'r+' mode needed write permission on the file for no reason.
    self.banned_words = []
    with open(banned_words_file, 'r') as f:
      for line in f:
        word = line.rstrip('\n')
        #Skip blank lines; an empty "banned word" can never match a
        #title token anyway.
        if word:
          self.banned_words.append(word)

  def _readSubreddit(self, subreddit):
    """
    Fetch the front-page stories of one subreddit via reddit's JSON API.

    subreddit -- path fragment such as '/r/gaming', or '' for the
                 reddit front page.

    Returns a list of raw reddit story dicts (the entries of
    data['data']['children']); stories whose title contains a banned
    word are filtered out.  Returns an empty list when the request
    fails with an HTTP error (e.g. the subreddit does not exist).
    """
    reddit_url = 'www.reddit.com' + subreddit

    ret = []

    req = urllib2.Request("http://" + reddit_url + "/.json", None, {'user-agent':'Reddit-Reader'})
    opener = urllib2.build_opener()
    try:
      f = opener.open(req)
      data = simplejson.load(f)

      #Keep a story only when none of its title tokens is banned.
      #Tokenize each title once (the old code re-tokenized it once per
      #banned word) and test membership by set intersection.
      banned = set(self.banned_words)
      for story in data['data']['children']:
        title = story['data']['title'].lower()
        words = set(re.findall(r"[\w']+|[.,!?;]", title))
        if not (words & banned):
          ret.append(story)
    except urllib2.HTTPError:
      Log.println("Subreddit not found: " + subreddit)

    return ret

  def getTargetContent(self, keyword):
    '''
    When passed a keyword, getTargetContent will
    search www.reddit.com and relevant subreddits for content.
    Content from matching subreddits will be passed back in a list
    of dictionaries. Each dictionary can be accessed by "<return-data/index>['data'][category]"
    Where category can be anything found in this example:

    {u'domain': u'i.imgur.com',
     u'media_embed': {},
     u'levenshtein': None,
     u'subreddit': u'gaming',
     u'selftext_html': None,
     u'selftext': u'',
     u'likes': None,
     u'saved': False,
     u'id': u'hib5a',
     u'clicked': False,
     u'author': u'xarius214',
     u'media': None,
     u'score': 347,
     u'over_18': False,
     u'hidden': False,
     u'thumbnail': u'http://thumbs.reddit.com/t3_hib5a.png',
     u'subreddit_id': u't5_2qh03',
     u'downs': 335,
     u'is_self': False,
     u'permalink': u'/r/gaming/comments/hib5a/seeing_this_in_the_work_parking_lot_made_my_day/',
     u'name': u't3_hib5a',
     u'created': 1306207192.0,
     u'url': u'http://i.imgur.com/ozUju.jpg',
     u'title': u'Seeing this in the work parking lot made my day.',
     u'created_utc': 1306181992.0,
     u'num_comments': 43,
     u'ups': 682}
    '''
    '''
    ===============================
    Search for relevant subreddits
    ===============================
    '''
    #Initial word; WordNet joins multi-word lemmas with underscores.
    word1 = str(keyword).replace(' ', '_')

    Log.println("Looking for content related to: " + word1)

    #Load all of the valid subreddits.  Each line is either
    #"<url-fragment>" or "<url-fragment> <meaning>"; with no explicit
    #meaning the url fragment doubles as the meaning.  Opened
    #read-only ('r+' needed write permission for no reason), and blank
    #lines no longer raise IndexError.
    subreddits = []
    with open(self.subreddit_file, 'r') as f:
      for line in f:
        tok = line.split()
        if not tok:
          continue
        meaning = tok[1] if len(tok) > 1 else tok[0]
        #First index is the actual url, second is the meaning
        #that we want to compare our keyword with.
        subreddits.append((tok[0], meaning))

    #Synsets for the keyword.  Use them directly: the old
    #wn.synset(str(synset.name)) round-trip was redundant and breaks
    #on NLTK 3, where Synset.name is a method rather than a string.
    group1 = wn.synsets(word1)

    similars = []

    #For every subreddit, keep only its single best (highest wup)
    #match against the keyword, if any exceeds the threshold.
    for url, meaning in subreddits:
      group2 = wn.synsets(meaning)

      best = None
      for sseta in group1:
        for ssetb in group2:
          wup = sseta.wup_similarity(ssetb)
          if wup is not None and wup > self.min_sim:
            #Track the running maximum instead of sorting a list of
            #all candidates just to take its head.
            if best is None or wup > best['wup']:
              best = {'wup': wup, 'subreddit': url}

      if best is not None:
        similars.append(best)

    #Best-matching subreddits first.
    similars.sort(key=lambda item: item['wup'], reverse=True)

    '''
    =====================================
    Subreddits found, find misc content.
    =====================================
    '''
    #The return dictionaries.
    ret = []

    for subreddit in similars:
      ret += self._readSubreddit('/r/' + subreddit['subreddit'])

    Log.println(str(len(ret)) + " articles found related to " + word1)

    return ret

  def getGeneralContent(self):
    '''
    Grabs content that MAY be cool to a general group.
    It grabs articles from the reddit homepage, and
    askreddit subreddit.

    return value is in the same format as grabbing targeted
    articles.
    '''
    ret = []
    ret += self._readSubreddit('')
    ret += self._readSubreddit('/r/askreddit')
    Log.println(str(len(ret)) + " general articles found.")
    return ret

'''
Test the content class.
'''
if __name__ == '__main__': 
  content = Content()
  keyword = 'video games';
  print 'Targeted'
  print "{0:15}".format('Subreddit'), "{0:33}".format('Title') , 'URL'
  print'------------------------------------------------------------------------------------'
  for story in content.getTargetContent(keyword):
    title = story['data']['title'];
    if(len(title) > 30):
      title = title[:27]
      title += '...'
    print "{0:15}".format(story['data']['subreddit']), "{0:33}".format(title) , story['data']['url']
    
  print '\nGeneral'
  
  print "{0:15}".format('Subreddit'), "{0:33}".format('Title') , 'URL'
  print'------------------------------------------------------------------------------------'
  for story in content.getGeneralContent():
    title = story['data']['title'];
    if(len(title) > 30):
      title = title[:27]
      title += '...'
    print "{0:15}".format(story['data']['subreddit']), "{0:33}".format(title) , story['data']['url']
      