# -*- coding:utf-8 -*-
"""
Created on 21.12.2010

@author: Leon
"""

import feedparser as fp
import re
# NOTE(review): bare attribute access below has no effect — the flag is not
# passed to any re call; looks like a leftover. Confirm before removing.
re.LOCALE

# Wildcard import provides BeautifulSoup, NavigableString and Comment
# (BeautifulSoup 3 API: findAll/replaceWith/extract).
from BeautifulSoup import *
import urllib
import nltk
import threading
from threading import Thread

# Transliteration table for German umlauts.  The single-byte keys are the
# UTF-8 continuation bytes of the umlauts as seen when iterating a Python 2
# byte string (the shared lead byte '\xc3' is swallowed); the literal
# character keys cover unicode input.
_UMLAUT_MAP = {
    '\xc3': '',            # UTF-8 lead byte of the umlauts: dropped
    '\xbc': 'ue', 'ü': 'ue',
    '\xa4': 'ae', 'ä': 'ae',
    '\xb6': 'oe', 'ö': 'oe',
    '\x9c': 'Ue', 'Ü': 'Ue',
    '\x84': 'Ae', 'Ä': 'Ae',
    '\x96': 'Oe', 'Ö': 'Oe',
    '\x9f': 'ss',
}

def transformUmlaute(h):
    """Transliterate German umlauts in *h* to ASCII (ue, ae, oe, ss, ...).

    Characters without a mapping pass through unchanged.  Fixes the original:
    the ``p.encode("utf-8")`` call was dead code (its result was discarded),
    and the character-by-character ``+=`` built the result quadratically —
    replaced with a single ``str.join``.
    """
    return ''.join(_UMLAUT_MAP.get(c, c) for c in h)

# Stopword list used to filter common words out of the extracted nouns.
# NOTE(review): the German list (umlauts transliterated) is disabled below;
# the English list is the one actually in use.
# stopwords = list(transformUmlaute(w) for w in nltk.corpus.stopwords.words('german'))
stopwords = nltk.corpus.stopwords.words('english')

# RSS feed URLs to crawl; CrawlThread workers pop entries from this list.
# Commented-out lines are disabled sources.
feedlist =[#'http://rss1.smashingmagazine.com/feed/',
           #'http://rss1.smashingmagazine.com/feed/?f=smashing-network'
           'http://feeds.reuters.com/reuters/topNews',
           'http://feeds.reuters.com/reuters/businessNews',
           'http://feeds.reuters.com/reuters/worldNews',
           'http://feeds2.feedburner.com/time/world',
           'http://feeds2.feedburner.com/time/business',
           'http://feeds2.feedburner.com/time/politics',
           'http://rss.cnn.com/rss/edition.rss',
           'http://rss.cnn.com/rss/edition_world.rss',
           'http://www.nytimes.com/services/xml/rss/nyt/World.xml',
           'http://www.nytimes.com/services/xml/rss/nyt/Economy.xml',
           #'http://api.leakfeed.com/v1/cables/latest.rss',
           #'http://www.whitehouse.gov/feed/press'
           ]

def stripTags(html, invalid_tags):
    """Flatten every tag named in *invalid_tags* into its text content.

    Nested non-text children are flattened recursively; all other markup is
    left untouched.  Returns the resulting BeautifulSoup object.
    """
    soup = BeautifulSoup(html)
    for element in soup.findAll(True):
        if element.name not in invalid_tags:
            continue
        pieces = []
        for child in element.contents:
            if type(child) != NavigableString:
                child = stripTags(unicode(child), invalid_tags)
            pieces.append(unicode(child))
        element.replaceWith(''.join(pieces))
    return soup

def stripHTML(h):
    """Replace every <...> tag in *h* with a single space.

    Text outside tags is kept verbatim.  Edge cases keep the original state
    machine's behavior: a stray '>' outside a tag also becomes a space, and
    text after an unterminated '<' is dropped.  Builds the result with a
    list + join instead of the original quadratic ``+=`` concatenation.
    """
    parts = []
    in_tag = False
    for ch in h:
        if ch == '<':
            in_tag = True
        elif ch == '>':
            in_tag = False
            parts.append(' ')
        elif not in_tag:
            parts.append(ch)
    return ''.join(parts)


# Only allow one thread to use the POS tagger at a time.
# threading.Lock() is the public API; threading._allocate_lock() is a
# private alias with identical behavior.
posLock = threading.Lock()

# Extract POS-tagged nouns and descriptive words from the text, considering
# only paragraphs that contain at least minWords words and are separated by
# runs of maxGap consecutive whitespace characters.
def getPosWordsFromLongParas(text, minWords=5, maxGap=5):
    nounTags = ("NN", "NNP", "NNS", "NNPS", "FW")
    descTags = ("RB", "RBR", "RBS", "JJ", "JJR", "JJS")
    nouns = []
    desc = []
    # 1. collapse every whitespace character to a plain space
    flattened = re.sub(r"\s", " ", text)
    # 2. cut the text wherever maxGap consecutive spaces occur
    for para in flattened.split(' ' * maxGap):
        # 3. skip empty fragments and those that cannot hold enough words
        if para == '' or len(para.split(' ')) < minWords:
            continue
        tokens = nltk.word_tokenize(para)
        # the POS tagger is not shared: only one thread may use it at a time
        with posLock:
            tagged = nltk.pos_tag(tokens)
        # 4. bucket the (word, tag) pairs into nouns and descriptive words
        for taggedWord in tagged:
            if taggedWord[1] in nounTags:
                nouns.append(taggedWord)
            elif taggedWord[1] in descTags:
                desc.append(taggedWord)
    return (nouns, desc)

def cleanHTML(html):
    """Remove <script> and <style> elements and HTML comments.

    Returns the cleaned BeautifulSoup object.
    """
    soup = BeautifulSoup(html)
    for tagName in ("script", "style"):
        for node in soup.findAll(tagName):
            node.extract()
    for commentNode in soup.findAll(text=lambda t: isinstance(t, Comment)):
        commentNode.extract()
    return soup

import time


def getWordsFromUrl(url):
    """Download *url*, strip markup, POS-tag the text and return the nouns.

    Each noun is normalized: umlauts transliterated, non-word characters
    removed, lower-cased; empty strings and stopwords are dropped.

    Returns [] on any failure.  Fixes two defects of the original: ``res``
    was only bound midway through the try-block, so an early exception made
    ``return res`` raise NameError; and the bare ``except:`` with a no-op
    string statement silently swallowed every error.
    """
    res = []
    t0 = time.time()
    try:
        # fetch the raw page
        f = urllib.urlopen(url)
        s = f.read()
        f.close()
        t1 = time.time()
        # flatten formatting tags, drop scripts/styles/comments,
        # then replace the remaining tags with whitespace
        s = stripTags(s, ['b', 'i', 'u', 'h', 'p'])
        s = cleanHTML(str(s))
        s = stripHTML(str(s))
        tStripHTML = time.time()
        nouns, desc = getPosWordsFromLongParas(s, minWords=50, maxGap=3)
        print("    Nouns: %s" % nouns)
        print("    Descriptions: %s" % desc)
        tPosTag = time.time()
        print("    tPosTag = %s" % (tPosTag - tStripHTML))
        for posWord in nouns:
            # strip non-word characters, lower-case, transliterate umlauts
            word = transformUmlaute(posWord[0])
            word = re.sub(r"\W", "", word).lower()
            # don't add empty nouns or stopwords
            if word != '' and word not in stopwords:
                res.append(word)
        t2 = time.time()
        print("    url: %s | download: %s | processing: %s | total: %s" % (url, (t1 - t0), (t2 - t1), (t2 - t0)))
    except Exception as e:
        # keep the best-effort semantics, but at least report what failed
        print("    error getting words from %s: %s" % (url, e))
    return res

def getWordDict(words):
    """Return a dict mapping each word in *words* to its occurrence count.

    Replaces the original bare ``except:`` counting idiom (which could mask
    unrelated errors) with ``dict.get``.
    """
    wDict = {}
    for w in words:
        wDict[w] = wDict.get(w, 0) + 1
    return wDict

# Print title + description
#output = ''
#for url in feedlist:
#    output += "URL: %s\n" % url
#    f = fp.parse(url)
#    
#    output += "Title of feed:  %s\n" % f['feed']['title']
#    
#    for item in f.entries:
#        title = stripHTML(item.title)
#        text = stripHTML(item.description)
#        link = item.link
#        
#        output += "    %s: %s\n        %s\n" % (title, link, text)
#
#print output


###################################
import operator
import matplotlib.pyplot as plt
from graphThing import *


# Shared queue of feed entries awaiting processing; guarded by entryLock.
entryList = []
entryLock = threading.Lock()  # public API, was private threading._allocate_lock()

# Currently running EntryThread workers; guarded by curETlock.
curETlock = threading.Lock()
curET = []

class EntryThread(Thread):
    """Worker thread: pops entries from the shared entryList, extracts their
    nouns via getWordsFromUrl(entry.link) and pushes the 10 most frequent
    words to nodeTarget.updateNodes().

    Fixes over the original: the redundant double stop-check in run() is
    gone; an empty entryList no longer busy-spins at full CPU (short sleep
    instead); py2-only ``iteritems`` replaced by ``items`` (same result,
    works on both Python 2 and 3).
    """

    def __init__(self, nodeTarget):
        super(EntryThread, self).__init__()
        self._stop = threading.Event()
        # object that receives the top-10 word list via updateNodes(top10)
        self.nodeTarget = nodeTarget

    def stop(self):
        """Signal run() to exit at the next loop iteration."""
        self._stop.set()

    def getNextEntry(self):
        """Pop one entry from the shared entryList (thread-safe); None if empty."""
        with entryLock:
            if len(entryList) > 0:
                return entryList.pop()
        return None

    def run(self):
        while not self._stop.is_set():
            entry = self.getNextEntry()
            if entry is None:
                # nothing queued right now — avoid a busy-wait spin
                time.sleep(0.1)
                continue
            words = getWordsFromUrl(entry.link)
            wDict = getWordDict(words)
            # sort words by frequency, most frequent first
            wList = sorted(wDict.items(), key=operator.itemgetter(1), reverse=True)
            top10 = wList[:10]
            try:
                if len(top10) > 0:
                    self.nodeTarget.updateNodes(top10)
            except Exception:
                print("  could not pass top10")

class CrawlThread(Thread):
    """Crawler thread: pops feed URLs from the shared feedlist, parses each
    feed and appends its entries to the shared entryList, spawning
    EntryThread workers (capped at 4 in total) to process them.
    """

    def __init__(self, nodeTarget, feedLock):
        super(CrawlThread, self).__init__()
        self._stop = threading.Event()
        self.nodeTarget = nodeTarget
        self.feedLock = feedLock   # lock shared by all crawlers for feedlist access
        self.name = ""

    def stop(self):
        """Signal run() to exit as soon as possible."""
        self._stop.set()

    def getNextFeed(self):
        """Pop the next feed URL from the shared feedlist; None when exhausted."""
        with self.feedLock:
            if len(feedlist) > 0:
                return feedlist.pop()
        return None

    def run(self):
        feedUrl = self.getNextFeed()
        while feedUrl is not None:
            if self._stop.is_set():
                break
            print("Starting to Crawl Feed: %s" % feedUrl)
            parsed = fp.parse(feedUrl)
            # queue every entry of this feed for the worker threads
            for entry in parsed.entries:
                if self._stop.is_set():
                    break
                with entryLock:
                    entryList.append(entry)
            # spin up another worker, but never more than 4 overall
            with curETlock:
                if len(curET) < 4:
                    print("starting entry thread number %s" % len(curET))
                    worker = EntryThread(self.nodeTarget)
                    curET.append(worker)
                    worker.start()
            feedUrl = self.getNextFeed()
            
        
class FeedAggregator(Thread):
    """Top-level coordinator: starts up to 3 CrawlThreads that share one lock
    over the global feedlist, then waits for them to finish.

    Fixes over the original: uses the public ``threading.Lock()`` instead of
    the private ``threading._allocate_lock()``, and the comment now matches
    the actual cap (3 crawler threads, not 10).
    """

    def __init__(self, nodeTarget):
        super(FeedAggregator, self).__init__()
        self._stop = threading.Event()
        # object passed through to the crawlers / entry workers
        self.nodeTarget = nodeTarget
        self.feedLock = threading.Lock()
        self.currentThreads = []

    def stop(self):
        """Propagate stop to every crawler, then mark self as stopped."""
        for current in self.currentThreads:
            current.stop()
        self._stop.set()

    def run(self):
        # one crawler per feed, capped at 3 threads
        for i in range(min(len(feedlist), 3)):
            print("starting feed thread number %s" % (i + 1))
            crawler = CrawlThread(self.nodeTarget, self.feedLock)
            self.currentThreads.append(crawler)
            crawler.start()
        # wait for all crawlers to drain the feedlist
        for current in self.currentThreads:
            current.join()
                
                
# Script entry point: build the graph UI, attach the feed aggregator as its
# node source, and enter the (blocking) main loop.
# NOTE(review): no `if __name__ == "__main__":` guard — importing this module
# starts the UI as a side effect.
# NOTE(review): fg.start() is never called here — presumably graphMain's loop
# starts its nodeGetter itself; verify in graphThing.
s = graphMain()
fg = FeedAggregator(s)
s.nodeGetter = fg

s.run()

