# -*- coding:utf-8 -*-
"""
Created on 21.12.2010

@author: Leon
"""

import feedparser as fp
import re
import urllib
import nltk
import threading
import pickle
from threading import Thread
import numpy
from BeautifulSoup import *

###### Globals #########

# dictionary to save URL -> Paragraph Lists
sourceDict = {}
# Lock guarding concurrent access to sourceDict
# (threading.Lock() is the public spelling of _allocate_lock())
sourceDictLock = threading.Lock()

# Restore a previously saved sourceDict, if a dump file exists.
# Pickle dumps are binary data, so open in 'rb' mode (the original
# used text mode 'r', which corrupts pickles on Windows).
try:
    with open('sourceDict.dump', 'rb') as dumpFile:
        sourceDict = pickle.load(dumpFile)
except Exception:
    # First run (no dump yet) or unreadable dump -- start empty.
    pass

# Dictionary saving a frequency distribution of PARAGRAPHS.
# This is necessary to identify paragraphs occurring often, like disclaimers,
# but that don't carry any information content
paraDict = {}
# Lock guarding concurrent access to paraDict
paraDictLock = threading.Lock()

try:
    with open('paraDict.dump', 'rb') as dumpFile:
        paraDict = pickle.load(dumpFile)
except Exception:
    # Best-effort restore; keep the empty dict on any failure.
    pass



def transformUmlaute(h):
    """Transliterate German umlauts in *h* to their ASCII digraphs.

    Handles two encodings: raw UTF-8 byte pairs (the '\\xc3' lead byte
    is dropped, the following continuation byte selects the
    replacement) and single latin-1-style characters.  All other
    characters are copied unchanged.  Returns the transliterated
    string.

    Fix: the original started with ``p.encode("utf-8")`` -- encoding
    the empty string and discarding the result is a no-op, removed.
    """
    p = ''
    for c in h:
        c = str(c)
        if c=='\xc3':
            # UTF-8 lead byte of a two-byte umlaut sequence: skip it,
            # the next byte decides which replacement is emitted.
            pass
        elif c=='\xbc' or c=='ü': 
            p+='ue'
        elif c=='\xa4' or c=='ä': 
            p+='ae'
        elif c=='\xb6' or c=='ö': 
            p+='oe'
        elif c=='\x9c' or c=='Ü': 
            p+='Ue'
        elif c=='\x84' or c=='Ä': 
            p+='Ae'
        elif c=='\x96' or c=='Ö': 
            p+='Oe'
        elif c=='\x9f': 
            p+='ss'
        else:
            p+=c
    return p

# German stopwords with their umlauts transliterated to ASCII.
stopwordsGer = [transformUmlaute(word) for word in nltk.corpus.stopwords.words('german')]

# English stopwords, used as-is.
stopwordsEn = nltk.corpus.stopwords.words('english')

# RSS feeds that are polled for new articles.
# BUG FIX: a missing comma after the second entry made Python
# concatenate two adjacent string literals into one broken URL,
# silently dropping the Reuters topNews feed.
feedlist =['http://rss1.smashingmagazine.com/feed/',
           'http://rss1.smashingmagazine.com/feed/?f=smashing-network',
           'http://feeds.reuters.com/reuters/topNews',
           'http://feeds.reuters.com/reuters/businessNews',
           'http://feeds.reuters.com/reuters/worldNews',
           'http://feeds2.feedburner.com/time/world',
           'http://feeds2.feedburner.com/time/business',
           'http://feeds2.feedburner.com/time/politics',
           'http://rss.cnn.com/rss/edition.rss',
           'http://rss.cnn.com/rss/edition_world.rss',
           'http://www.nytimes.com/services/xml/rss/nyt/World.xml',
           'http://www.nytimes.com/services/xml/rss/nyt/Economy.xml',
           #'http://api.leakfeed.com/v1/cables/latest.rss',
           #'http://www.whitehouse.gov/feed/press'
           ]


def stripTags(html, invalid_tags, keepContent=True):
    """Remove all HTML tags named in *invalid_tags* from *html*.

    If keepContent is True, the text nodes inside a removed tag are
    kept (nested invalid tags are stripped recursively); otherwise the
    tag is dropped together with everything inside it.
    """
    soup = BeautifulSoup(html)

    for node in soup.findAll(True):
        if node.name not in invalid_tags:
            continue
        if keepContent:
            # Replace the tag with the concatenated text of its
            # children, recursively stripping nested invalid tags.
            pieces = []
            for child in node.contents:
                if type(child) != NavigableString:
                    child = stripTags(unicode(child), invalid_tags)
                pieces.append(unicode(child))
            node.replaceWith("".join(pieces))
        else:
            # Dismiss the tag together with its whole subtree.
            node.extract()

    return str(soup)

def stripComments(html):
    "# Strip all HTML comments out of a document"
    soup = BeautifulSoup(html)
    
    comments = soup.findAll(text=lambda text:isinstance(text, Comment))
    [comment.extract() for comment in comments]
            
    return str(soup)

def stripHTML(h):
    "# Removes all HTML-Tags and replaces them with space ' '."
    p=''
    s=0
    for c in h:
        if c=='<':
            s=1
        elif c=='>':
            s=0
            p+=' '
        elif s==0:
            p+=c
    return p

def getWordDict(words):
    "Transforms a word list to a dictionary with frequency distribution of the words"
    wDict = {}
    for w in words:
        try:
            wDict[w] += 1
        except:
            wDict[w] = 1 
    return wDict

def getParagraphs(text, wordGap=2, minWords=10, wordLength=2):
    """ Input: A text with large whitespace gaps.
        Result: Paragraphs out of the given text, which are 1. not too much splitted
                by whitespace and 2. have enough words of a minimum length in it

        The result is a list of paragraphs (strings)."""
        
    # replace whitespace with uniform space ' '.
    text = re.sub("\s", " ", text)
    
    # split at whitespace chunks 
    wsChunk = (wordGap+1)*' '
    text = text.split(wsChunk)
    
    result = []
    
    for para in text:
        # split the paragraph at its spaces
        para = para.split(' ')
        
        paraText = ''
        wordCount = 0
        
        for word in para:
            # add all non-empty words, but only increase counter if they're long enough
            if len(word) > 0:
                paraText += word+' '
            if len(word) >= wordLength:                
                wordCount += 1
        
        if wordCount >= minWords:
            result.append(paraText)
    
    return result

def getUrlsFromFeedlist(feedList, numThreads = 4):
    "Returns all titles + URLs pointing to entries, out of the feeds in the feedlist"
    urlList = []
    urlListLock = threading._allocate_lock()
    feedListLock = threading._allocate_lock()
    
    class FeedThread(Thread):
        def __init__(self):
            super(FeedThread, self).__init__()
        
        def getNextFeed(self):
            nextFeed = None
            with feedListLock:
                if len(feedList)>0:
                    nextFeed = feedList.pop()
            return nextFeed
        
        def run(self):
            nextFeed = self.getNextFeed()
            
            while nextFeed != None:
                print "Parsing feed: %s" % nextFeed
                feed = fp.parse(nextFeed)
                
                for entry in feed.entries:
                    with urlListLock:
                        urlList.append((entry.title, entry.link))
                        
                nextFeed = self.getNextFeed()   
    
    currentThreads = []
    # Start some Threads to get the URLs out of the RSS feeds
    for i in range (0, numThreads):
        newThread = FeedThread()
        currentThreads.append(newThread)
        newThread.start()
    # join the Threads
    for t in currentThreads:
        t.join()
    
    return urlList
                
def updateDictionaries(numThreads=5):
    "Updates the sourceDict and paraDict with content of the new entries of the feedlist"
     
    urlList = getUrlsFromFeedlist(feedlist)
    
    # Get new URLs:
    newUrls = set(urlList).difference(set(sourceDict.keys()))
    newUrlLock = threading._allocate_lock()
    
    class ParseThread(Thread):
        def __init__(self):
            super(ParseThread, self).__init__()
        
        def getNextUrl(self):
            newUrl = None
            with newUrlLock:
                if len(newUrls)>0:
                    newUrl = newUrls.pop()
            return newUrl
        
        def run(self):
            newUrl = self.getNextUrl()
            print "    new url: %s" % str(newUrl)
            
            while newUrl != None:
                print "    Get content and parse it from URL: %s" % str(newUrl)
                content = ''
                try:
                    f = urllib.urlopen(newUrl[1])
                    # Read from the object, storing the page's contents in 'html'.
                    content = f.read()
                    f.close()  
                except:
                    print "    opening url %s failed." % newUrl[1]
                
                # strip comments, <style> and <script>
                content = stripComments(content)
                content = stripTags(content, ['script', 'style'], keepContent=False)
                
                # remove formatting tags, but keep the content of them
                content = stripTags(content, ['b', 'i', 'u', 'h', 'h1', 'h2', 'h3', 'p'], keepContent=True)
                
                # replace remaining HTML tags with space ' '
                content = stripHTML(content) 
                
                # get relevant paragraphs out of the content
                text = getParagraphs(content)   
                
                # add the result to the sourceDict
                with sourceDictLock:
                    sourceDict[newUrl] = text  
                
                # update paraDict:
                for para in text:
                    with paraDictLock:
                        try:
                            paraDict[para] += 1
                        except:
                            paraDict[para] = 1
                            
                newUrl = self.getNextUrl()  

    # start some threads to parse HTML
    currentThreads = []
    for i in range(0,numThreads):
        t = ParseThread()
        currentThreads.append(t)
        t.start()
    
    # join the threads
    for t in currentThreads:
        t.join()
        
def getUniqueParas(maxOccurance = 1):
    """Returns a dictionary 'URL' -> 'list of paragraphs', which only
    containts paragraphs that are unique (no disclaimers, etc.)"""
    
    uniqueParas = {}
    
    for url in sourceDict.keys():
        uniqueParas[url] = []
        for para in sourceDict[url]:
            if paraDict[para] <= 1:
                uniqueParas[url].append(para)
    
    return uniqueParas


updateDictionaries()

print "## Saving Source Dict ##"
try:
    file = open('sourceDict.dump', 'w')
    pickle.dump(sourceDict, file)
    file.close()
except:
    print "saving went wrong"
    
print "## Saving Para Dict ##"
try:
    file = open('paraDict.dump', 'w')
    pickle.dump(paraDict, file)
    file.close()
except:
    print "saving went wrong"

uniqueParas = getUniqueParas()

#print "Unique Paras:"
#for url in uniqueParas.keys():
#    print "    URL: %s" %url
#    for para in uniqueParas[url]:
#        print "        %s"%para

# Abweichung: Wir speichern in allwords Tupel der Form (Anzahl, [TitelListe]),
# wobei die TitelListe aus den Titeln besteht, in deren Artikeln das Wort
# vorkommt.
allwords = {}
articlewords = []
articletitles = []

def getarticlewords():
    for key in uniqueParas.keys():
        url = key[1]
        title = key[0]
        paras = uniqueParas[key]
        wordDict = {}
        for para in paras:        
            words = para.split(' ')
            for word in words:
                # remove all non-alphabetic
                word = re.sub("\W+", "", word)
                word = word.lower()
                # Anmerkung: Mit Stopwörtern werden die Matrizen zu groß
                if len(word) > 0 and word not in stopwordsEn:
                    try:
                        count = allwords[word][0]
                        titleList = allwords[word][1]
                        titleList.append(title)
                        allwords[word] = (count+1, titleList)
                    except:
                        allwords[word]=(1,[title])
                    try:
                        wordDict[word]+=1                    
                    except:
                        wordDict[word]=1   
        articlewords.append(wordDict)
        articletitles.append(title)
            
getarticlewords()

### From here on, following the assignment specification ###

def makematrix(allw, articlew):
    wordvec = []
    wordInArt = []
    numTitles = len(articlew)
    for word in allw.keys():
        (count, titleList) = allw[word]
        ratio = float(len(titleList)) / float(numTitles)
        # Wir skalieren count an der Anzahl der gelesenen Artikel
        # und haben die ratio angepasst
        # da wir sonst zu große Matrizen erhalten
        if count > len(articletitles)/6 and ratio < 0.6:
            wordvec.append(word)
    
    
    for wordDict in articlew:
        line = []
        zeroLine = True
        for word in wordvec:
            wordCount = 0
            try:
                wordCount = wordDict[word]
                zeroLine = False
            except:
                wordCount = 0
            line.append(wordCount)
        if not zeroLine:
            wordInArt.append(line)
        else:
            index = articlewords.index(wordDict)
            articlewords.remove(wordDict)
            articletitles.remove(articletitles[index])
    
    wordInArt = numpy.array(wordInArt)
    return wordvec, wordInArt

(wordvec, wordInArt) =  makematrix(allwords, articlewords)

head = ''
for word in wordvec:
    head += str(word) + ','
head += "\n"
body = ''
for line in wordInArt:
    for el in line:
        body += str(el)+','
    body += "\n"
    
try:
    file = open('matrix.txt', 'w')
    file.write(head+body)
    file.close()
except:
    print "writing to matrix.txt failed"

def cost(A,B):
    sum=0

    (n,m) = A.shape
    
    aA = numpy.array(A)
    aB = numpy.array(B)
    for i in range (0,n):
        for j in range (0,m):
            sum += pow(aA[i][j]-aB[i][j], 2)
            
    return sum

def nnmf(A, m, it):
    print "starting nnmf"
    (r,c) = A.shape
    
    H = []
    for i in range(0,m):
        row = []
        for j in range (0,c):
            row.append(numpy.random.random())
        H.append(row)    
    H = numpy.matrix(H)
    
    print "    built initial H"
    W = []
    for i in range(0,r):
        row = []
        for j in range (0,m):
            row.append(10.0*numpy.random.random())
        W.append(row)    
    W = numpy.matrix(W)
    print "    built initial W"
    
    # scale threshold by number of articles and wordvec
    kS = len(articletitles)*len(wordvec)/2
    print "    threshold = %s" % kS
    k = 2*kS
    i = 0
    
    print "    starting loop ..."
    while (k > kS) and (i < it):
        B = W*H
        k = cost(A, B)
                
        wT = W.transpose()
        hT = H.transpose()
        
        aH = numpy.array(H)        
        hZ = wT*A
        aHZ = numpy.array(hZ)        
        hN = wT*W*H
        aHN = numpy.array(hN)
        
        H = numpy.matrix(aH*(aHZ/aHN))   
           
        aW = numpy.array(W)
        wZ = A*hT
        aWZ = numpy.array(wZ)
        wN = W*H*hT
        aWN = numpy.array(wN)
        
        W = numpy.matrix(aW*aWZ/aWN)
                
        i += 1    
    print "Finished after %s iterations with a cost of k = %s." % (i, round(k))   
    return H, W

(H, W) = nnmf(wordInArt, 6, 30)

def showfeatures(w,h,titles,wordvec):
    (mH,nH) = h.shape
    aH = numpy.array(h)
    
    # Funktion zum Anzeigen von Clustern
    def getClusterWords(cIndex, indent=0):
        merkmal = aH[cIndex]
        wmList = []
        for j in range(0,nH):
            wmList.append((wordvec[j], merkmal[j]))
        wmList = sorted(wmList, key=lambda t:t[1], reverse=True)
        
        rS = ""
        rS += "\n"+ indent*"\t"+"cluster %s:\n" % (cIndex)
        for z in range(0, min(6, len(wmList))):
            rS += indent*"\t"+"    " + str(wmList[z]) + "\n"
        
        return rS
    
    
    # Cluster anzeigen
    for i in range(0,mH):
        print getClusterWords(i)
    
    (mW,nW) = w.shape
    print "mW: %s |  nW: %s" % w.shape
    aW = numpy.array(w)
    
    # Zufallsfunktion
    def rnd(x):
        return numpy.random.randint(0,x)
    
    # Der Übersicht halber: Die wichtigsten Cluster von 10 zufälligen Artikeln
    randList = [rnd(mW), rnd(mW), rnd(mW), rnd(mW), rnd(mW)
              , rnd(mW), rnd(mW), rnd(mW), rnd(mW), rnd(mW)]
    for i in randList:
        clusterWeights = aW[i]
        title = titles[i]
        aList = []
        for j in range(0,nW):
            aList.append((j, clusterWeights[j]))
        aList = sorted(aList, key=lambda t:t[1], reverse=True)
        
        print "\n\"%s\":" % title
        for z in range(0, min(3, len(aList))):
            clusterIndex = aList[z][0]
            print "    %s. cluster (%s): %s" % (z, aList[z][1], str(getClusterWords(clusterIndex, indent=2)))

showfeatures(W,H,articletitles,wordvec)