from symbol import for_stmt
import sgmllib
import string
import urllib2
from BeautifulSoup import BeautifulSoup
import pylab
import numpy

class StrippingParser(sgmllib.SGMLParser):
    """SGML parser that flattens markup: character data and entity/char
    references are accumulated in self.text, while every start and end tag
    is replaced by the literal marker "<tag>" so callers can later count
    tag density per line."""

    from htmlentitydefs import entitydefs # replace entitydefs from sgmllib

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
        self.text = ""  # accumulated stripped output

    def handle_data(self, data):
        # Plain character data is appended verbatim.
        if data:
            self.text = self.text + data

    def handle_charref(self, name):
        # Keep numeric character references in their source form (&#NNN;).
        self.text = "%s&#%s;" % (self.text, name)

    def handle_entityref(self, name):
        # Known entities get their terminating ';' restored; unknown ones
        # are emitted bare.
        if self.entitydefs.has_key(name):
            x = ';'
        else:
            # this breaks unstandard entities that end with ';'
            x = ''
        # BUG FIX: this was assigned to self.result, an attribute that is
        # never read anywhere, so every entity reference was silently
        # dropped from the output. Accumulate into self.text like the
        # other handlers.
        self.text = "%s&%s%s" % (self.text, name, x)

    def unknown_starttag(self, tag, attrs):
        # Any tag (open or close) becomes the fixed marker used for the
        # text-to-tag ratio computation downstream.
        self.text = self.text + "<tag>"

    def unknown_endtag(self, tag):
        self.text = self.text + "<tag>"

class WebPreprocessor():

    def __init__(self, url):
        #first fetch the webpage
        data = None
        try:
            headers = {"User-Agent": "Creole/0.1a"} # set correct user agent so we can fool website
            request = urllib2.Request(url, headers=headers)
            data = urllib2.urlopen(request).read()
        except urllib2.HTTPError, e:
            print "HTTP error: %d" % e.code
        except urllib2.URLError, e:
            print "Network error: %s" % e.reason.args[1]

        #strip scripts, etc.
        soup = BeautifulSoup(data)
        for tag in soup.findAll():
            if tag.name.lower() in [ 'script', 'style', 'meta', 'head']: #[ 'script', 'style', 'meta', 'link', 'head']:
                # blacklisted tags are removed in their entirety
                tag.extract()

        #get html and remove emptylines
        tagged_text = soup.renderContents()

        #strip tags
        parser = StrippingParser()
        parser.feed(tagged_text)
        parser.close()
        stripped_text = parser.text

        #determine the tag ratio per line length
        self.textLines = []
        self.textRatios = []

        for index, item in enumerate(stripped_text.splitlines()):
        #for each line:
            tagCount = item.count("<tag>")
            item = item.replace("<tag>","").strip()
            if (len(item) != 0):
                divider = max(tagCount,1) #prevent division through zero
                ratio = float(len(item)) / float(divider)
                self.textRatios.append(ratio)
                self.textLines.append(item)
        self.smoothedRatios = self.smooth(self.textRatios,5)
        self.deviation = pylab.std(self.smoothedRatios)

    def getFilteredLines(self):
        result = []
        for index, item in enumerate(self.textLines):
            if(self.smoothedRatios[index] > self.deviation):
                result.append(item)
        return result

    def getWhichLinesAreFiltered(self):
        result = []
        for index, item in enumerate(self.textLines):
           result.append(self.smoothedRatios[index] > self.deviation)
        return result

    def getTextToTagRatios(self):
        return self.textRatios

    def getSmoothedTextToTagRatios(self):
        return self.smoothedRatios

    def getUnfilteredLines(self):
        return self.textLines

    def getTagFilterGraphData(self):
        list = zip(self.textRatios, self.smoothedRatios)
        x_axis = range(0, len(list))
        value = pylab.std([s for r, s in list])
        baselines = [value for x in x_axis]
        # print smooth([1,5,6],1)
        pylab.plot(x_axis,[r for r, s in list],'r--', x_axis, [s for r, s in list], 'b', x_axis, baselines, 'y')
        
        
        pylab.show()

    def getDeviation(self):
        return self.deviation

    def smooth(self, pointList, r):
        resultList = []
        for i in range(0, len(pointList)):
            start = max(0, i - r)
            end = min(len(pointList) - 1, i + r)
            sampleCount = end - start + 1
            sum = 0.0
    #        print (start,end,sampleCount,i)
            for j in range(start, end + 1):
                sum += float(pointList[j])
            result = sum / float(sampleCount)
            resultList.append(result)
        return resultList

#wp = WebPreprocessor("http://en.wikipedia.org/wiki/Tina_Turner/")
#wp.getTagFilterGraphData()
#print wp.getFilteredLines()