#! /usr/bin/python
#  -*- coding:utf-8 -*-


from __future__ import division

from functions import *

from BeautifulSoup import BeautifulSoup
import re
import string
import urllib2


class htmler:
    """Heuristic main-content extractor for an HTML page (Python 2).

    Construct with url=... (the page is downloaded) or html=... (raw
    markup).  Comments/doctype, CDATA, <script> and <style> blocks are
    stripped, then candidate container tags are scored by a weighted
    text length (see _len) and the highest-scoring block is returned
    by getContents().
    """


    def __init__(self, ** arg):
        # url takes priority over html; both default to [] (a falsy
        # placeholder), so an empty constructor parses empty markup.
        self.url = arg.pop('url', [])

        if self.url:
            html = self.getHtml(self.url)

        else:
            html = arg.pop('html', [])

        self.BeautifulSoup(html)




    def setUrl(self, url):
        # Re-point the extractor at a new URL and re-parse its HTML.
        self.url = url

        self.BeautifulSoup(self.getHtml(url))

    def getHtml(self, url):
        # Download the page body; returns '' on any failure (best-effort,
        # deliberately swallows all errors so callers never see a raise).
        try:

            urlpath = hostParser(url)  # NOTE(review): result unused here

            uh = urllib2.urlopen(url)
            return  uh.read()
        except:
            return ''



    def BeautifulSoup(self, html):
        '''
        Decode the raw HTML, strip noise (<!...> declarations/comments,
        CDATA, scripts, styles) and build self.soup from the cleaned
        markup.  Shares a name with, but does not shadow, the imported
        BeautifulSoup class used on the last line.
        '''
        html = safeDecode(html)
        re_hc = re.compile('<!(.+?)>', re.S)
        html = re_hc.sub('', html)
        re_cdata = re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.S) # CDATA sections
        re_script = re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.S)# <script> blocks
        re_style = re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.S)# <style> blocks
        html = re_cdata.sub('', html)
        html = re_script.sub('', html)
        html = re_style.sub('', html)

        self.soup = BeautifulSoup(html)


    def getTags(self, tag):
        # All elements in the parsed document with the given tag name.
        return self.soup.findAll(tag)


    def clearns(self, s, tag):
        '''
        Clean a candidate block: drop nested <tag> children whose
        weighted length is under 15% of the whole block (menus, ads),
        and reject the block entirely when a child dominates it.
        Returns [weighted_length, cleaned_html]; [0, ''] means rejected.
        '''
        len2 = self._len(s)
        #print len2
        lens = len2
        if len2 < 1:

            return [0, '']

        # Fewer than 3 tag occurrences: nothing nested to clean, keep as-is.
        if self.tagCount(tag, s) < 3:

            return [lens, s]

        soup = BeautifulSoup(s)
        arr = soup.findAll(tag)
        del arr[0]  # arr[0] is the block itself; only inspect its children

        for a in arr:
            #print a
            len1 = self._len(a.__str__())

            #print "len1 %s len2 %s" % (len1, len2)
            tmp = len1 / len2  # true division (from __future__ import division)
            if tmp < 0.15:

                # Child is insignificant relative to the block: strip it.
                s = string.replace(s, a.__str__(), '')


            elif len1 == len2:

                return [0, '']
            else:
                # NOTE(review): this branch is identical to the elif above —
                # any child >= 15% rejects the whole block. Possibly intended
                # to differ; confirm before changing.
                return [0, '']

        return [self._len(s), s]

    def safeGBK(self, str):
        # Pass-through stub (presumably a GBK sanitizer placeholder).
        # NOTE(review): parameter name shadows the builtin str.

        return str




    def getTitle(self):
        # Grow a prefix of the <title> text while it still occurs in the
        # body; returns the first prefix NOT found there — presumably to
        # trim a " - site name" suffix. TODO confirm the off-by-one is
        # intended (the returned prefix includes one unmatched character).
        i = 1

        try:
            body = str(self.soup.html.body)
            title = self.safeGBK(self.soup.html.head.title.string)
            while str(title[:i]) in body:
                i = i + 1
            return title[:i]

        except:
            return None


    def getContents(self, tags='div'):
        '''
        Score every <tags> element with clearns() and return the
        cleaned HTML of the highest-scoring one ('' if none scores > 0).
        NOTE(review): locals max/str shadow builtins; i is unused.
        '''
        block = []

        b = self.getTags(tags)
        max = 0
        str = ''

        i = 0




        for b1 in b:


            tmp = self.clearns(b1.__str__(), tags)

            if tmp[0] > max:
                str = tmp[1]
                max = tmp[0]
            i = i + 1


        return  self.safeGBK(str)


    def _len(self, s):
        '''
        Weighted "content length" heuristic for an HTML fragment:
        drops list markup with little paragraph content, ignores link
        text, then scores +30 per <img ... alt>, +5 per <p/br/font/strong>,
        plus the length of the remaining plain text. Returns 0 when the
        fragment has too few <p>/<br> tags to look like article text.
        '''
        s = str(s)
        lens = 0
        re_script = re.compile('<\s*script[^>]*>.+<\s*/\s*script\s*>', re.I)# <script> blocks
        s = re_script.sub('', s)
        reg = re.compile('<\/?(li|ul|dl|dd|dt|ol)[^<>]>', re.I)
        tmp = reg.split(s)

        len_1 = len(tmp)
        if len_1:
            i=0
            for j in tmp:
                # Drop list segments with almost no paragraph/line-break
                # markup — they are likely navigation, not content.
                if j.count('<p') < 3 and j.count('<br')<3 and j.count('<P') < 3 and j.count('<BR')<3 and j.count('<div')<3 and j.count('</div') < 3 :
                   s = s.replace(j, '', 1)
                i=i+1






        # Remove anchor text so link farms do not inflate the score.
        a = re.compile('<a[^<>]+>[^<>]+<\/a>', re.I)

        s = a.sub('', s)

        # Images with alt text count as 30 chars each.
        reg = re.compile('<img\s*[^<>]*alt[^<>]*>', re.I)
        img = reg.findall(s)
        lens = len(img) * 30
        # Paragraph-ish tags count as 5 chars each.
        reg = re.compile('<(P|br|font|strong)', re.I)
        img = reg.findall(s)
        lens = lens + len(img) * 5

        # Not enough paragraph/br structure: not article content.
        if s.count('<P') < 1 and s.count('<p') < 1 and s.count('<br') < 8 and s.count('<BR') < 8:
            return 0


        # Strip all remaining tags and newlines; add raw text length.
        tag = re.compile('<[^<>]*>', re.I)
        s = tag.sub('', s)
        tag = re.compile('[\n\r]+', re.I)
        s = tag.sub('', s)
        lens = lens + len(s)

        return  lens






    def tagCount(self, tag, s):
        # Count occurrences of "<t" and "t>" in s for each character of
        # tag (tag is iterated char by char when passed as a string).
        s = s.lower()
        c = 0
        for t in tag:
            c = c + s.count("<%s" % t)
            c = c + s.count("%s>" % t)
        return c



    def getPage(self, num):
        '''
        Collect pagination links: <a href> values matching the
        "NNN_N.htm" pattern whose URL is >95% similar to self.url.
        NOTE(review): the num parameter is unused.
        '''
        page_re = re.compile('\d+_\d+\.htm', re.I)# pagination-link pattern

        a = self.getTags('a')
        page = []
        host = hostParser(self.url)
        for u in a:
            try:
                links = u['href']
                links = replairUrl(host, links)


                if links not in page and  page_re.search(links):
                    if strDiff(self.url, links)[2] > 0.95:
                        page.append(links)

            except:
                # Best-effort: skip anchors without href or with bad URLs.
                pass
        return page







if __name__ == '__main__':
    c = []
    htmlers = htmler(url='http://www.meishichina.com/Eat/RMenu/200906/62437.html')

    c.append(htmlers.getContents())
     
    p = htmlers.getPage(12)
    t = htmlers.getTitle()
    for i in p:
        htmlers.setUrl(str(i))
        if htmlers.getTitle() == t:
            clearn = True
            c.append(htmlers.getContents())
    s = ''.join(c)
    if clearn:
        re_cdata = re.compile('<a [^\:<>]+>(上一页|[\[\]0-9]+|下一页)</a>', re.I) #???CDATA
        s = re_cdata.sub('', s)
        
    
    print s.encode('gbk', 'ignore')
            


















