import re
from bs4 import BeautifulSoup



class HtmlParser(object):
    """Extract follow-up crawl URLs and book metadata from sina weibobook pages."""

    # Compiled once at class level; literal dots are escaped so '.' no longer
    # matches arbitrary characters in the host name (the original pattern did).
    _BOOK_URL_RE = re.compile(r"http://vip\.book\.sina\.com\.cn/weibobook/book/\d+\.html")

    def parseToGetUrl(self, htmlCont):
        """Return the set of book-detail-page URLs found in htmlCont.

        Returns None when htmlCont is None.
        """
        if htmlCont is None:
            print("htmlCont is None")
            return None
        soup = BeautifulSoup(htmlCont, 'html.parser', from_encoding="utf-8")
        return self.getNewUrls(soup)

    def parse(self, newUrl, htmlCont):
        """Return a dict of book metadata scraped from htmlCont.

        Returns None when either argument is None.
        """
        if newUrl is None or htmlCont is None:
            return None
        soup = BeautifulSoup(htmlCont, 'html.parser', from_encoding="utf-8")
        return self.getNewData(newUrl, soup)

    def getNewUrls(self, soup):
        """Collect the hrefs of every book-detail <a> link in the parsed page."""
        links = soup.find_all('a', href=self._BOOK_URL_RE)
        return {link['href'] for link in links}

    def getNewData(self, newUrl, soup):
        """Scrape one book page into a dict with keys: url, bookName,
        authorName, publisher, info_text, book_img, pop_height, comment.

        NOTE(review): like the original, this raises AttributeError if any
        expected element is missing from the page -- no None guards added.
        """
        data = {'url': newUrl}
        # <h1 class="book_name"><em>TITLE<span class="short"></span></em></h1>
        data['bookName'] = soup.find('h1', class_="book_name").find('em').get_text()
        print(data['bookName'])
        # <div class="authorName">AUTHOR</div>
        data['authorName'] = soup.find('div', class_="authorName").get_text()
        # <p class="copyRight">  copyright holder line</p>
        data['publisher'] = soup.find('p', class_="copyRight").get_text()
        # <div class="info_txt" ...>SYNOPSIS</div>
        data['info_text'] = soup.find('div', class_="info_txt").get_text()
        # <div class="book_img"><img src="..." alt="..."></div>
        data['book_img'] = soup.find('div', class_="book_img").find('img')['src']
        # <span class="pop_height"><em>1.3<i>...</i></em>...</span>
        data['pop_height'] = soup.find('span', class_="pop_height").find('em').get_text()

        comments = soup.find_all('div', class_="content-text")
        print(len(comments))
        # Single join instead of the original O(n^2) loop, which both re-ran
        # find_all on every iteration and grew the string with += (and
        # shadowed the builtin `str`).  Each comment keeps its trailing
        # '<br>' so the output is byte-identical to before.
        comment_text = ''.join(c.get_text() + '<br>' for c in comments)
        print("str =" + comment_text)
        data["comment"] = comment_text
        return data