#!/usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:煜
@file: HtmlParser.py
@time: 2018/10/20
"""
import re
from lxml import etree



class HtmlParser(object):
    """Parse xiashu.la pages: the book index and a single book's detail page."""

    # Site root, prepended to every relative href found on the pages.
    BASE_URL = 'https://www.xiashu.la'

    def parserIndex(self, html_content):
        '''
        Parse the index page and collect every book's detail-page URL.
        :param html_content: HTML of the index page listing all books
        :return: list of absolute detail-page URLs, or None when input is None
        '''
        if html_content is None:
            return
        page = etree.HTML(html_content)
        hrefList = []
        # BUGFIX: the original looped over the cards but evaluated a
        # document-absolute XPath ('//div[2]/a/@href') inside the loop, so
        # each pass re-scanned the whole page and only the last result was
        # kept; it also raised NameError when '#waterfall' had no children.
        # Searching relative to each card ('.//') fixes both problems.
        for card in page.xpath('//*[@id="waterfall"]/div'):
            for h in card.xpath('.//div[2]/a/@href'):
                # The page embeds one bare link to the site root; skip it
                # (the original stripped it with list.remove()).
                if h == self.BASE_URL:
                    continue
                hrefList.append(self.BASE_URL + h)
        return hrefList

    def parserDetailPage(self, html_Content):
        '''
        Parse a book's detail page.
        :param html_Content: HTML of the book's detail page
        :return: dict with the book's metadata, full chapter list and its
                 comments, or None when the input is None or the hidden
                 chapter AJAX request fails
        '''
        if html_Content is None:
            return
        page = etree.HTML(html_Content)

        # Book id: first run of digits in the 7th <meta> tag's content.
        # Renamed from `id` so the builtin is not shadowed; the returned
        # dict still uses the 'id' key.
        book_id = re.search(r'([1-9]\d*)', page.xpath('/html/head/meta[7]/@content')[0]).group(0)

        # --- book details -------------------------------------------------
        image = page.xpath('//*[@id="picbox"]/div[1]/img/@data-original')[0]
        bookName = page.xpath('//*[@id="info"]/div[1]/h1/text()')[0].replace('《', '').replace('》', '')
        descript = page.xpath('//*[@id="aboutbook"]/text()')
        author = page.xpath('//*[@id="infobox"]/div/div[2]/a/text()')[0]
        # `book_type` avoids shadowing the builtin `type`; the dict key stays 'type'.
        book_type = page.xpath('//*[@id="mainright"]/div[1]/ul/li[1]/text()')[0].replace('作品类别：', '')
        totalChapter = page.xpath('//*[@id="mainright"]/div[1]/ul/li[2]/text()')[0].replace('章节数目：', '').replace('约', '').replace('章节', '')
        totalRemarkPeople = page.xpath('//*[@id="picbox"]/div[2]/a/text()')[0].replace('共(', '').replace(')条评论', '')
        print('当前获取书籍为：%s' % bookName)

        # --- chapter list -------------------------------------------------
        Chapterlist = []
        for li in page.xpath('//ul[@id="toplist"]/li'):
            Chapterlist.append({'title': li.xpath('a/text()')[0],
                                'href': self.BASE_URL + li.xpath('a/@href')[0]})

        # Hidden chapters are fetched through the site's AJAX endpoint.
        hideChapterCount = page.xpath('//*[@id="yc"]/span[2]/text()')[0]
        hideUrl = self.BASE_URL + '/api/ajax/zj?id=' + book_id + '&num=' + hideChapterCount + '&order=asc'
        try:
            # Imported lazily so the module stays importable without requests.
            import requests
            hidePage = etree.HTML(requests.get(hideUrl).text)
            for li in hidePage.xpath('*/li'):
                Chapterlist.append({'title': li.xpath('a/text()')[0],
                                    'href': self.BASE_URL + li.xpath('a/@href')[0]})
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate.  The "give up and return None" contract of the
            # original is kept.
            return None

        for li in page.xpath('//*[@id="lastchapter"]/li'):
            Chapterlist.append({'title': li.xpath('a/text()')[0],
                                'href': self.BASE_URL + li.xpath('a/@href')[0]})

        # --- comments -----------------------------------------------------
        # BUGFIX: these selectors are document-absolute, so evaluate them
        # once.  The original ran them inside a loop over the comment <li>s
        # (re-scanning the whole page each pass) and crashed with NameError
        # when there were no comments at all.
        user_names = page.xpath('//*[@class="userinfo"]/span[@class="username"]/text()')
        remark_contents = page.xpath('//*[@class="userinfo"]/span[@class="usercom"]/text()')
        remark_times = page.xpath('//*[@class="userinfo"]/span[@class="time"]/text()')
        remarkInfo = []
        seen_contents = []
        for user_name, remark_content, remark_time in zip(user_names, remark_contents, remark_times):
            # Skip over-long comments and duplicates.
            if len(remark_content) > 300 or remark_content in seen_contents:
                continue
            seen_contents.append(remark_content)
            # NOTE: 'remark_ontent' (missing 'c') is kept verbatim because
            # downstream consumers may already read this key.
            remarkInfo.append({'user_name': user_name, 'remark_ontent': remark_content, 'remark_time': remark_time})

        # Assemble the book's full record (same keys as before).
        bookInfo = {'id': book_id, 'image': image, 'bookName': bookName, 'descript': ''.join(descript), 'author': author,
                    'type': book_type, 'totalChapter': totalChapter, 'totalRemarkPeople': totalRemarkPeople,
                    'remarkList': remarkInfo, 'ChapterList': Chapterlist}

        return bookInfo


if __name__ == '__main__':
    # Quick manual smoke test: fetch one book's detail page and show its cover URL.
    import requests

    response = requests.get("https://www.xiashu.la/15916/")
    parser = HtmlParser()
    book_info = parser.parserDetailPage(response.text)
    print(book_info['image'])
    # book_urls = parser.parserIndex(response.text)
    # print(len(book_urls))