"""
   一级页面：
      li_list: //li[contains(@id,"menu-item-")]
      parent_title: ./a/text()
      parent_href:  ./a/@href

   二级页面：
      art_list: //article
      son_title: ./a/text()
      son_href: ./a/@href

   三级页面：
      ['段落1','段落2',...]
      小说内容：//article/p/text()
"""
import requests
from lxml import etree
import time
import random
from fake_useragent import UserAgent

class DaomuSpider:
    """Scraper for daomubiji.com.

    Walks three levels of pages:
      1. index menu   -> one (parent_title, parent_href) per book
      2. book page    -> one (son_title, son_href) per chapter <article>
      3. chapter page -> paragraphs joined into novel_content
    and prints one fully-populated item dict per chapter.
    """

    def __init__(self):
        # Entry page whose menu lists every book.
        self.index_url = 'http://www.daomubiji.com/'
        # Build the UserAgent pool ONCE; the original re-created it (and
        # re-read fake_useragent.json from disk) on every request.
        self.ua = UserAgent(path='fake_useragent.json')

    def get_html(self, url):
        """Helper 1: fetch *url* with a random User-Agent, return HTML text."""
        headers = {'User-Agent': self.ua.random}
        # timeout so a single stalled connection cannot hang the crawl.
        return requests.get(url=url, headers=headers, timeout=10).text

    def xfunc(self, html, x):
        """Helper 2: parse *html* and return the node list for xpath *x*."""
        eobj = etree.HTML(html)
        return eobj.xpath(x)

    def parse_html(self):
        """Level 1: extract each book's title and link from the index menu."""
        first_html = self.get_html(url=self.index_url)
        first_x = '//li[contains(@id,"menu-item-20")]'
        li_list = self.xfunc(first_html, first_x)
        for li in li_list:
            # Guard: skip menu entries that lack an <a> child instead of
            # crashing with IndexError on [0].
            titles = li.xpath('./a/text()')
            hrefs = li.xpath('./a/@href')
            if not titles or not hrefs:
                continue
            item1 = {'parent_title': titles[0], 'parent_href': hrefs[0]}
            # Descend into this book's chapter listing.
            self.parse_second_page(item1)

    def parse_second_page(self, item1):
        """Level 2: extract chapter titles and links from a book page."""
        second_html = self.get_html(item1['parent_href'])
        # One <article> per chapter on the book page.
        art_list = self.xfunc(second_html, '//article')
        for art in art_list:
            titles = art.xpath('./a/text()')
            hrefs = art.xpath('./a/@href')
            if not titles or not hrefs:
                continue
            item2 = {
                'parent_title': item1['parent_title'],
                'parent_href': item1['parent_href'],
                'son_title': titles[0],
                'son_href': hrefs[0],
            }
            # Fetch the chapter body itself.
            self.parse_third_page(item2)
            # Polite random delay between chapter requests.
            time.sleep(random.uniform(0, 1))

    def parse_third_page(self, item2):
        """Level 3: fetch a chapter page, join its paragraphs, print the item."""
        third_html = self.get_html(item2['son_href'])
        # p_list: ['paragraph 1', 'paragraph 2', ...]
        p_list = self.xfunc(third_html, '//article/p/text()')
        novel_content = '\n'.join(p_list)
        # Key typo fixed: was 'novel_conent'.  Ideographic spaces (U+3000)
        # used for Chinese paragraph indents are normalized to ASCII spaces.
        item2['novel_content'] = novel_content.replace('\u3000', ' ')

        print(item2)

    def crawl(self):
        """Public entry point: run the full three-level crawl."""
        self.parse_html()

if __name__ == '__main__':
    # Script entry point: build a spider and start the crawl.
    DaomuSpider().crawl()




'''
{
    parent_title : xxxx,
    parent_href  : xxxx,
    son_title    : xxxx,
    son_href     : xxxx,
    novel_content: xxxx,
}
'''



















