'''
Site: https://www.biqukan.la/
Biquge (biqukan) novel scraper covering two page levels.
Level 1 (category listing page): novel title, link, author, description.
Level 2 (novel detail page): latest chapter names and links.
'''
import re
import requests
import time
import random

class NovelSpider:
    """Scraper for biqukan.la novel listings.

    Level 1 (category listing page): novel title, link, author, description.
    Level 2 (novel detail page): chapter names and links, stored on the
    item dict under 'novel_info'.
    """

    def __init__(self):
        # Category-1 listing URL template; {} is filled with the page number.
        self.url = "https://www.biqukan.la/fenlei1/{}.html"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36",
            # NOTE(review): the spaces around '=' look malformed for a Cookie
            # header (RFC 6265 expects name=value with no spaces). Kept
            # byte-identical -- verify against the site before changing.
            "cookie":"Hm_lvt_cc13cd690e17410a96647c14d9e29aba = 1629793677;Hm_lpvt_cc13cd690e17410a96647c14d9e29aba = 1629794532"
        }
        # Fix: compile both parsing patterns once here instead of rebuilding
        # them on every page parse. Pattern strings are unchanged.
        self.novel_pattern = re.compile(
            '<div class="caption">.*?<a href="(.*?)" title="(.*?)">.*?<small class="text-muted fs-12">'
            '(.*?)</small><p class="text-muted fs-12 hidden-xs">(.*?)</p>',
            re.S,
        )
        self.chapter_pattern = re.compile(
            '<dd class="col-md-4"><a href="(.*?)">(.*?)</a></dd>', re.S
        )

    def get_html(self, url):
        """Fetch *url* with the spider's headers and return the body as text.

        Fix: a timeout is set so a stalled connection cannot hang the crawl
        indefinitely (the original call had none). May raise
        requests.exceptions.Timeout / ConnectionError on network failure.
        """
        response = requests.get(url=url, headers=self.headers, timeout=10)
        return response.text

    def refunc(self, html):
        """Parse a level-1 listing page and scrape each novel found on it.

        For every novel, builds an item dict with keys 'href', 'title',
        'author', 'comment', then delegates to secondReFunc() to fetch and
        attach the chapter list.
        """
        for href, title, author, comment in self.novel_pattern.findall(html):
            item = {
                'href': href.strip(),
                'title': title.strip(),
                'author': author.strip(),
                'comment': comment.strip(),
            }
            self.secondReFunc(item)
            # Randomized pause between detail-page requests to stay polite
            # to the server and avoid trivial rate limiting.
            time.sleep(random.randint(1, 3))

    def secondReFunc(self, item):
        """Parse a novel's detail page: chapter names + chapter links.

        Fetches item['href'], fills item['novel_info'] with a list of
        (chapter_href, chapter_name) tuples, e.g.
        [("87654467.html", "Chapter 100"), ...], and prints the item.
        """
        second_html = self.get_html(url=item['href'])
        item['novel_info'] = self.chapter_pattern.findall(second_html)
        print(item)

    def crawl(self):
        """Crawl listing pages 1-5, parsing each one and pausing between pages."""
        for page in range(1, 6):
            page_url = self.url.format(page)
            first_html = self.get_html(url=page_url)
            self.refunc(first_html)
            time.sleep(random.randint(1, 3))

if __name__ == '__main__':
    # Script entry point: build a spider and crawl listing pages 1-5.
    spider = NovelSpider()
    spider.crawl()
