import time
from pprint import pprint

import crawlertool as tool
from bs4 import BeautifulSoup
# pip install fake_useragent
from fake_useragent import UserAgent

'''
练习：起点中文网小说本周强推榜及章节目录数据爬虫
'''
class SpiderQiDianWeekRecommendList():
    """Crawler for qidian.com's "weekly strong recommendation" book list.

    Scrapes the homepage ranking block, then follows each ranked book's
    detail page to collect cover, tags, intro, word count, recommendation
    counters and the chapter catalog.
    """

    # Upper bound on re-requests of a book-info page that keeps answering
    # with a non-200 status (the site returns 202 Accepted while the page
    # is still being generated). Prevents the previous infinite retry loop.
    MAX_RETRIES = 10

    def __init__(self):
        self.url = 'https://www.qidian.com/'
        # Build the random Chrome UA once and share it between both header
        # sets (UserAgent() construction is comparatively expensive).
        ua = UserAgent().chrome
        # Headers for the homepage request (Host: www.qidian.com).
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'www.qidian.com',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua,
        }
        # Headers for book detail pages (Host: book.qidian.com).
        self.book_info_headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'book.qidian.com',
            'Referer': 'https://book.qidian.com/info/1035861107/',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'same-origin',
            'Sec-Fetch-User': '?1',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': ua,
        }

    def main(self):
        """Fetch the homepage and return the weekly recommendation list.

        :return: list of dicts with keys ``book_type``, ``book_name``,
                 ``author_name`` and ``book_info`` (detail-page data, or
                 an empty list when no detail URL is available).
        """
        response = tool.do_request(self.url, headers=self.headers)
        bs = BeautifulSoup(response.content.decode(errors="ignore"), 'lxml')
        book_list = []
        book_label_list = bs.select('.index-two-wrap')[0].select('.book-list-wrap > div > ul > li')
        for book_label in book_label_list:
            book_type = book_label.select_one('li > .channel').text
            # Top-ranked entries wrap the title in <strong><a> linking to the
            # detail page; the remaining entries only carry a plain .name.
            title_link = book_label.select_one('li > strong > a')
            if title_link is not None:
                book_name = title_link.text
                book_info_url = 'https:' + title_link['href']
            else:
                book_name = book_label.select_one('li > .name').text
                book_info_url = None
            author_element = book_label.select_one('li > .author')
            author_name = author_element.text if author_element is not None else '无'
            book_info = self.get_book_info(book_info_url) if book_info_url is not None else []
            book_list.append({
                # Strip the decorative 「」 brackets around the channel name.
                'book_type': book_type.replace('「', '').replace('」', ''),
                'book_name': book_name,
                'author_name': author_name,
                'book_info': book_info,
            })
        return book_list

    def get_book_info(self, book_info_url):
        """Fetch one book's detail page and return its metadata.

        :param book_info_url: absolute URL of the book.qidian.com info page
        :return: single-element list holding a dict with keys ``cover``,
                 ``tags``, ``intro``, ``words``, ``total_recommends``,
                 ``week_recommends``, ``book_read_url``, ``catalog_list``
        :raises RuntimeError: if the page never returns HTTP 200 within
                 ``MAX_RETRIES`` attempts
        """
        book_res = tool.do_request(book_info_url, headers=self.book_info_headers)
        # The server may answer 202 Accepted while the page is being
        # generated; retry a bounded number of times (with a short pause)
        # instead of looping forever.
        for _ in range(self.MAX_RETRIES):
            if book_res.status_code == 200:
                break
            time.sleep(0.5)
            book_res = tool.do_request(book_info_url, headers=self.book_info_headers)
        else:
            raise RuntimeError(
                f'book info page did not return 200 after {self.MAX_RETRIES} attempts: {book_info_url}')
        book_bs = BeautifulSoup(book_res.content.decode(errors="ignore"), 'lxml')
        # Cover image (protocol-relative src on the page).
        cover = 'https:' + book_bs.select_one('.book-information > .book-img > a > img')['src']
        # Tags appear both as <span> and as <a> elements under .tag.
        tags = [tag.text for tag in book_bs.select('.book-information > .book-info > .tag > span')]
        tags += [tag.text for tag in book_bs.select('.book-information > .book-info > .tag > a')]
        intro = book_bs.select_one('.book-information > .book-info > .intro').text
        # Hoist the repeated paragraph query: index 2 holds the stats
        # (<em> values with the word-count unit in a sibling <cite>),
        # index 3 holds the "start reading" link.
        info_paragraphs = book_bs.select('.book-information > .book-info > p')
        stats = info_paragraphs[2]
        stat_values = stats.select('p > em')
        words = stat_values[0].text + stats.select('p > cite')[0].text
        total_recommends = stat_values[1].text
        week_recommends = stat_values[2].text
        book_read_url = info_paragraphs[3].select('p > a')[0]['href']
        catalog_list = self.get_catalog_list(book_bs)
        return [{
            'cover': cover,
            'tags': tags,
            'intro': intro,
            'words': words,
            'total_recommends': total_recommends,
            'week_recommends': week_recommends,
            'book_read_url': book_read_url,
            'catalog_list': catalog_list,
        }]

    def get_catalog_list(self, book_bs):
        """Extract the chapter catalog from an already-parsed book page.

        :param book_bs: BeautifulSoup document of a book detail page
        :return: list of dicts with keys ``title`` and ``read_url``
        """
        catalog_list = []
        chapter_elements = book_bs.select('.catalog-content-wrap > .volume-wrap > div')[0].select('div > ul > li')
        for chapter in chapter_elements:
            link = chapter.select_one('li > h2 > a')
            catalog_list.append({
                'title': link.text,
                # hrefs on the page are protocol-relative.
                'read_url': 'https:' + link['href'],
            })
        return catalog_list

if __name__ == '__main__':
    # Announce progress, then build the spider and pretty-print its result.
    print('正在运行起点中文网小说本周强推榜及章节目录数据爬虫，请稍后。。。')
    pprint(SpiderQiDianWeekRecommendList().main())
