# -*- coding: utf-8 -*-

"""
DateTime   : 2021/04/14 19:54
Author     : ZhangYafei
Description: 笔趣阁
https://www.ibiquge.net/
"""
import asyncio
import os

import aiofiles as aiofiles
import aiohttp
import async_timeout
import requests
from lxml import etree
from zyf.timer import timeit


class IbiqugeCrawler:
    """Async crawler for the ibiquge.net novel site.

    Fetches a book's chapter index synchronously (requests + lxml), then
    downloads all chapter bodies concurrently with aiohttp and writes each
    chapter to ``download/<book title>/<chapter title>.txt``.
    """

    def __init__(self):
        self.base_url = 'https://www.ibiquge.net'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.75'}
        self.book_title = None   # populated by get_all_chapter_urls
        self.book_author = None  # populated by get_all_chapter_urls
        self.save_dir = 'download'
        self.error_list = []     # (url, exception) pairs collected during download

    def get_all_chapter_urls(self, book_id):
        """Fetch the book's index page and return the set of chapter URLs.

        Side effects: sets ``book_title`` / ``book_author`` and creates the
        output directory. Returns None when the index page request fails.
        """
        url = f'{self.base_url}/{book_id}'
        response = requests.get(url=url, headers=self.headers)
        if response.status_code != 200:
            return None
        html = etree.HTML(response.content)
        # ':' is illegal in Windows file names, so replace it in the title.
        self.book_title = html.xpath('//div[@id="info"]/h1/text()')[0].replace(':', '-')
        self.book_author = html.xpath('//div[@id="info"]/p[1]/a/text()')[0]
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(f'{self.save_dir}/{self.book_title}', exist_ok=True)
        return {f'{self.base_url}{dd}' for dd in html.xpath('//div[@id="list"]/dl/dd/a/@href')}

    async def download_chapter_content(self, session, url):
        """Download one chapter page and write its text to a file.

        Failures are recorded in ``self.error_list`` instead of raised, so a
        single bad chapter does not abort the whole batch.
        """
        print(f'make request to {url}')
        try:
            # async_timeout.timeout must be entered with `async with`; the
            # plain `with` form raises at runtime in async_timeout >= 4.0.
            async with async_timeout.timeout(60):
                async with session.get(url=url) as response:
                    content = await response.text(encoding='utf-8')
                    html = etree.HTML(content)
                    # Strip characters that are illegal in file names.
                    title = html.xpath('//div[@class="bookname"]/h1/text()')[0]
                    for bad_char in ('?', '、', '？', '*'):
                        title = title.replace(bad_char, '_')
                    content = '\n'.join(row.strip() for row in html.xpath('//div[@id="content"]/text()') if row.strip())
                    async with aiofiles.open(f'{self.save_dir}/{self.book_title}/{title}.txt', mode='w',
                                             encoding='utf-8') as f:
                        await f.write(content)
                    print(f'{title}写入成功')
        except Exception as e:
            self.error_list.append((url, e))

    async def run(self, book_id):
        """Resolve all chapter URLs for ``book_id`` and download them concurrently."""
        all_chapter_urls = self.get_all_chapter_urls(book_id=book_id)
        if not all_chapter_urls:
            # Index request failed (None) or no chapters found — avoid the
            # len(None) crash the original code would hit here.
            print(f'{book_id} 请求失败')
            return
        print(f"{book_id} -> {self.book_title} 共检索到 {len(all_chapter_urls)} 个章节")
        print("开始下载所有章节内容")
        conn = aiohttp.TCPConnector(ssl=False, limit=50, use_dns_cache=True)
        async with aiohttp.ClientSession(connector=conn, headers=self.headers) as session:
            # gather schedules coroutines itself; no create_task wrapper needed.
            await asyncio.gather(
                *(self.download_chapter_content(session, url) for url in all_chapter_urls))

        for url, e in self.error_list:
            print(f'{url} 请求失败', e)

    @timeit
    def start(self, book_id):
        """Synchronous entry point: drive the async download to completion."""
        asyncio.run(self.run(book_id))


if __name__ == '__main__':
    # Fix: variable was misspelled `cralwer` in the original.
    crawler = IbiqugeCrawler()
    # Keep prompting until the user supplies a non-empty book id.
    while True:
        book_id = input('请选择下载的小说ID(例如：48_48800) >>  ')
        if book_id:
            break
        print('小说ID不能为空, 请重新输入')
    crawler.start(book_id)
