import os.path
import random

import requests
from tqdm import tqdm

from xunter_utils.work_utils import work_logger


class BQGClient:
    """Client for scraping novel pages from https://www.3bqg.cc.

    Page URLs follow the pattern:
        https://www.3bqg.cc/book/<book_id>/<page>.html
    """

    def __init__(self):
        """Set up the base URL and the request headers."""
        # Base of every book-page URL; get_page() appends /<book_id>/<page>.html.
        self.url_temp = 'https://www.3bqg.cc/book'
        # Browser-like User-Agent so the site serves normal HTML.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }
        # Backward-compatible alias for the original (misspelled) attribute name,
        # in case external code still reads it.
        self.hearders = self.headers

    @work_logger(path='bqg_logs')  # work-log decorator: records each send_request run and retries it on failure
    def send_request(self, url: str) -> requests.Response:
        """Send a GET request and return the response.

        :param url: URL to request
        :return: the response object
        :raises TimeoutError: if the request takes longer than 6 seconds, or
            if the status code is not 2xx (TimeoutError is deliberately kept
            for non-2xx responses so the retry decorator's behavior is
            unchanged — TODO: confirm what work_logger actually catches).
        """
        tqdm.write('开始请求：%s' % url)
        res = requests.get(url, headers=self.headers, timeout=6)
        # NOTE: an earlier version randomly raised here (random.randint) to
        # demo the retry decorator; that debug fault-injection was removed.
        if res.status_code // 100 != 2:
            raise TimeoutError('响应状态码异常')
        return res

    @work_logger(path='bqg_logs', is_retry=False)
    def get_page(self, book_id: int, page: int) -> str:
        """Fetch one page of a book and return its decoded HTML.

        :param book_id: book id on the site
        :param page: page (chapter) number
        :return: decoded response body

        Task-level method: it only performs the fetch. If send_request
        ultimately fails (after its own retries) the exception propagates;
        is_retry=False here means this task itself is not retried again.
        """
        url = '%s/%d/%d.html' % (self.url_temp, book_id, page)
        # send_request retries internally (via its decorator) before raising.
        res = self.send_request(url)
        return res.text

    def get_pages(self, book_id: int, pages: list | range) -> list:
        """Fetch several pages, with a progress bar.

        :param book_id: book id on the site
        :param pages: page numbers to fetch
        :return: list of decoded page bodies, in the same order as *pages*
        """
        return [self.get_page(book_id, page) for page in tqdm(pages)]

    def save_txt(self, path: str, data: str) -> None:
        """Write *data* to *path* as UTF-8 text (overwrites an existing file).

        :param path: destination file path
        :param data: text content to save
        """
        with open(path, 'w', encoding='utf-8') as f:
            f.write(data)

    def download(self, book_id: int, dir_path: str, pages: list | range) -> None:
        """Download the given pages of a book into *dir_path*.

        :param book_id: book id on the site
        :param dir_path: directory to save pages in (created if missing)
        :param pages: page numbers to download; each page is saved as
            <dir_path>/<page>.html
        """
        # makedirs with exist_ok avoids the isdir/mkdir race and also
        # creates intermediate directories if dir_path is nested.
        os.makedirs(dir_path, exist_ok=True)

        text_list = self.get_pages(book_id, pages)
        for page, text in zip(pages, text_list):
            self.save_txt(os.path.join(dir_path, f'{page}.html'), text)

if __name__ == '__main__':
    client = BQGClient()
    # Download pages 60-70 of book 60417 into the '道诡异仙' directory.
    # (Earlier commented-out experiments — single-page fetch, explicit
    # get_pages + save loop — were removed as dead code.)
    client.download(60417, '道诡异仙', range(60, 71))
