import os
import time

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

# Root URL of the target novel site; relative chapter paths are appended to it.
SERVER = 'https://www.biqukan8.cc'
# Browser-like request headers sent with every fetch, apparently copied from a
# Chrome devtools capture.
# NOTE(review): 'authority' points at developer.mozilla.org (not the target
# site), and 'if-modified-since'/'if-none-match' are conditional-request
# validators for that cached MDN page — confirm these are intended; a 304
# response from a server honoring them would carry no body to parse.
HEADERS = {
    'authority': 'developer.mozilla.org',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'zh-CN,zh;q=0.9',
    'cache-control': 'max-age=0',
    'if-modified-since': 'Mon, 01 Jan 2024 01:39:55 GMT',
    'if-none-match': 'W/"918634a4ef0ed92c43fa7a08593e3df5"',
    'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }


class Spider_demo(object):
    """Scraper for biqukan8.cc: collects chapter links from a book's index
    page and downloads each chapter's text."""

    def __init__(self) -> None:
        self.server = SERVER            # site root, prepended to relative chapter paths
        self.header = HEADERS           # request headers sent with every fetch
        self.urls = []                  # absolute chapter URLs, filled by get_urls()
        self.chapter_names = []         # chapter titles, parallel to self.urls
        self.num = 0                    # number of chapters queued for download

    def get_content(self, url):
        """
        Fetch *url* and extract the text of the div with class 'showtxt'.

        :param url: chapter page URL to fetch
        :return: extracted chapter text, or '' when the request or parse
            fails (the failure is reported via print).
        """
        # Initialize up front: the original assigned `content` only on the
        # success path, so any caught exception (or a missing 'showtxt' div)
        # made the unconditional `return content` raise UnboundLocalError.
        content = ''
        try:
            response = requests.get(url, headers=self.header)
            response.raise_for_status()
            # response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'lxml')
            showtxt = soup.find('div', class_='showtxt')
            if showtxt is None:
                raise ValueError("没有找到class名为'showtxt'的div元素。")
            # The site indents paragraphs with runs of 8 non-breaking spaces;
            # turn each run into a blank line between paragraphs.
            content = showtxt.get_text().replace('\xa0' * 8, '\n\n')
        except requests.RequestException as e:
            print(f"请求错误: {e}")
        except ValueError as e:
            print(f"解析错误: {e}")
        except Exception as e:
            # Best-effort: keep the download loop alive on unexpected errors
            # (e.g. parser failures) and report them instead of crashing.
            print(f"其他错误: {e}")

        return content

    def get_urls(self, url, start_url='https://www.biqukan8.cc/3_3276/1499352.html'):
        '''
        Collect chapter links and titles from the book's listing page into
        self.urls / self.chapter_names and set self.num.

        :param url: book index page to scrape
        :param start_url: absolute URL of the first chapter to keep; entries
            before it (the site's "latest chapters" shortcuts) are dropped.
            Pass None to keep every link. Default preserves the original
            hard-coded behavior for this book.
        :raises requests.HTTPError: when the listing page cannot be fetched
        :raises ValueError: when start_url is not among the collected links
        '''
        response = requests.get(url, headers=self.header)
        response.raise_for_status()
        # Parse once and search within the 'listmain' div directly; the
        # original re-parsed str(div) through a second BeautifulSoup pass.
        listmain = BeautifulSoup(response.text, 'lxml').find('div', class_='listmain')
        for a in listmain.find_all('a'):
            href = a.get('href')
            if href:
                self.urls.append(self.server + href)
                self.chapter_names.append(a.string)
        if start_url is not None:
            start_index = self.urls.index(start_url)
            self.urls = self.urls[start_index:]
            self.chapter_names = self.chapter_names[start_index:]
        self.num = len(self.urls)

    @staticmethod
    def writer(content, file_name, chapter_name, mode='w'):
        """
        Append one chapter to *file_name* as UTF-8 text: the chapter title,
        a blank line, the content, and a trailing blank line.

        :param content: chapter body text
        :param file_name: output file path
        :param chapter_name: chapter title written before the body
        :param mode: open mode; callers pass 'a' to accumulate chapters
        """
        with open(file_name, mode=mode, encoding='utf-8') as f:
            f.write(chapter_name + '\n\n')
            f.writelines(content)
            f.write('\n\n')


if __name__ == "__main__":
    target_url = 'https://www.biqukan8.cc/3_3276/'
    dl = Spider_demo()
    dl.get_urls(target_url)
    # Ensure the output directory exists before the first write: the original
    # crashed with FileNotFoundError whenever ./txt was missing.
    os.makedirs('./txt', exist_ok=True)
    file_name = './txt/仙逆.txt'
    print('开始下载：')
    # Iterate the two parallel lists directly instead of indexing by range.
    for chapter_url, chapter_name in tqdm(zip(dl.urls, dl.chapter_names), total=dl.num):
        content = dl.get_content(chapter_url)
        dl.writer(content, file_name, chapter_name, mode='a')
        time.sleep(1)  # throttle: be polite to the server
    print('下载完成')
