# -*- coding = utf-8 -*-
# @Time    : 2025/3/29 下午3:47
# @Author  : yqk
# @File    : 多线程爬虫.py
# @Software: PyCharm
import requests
from bs4 import BeautifulSoup
import concurrent.futures
import queue

# Browser-like HTTP request headers captured from a real Chrome 134 session.
# The referer/user-agent/sec-* fields make requests look like normal browser
# traffic so qidian.com serves chapter pages instead of an anti-bot challenge.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9",
    "cache-control": "max-age=0",
    "priority": "u=0, i",
    "referer": "https://www.qidian.com/chapter/1042537549/815073954/",
    "sec-ch-ua": "\"Chromium\";v=\"134\", \"Not:A-Brand\";v=\"24\", \"Google Chrome\";v=\"134\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36"
}
# Session cookies copied from a logged-in browser session.
# NOTE(review): these are time-limited tokens (_csrfToken, w_tsfp, the GA/Hm
# analytics ids) and WILL expire — re-capture them from the browser's dev
# tools when requests start returning captcha or error pages.
cookies = {
    "_csrfToken": "RhO9yY70jgHE9exkMji7RBjHgs30h03uKZQWyLam",
    "Hm_lvt_f00f67093ce2f38f215010b699629083": "1743231924",
    "HMACCOUNT": "E85503690916CA6F",
    "newstatisticUUID": "1743231923_622984586",
    "fu": "1754913736",
    "_gid": "GA1.2.1927443532.1743231924",
    "supportwebp": "true",
    "e1": "%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A18%22%7D",
    "e2": "%7B%22l6%22%3A%22%22%2C%22l7%22%3A%22%22%2C%22l1%22%3A3%2C%22l3%22%3A%22%22%2C%22pid%22%3A%22qd_p_qidian%22%2C%22eid%22%3A%22qd_A71%22%7D",
    "supportWebp": "true",
    "Hm_lpvt_f00f67093ce2f38f215010b699629083": "1743231954",
    "_ga": "GA1.1.524932475.1743231924",
    "traffic_utm_referer": "https%3A%2F%2Fwww.baidu.com%2Flink",
    "traffic_search_engine": "",
    "se_ref": "",
    "_ga_FZMMH98S83": "GS1.1.1743231923.1.1.1743231994.0.0.0",
    "_ga_PFYW0QLV3P": "GS1.1.1743231923.1.1.1743231994.0.0.0",
    "x-waf-captcha-referer": "https%3A%2F%2Fwww.qidian.com%2Fbook%2F1042537549%2F",
    "w_tsfp": "ltv2UU8E3ewC6mwF46vukEisFTondzkilgxsXqNmeJ94Q7ErU5mB0oR5vMv1MXPc5sxnt9jMsoszd3qAUdImfxIWRc6QdYoVkB/Gy99yicxUQ0k5VYnWSwVKK7x1v2IULW5XJEDvimd9ItJAmOY1i1AK4Xdyzf5zXvFqL5kXjB0ZufzCkpxuDW3HlFWQRzaZciVfKr/c9OtwraxQ9z/c5Vv7LFt0A6hewgfHg31dWzox6wPjMK0ddgmuUtutLvgy23S0hSe2M8T1iEg9sg9qpRxLUIqrgiDIWXdEJAluZl+3hrcxedi5KvRg+2gLVr4XDR1V6hoaxPc7j1FNe2/6J2vOXK4j5F4eFa0E6s/aKnOUlt65Yh1bntwukVkyuJUA6zFnZR//LNtaT2LCYHcae/sSd47obH9hCgAWACVG9BFFaHtdCrp1aI+WuUS2eBBag7I7ZbHrKOMKPyuVVPC2F/s="
}
# Fetch the content of a single chapter.
def get_cont(url, headers, cookies):
    """Download one chapter page and extract its title and body text.

    Args:
        url: Absolute URL of the chapter page.
        headers: Browser-like request headers (module-level ``headers``).
        cookies: Session cookies (module-level ``cookies``).

    Returns:
        Tuple ``(chapter_title, chapter_content)`` where the content has one
        paragraph per line.

    Raises:
        requests.HTTPError: On a 4xx/5xx response.
        ValueError: When the expected markup is missing (e.g. the server
            returned a captcha/anti-bot page instead of the chapter).
    """
    # timeout prevents a single stuck request from hanging a worker thread
    # forever; raise_for_status stops us from "parsing" an error page.
    response = requests.get(url, headers=headers, cookies=cookies, timeout=30)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    title_tag = soup.find('h1', class_='title')
    # The chapter body lives in <main id="c-...">.
    content_div = soup.find('main', id=lambda x: x and x.startswith('c-'))
    if title_tag is None or content_div is None:
        raise ValueError(f"unexpected page layout for {url} (possibly blocked)")
    chapter_title = title_tag.get_text(strip=True)
    paragraphs = content_div.find_all('p')
    chapter_content = '\n'.join(p.get_text(strip=True) for p in paragraphs)
    return chapter_title, chapter_content


# Scrape the book's table-of-contents page and collect all chapter links.
def get_link(url_link, headers, cookies):
    """Return the chapter list of the book at *url_link*, in reading order.

    Args:
        url_link: URL of the book's catalogue page.
        headers: Browser-like request headers.
        cookies: Session cookies.

    Returns:
        List of dicts with keys ``'volume'``, ``'name'`` and ``'url'``
        (absolute ``https`` URL), one per chapter.

    Raises:
        requests.HTTPError: On a 4xx/5xx response.
    """
    response = requests.get(url_link, headers=headers, cookies=cookies, timeout=30)
    response.raise_for_status()  # fail fast instead of scraping an error page
    soup = BeautifulSoup(response.text, 'html.parser')

    chapters = []
    for volume in soup.find_all('div', class_='catalog-volume'):
        volume_name = volume.find('h3', class_='volume-name').get_text(strip=True)
        for item in volume.find_all('li', class_='chapter-item'):
            anchor = item.find('a')
            if anchor is None:  # skip malformed list items instead of crashing
                continue
            chapter_link = anchor['href']
            # The title attribute also carries "... 首发时间 ..." (publish-time)
            # metadata after the chapter name — strip it off.
            chapter_name = anchor['title'].split(' 首发时间')[0]
            chapters.append({
                'volume': volume_name,
                'name': chapter_name,
                # Catalogue links are scheme-relative ("//www.qidian.com/...").
                'url': chapter_link if chapter_link.startswith('http') else 'https:' + chapter_link
            })

    return chapters


# Worker: download one chapter and hand its formatted text to the collector.
def fetch_chapter(chapter, headers, cookies, output_queue):
    """Fetch the chapter described by *chapter* and put "title\\nbody\\n\\n" on *output_queue*."""
    title, body = get_cont(chapter['url'], headers, cookies)
    output_queue.put(f"{title}\n{body}\n\n")


# Download every chapter concurrently and save them to a single text file.
def save_chapters_to_txt(url_link, headers, cookies, filename):
    """Scrape the whole book at *url_link* and write it to *filename* (UTF-8).

    Fixes over the previous version:
    * chapters are written in catalogue (reading) order — the old shared
      queue emitted them in thread-completion order, shuffling the book;
    * worker exceptions are re-raised instead of being silently dropped;
    * the progress print no longer puts a backslash inside an f-string
      expression (a SyntaxError on Python < 3.12);
    * the final message reports the actual *filename*.

    Args:
        url_link: URL of the book's catalogue page.
        headers: Browser-like request headers.
        cookies: Session cookies.
        filename: Path of the output text file (overwritten).
    """
    chapters = get_link(url_link, headers, cookies)

    # One single-item queue per chapter keeps fetch_chapter's interface
    # unchanged while letting us read results back in catalogue order.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        jobs = []
        for chapter in chapters:
            result_queue = queue.Queue()
            future = executor.submit(fetch_chapter, chapter, headers, cookies, result_queue)
            jobs.append((future, result_queue))

        # Stream results to disk in order; future.result() blocks until that
        # chapter is done and re-raises any download/parse error.
        with open(filename, 'w', encoding='utf-8') as f:
            for future, result_queue in jobs:
                future.result()
                chapter_data = result_queue.get()
                f.write(chapter_data)
                title_line = chapter_data.split('\n', 1)[0]
                print(f"已保存章节：{title_line}")

    print(f"所有章节已保存到 {filename}")


# Example usage — guarded so importing this module no longer kicks off a
# full scrape as a side effect; `python <this file>` behaves as before.
if __name__ == "__main__":
    url_link = "https://www.qidian.com/book/1042537549/"
    save_chapters_to_txt(url_link, headers, cookies, 'chapters.txt')
