import json
import os  # needed to create the output directory before writing
from concurrent.futures import ThreadPoolExecutor, as_completed

from curl_cffi import requests
from tqdm.rich import tqdm


# Fetch one page of post metadata from the yande.re JSON API
def fetch_url(page):
    url = f"https://yande.re/post.json?limit=100&page={page}"
    response = requests.get(url, impersonate="chrome110")
    # Check whether the request succeeded
    if response.status_code == 200:
        return response.json()
    else:
        print(f"Failed to retrieve data from {url}")
        return None
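

# Optional hardening (a sketch, not part of the original script): 25,000 rapid
# requests may get throttled or fail transiently, so a retry wrapper with a
# simple linear backoff can help. The retry count and delay below are
# assumptions, not documented yande.re limits. To use it, submit
# fetch_url_with_retry instead of fetch_url in main().
import time

def fetch_url_with_retry(page, retries=3, backoff=2.0):
    for attempt in range(retries):
        try:
            data = fetch_url(page)
            if data is not None:
                return data
        except Exception as exc:  # curl_cffi raises on network-level failures
            print(f"page {page} attempt {attempt + 1} failed: {exc}")
        if attempt < retries - 1:
            time.sleep(backoff * (attempt + 1))  # wait longer before each retry
    return None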


# Save one page's results to its own JSON file
def save_json(data, page):
    if data:
        with open(f"jsonpart/page_{page}.json", "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
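
# Note: worker threads only fetch; save_json runs in the main thread inside
# the as_completed loop below, so file writes never overlap.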


# Crawl all pages concurrently with a thread pool
def main():
    os.makedirs("jsonpart", exist_ok=True)  # save_json expects this directory
    pages = range(25001)  # pages 0 through 25000 inclusive
    bar = tqdm(total=len(pages))
    # Cap the pool size, e.g. at 20 worker threads here
    with ThreadPoolExecutor(max_workers=20) as executor:
        # Map each future back to its page number
        future_to_url = {executor.submit(fetch_url, page): page for page in pages}
        # Save each page as its download completes
        for future in as_completed(future_to_url):
            page = future_to_url[future]
            data = future.result()
            save_json(data, page)
            bar.update(1)
    bar.close()
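
# as_completed yields results in completion order, not page order; the page
# number embedded in each file name keeps the mapping intact regardless.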


if __name__ == "__main__":
    main()