import requests
import os
from concurrent.futures import ThreadPoolExecutor
from fake_useragent import UserAgent
import pymongo

# MongoDB setup: wipe and recreate the "bilibili" database on every run,
# so each crawl starts from a clean "fanju" collection.
client = pymongo.MongoClient()
client.drop_database("bilibili")
db = client["bilibili"]
collection = db["fanju"]
ua = UserAgent()

# One paginated bilibili anime-index API URL per results page (pages 1-197).
urls = [
	'https://api.bilibili.com/pgc/season/index/result?st=1&order=3&season_version=-1'
	'&spoken_language_type=-1&area=-1&is_finish=-1&copyright=-1&season_status=-1'
	'&season_month=-1&year=-1&style_id=-1&sort=0'
	f'&page={page_no}&season_type=1&pagesize=20&type=1'
	for page_no in range(1, 198)
]


# Fetch a single API page and persist its entries.
def fetch_url(url):
    """Fetch one bilibili API page and store its result list in MongoDB.

    Downloads *url* with a randomized User-Agent, extracts the
    ``data.list`` array from the JSON response, and inserts the items
    into the module-level ``collection``. All failures are caught and
    printed so one bad page never aborts the whole crawl.

    Args:
        url: Full API URL for one results page.
    """
    print(f"正在爬取{url}。。。")
    try:
        # timeout: without it a stalled connection would block this
        # worker thread forever (requests has no default timeout).
        response = requests.get(
            url, headers={"user-agent": ua.random}, timeout=10
        )
        if response.status_code == 200:
            items = response.json()['data']['list']
            # insert_many raises InvalidOperation on an empty list, so
            # guard against a short or empty final page.
            if items:
                collection.insert_many(items)
            print(f"保存成功{url}。。。")
        else:
            print(f"Failed to fetch {url}, Status code: {response.status_code}")
    except Exception as e:
        # Broad catch is deliberate: log and move on to the next page.
        print(f"Error fetching {url}: {e}")

# Fan the URL list out across a pool of worker threads.
def main(urls, max_workers=12):
    """Crawl every URL concurrently and wait for all tasks to finish.

    Args:
        urls: Iterable of page URLs to hand to ``fetch_url``.
        max_workers: Upper bound on concurrent worker threads.
    """
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Queue every page up front; the pool schedules them as workers free up.
        tasks = [pool.submit(fetch_url, link) for link in urls]

        # Block until each task completes; result() re-raises worker errors.
        for task in tasks:
            try:
                task.result()
            except Exception as err:
                print(f"Error during execution: {err}")

# Script entry point: run the crawler, then release the MongoDB connection.
if __name__ == "__main__":
    # One worker thread per CPU core. NOTE(review): the original comment
    # claimed a cap of 3 threads — the code actually uses os.cpu_count(),
    # which may return None (ThreadPoolExecutor then picks its default).
    main(urls, max_workers=os.cpu_count())
    client.close()