import requests
import random
import pymongo
from pymongo import MongoClient, ASCENDING, errors

# MongoDB connection settings. Note the same name ('testUrl') is used for
# both the database and the collection.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
DB_NAME = 'testUrl'
COLL_NAME = 'testUrl'

# ---------- 1. Connection ----------
def get_collection():
    """Connect to MongoDB and return the target collection.

    A unique index on ``url`` is ensured so that duplicate documents are
    rejected at write time — this backs the resume-from-checkpoint logic.
    """
    client = MongoClient(host=MONGO_HOST, port=MONGO_PORT)
    collection = client[DB_NAME][COLL_NAME]
    collection.create_index([('url', ASCENDING)], unique=True)
    return collection

# ---------- 2. Resume support: compute the still-to-crawl list ----------
def build_remaining_urls(coll, all_urls):
    """Return the subset of *all_urls* not yet stored in *coll*.

    Order of *all_urls* is preserved; membership is checked against the
    set of ``url`` values already present in the collection.
    """
    done = set(coll.distinct('url'))
    pending = []
    for candidate in all_urls:
        if candidate not in done:
            pending.append(candidate)
    print(f'[断点] 总任务 {len(all_urls)}，已爬 {len(done)}，剩余 {len(pending)}')
    return pending

# ---------- 3. Crawl one URL, persist one document ----------
def crawl_and_save(url, coll):
    """Fetch *url* over HTTP and store the response in *coll*.

    A failed request is only logged and skipped, so it will be retried on
    the next run; a duplicate-key error means the URL was saved earlier
    and is silently tolerated.
    """
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException as exc:
        print(f'[ERROR] 请求失败 {url} -> {exc}')
        return  # skip on failure; the next startup will retry it

    record = {
        'url': url,
        'status_code': response.status_code,
        'text': response.text,
        'headers': dict(response.headers),
    }
    try:
        coll.insert_one(record)
    except errors.DuplicateKeyError:
        print(f'[INFO] 重复写入，已跳过 {url}')
    else:
        print(f'[INFO] 已保存 {url}')

# ---------- 4. Main flow ----------
def main():
    """Build the task list, drop already-crawled URLs, crawl the rest.

    The URL set is generated from a *seeded* RNG so it is identical on
    every run. The original code used the unseeded global `random`, which
    produced a different URL set each run and made the resume filter in
    step 2 useless (previously crawled URLs almost never reappeared).
    """
    coll = get_collection()

    # 1. Generate the full task set (adapt the rule to your own needs).
    #    Fixed seed => stable task set across restarts, enabling resume.
    rng = random.Random(42)
    all_urls = [f'https://httpbin.org/get?age={rng.randint(1, 100)}' for _ in range(100)]

    # 2. Resume filter: keep only URLs not yet stored in MongoDB.
    todo_urls = build_remaining_urls(coll, all_urls)

    # 3. Crawl the remainder sequentially, one document per URL.
    for url in todo_urls:
        crawl_and_save(url, coll)

if __name__ == '__main__':
    main()