import pymongo
# import requests
#
#
#
#
# def should_request():
#     client = pymongo.MongoClient('localhost', 27017)
#     db = client['testUrl']
#     collection = db['testUrl']
#
#
# def main():
#     urls = [f"https://httpbin.org/get?age={i}" for i in range(100)]
#     for url in urls:
#         response = requests.get(url)
#         print(response.text)

#
# import requests
# import pymongo
# from pymongo import MongoClient, ASCENDING, errors
#
# MONGO_HOST = 'localhost'
# MONGO_PORT = 27017
# DB_NAME = 'testUrl'
# COLL_NAME = 'testUrl'
#
# # ---------- 1. MongoDB 连接 ----------
# def get_collection():
#     client = MongoClient(MONGO_HOST, MONGO_PORT)
#     coll = client[DB_NAME][COLL_NAME]
#     # 建唯一索引，保证 url 不会重复
#     coll.create_index([('url', ASCENDING)], unique=True)
#     return coll
#
# # ---------- 2. 判断是否需要请求 ----------
# def should_request(url, coll):
#     """返回 True 表示数据库里不存在，需要请求"""
#     return coll.count_documents({'url': url}, limit=1) == 0
#
# # ---------- 3. 爬取并落库 ----------
# def crawl_and_save(url, coll):
#     try:
#         resp = requests.get(url, timeout=10)
#         resp.raise_for_status()
#     except requests.RequestException as e:
#         print(f'[ERROR] 请求失败 {url} -> {e}')
#         return
#
#     doc = {
#         'url': url,
#         'status_code': resp.status_code,
#         'text': resp.text,
#         'headers': dict(resp.headers)
#     }
#     try:
#         coll.insert_one(doc)
#         print(f'[INFO] 已保存 {url}')
#     except errors.DuplicateKeyError:
#         # 并发场景下唯一索引会抛重复键，这里捕获即可
#         print(f'[INFO] 重复写入，已跳过 {url}')
# import random
# # ---------- 4. 主流程 ----------
# def main():
#     coll = get_collection()
#
#     # 可换成你自己的 URL 生成逻辑
#     urls = [f'https://httpbin.org/get?age={random.randint(1, 100)}' for i in range(100)]
#
#     for url in urls:
#         if should_request(url, coll):
#             crawl_and_save(url, coll)
#         else:
#             print(f'[SKIP] 已存在 ')
#
# if __name__ == '__main__':
#     main()

import asyncio
import aiohttp
import random
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import ASCENDING

# Connection settings for the local MongoDB instance used as the URL cache.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
# Database and collection share the same name in this setup.
DB_NAME   = 'testUrl'
COLL_NAME = 'testUrl'

# ---------- 1. Async MongoDB connection ----------
async def get_collection():
    """Open a Motor client and return the working collection.

    Ensures a unique index on 'url' so the same URL can never be stored
    twice, even when concurrent tasks race to insert it.
    """
    db = AsyncIOMotorClient(MONGO_HOST, MONGO_PORT)[DB_NAME]
    collection = db[COLL_NAME]
    await collection.create_index([('url', ASCENDING)], unique=True)
    return collection

# ---------- 2. Decide whether a request is needed ----------
async def should_request(url, coll):
    """Return True when *url* is absent from *coll* (i.e. it still needs fetching)."""
    # limit=1 lets MongoDB stop counting at the first match.
    hits = await coll.count_documents({'url': url}, limit=1)
    return hits == 0

# ---------- 3. Async fetch and persist ----------
async def crawl_and_save(session, url, coll):
    """Fetch *url* with *session* and store the response document in *coll*.

    Request failures (connection errors, timeouts, non-2xx statuses) are
    logged and swallowed so one bad URL does not abort the whole crawl.
    Duplicate inserts are detected via the unique index on 'url' and skipped.
    """
    from pymongo.errors import DuplicateKeyError  # pymongo already a file dependency

    try:
        async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as resp:
            resp.raise_for_status()
            # Capture everything we need while the response is still open;
            # the original read resp.status after the context manager exited.
            status = resp.status
            text = await resp.text()
            headers = dict(resp.headers)
    except (aiohttp.ClientError, asyncio.TimeoutError) as e:
        # Narrowed from a bare `except Exception` so programming errors surface.
        print(f'[ERROR] 请求失败 {url} -> {e}')
        return

    doc = {'url': url, 'status_code': status, 'text': text, 'headers': headers}
    try:
        await coll.insert_one(doc)
        print(f'[INFO] 已保存 {url}')
    except DuplicateKeyError:
        # Unique-index collision: a concurrent task already stored this url.
        # Typed exception replaces the original fragile string matching on str(e).
        print(f'[INFO] 重复写入，已跳过 {url}')
    except Exception as e:
        print(f'[ERROR] 写入失败 {url} -> {e}')

# ---------- 4. Main coroutine ----------
async def main():
    """Generate candidate URLs, skip those already stored, and crawl the rest."""
    coll = await get_collection()

    # Replace with your own URL-generation logic.  100 draws from
    # randint(1, 100) are guaranteed to collide, so dedupe order-preservingly
    # instead of checking/crawling the same URL several times per run.
    candidates = [f'https://httpbin.org/get?age={random.randint(1, 100)}'
                  for _ in range(100)]
    urls = list(dict.fromkeys(candidates))

    # Cap in-flight requests so we do not hammer httpbin.
    semaphore = asyncio.Semaphore(10)

    async with aiohttp.ClientSession() as session:
        tasks = []
        for url in urls:
            if await should_request(url, coll):
                tasks.append(asyncio.create_task(
                    sem_crawl(semaphore, session, url, coll)
                ))
            else:
                print(f'[SKIP] 已存在 {url}')

        await asyncio.gather(*tasks)

async def sem_crawl(sem, session, url, coll):
    """Acquire *sem*, then delegate to crawl_and_save — bounds crawl concurrency."""
    async with sem:
        return await crawl_and_save(session, url, coll)

# ---------- 5. Entry point ----------
if __name__ == '__main__':
    # asyncio.run creates an event loop, runs main() to completion, then closes the loop.
    asyncio.run(main())