'''
aiohttp example walkthrough
Implementation outline:
1. Crawl the list pages
   Phase one crawls all list pages asynchronously: the crawl coroutine for every
   list page is wrapped as a task, collected into a list, and awaited together.
2. Crawl the detail pages
   Phase two parses the list-page results from the previous step, combines all
   book ids into the set of detail-page crawl tasks, declares them as a list of
   tasks and crawls them asynchronously, storing the results in MongoDB
   asynchronously as well.
'''
import asyncio
import aiohttp
import logging
import json

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
# List-page API URL template; offset is filled in per page
INDEX_URL = 'https://spa5.scrape.center/api/book/?limit=18&offset={offset}'
# Detail-page API URL template; id is filled in per book
DETAIL_URL = 'https://spa5.scrape.center/api/book/{id}/'
# Number of items per list page
PAGE_SIZE = 18
# Total number of list pages to crawl
PAGE_NUMBER = 100
# Maximum number of concurrent requests
CONCURRENCY = 5
# Part 1
# Define a generic crawl helper
semaphore =asyncio.Semaphore(CONCURRENCY) # semaphore used to cap the number of concurrent requests
session = None  # shared aiohttp session; created inside main()
async def scrape_api(url):
    """Fetch *url* with the shared aiohttp session and return the decoded JSON.

    Concurrency is throttled by the module-level semaphore. On any
    aiohttp client error the exception is logged (with traceback) and
    None is returned, so callers must handle a missing payload.
    """
    async with semaphore:
        try:
            logging.info('scraping %s', url)
            async with session.get(url) as response:
                return await response.json()
        except aiohttp.ClientError:
            # Fixed typo in the log message: 'cccurred' -> 'occurred'.
            logging.error('error occurred while scraping %s', url, exc_info=True)
# 爬取列表页
async def scrape_index(page):
    """Crawl one list page (1-based *page*) and return its JSON payload."""
    offset = PAGE_SIZE * (page - 1)
    return await scrape_api(INDEX_URL.format(offset=offset))


# Part 2: crawl the detail pages and persist the data
from motor.motor_asyncio import AsyncIOMotorClient

MONGO_CONNECTION_STRING = 'mongodb://localhost:27017'
MONGO_DB_NAME = 'books'
MONGO_COLLECTION_NAME = 'books'

# Async MongoDB client and the target collection used by save_data().
client = AsyncIOMotorClient(MONGO_CONNECTION_STRING)
db = client[MONGO_DB_NAME]
collection = db[MONGO_COLLECTION_NAME]

async def save_data(data):
    """Upsert one book record into MongoDB, keyed by its 'id' field.

    Returns the motor UpdateResult, or None when *data* is falsy
    (e.g. a detail fetch that failed and yielded None).
    """
    if not data:
        # Nothing to persist; previously this still logged 'saving data None'.
        return None
    logging.info('saving data %s', data)
    return await collection.update_one(
        {'id': data.get('id')},
        {'$set': data},
        upsert=True,
    )

async def scrape_detail(id):
    """Crawl the detail page for book *id* and store the payload in MongoDB."""
    detail_url = DETAIL_URL.format(id=id)
    payload = await scrape_api(detail_url)
    await save_data(payload)


async def main():
    """Drive the two crawl phases.

    Phase 1 fetches all list pages concurrently; phase 2 extracts every
    book id from the results, then fetches and stores each detail page.
    """
    global session
    # async with guarantees the session is closed even if a task raises
    # (the original only closed it on the success path).
    async with aiohttp.ClientSession() as session:
        # Phase 1: crawl every list page concurrently.
        scrape_index_tasks = [asyncio.ensure_future(scrape_index(page))
                              for page in range(1, PAGE_NUMBER + 1)]
        results = await asyncio.gather(*scrape_index_tasks)
        # Phase 2: collect every book id from the list-page payloads.
        ids = []
        for index_data in results:
            if not index_data:
                continue
            # 'or []' guards against a payload missing the 'results' key.
            for item in index_data.get('results') or []:
                ids.append(item.get('id'))
        logging.info('results %s', json.dumps(results, ensure_ascii=False, indent=2))
        scrape_detail_tasks = [asyncio.ensure_future(scrape_detail(id)) for id in ids]
        # gather (unlike asyncio.wait) accepts an empty task list without
        # raising ValueError when no ids were collected.
        await asyncio.gather(*scrape_detail_tasks)

if __name__ == '__main__':
    # asyncio.run creates the event loop and drives main() to completion.
    asyncio.run(main())