"""
aiohttp实现并发爬虫

核心知识点：
    1.asyncio爬虫，去重、入库
"""

import re
import asyncio

import aiohttp
import aiomysql
from pyquery import PyQuery

# Flag checked by consumer(): set to True to stop the crawl loop.
stopping = False
# Seed URL the crawl starts from.
start_url = "http://www.jobbole.com/"
# Queue of URLs waiting to be processed (shared; safe because everything
# runs on a single event loop).
waitting_urls = []
# URLs already fetched, used for de-duplication.
seen_urls = set()

# Cap the number of concurrent HTTP requests at 3.
sem = asyncio.Semaphore(3)


async def fetch(url, session):
    """Download *url* and return its HTML text, or None on failure.

    Concurrency is bounded by the module-level semaphore, and each
    slot pauses one second before requesting to throttle the crawl.
    """

    async with sem:
        # Throttle: roughly one request per second per semaphore slot.
        await asyncio.sleep(1)

        try:
            async with session.get(url) as resp:
                print("url status: {}".format(resp.status))
                if resp.status in (200, 201):
                    return await resp.text()
        except Exception as err:
            # Best-effort crawl: report the error and return None.
            print(err)


def extract_urls(html):
    """Collect absolute links from *html*.

    Every http(s) link that is not already in ``seen_urls`` is returned
    and also queued on the module-level ``waitting_urls`` list.

    Fix: a link that appears several times on the same page was
    previously appended once per occurrence (only the global
    ``seen_urls`` was checked); a per-call set now de-duplicates
    within the page.
    """

    urls = []
    found = set()  # links already collected from this page
    pg = PyQuery(html)

    for link in pg.items("a"):
        url = link.attr("href")
        if (url and url.startswith("http")
                and url not in seen_urls and url not in found):
            found.add(url)
            urls.append(url)
            waitting_urls.append(url)

    return urls


async def init_urls(url, session):
    """Fetch *url*, mark it as seen and queue the links found on it."""

    page = await fetch(url, session)
    seen_urls.add(url)
    extract_urls(page)


async def article_handler(url, session, pool):
    """Fetch an article page, queue its outgoing links, store its title.

    The page title is inserted into ``t_article`` through the shared
    aiomysql connection pool.

    Fix: the title comes from an untrusted web page, so it must never be
    interpolated into the SQL string (SQL injection). Use a
    parameterized query instead.
    """
    html = await fetch(url, session)
    seen_urls.add(url)
    extract_urls(html)

    pq = PyQuery(html)
    title = pq("title").text()
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            insert_sql = "insert into t_article(title) values(%s)"
            print(insert_sql)
            # aiomysql escapes the bound parameter server-side.
            await cur.execute(insert_sql, (title,))


async def consumer(pool):
    """Drain ``waitting_urls`` and dispatch article pages for handling.

    Runs until the module-level ``stopping`` flag becomes True.

    Fixes: the original ``asyncio.sleep(60)`` was never awaited, so it
    did nothing except emit a "coroutine was never awaited" warning —
    removed (rate limiting already happens inside fetch()). The regex is
    now a raw string so ``\\d`` is not an invalid escape sequence.
    """
    async with aiohttp.ClientSession() as session:
        while not stopping:
            if not waitting_urls:
                # Queue is empty: back off briefly instead of busy-waiting.
                await asyncio.sleep(0.5)
                continue

            url = waitting_urls.pop()

            # Article detail pages look like http://xxx.jobbole.com/12345/
            if re.match(r"http://.*?jobbole.com/\d+/", url):
                if url not in seen_urls:
                    asyncio.ensure_future(article_handler(url, session, pool))


async def spider(loop):
    """Seed the crawl from ``start_url`` and kick off the consumer task."""

    # Block until the MySQL connection pool is ready.
    pool = await aiomysql.create_pool(host="127.0.0.1", port=3306,
                                      user="root", password="root",
                                      db="db_spider", loop=loop,
                                      charset="utf8", autocommit=True)

    # Crawl the seed page once to populate the waiting queue.
    async with aiohttp.ClientSession() as session:
        seed_html = await fetch(start_url, session)
        seen_urls.add(start_url)
        extract_urls(seed_html)

    asyncio.ensure_future(consumer(pool))


def use_spider():
    """Entry point: schedule the spider coroutine and run the loop forever."""
    loop = asyncio.get_event_loop()
    asyncio.ensure_future(spider(loop))
    # run_forever() never returns; consumer() keeps the loop busy.
    loop.run_forever()


# Start the crawler when executed as a script.
if __name__ == '__main__':
    use_spider()
