# A simple web crawler implemented with tornado
# requires: beautifulsoup4

from urllib.parse import urljoin

from bs4 import BeautifulSoup
from tornado import gen, httpclient, ioloop, queues

base_url = "http://www.tornadoweb.org/en/stable/"
concurrency = 3  # number of concurrent worker coroutines


async def get_url_links(url):
    """Fetch *url* and return every <a href> target resolved against base_url.

    Only the HTTP fetch is asynchronous; the HTML parsing below is
    CPU-bound, so async/await gains nothing for that part.
    """
    response = await httpclient.AsyncHTTPClient().fetch(url)
    html = response.body.decode('utf8')

    # Name the parser explicitly: a bare BeautifulSoup(html) emits a
    # GuessedAtParserWarning and picks whichever parser is installed,
    # which can change parse results from machine to machine.
    soup = BeautifulSoup(html, 'html.parser')
    links = [urljoin(base_url, a.get('href')) for a in soup.find_all('a', href=True)]
    return links


async def main():
    """Crawl pages under base_url with `concurrency` cooperative workers."""
    seen_set = set()  # URLs already fetched (or scheduled), to avoid repeats

    # A tornado Queue rather than a plain list because it is awaitable:
    # a consumer awaiting get() on an empty queue yields control back to
    # the producer, and a producer awaiting put() on a full bounded queue
    # yields to the consumers. A list cannot suspend a coroutine.
    q = queues.Queue()

    async def fetch_url(current_url):
        # Producer side: fetch one page and enqueue its same-site links.
        if current_url in seen_set:
            return

        print("获取 {}".format(current_url))
        seen_set.add(current_url)

        next_urls = await get_url_links(current_url)

        for next_url in next_urls:
            # Only follow links that stay inside the site being crawled.
            if next_url.startswith(base_url):
                await q.put(next_url)

    async def worker():
        # Consumer side: pull URLs off the queue until a None sentinel.
        async for url in q:
            if url is None:
                return

            try:
                await fetch_url(url)
            except Exception as e:
                # BUG FIX: report the failure instead of printing a bare
                # "exception" and discarding the detail.
                print("exception: {}".format(e))
            finally:
                # Mark the item done even on failure so q.join() can finish.
                q.task_done()

    # Seed the queue with the start URL.
    await q.put(base_url)

    # Start the worker coroutines; they begin consuming immediately.
    workers = gen.multi([worker() for _ in range(concurrency)])

    # Block until every queued URL has been processed (task_done called).
    await q.join()

    # One None sentinel per worker tells each of them to exit.
    # BUG FIX: the original called q.put(None) without awaiting the
    # returned Future, silently discarding it.
    for _ in range(concurrency):
        await q.put(None)

    # BUG FIX: the original did `await worker()`, which spawned a fourth
    # worker that blocked forever on the now-empty queue, so the program
    # never terminated. Wait for the already-running workers instead.
    await workers


if __name__ == '__main__':
    # BUG FIX: use a distinct local name so the `ioloop` module imported
    # at the top of the file is not shadowed by the loop instance.
    io_loop = ioloop.IOLoop.current()
    io_loop.run_sync(main)