# Example: concurrently fetch web page source with gevent.
# import requests
import gevent
from gevent import monkey
# patch_all() must run BEFORE urllib is imported so that urllib's sockets
# are replaced with gevent's cooperative versions; otherwise urlopen blocks
# the whole event loop and the downloads run serially.
monkey.patch_all()
import urllib.request


def download(url):
    """Fetch *url* and print how many bytes its body contains.

    Args:
        url: Absolute URL to download (e.g. "http://www.baidu.com").

    Returns:
        None — the result is reported via print only.

    Raises:
        urllib.error.URLError: if the host is unreachable or the
            request fails.
    """
    # Use the response as a context manager so the underlying socket is
    # closed deterministically — the previous version leaked it until GC.
    with urllib.request.urlopen(url) as response:
        content = response.read()
    print(f"下载了{url}的数据，长度：{len(content)}")


if __name__ == '__main__':
    urls = ["http://www.163.com", "http://www.baidu.com", "http://www.qq.com"]
    # Spawn one greenlet per URL; with monkey-patched sockets the three
    # downloads overlap instead of running one after another.
    greenlets = [gevent.spawn(download, url) for url in urls]
    # Block until every greenlet has finished (equivalent to calling
    # .join() on each one individually).
    gevent.joinall(greenlets)
