import multiprocessing
from multiprocessing import Pool
import time
import requests
from lxml import etree
# Multiprocessing crawler example: fetch each CSDN article page in a worker
# process and print its HTML <title>.

# Target article URLs.
# NOTE(review): the 'details/112587048' URL appears twice (3rd and 4th
# entries) — presumably unintentional; confirm before deduplicating.
urls = [
    'https://blog.csdn.net/qq_40924514/article/details/112705877',
    'https://blog.csdn.net/qq_40924514/article/details/112606141',
    'https://blog.csdn.net/qq_40924514/article/details/112587048',
    'https://blog.csdn.net/qq_40924514/article/details/112587048',
    'https://blog.csdn.net/qq_40924514/article/details/110232800',
    'https://blog.csdn.net/qq_40924514/article/details/109773234',
    'https://blog.csdn.net/qq_40924514/article/details/109694920',
    'https://blog.csdn.net/qq_40924514/article/details/109587689',
    'https://blog.csdn.net/qq_40924514/article/details/109496196',
    'https://blog.csdn.net/qq_40924514/article/details/109405649',
]

# Desktop-Chrome User-Agent so the server treats requests as a normal browser.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}

def get_title(url, cnt):
    """Fetch *url* and print its HTTP status plus the page's <title> text.

    Runs inside a worker process; output goes to stdout only.

    :param url: page URL to fetch
    :param cnt: 1-based sequence number of the URL, used only in the log line
    """
    # A timeout is essential: without one, requests.get can block forever on
    # an unresponsive server and permanently stall a pool worker.
    resp = requests.get(url, headers=headers, timeout=10)
    html = resp.content
    # lxml parses raw bytes and detects the encoding itself.
    title = etree.HTML(html).xpath('//title/text()')
    print('正在执行title,响应码{},第{}个title：{}'.format(resp.status_code, cnt, title))

def run():
    """Crawl every URL in ``urls`` concurrently with a 4-worker process pool."""
    print('当前cpu核数是{}核'.format(multiprocessing.cpu_count()))
    # The context manager guarantees the pool is cleaned up even if an
    # exception escapes this function.
    with Pool(4) as p:
        # Keep the AsyncResult handles: apply_async silently discards worker
        # exceptions unless .get() is called on each result.
        results = [
            p.apply_async(get_title, args=(url, i))
            for i, url in enumerate(urls, start=1)
        ]
        p.close()
        p.join()
        # Surface (but do not propagate) per-task failures, preserving the
        # script's best-effort behavior while no longer hiding errors.
        for r in results:
            try:
                r.get()
            except Exception as exc:
                print('task failed: {}'.format(exc))
if __name__ == '__main__':
    # perf_counter is monotonic and the documented choice for measuring
    # elapsed time; time.time() can jump if the system clock is adjusted.
    start = time.perf_counter()
    run()
    # perf_counter already returns a float, so no explicit cast is needed.
    print('总耗时：%.5f秒' % (time.perf_counter() - start))
