import concurrent
import functools
import threading
import time
from concurrent import futures
from multiprocessing import Pool

import pandas as pd
import requests



# Decorator: print the wrapped function's wall-clock execution time.
def gettime(func):
    """Decorator that logs how long *func* takes to run.

    Prints a banner, the function name, and the elapsed seconds, then
    returns whatever *func* returned.
    """
    @functools.wraps(func)  # keep func's __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        print('='*50)
        print(func.__name__, 'start...')
        starttime = time.time()
        # Forward **kwargs too (the original dropped them) and keep the
        # return value (the original discarded it).
        result = func(*args, **kwargs)
        endtime = time.time()
        spendtime = endtime - starttime
        print(func.__name__, 'End...')
        print('Spend', spendtime, 's totally')
        print('='*50)
        return result
    return wrapper

# Load test URLs from the CSV file on disk.
def get_urls_from_file(n):
    """Return the first *n* values of the 'url' column of TestUrls.csv."""
    frame = pd.read_csv('TestUrls.csv')
    return frame['url'].head(n).tolist()

# Fetch one URL, retrying on 5xx server errors.
def getdata(url, retries=3):
    """Download *url* and return the response body as text.

    Retries up to *retries* times on a 5xx status code. Returns ``None``
    when the connection fails outright (the original crashed with
    ``AttributeError`` on ``None.text`` in that case).
    """
    headers_value={'User-Agent':"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"}
    try:
        html=requests.get(url,headers=headers_value)
        print('访问网址：',url,'：访问代码：',html.status_code)
    except requests.exceptions.ConnectionError as e:
        print('下载错误：',e)
        return None
    if (500<=html.status_code<600) and retries:
        print('服务器错误，正在重试。。。')
        #time.sleep(1)
        # BUG FIX: the original recursed with the global ``urls`` list
        # instead of ``url``, and discarded the retry's result.
        return getdata(url, retries-1)
    return html.text

# Serial baseline: download every URL one after another.
@gettime
def Mynormal():
    """Fetch each entry of the global ``urls`` list sequentially."""
    for target in urls:
        getdata(target)
# Process pool: fan the downloads out over worker processes.
@gettime  # added for consistency — every other strategy in this benchmark is timed
def MyprocessPool(num=10):
    """Fetch all global ``urls`` using a pool of *num* worker processes."""
    pool = Pool(num)
    try:
        pool.map(getdata, urls)  # result list was never used
    finally:
        # Ensure workers are reaped even if map() raises.
        pool.close()
        pool.join()
# Multithreading: worker threads drain the shared URL list.
@gettime
def Mymultithread(max_thread=10):
    """Spawn up to *max_thread* threads that pop and fetch from ``urls``.

    NOTE: destructively consumes the global ``urls`` list via ``pop()``.
    """
    def urls_process():
        # list.pop() is atomic under the GIL, so threads can share ``urls``.
        while True:
            try:
                url = urls.pop()
            except IndexError:
                break  # list drained — this worker is done
            getdata(url, retries=3)

    threads = []
    # BUG FIX: the original passed ``target=urls_process()`` — it CALLED the
    # worker in the main thread (serial download), then started a single
    # no-op thread. Pass the callable itself instead.
    while len(threads) < max_thread and len(urls):
        thread = threading.Thread(target=urls_process)
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()

# Thread pool via concurrent.futures.
@gettime
def Myfuture(num_of_max_works=10):
    """Fetch all global ``urls`` with a ThreadPoolExecutor."""
    with futures.ThreadPoolExecutor(max_workers=num_of_max_works) as executor:
        executor.map(getdata, urls)


# Entry point: run each download strategy and compare the printed timings.
if __name__ == '__main__':
    urls=get_urls_from_file(10)# the more URLs, the bigger the parallel advantage
    Mynormal()
    MyprocessPool(10)# process pool (NOTE: not timed — it lacks @gettime)
    Myfuture(10)# thread pool
    Mymultithread(10)# multithreaded — must run last: it consumes ``urls`` via pop()