import asyncio
import time

# TODO: unfinished draft of an async entry point, e.g.:
# async def main_fun(): ...
async def fun():
    """Demo coroutine: log a start timestamp, sleep 3 s without blocking
    the event loop, then log an end timestamp."""
    start = time.time()
    print(f'hello start: {start}')
    await asyncio.sleep(3)
    end = time.time()
    print(f'------hello end : {end} ----')
async def utils_add():
    """Demo coroutine: sleep briefly, run the helper, then report a total.

    NOTE(review): ints are immutable, so ``utils_add_async`` cannot change
    this scope's accumulator and its work is discarded — the value printed
    and returned here is always 0. Confirm whether that is intentional.

    :return: the local accumulator (always 0 as written).
    """
    total = 0
    await asyncio.sleep(1)
    await utils_add_async(total)
    print(f"utils_add+{total}")
    return total


import requests
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Process
import time
import random
import os


def get(url=None, delay=3):
    """Simulate crawling *url*: wait *delay* seconds, then do some CPU work.

    :param url: address being "crawled"; defaults to the module-level
        ``url`` global so the existing bare ``pool.submit(get)`` call in
        ``__main__`` keeps working unchanged.
    :param delay: seconds of simulated network latency (default 3, the
        original hard-coded value).
    :return: sum of 0..9999 — a stand-in for real page content.
    """
    if url is None:
        # Backward compatibility: the __main__ loop submits get() with no
        # arguments and relies on the global ``url`` its for-loop rebinds.
        url = globals()["url"]
    print(f"{url}:开始了")
    time.sleep(delay)  # simulate network latency
    # Same total the original manual accumulation loop produced (49_995_000).
    sum_number = sum(range(10000))
    print(sum_number)
    return sum_number



def parse(obj):
    """Analyse the payload a crawl task produced; len() stands in for
    real parsing work.

    :param obj: a completed Future — ``obj.result()`` yields the value
        returned by the submitted task.
    :return: None (the analysis is printed).
    """
    time.sleep(3)
    payload = obj.result()
    size = len(payload)
    print(f'{os.getpid()} 分析结果:{size}')


if __name__ == '__main__':

    # Demo driver: fan three "crawl" jobs out to a pool of worker processes.
    url_list = [
        'http://www.taobao.com',
        'http://www.123.com',
        'http://www.JD.com',
    ]
    start_time = time.time()
    pool = ProcessPoolExecutor(4)  # at most 4 worker processes
    for url in url_list:
        # NOTE(review): get() is submitted with no argument; it reads the
        # global ``url`` this loop keeps rebinding, so which url each worker
        # actually sees depends on process start timing — verify intentional.
        obj = pool.submit(get)
        # obj.add_done_callback(parse)  # attach a callback:
        # the worker process still performs the crawl task; once its return
        # value is ready, the future hands the result to the callback given
        # to add_done_callback, which analyses it while the pool moves on
        # to the next task.
    pool.shutdown(wait=False)  # don't wait: the elapsed-time print below runs immediately

    print(f'主: {time.time() - start_time}')
async def utils_add_async(sum_number):
    """Add the integers 0..9999 onto *sum_number* and return the total.

    The original version discarded the total — ints are immutable, so the
    caller's argument is never mutated and nothing came back. Returning the
    total (previously None) is backward compatible and lets callers such as
    ``utils_add`` actually use the result.

    :param sum_number: starting value for the accumulation.
    :return: ``sum_number + sum(range(10000))`` (i.e. start + 49_995_000).
    """
    sum_number += sum(range(10000))
    print(f"utils_add+{sum_number}")
    return sum_number