import time
import requests
from concurrent.futures import ThreadPoolExecutor


# Single-URL template kept for reference; page number was substituted via %s.
# url = '''https://api.bilibili.com/x/web-interface/search/type?context=&page=%s&order=&keyword=mysql&duration=&tids_1=&tids_2=&from_source=webtop_search&from_spmid=333.788&__refresh__=true&_extra=&search_type=video&highlight=1&single_column=0'''
# WARNING(review): this Cookie embeds live session credentials (SESSDATA,
# bili_jct, DedeUserID). Committed tokens should be revoked and loaded from
# an environment variable or config file instead of the source.
headers = {'Cookie': "buvid3=A29547FF-753D-419F-9BBC-ABB604A9D3B718547infoc; CURRENT_FNVAL=80; _uuid=04CCD978-342E-8DF9-07F9-D1F161B4BB2E49934infoc; blackside_state=1; rpdid=|(J|)k)kuJu~0J'uY|m|~RRlJ; CURRENT_QUALITY=80; PVID=1; bsource=search_baidu; sid=a7ar6sjy; fingerprint=5414da9ba9a7d25512f4871c0b37bbb1; buvid_fp=A29547FF-753D-419F-9BBC-ABB604A9D3B718547infoc; buvid_fp_plain=A29547FF-753D-419F-9BBC-ABB604A9D3B718547infoc; SESSDATA=c1ccce71%2C1634481179%2C32c69%2A41; bili_jct=fde862e3c2b0e6d03fa1f296861e1be4; DedeUserID=1111027158; DedeUserID__ckMd5=d02cef32acc03459; LIVE_BUVID=AUTO3216197403600627"}

# urls = [url%i for i in range(100)]
# List of page URLs imported from a sibling module; presumably it was built
# with the commented-out template above — TODO confirm against asyncio_.py.
from asyncio_ import urls


def crawl(url, timeout=10):
    """Fetch *url* with the shared session headers and return the response body.

    Args:
        url: The page URL to request.
        timeout: Seconds to wait for the server (new, defaulted parameter).
            requests has NO default timeout, so the original call could hang
            a worker thread forever on a stalled connection.

    Returns:
        The response body as text.

    Raises:
        requests.exceptions.RequestException: on network failure or timeout.
    """
    resp = requests.get(url, headers=headers, timeout=timeout)
    return resp.text


def save(data, path='a.json'):
    """Append *data* to *path* (default: the original hard-coded 'a.json').

    Args:
        data: Text to append (one crawled response body).
        path: Output file; new, defaulted parameter so callers can redirect
            output without changing existing call sites.

    NOTE: append mode means repeated runs keep accumulating, and the file as
    a whole is concatenated response bodies, not one valid JSON document.
    """
    # Explicit UTF-8 — the platform-dependent default encoding could raise
    # UnicodeEncodeError on non-ASCII content (e.g. Chinese search results).
    with open(path, 'a', encoding='utf-8') as f:
        f.write(data)


if __name__ == '__main__':
    start = time.time()
    with ThreadPoolExecutor(100) as pool:
        # Executor.map submits all tasks eagerly but yields results lazily.
        # Materialize with list() so every crawl result is actually collected
        # and any worker exception is re-raised here instead of being
        # silently dropped with an unconsumed iterator.
        results = list(pool.map(crawl, urls))

        # Same for the write phase: the original discarded this iterator,
        # which swallowed any exception raised inside save().
        list(pool.map(save, results))

    print(time.time() - start)   # 1.223