# Web scraper (2021/9/8)
# Goals of this script:
# 1. First, scrape a single page.
# 2. Then use a thread pool to fetch multiple pages concurrently.
import requests,json
import csv
from concurrent.futures import ThreadPoolExecutor

# Output file for the scraped rows. newline="" is required by the csv
# module: without it, writerow emits an extra blank line between rows
# on Windows (csv performs its own \r\n translation).
f = open("data.csv", mode="w", encoding="utf-8", newline="")
# NOTE: name kept as-is (typo for "csvwriter") — referenced by the
# download function below, so renaming would break callers.
csvweiter = csv.writer(f)


def dwnload_onr_page(url, page):
    """Fetch one page of price data via POST and append its rows to data.csv.

    Args:
        url: endpoint that returns JSON with a top-level "list" of records.
        page: 1-based page number, sent as the "current" form field.
    """
    # Form fields expected by the endpoint; empty strings mean "no filter".
    payload = {
        'limit': 20,
        'current': page,
        'pubDateStartTime': '',
        'pubDateEndTime': '',
        'prodPcatid': '',
        'prodCatid': '',
    }
    # Response objects support the context-manager protocol; closing the
    # response releases the connection back to the pool (the original
    # never closed it).
    with requests.post(url, data=payload) as resp:
        # resp.json() parses the body directly — replaces json.loads(resp.text).
        text = resp.json()

    # NOTE(review): the original appended re['prodCat'] twice — likely a
    # copy-paste slip for another field; kept as-is to preserve the output
    # format. TODO: confirm the intended second column against the API.
    rows = [
        [item['id'], item['prodName'], item['prodCat'],
         item['prodCat'], item['avgPrice']]
        for item in text['list']
    ]
    # Batch the per-row writes into a single writerows call.
    csvweiter.writerows(rows)

    print(page, "页已完成")
if __name__ == '__main__':
    # A sequential loop over thousands of pages is far too slow; fan the
    # requests out over a thread pool (the work is I/O-bound, so threads
    # overlap the network waits).
    with ThreadPoolExecutor(50) as t:
        for i in range(1, 200):
            t.submit(dwnload_onr_page,
                     "http://www.xinfadi.com.cn/getPriceData.html", i)
    # The executor's context manager waits for every submitted task, so all
    # pages have been written by now. Close the output file to flush any
    # buffered rows to disk (the original never closed it).
    f.close()
    print("全部下载完毕！")