import threading
import time
import requests
from concurrent.futures import ThreadPoolExecutor

# Endpoint serving the Xinfadi wholesale-market price listings.
url = 'http://www.xinfadi.com.cn/getPriceData.html'

# Browser-like User-Agent so the request is not rejected as an obvious bot.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0"
}

lock = threading.Lock()  # serializes file writes across worker threads


def get_data(url, limit, page):
    """Fetch one page of price data and append each record to 新发地.txt.

    Each record is written as one comma-separated line:
    category1,category2,name,avgPrice,place,pubDate

    Parameters:
        url: endpoint to POST to.
        limit: page size to request (None lets the server use its default).
        page: 1-based page number (None for the first request).
    """
    data = {
        'limit': limit,
        'current': page,
        'pubDateStartTime': None,
        'pubDateEndTime': None,
        'prodPcatid': None,
        'prodCatid': None,
        'prodName': None
    }

    try:
        # timeout keeps a hung connection from blocking a worker thread forever
        res = requests.post(url=url, data=data, headers=header, timeout=10)
        payload = res.json()  # parse the body once instead of twice
        print(payload.get('current'))
        # guard against a missing or null "list" key (would be a TypeError)
        items = payload.get("list") or []
        with lock:  # only one thread may append to the shared file at a time
            # open the file once per page, not once per record
            with open("新发地.txt", 'a', encoding='utf-8') as f:
                for it in items:
                    level1_classification = it.get('prodCat')   # first-level category
                    # second-level category; normalize falsy values ('' etc.) to None
                    secondary_classification = it.get("prodPcat") or None
                    name = it.get("prodName")                   # product name
                    average_price = it.get('avgPrice')          # average price
                    origin = it.get('place')                    # place of origin
                    release_date = it.get('pubDate')            # publication date
                    f.write(
                        f"{level1_classification},{secondary_classification},{name},{average_price},{origin},{release_date}\n")
    except requests.exceptions.JSONDecodeError as e:
        print(f"解析 JSON 出错: {e}")
    except Exception as e:
        print(f"发生其他错误: {e}")


if __name__ == '__main__':
    # Fan out 30 page downloads across a small pool of worker threads.
    with ThreadPoolExecutor(5) as pool:
        for page_no in range(1, 31):
            # Page 1 relies on the server's defaults; every later page asks
            # explicitly for 20 rows.
            size, current = (None, None) if page_no == 1 else (20, page_no)
            time.sleep(1)  # throttle submissions so the server isn't hammered
            pool.submit(get_data, url, size, current)
    print("全部下载完毕！！！")
