import requests
from bs4 import BeautifulSoup
import re
import time
import csv


# Scrape daily produce prices from the Beijing Xinfadi wholesale market
# (xinfadi.com.cn) and append them to a CSV file.
#
# Flow:
#   1. Fetch the index page and locate the <ul id="ul"> category list.
#   2. Extract every id="..." attribute inside it (the first match is the
#      <ul>'s own id="ul", so iteration skips index 0).
#   3. POST each category id to getCat.html and write the returned price
#      records to the CSV.

INDEX_URL = "http://www.xinfadi.com.cn/index.html"
CATEGORY_URL = "http://www.xinfadi.com.cn/getCat.html"
REQUEST_TIMEOUT = 10  # seconds — avoid hanging forever on a dead server

session = requests.Session()  # reuse one TCP connection for all requests

response = session.get(INDEX_URL, timeout=REQUEST_TIMEOUT)
response.raise_for_status()  # fail loudly instead of parsing an error page
soup = BeautifulSoup(response.text, 'html.parser')
category_list = soup.find("ul", id="ul")
# All id="..." attributes within the <ul>; ids[0] is the <ul>'s own id,
# the rest are the per-category ids the getCat.html endpoint expects.
ids = re.findall(r'id="(.*?)"', str(category_list), re.S)

# utf-8-sig writes a BOM so Excel opens the Chinese text correctly;
# mode 'a' appends to any previous run's output.
with open('北京新发地.csv', 'a', newline='', encoding='utf-8-sig') as csv_file:
    writer = csv.writer(csv_file)
    for cat_id in ids[1:]:  # skip ids[0] (the <ul> element's own id)
        res = session.post(
            CATEGORY_URL,
            data={'prodCatid': cat_id},
            timeout=REQUEST_TIMEOUT,
        )
        res.raise_for_status()
        records = res.json()['list']
        if not records:
            # Defensive: an empty category would crash on records[0] below.
            continue
        writer.writerow(["=" * 100])
        writer.writerow([f"发布日期：{records[0]['pubDate']},商品类型：{records[0]['prodCat']}"])
        # 'record' instead of the original 'list', which shadowed the builtin.
        for record in records:
            print(f"正在爬取{record['prodCat']}里面的{record['prodName']}...")
            writer.writerow([f"商品名：{record['prodName']},规格：{record['specInfo']},最低价：{record['lowPrice']}, 最高价：{record['highPrice']},平均价：{record['avgPrice']}, 产地：{record['place']}"])
            time.sleep(0.1)  # polite throttle so we don't hammer the server
