# import csv
# import requests
# from bs4 import BeautifulSoup
# with open(r'D:/text','a',encoding='utf-8') as f:
#      csvwriter = csv.writer(f)
#      csvwriter.writerow(['书名','评分','价格'])
# url = 'http://books.toscrape.com/index.html'
# res = requests.get(url)
# soup = BeautifulSoup(res.text,'html.parser')
# re = soup.find('ol',class_="row").find_all('li')
# for i in re:
#     book_name = i.find('h3').find('a')['title']
#     book_star = i.find('p',class_="star-rating There")
#     book_price = i.find('p',class_="price_color").text
#     with open(r'D:/text', 'a', encoding='utf-8') as f:
#         csvwriter = csv.writer(f)
#         csvwriter.writerow([book_name, book_star, book_price])
# print("爬虫完成")

# import requests
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_1', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['菜谱名称', '原材料', '链接'])
#
# url = 'https://www.xiachufang.com/explore/'
# headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"}
# res = requests.get(url,headers = headers)
# # print(res.text)
# soup = BeautifulSoup(res.text, 'html.parser')
# ru = soup.find('ul', class_="list").find_all('li')
# # print(ru)
# for i in ru:
#     cook_name = i.find('div', class_="info pure-u").find('p', class_="name").text
#     cook_star = i.find('div', class_="info pure-u").find('p', class_="ing ellipsis").text
#     cook_link = i.find('div', class_="info pure-u").find('p', class_="name").find('a')['href']
#     with open(r'D:/text_1', 'a', encoding='utf-8', newline='') as f:
#         csvwriter = csv.writer(f)
#         csvwriter.writerow([cook_name, cook_star, cook_link])
#
# print("爬虫完成")


# import requests
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_1', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['菜谱名称', '原材料', '链接'])
#
# url = 'https://www.xiachufang.com/recipe_list/?category=104&page=1'
# headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"}
# res = requests.get(url,headers = headers)
# soup = BeautifulSoup(res.text, 'html.parser')
# recipe_list = soup.find_all('div', class_='info pure-u')
# for recipe in recipe_list:
#     cook_name = recipe.find('p', class_='name').text.strip()
#     cook_star = recipe.find('p', class_='ing ellipsis').text.strip()
#     cook_link = recipe.find('p', class_='name').find('a')['href']
#     csvwriter.writerow([cook_name, cook_star, cook_link])
# print("爬虫完成")

# Final working version (attempt below uses the JSON API directly)
# import requests
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_xinfadi1.csv', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['品名', '最低价', '最高价','平均价格'])
#
# url = 'http://www.xinfadi.com.cn/index.html'
# headers = {"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"}
# res = requests.post(url,headers = headers)
# data = res.json()
# status_code = res.status_code
# print(f"网页响应码为：{status_code}")
# # print(res.text)
# ru = [
#     {"name": item["prodName"], "lowPrice": item["lowPrice"], "highPrice": item["highPrice"], "avgPrice": item["avgPrice"]}
#     for item in data["list"]
# ]
# with open(r'D:/text_xinfadi1.csv', 'a',encoding='utf-8', newline='') as csvfile:
#     for product in ru:
#         csvwriter.writerow(product)
# print("爬虫完成")



# import requests
# from bs4 import BeautifulSoup
#
# url = 'http://www.xinfadi.com.cn/index.html'
# headers = {"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"}
#
# response = requests.get(url, headers=headers)
# soup = BeautifulSoup(response.text, 'html.parser')
#
# table_body = soup.find('div', class_='tbl-body').find('table').find('tbody')
# rows = table_body.find_all('tr')
#
# selected_items = rows[:3]
#
# for item in selected_items:
#     cols = item.find_all('td')
#     if cols:
#         product_name = cols[2].text
#         low_price = cols[3].text
#         high_price = cols[5].text
#         avg_price = cols[4].text
#         print(f"品名：{product_name}，最低价：{low_price}，最高价：{high_price}，平均价：{avg_price}")
# print('爬虫完成')

# import requests
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['品名', '最低价', '最高价', '平均价格'])
#
# url = 'http://www.xinfadi.com.cn/index.html'
# headers = {"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"}
# data = {"prodCatid": 1187}
# res = requests.post(url, headers=headers, data=data)
# status_code = res.status_code
# print(f"网页响应码为：{status_code}")
# print(res.text)
# soup = BeautifulSoup(res.text, 'html.parser')
# ru = soup.find('tbody', id="ulTableBody").find_all('tr')
# print(ru)
# for i in ru:
#     cols = i.find_all('td')
#     if cols:
#         ve_name = cols[0].text
#         ve_min = cols[1].text
#         ve_max = cols[3].text
#         ve_ping = cols[2].text
#         with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#             csvwriter = csv.writer(f)
#             csvwriter.writerow([ve_name, ve_min, ve_max, ve_ping])
# print("爬虫完成")

# Fix attempt 1: switch HTML parser to lxml
# import requests
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['品名', '最低价', '最高价', '平均价格'])
#
# url = 'http://www.xinfadi.com.cn/index.html'
# headers = {
#     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0"}
# res = requests.post(url, headers=headers)
# status_code = res.status_code
# print(f"网页响应码为：{status_code}")
# # 尝试使用不同的解析器
# soup = BeautifulSoup(res.text, 'lxml')
# ru = soup.find('tbody', id="ulTableBody").find_all('tr')
# print(ru)
# items = ru[:3]
# for i in items:
#     cols = i.find_all('td')
#     if cols:
#         ve_name = cols[0].text
#         ve_min = cols[1].text
#         ve_max = cols[3].text
#         ve_ping = cols[2].text
#         with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#             csvwriter = csv.writer(f)
#             csvwriter.writerow([ve_name, ve_min, ve_max, ve_ping])
# print("爬虫完成")


# Fix attempt 2: render the page with Selenium before parsing
# from selenium import webdriver
# import csv
# from bs4 import BeautifulSoup
#
# with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#     csvwriter = csv.writer(f)
#     csvwriter.writerow(['品名', '最低价', '最高价', '平均价格'])
#
# # 设置浏览器驱动
# driver = webdriver.Chrome(r'C:\Users\89526\AppData\Local\Google\Chrome\Application')
# url = 'http://www.xinfadi.com.cn/index.html'
# driver.get(url)
# # 获取页面源代码
# page_source = driver.page_source
# soup = BeautifulSoup(page_source, 'lxml')
# ru = soup.find('tbody', id="ulTableBody").find_all('tr')
# print(ru)
# items = ru[:3]
# for i in items:
#     cols = i.find_all('td')
#     if cols:
#         ve_name = cols[0].text
#         ve_min = cols[1].text
#         ve_max = cols[3].text
#         ve_ping = cols[2].text
#         with open(r'D:/text_xinfadi.csv', 'a', encoding='utf-8', newline='') as f:
#             csvwriter = csv.writer(f)
#             csvwriter.writerow([ve_name, ve_min, ve_max, ve_ping])
# print("爬虫完成")
# driver.quit()


import requests
import csv
from bs4 import BeautifulSoup

# Scrape current produce prices from the Xinfadi wholesale market JSON API
# and append them to a CSV file: name, lowest, highest, and average price.
#
# NOTE: the data lives at getPriceData.html (a JSON endpoint), not index.html,
# and it answers plain GET requests — no POST body needed.
url = "http://www.xinfadi.com.cn/getPriceData.html"
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"}

# timeout keeps the script from hanging forever on a dead connection
res = requests.get(url, headers=headers, timeout=10)
print(f"网页响应码为：{res.status_code}")
# raise_for_status before .json(): a non-200 body would not be valid JSON anyway
res.raise_for_status()
data = res.json()

# Keep only the four fields we write out; data["list"] holds one dict per product.
ru = [
    {"name": item["prodName"], "lowPrice": item["lowPrice"], "highPrice": item["highPrice"], "avgPrice": item["avgPrice"]}
    for item in data["list"]
]

# Open the file once and build the writer once (the original re-created the
# writer on every iteration and opened the file twice).  Append mode: repeated
# runs accumulate rows; the header row marks the start of each run.
with open(r'D:/text_xinfadi1.csv', 'a', encoding='utf-8', newline='') as f:
    csvwriter = csv.writer(f)
    csvwriter.writerow(['品名', '最低价', '最高价', '平均价格'])
    for product in ru:
        csvwriter.writerow([product["name"], product["lowPrice"], product["highPrice"], product["avgPrice"]])

print("爬虫完成")