#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time      :2021/3/25 14:05
# @Author    :cjw
import requests
import csv
import time
from bs4 import BeautifulSoup


# Install dependency: pip install bs4

def get_price():
	"""Scrape page 1 of the Beijing Xinfadi market price list and save it to ./菜价.csv.

	Fetches http://www.xinfadi.com.cn/marketanalysis/0/list/1.shtml, parses the
	``hq_table`` HTML table with BeautifulSoup, and writes one CSV row per data
	row (name, low price, avg price, high price, spec, unit, date).

	:raises requests.HTTPError: if the server responds with an error status.
	"""
	url = 'http://www.xinfadi.com.cn/marketanalysis/0/list/1.shtml'
	headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
	}
	resp = requests.get(url, headers=headers)
	try:
		# Fail fast instead of silently parsing an error page.
		resp.raise_for_status()
		# Hand the page source to BeautifulSoup for parsing.
		bs_page = BeautifulSoup(resp.text, 'html.parser')
		# find(tag, attrs=...) returns the first match; 'class' is a Python
		# keyword, hence attrs= instead of class_=.
		table = bs_page.find('table', attrs={'class': 'hq_table'})
		trs = table.find_all('tr')[1:]  # all data rows, header excluded
	finally:
		resp.close()  # always release the connection

	# newline='' is required by the csv module: without it Windows gets an
	# extra blank line after every record. 'with' guarantees the file is
	# closed even if a row fails to parse.
	with open('./菜价.csv', 'w', encoding='utf-8', newline='') as f:
		csv_write = csv.writer(f)
		for tr in trs:
			tds = tr.find_all('td')  # the cells of one data row
			# Columns: name, low, avg, high, spec, unit, date.
			# .text yields the text content inside each tag.
			csv_write.writerow([td.text for td in tds[:7]])


def get_prices(price_type=0, page_nums=1):
	"""Scrape multiple pages of Beijing Xinfadi market prices into ./菜价价格行情.csv.

	:param price_type: category filter — 0: all, 1: vegetables, 2: fruit,
		3: meat/poultry/eggs, 4: aquatic products, 5: grain and oil
	:param page_nums: number of pages to scrape (pages 1..page_nums)
	:return: None
	:raises requests.HTTPError: if a page responds with an error status.
	"""
	headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
	}
	# newline='' avoids blank lines between rows on Windows; 'with' closes
	# the file even if a request or parse fails mid-run.
	with open('./菜价价格行情.csv', 'w', encoding='utf-8', newline='') as f:
		csv_write = csv.writer(f)

		for num in range(1, int(page_nums) + 1):
			url = f'http://www.xinfadi.com.cn/marketanalysis/{price_type}/list/{num}.shtml'
			resp = requests.get(url, headers=headers)
			try:
				resp.raise_for_status()  # surface HTTP errors immediately
				# Use the stdlib parser, consistent with get_price() above
				# (no extra lxml dependency needed).
				bs_page = BeautifulSoup(resp.text, 'html.parser')
				table = bs_page.find('table', attrs={'class': 'hq_table'})
			finally:
				resp.close()  # always release the connection

			# Layout changes or an empty page would leave table as None;
			# skip instead of crashing with AttributeError.
			if table is None:
				print(num, 'hq_table not found, page skipped')
				continue

			# All data rows (header row excluded); the last <td> of each row
			# is dropped, matching the original column selection.
			page_data = [
				[td.text for td in tr.find_all('td')[:-1]]
				for tr in table.find_all('tr')[1:]
			]
			csv_write.writerows(page_data)
			print(num, len(page_data), page_data)
			time.sleep(0.5)  # be polite: throttle between page requests


if __name__ == '__main__':
	# Single-page scrape of all categories (writes ./菜价.csv):
	# get_price()
	# Scrape 10 pages of aquatic products (price_type=4) into ./菜价价格行情.csv.
	get_prices(4, 10)
