import requests
import json
from bs4 import BeautifulSoup
from lxml import etree
import pandas as pd
import time


def get_delicious_url_list(delicious_url, pages=2):
	"""Build the list of food listing-page URLs.

	Args:
		delicious_url: URL template containing a ``{}`` placeholder for
			the 1-based page number (e.g. ``.../pn{}/``).
		pages: number of pages to generate. Defaults to 2, matching the
			original hard-coded ``range(1, 3)``.

	Returns:
		list[str]: the formatted URLs for pages 1..pages.
	"""
	return [delicious_url.format(page) for page in range(1, pages + 1)]


def save_data(ls, path='美团.xlsx'):
	"""Persist the scraped shop records to an Excel workbook.

	Args:
		ls: list of row dicts (one per shop) to write as spreadsheet rows.
		path: output file path. Defaults to '美团.xlsx' (the original
			hard-coded destination), so existing callers are unaffected.
	"""
	pd.DataFrame(ls).to_excel(path, index=False)


class MartianSpider:
	"""Scraper for xy.meituan.com.

	Crawls the home page to locate the food section, walks its listing
	pages, and collects each shop's name, average score, address,
	comment count and average price.
	"""

	def __init__(self):
		# Landing page; the food-section link is discovered from here.
		self.temp_url = 'https://xy.meituan.com'
		self.headers = {
			'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ''Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0 '}
		# NOTE(review): free proxies like this one go stale quickly -- verify
		# it is still alive before running.
		self.proxy = {
			'http': '222.89.32.186:9999'
		}

	def parse_temp_url(self):
		"""Fetch the home page and return the food listing-page URL template.

		Returns:
			str: the food-section URL with a ``pn{}/`` page-number
			placeholder appended, ready for ``str.format``.
		"""
		# timeout keeps a dead proxy/server from hanging the whole run
		response = requests.get(self.temp_url, headers=self.headers, proxies=self.proxy, timeout=10)
		html = response.content.decode()
		doc = etree.HTML(html)
		# NOTE(review): this absolute XPath is tied to the current page
		# layout and will break silently if Meituan changes its markup.
		delicious_url = doc.xpath('//*[@id="react"]/div/div/div[1]/div[1]/div/div[2]/ul/li[1]/span/span/a/@href')[0] + 'pn{}/'
		return delicious_url

	@staticmethod
	def _extract_rows(html):
		"""Pull the embedded JSON page state out of a listing page.

		Args:
			html: decoded HTML of one food listing page.

		Returns:
			list[list]: one [title, avgScore, address, allCommentNum,
			avgPrice] row per shop.
		"""
		soup = BeautifulSoup(html, 'lxml')
		scripts = soup.find_all('script')
		# The 15th <script> tag carries the page state as a
		# ``window.<name> = {...};`` assignment: drop the 19-character
		# assignment prefix and the trailing semicolon, then parse.
		text = scripts[14].get_text().strip()[19:-1]
		result = json.loads(text)
		poi_infos = result['poiLists']['poiInfos']
		return [[i['title'], i['avgScore'], i['address'], i['allCommentNum'], i['avgPrice']]
				for i in poi_infos]

	def parse_delicious_url(self, url):
		"""Download one listing page and return its shop data rows.

		Args:
			url: fully-formatted listing-page URL.

		Returns:
			list[list]: raw rows as produced by ``_extract_rows``.
		"""
		response = requests.get(url, headers=self.headers, timeout=10)
		return self._extract_rows(response.content.decode())

	def run(self):
		"""Crawl every listing page and save the shops to an Excel file."""
		# 1. locate the food section and build the page-URL template
		delicious_url = self.parse_temp_url()
		time.sleep(2)  # be polite between requests
		# 2. scrape each listing page and label the raw rows
		columns = ['店铺名', '评分', '地址', '评论数', '平均价格']
		ls = []
		for url in get_delicious_url_list(delicious_url):
			for shop in self.parse_delicious_url(url):
				ls.append(dict(zip(columns, shop)))
			time.sleep(1)
		save_data(ls)


if __name__ == '__main__':
	# Entry point: build the spider and launch the full crawl.
	spider = MartianSpider()
	spider.run()