import re
import os
import time
import threading
from multiprocessing import Pool, cpu_count

import requests
from bs4 import BeautifulSoup
# from pyquery import PyQuery as pq
import json
import random
import demjson

# import io
# import sys
# import urllib.request
# sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030') #改变标准输出的默认编码

# Pool of desktop browser User-Agent strings; one is picked at random per
# process start to vary the request fingerprint.
USER_AGENT_LIST = [
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
    "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
]

# Minimal headers carrying only the randomly chosen User-Agent.
head = {
    "User-Agent": random.sample(USER_AGENT_LIST, 1)[0],  # picked at random
}


# Headers for AJAX/JSON requests against the mobile site (m.dianping.com);
# used by get_dishes(). The Referer is overridden per request.
HEADERS = {
    "Accept": "application/json",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded",
    "Cookie": "cy=46; cye=huhehaote; _lxsdk_cuid=166bfab552fc8-0ac5772fb74f5e-b79183d-1fa400-166bfab5530c8; _lxsdk=166bfab552fc8-0ac5772fb74f5e-b79183d-1fa400-166bfab5530c8; _hc.v=fe8d2c07-7cb7-f3e0-f755-f8f21fd94c26.1540813969; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; cityid=47; msource=default; default_ab=shop%3AA%3A1; _lxsdk_s=166e9899e38-9a4-717-abd%7C%7C68",
    "Host": "m.dianping.com",
    "Origin": "http://m.dianping.com",
    "Referer": "http://m.dianping.com/shop/3267880",
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Mobile Safari/537.36",
    "X-Requested-With": "XMLHttpRequest",
}

# Headers for desktop shop-detail pages (www.dianping.com); used by get_shop().
SHOP_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Cookie": "_hc.v=78063ee0-9f15-9880-4b48-80d4319096f8.1541497621; _lxsdk_cuid=166e86b0bd9c8-0be77b686c10ce-333b5602-1fa400-166e86b0bd9c8; _lxsdk=166e86b0bd9c8-0be77b686c10ce-333b5602-1fa400-166e86b0bd9c8; cityid=47; default_ab=shop%3AA%3A1%7Cshopdish%3AA%3A1; m_flash2=1; pvhistory=6L+U5ZuePjo8L2Vycm9yL2Vycm9yX3BhZ2U+OjwxNTQxNTU5MTM2OTQwXV9b; cy=46; cye=huhehaote; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_s=166ece46b81-0fc-312-aae%7C%7C289",
    "Host": "www.dianping.com",
    "If-Modified-Since": "Wed, 07 Nov 2018 00:37:02 GMT",
    "If-None-Match": "b5f604fc5a2aa11f0ce2a2769949d40c",
    "Referer": "http://www.dianping.com/shoplist/shopRank/pcChannelRankingV2?rankId=d64d5919fc236699402122b5770b4a6f71862f838d1255ea693b953b1d49c7c0",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
}

DISH_WEB_HEADERS = {
	"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
	"Accept-Encoding": "gzip, deflate",
	"Accept-Language": "zh-CN,zh;q=0.9",
	"Cache-Control": "max-age=0",
	"Connection": "keep-alive",
	"Cookie": "_hc.v=78063ee0-9f15-9880-4b48-80d4319096f8.1541497621; _lxsdk_cuid=166e86b0bd9c8-0be77b686c10ce-333b5602-1fa400-166e86b0bd9c8; _lxsdk=166e86b0bd9c8-0be77b686c10ce-333b5602-1fa400-166e86b0bd9c8; cityid=47; default_ab=shop%3AA%3A1%7Cshopdish%3AA%3A1; m_flash2=1; pvhistory=6L+U5ZuePjo8L2Vycm9yL2Vycm9yX3BhZ2U+OjwxNTQxNTU5MTM2OTQwXV9b; cy=46; cye=huhehaote; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; _lxsdk_s=166ec4207b6-965-ece-acd%7C%7C48",
	"Host": "www.dianping.com",
	"Referer": "http://www.dianping.com/shop/111988859",
	"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
}

# Ranking-list token; appended to the shoprank AJAX URL in get_shops().
TOKEN = "d64d5919fc236699402122b5770b4a6f71862f838d1255ea693b953b1d49c7c0"

# Root output directory; make_dir() creates one folder per shop/dish under it.
DIR_PATH = r"D:\dazhong"


def get_shops():
    """Fetch the ranked shop list and crawl every shop on it.

    For each shop not yet crawled, make_dir() creates a folder (and
    chdirs into it), then shop_info.txt, the logo, the desktop shop
    page and the recommended dishes are saved there.

    Side effects: network requests, directory creation, cwd changes.
    Errors are printed, never raised.
    """
    url = "http://www.dianping.com/mylist/ajax/shoprank?rankId=" + TOKEN
    base_url = "http://www.dianping.com/shop/"
    m_base_url = "http://m.dianping.com/shop/"
    try:
        # Timeout added so a stalled connection cannot hang the crawl.
        html = requests.get(url, headers=head, timeout=30).text
        shops = json.loads(html)["shopBeans"]
    except Exception as e:
        print(e)
        return

    for data in shops:
        try:
            # make_dir() returns False when the folder already exists,
            # i.e. the shop was crawled before -- skip it (dedupe).
            if not make_dir(data['shopName']):
                continue
            _write_shop_info(data)
            save_shop_logo(data['defaultPic'])
            get_shop(base_url + str(data['shopId']))
            get_dishes(m_base_url + str(data['shopId']), data['shopName'])
        except Exception as e:
            # One broken shop must not abort the remaining ones
            # (previously the try wrapped the whole loop).
            print(e)


def _write_shop_info(data):
    """Write the shop's basic fields to shop_info.txt in the current dir.

    The dict-literal-like output format of the original hand-written
    serialization is preserved byte for byte.
    """
    keys = ['address', 'avgPrice', 'defaultPic', 'mainCategoryName',
            'mainRegionName', 'score1', 'score2', 'score3', 'shopId',
            'shopName', 'shopPower', 'phoneNo']
    with open("shop_info.txt", 'w', encoding='utf-8') as f:
        f.write('{\n')
        f.write(",\n".join("'%s':'%s'" % (k, data[k]) for k in keys))
        f.write("\n}")


def save_shop_logo(logo_src):
    """Download the shop logo and save it as logo.jpg in the current dir.

    logo_src -- absolute URL of the logo image.
    Errors are printed, never raised.
    """
    try:
        time.sleep(0.10)  # throttle between image downloads
        img = requests.get(logo_src, headers=head, timeout=10)
        img_name = "logo.jpg"
        # 'wb' instead of the original 'ab': a re-download must overwrite
        # the file -- appending bytes to an existing JPEG corrupts it.
        with open(img_name, 'wb') as f:
            f.write(img.content)
        print(img_name)
    except Exception as e:
        print(e)



def get_shop(url):
    """Fetch a shop's desktop page and dump its shop_config object.

    Scans the inline <script> tags for the one assigning shop_config,
    decodes the (non-strict JS) object literal with demjson, and writes
    it as valid JSON to shop_detil.txt in the current directory.
    Errors are printed, never raised.
    """
    time.sleep(3)  # throttle to avoid being blocked
    try:
        html = requests.get(url, headers=SHOP_HEADERS, timeout=100).text
        soup = BeautifulSoup(html, 'lxml')
        for script in soup.find_all('script'):
            text = script.get_text()
            if re.findall(r'shop_config', text):
                # Serialize the decoded object with json.dumps instead of
                # the original str() + re.sub("'", '"') hack, which
                # corrupted any value containing a quote character.
                config = demjson.decode(text.split('=')[1])
                # Keep the original (misspelled) filename -- other code
                # may read it.
                with open("shop_detil.txt", "w", encoding='utf-8') as f:
                    f.write(json.dumps(config, ensure_ascii=False))
                print("写入商家信息成功！")  # fixed duplicated word in message
    except Exception as e:
        print(e)


def get_dishes(shop_url, shop_name):
    """Fetch the mobile shop page and crawl each recommended dish.

    shop_url  -- m.dianping.com shop URL (also used as the Referer).
    shop_name -- folder name under which each dish is saved.
    Errors are printed, never raised.
    """
    time.sleep(3)  # throttle
    try:
        # Per-request header copy: the original mutated the module-level
        # HEADERS dict, leaking state between calls.
        headers = dict(HEADERS)
        headers['Referer'] = shop_url
        html = requests.get(shop_url, headers=headers, timeout=100).text
        soup = BeautifulSoup(html, 'lxml')
        pics = (soup.find('div', class_='J_recommend')
                    .find('div', class_='recommendDish')
                    .find('div', class_='dishPics')
                    .find_all('a', class_='dishItem'))
        # Dish links are mobile-scheme-relative; rewrite to the desktop host.
        dish_urls = [a.get('href').replace(r'//m.dianping.com', 'http://www.dianping.com')
                     for a in pics]
        for dish_url in dish_urls:
            get_dish(dish_url, shop_url, shop_name)
    except Exception as e:
        print(e)


def get_dish(url, shop_url, shop_name):
    """Fetch one dish page; save its photos and price/collect info.

    url       -- desktop dish-page URL.
    shop_url  -- shop URL; mobile host is rewritten and it becomes the Referer.
    shop_name -- parent folder; output goes into shop_name/dish_name.
    Errors are printed, never raised.
    """
    time.sleep(5)  # throttle
    shop_url = shop_url.replace(r'm.dianping.com', 'www.dianping.com')
    # Per-request header copy: the original mutated the module-level
    # DISH_WEB_HEADERS dict, leaking state between calls.
    headers = dict(DISH_WEB_HEADERS)
    headers['Referer'] = shop_url
    try:
        html = requests.get(url, headers=headers).text
        soup = BeautifulSoup(html, 'lxml')
        if not soup:
            print("soup is None")
            return

        # Dish name is the last breadcrumb entry.
        dish_name = (soup.find('div', class_='dish-crumb')
                         .find('ul').find_all('li')[-1]
                         .find('a').get_text().strip())

        # Existing folder means this dish was crawled before -- skip.
        if not make_dir(os.path.join(shop_name, dish_name)):
            return

        dish_items = soup.find('div', class_='picture-list').find('ul').find_all('li')
        for cnt, item in enumerate(dish_items):
            save_pic(item.find('img')['src'], cnt)

        dish_price_text = '0'
        dish_collect_text = '0'
        info_area = soup.find('div', class_='dish-info-area')
        if info_area:
            dish_price = info_area.find('div', class_='dish-price')
            if dish_price:
                # Strip the leading yen/yuan currency sign (U+00A5).
                dish_price_text = dish_price.find('span', class_='price').get_text().strip().replace('\xa5', '')
            dish_collect = info_area.find('div', class_='recommend-info')
            people = dish_collect.find('div', class_='recommend-num').find('span', class_='people-num')
            if people:
                dish_collect_text = people.get_text().strip()
            with open('dish_info.txt', 'w', encoding='utf-8') as f:
                f.write("{\n")
                f.write("'price':'" + dish_price_text + "',\n")
                f.write("'collect':'" + dish_collect_text + "'\n")
                f.write("}")
    except Exception as e:
        print(e)


def save_pic(pic_src, pic_cnt):
    """Download one dish photo and save it in the current directory.

    pic_src -- absolute image URL.
    pic_cnt -- zero-based index; file is named dish_{pic_cnt+1}.jpg.
    Errors are printed, never raised.
    """
    try:
        time.sleep(0.10)  # throttle between image downloads
        img = requests.get(pic_src, headers=head, timeout=10)
        img_name = "dish_{}.jpg".format(pic_cnt + 1)
        # 'wb' instead of the original 'ab': a re-download must overwrite
        # the file -- appending bytes to an existing JPEG corrupts it.
        with open(img_name, 'wb') as f:
            f.write(img.content)
        print(img_name)
    except Exception as e:
        print(e)


def make_dir(folder_name, base_dir=None):
    """Create base_dir/folder_name and chdir into it.

    folder_name -- folder to create (may contain subpath components).
    base_dir    -- root directory; defaults to DIR_PATH (backward-
                   compatible generalization of the old hard-coded root).

    Returns True when the folder was newly created (crawl it), False
    when it already existed (dedupe: already crawled, skip).

    NOTE: callers rely on the chdir side effect -- relative-path writes
    afterwards land inside the new folder.
    """
    if base_dir is None:
        base_dir = DIR_PATH
    path = os.path.join(base_dir, folder_name)
    try:
        # EAFP: atomic create-or-fail replaces the original
        # exists()/makedirs() pair, which raced between check and create.
        os.makedirs(path)
    except FileExistsError:
        print("Folder has existed!")
        return False
    print(path)
    os.chdir(path)
    return True


if __name__ == "__main__":
    # Crawl every shop on the ranking list.
    get_shops()
    # Debug entry points for single-item runs:
    # get_shop("http://www.dianping.com/shop/13694024")
    # get_dishes('http://m.dianping.com/shop/13694024', '红色巴士茶饮')
    # get_dish("http://www.dianping.com/shop/20979580/dish7408283", 'http://m.dianping.com/shop/20979580')




