import json
import re
from copy import deepcopy

import requests
import scrapy
from fontTools.ttLib import TTFont


# http://www.dianping.com/ajax/json/shopDynamic/allReview?shopId=k8mGJsuDkESayEQX&cityId=2&shopType=10
# http://www.dianping.com/ajax/json/shopDynamic/reviewAndStar?shopId=G6r5uOUUvlRsL7mR&cityId=2&mainCategoryId=110
# http://www.dianping.com/ajax/json/shopDynamic/basicHideInfo?shopId=G6r5uOUUvlRsL7mR
# promoInfo

class DpSpider(scrapy.Spider):
    """Crawl dianping.com: city list -> top categories -> sub-categories ->
    districts -> sub-districts -> shop listing pages.

    Each level passes its accumulated fields forward via ``meta["item"]``
    (deep-copied per request so sibling requests do not clobber each other).
    """
    name = 'dp'
    allowed_domains = ['dianping.com']
    start_urls = ['http://www.dianping.com']

    def start_requests(self):
        """Seed the crawl with the city-list page, attaching session cookies."""

        cookie_str = """
        showNav=#nav-tab|0|0; navCtgScroll=0; showNav=#nav-tab|0|0; navCtgScroll=0; _lxsdk_cuid=17512a97901c8-0c67e7ff7f6139-b7a1334-144000-17512a97901c8; _lxsdk=17512a97901c8-0c67e7ff7f6139-b7a1334-144000-17512a97901c8; _hc.v=0fc27f2d-b32f-7c05-4879-866a4a5f4ce2.1602335898; s_ViewType=10; m_flash2=1; baidusearch_ab=index%3AA%3A1; switchcityflashtoast=1; seouser_ab=index%3AA%3A1%7CshopList%3AA%3A1; ctu=6de1ca2c84b96b4604ad23d7acf55feb42b26337dce2406eb477827b54f88791; Hm_lvt_602b80cf8079ae6591966cc70a3940e7=1602335898,1602706830,1602707723; aburl=1; Hm_lvt_dbeeb675516927da776beeb1d9802bd4=1602725177; cityInfo=%7B%22cityId%22%3A16%2C%22cityName%22%3A%22%E6%AD%A6%E6%B1%89%22%2C%22provinceId%22%3A0%2C%22parentCityId%22%3A0%2C%22cityOrderId%22%3A0%2C%22isActiveCity%22%3Afalse%2C%22cityEnName%22%3A%22wuhan%22%2C%22cityPyName%22%3Anull%2C%22cityAreaCode%22%3Anull%2C%22cityAbbrCode%22%3Anull%2C%22isOverseasCity%22%3Afalse%2C%22isScenery%22%3Afalse%2C%22TuanGouFlag%22%3A0%2C%22cityLevel%22%3A0%2C%22appHotLevel%22%3A0%2C%22gLat%22%3A0%2C%22gLng%22%3A0%2C%22directURL%22%3Anull%2C%22standardEnName%22%3Anull%7D; default_ab=shopList%3AC%3A5; cityid=16; __utma=205923334.949073817.1602779706.1602779706.1602779706.1; __utmz=205923334.1602779706.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmc=205923334; looyu_id=29716535389103ce1e1f1354ce586ef44b_51868%3A1; pvhistory=6L+U5ZuePjo8L2RlYWwvMjA0OTQyNTI+OjwxNjAyNzg5MDMwMDM2XV9b; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; cy=16; cye=wuhan; Hm_lvt_4c4fc10949f0d691f3a2cc4ca5065397=1602803115; Hm_lpvt_4c4fc10949f0d691f3a2cc4ca5065397=1602803115; Hm_lpvt_dbeeb675516927da776beeb1d9802bd4=1602848775; lgtoken=0f5db95a6-254c-472c-a7c7-7d8aa24172cb; dper=b2ff030cecae0d003ee1a6cbd8965310f63bcc8867e5653406edb6c86fadeebb8eb46690bdd998a793f644fcad255228608bb4652d768c3bc98ebb69507a21d6dd5687c610b7af889cd0793157874c8af35969a7d4aaebd02b0332768533ccae; ll=7fd06e815b796be3df069dec7836c3df; ua=13125065916; 
dplet=966c62851b18f4c14570633e8dd9bb66; Hm_lpvt_602b80cf8079ae6591966cc70a3940e7=1603115079; _lxsdk_s=175411a3c3d-ca0-d18-4a%7C%7C101
        """
        # BUG FIX: the old one-liner split on "; " and then on every "=".
        # That leaked the literal's embedded newlines/indentation into cookie
        # names (e.g. "\ndplet") and would truncate any value containing "=".
        # Split on ";", strip each pair, and partition on the FIRST "=" only.
        cookies_dict = {}
        for pair in cookie_str.split(";"):
            pair = pair.strip()
            if not pair:
                continue
            key, _, value = pair.partition("=")
            cookies_dict[key] = value
        yield scrapy.Request(
            self.start_urls[0] + "/citylist",
            callback=self.parse,
            cookies=cookies_dict,
        )

    def parse(self, response):
        # Extract every city from the city-list page and request its homepage.
        cities = response.xpath("//div[@class='main-citylist']//li[@class='letter-item']")
        for city_cate in cities:
            item = {}
            city_name_list = city_cate.xpath(".//div[@class='findHeight']/a")
            for city in city_name_list:
                item["city"] = city.xpath("./text()").extract_first()
                # href looks like "...dianping.com/<city>" — keep the slug only.
                city_name = city.xpath("./@href").extract_first().split("com/")[1]
                url = 'http://www.dianping.com/{}'.format(city_name)
                item["city_url"] = url
                yield scrapy.Request(
                    url,
                    callback=self.parse_type,
                    # deepcopy so each request carries its own snapshot of item
                    # (avoids shallow-copy clobbering across loop iterations)
                    meta={"item": deepcopy(item)},
                    # BUG FIX: header name was misspelled "refer"; the real
                    # HTTP request header is "Referer".
                    headers={"Referer": self.start_urls[0]}
                )

    def parse_type(self, response):
        # Top-level business categories on a city homepage.
        item = response.meta["item"]
        # print(response.url)
        type_name_list = response.xpath("//li[@class='first-item']//div[@class='primary-container']")
        for type_name in type_name_list:
            # Top-level category name and landing URL.
            item["type_name"] = type_name.xpath(".//a/text()").extract_first()
            item["type_url"] = type_name.xpath(".//a/@href").extract_first()
            yield scrapy.Request(
                item["type_url"],
                callback=self.parse_category_location,
                meta={"item": deepcopy(item)},  # deepcopy: see parse()
                # BUG FIX: "refer" -> "Referer" (correct HTTP header name).
                headers={"Referer": item["city_url"]}
            )

    def verify(self, ts):
        # TODO: anti-bot verification stub — it only builds the report URL and
        # never requests it. Returning the URL so the value is at least usable
        # (the old code dropped it into an unused local).
        url = "https://report.meituan.com/?_lxskd_rnd={}".format(ts)
        return url

    def parse_category_location(self, response):
        # Second-level categories inside a top-level category page.
        item = response.meta["item"]
        # print("==========", response.url)
        if response.url.startswith("https://verify.meituan.com/v2"):
            # Redirected to the anti-bot verification page: it contains no
            # category markup, so there is nothing to parse here.
            # TODO: plug in the verification flow (see self.verify).
            return
        big_categories = response.xpath("//div[@class='cate-index']//li[@class='first-item']")
        for big in big_categories:
            item["big_category_name"] = big.xpath(".//div[@class='group']/div[@class='sec-title']/span/text()").extract_first()
            mid_category_list = big.xpath(".//div[@class='group']/div[@class='sec-items']")
            for category in mid_category_list:
                item["small_category_name"] = category.xpath("./a/text()").extract_first()
                item["small_category_url"] = category.xpath("./a/@href").extract_first()
                # print(item)

                yield scrapy.Request(
                    item["small_category_url"],
                    callback=self.parse_location_part,
                    meta={"item": deepcopy(item)},
                    # BUG FIX: "refer" -> "Referer" (correct HTTP header name).
                    headers={"Referer": item["small_category_url"]}
                )

    def parse_location_part(self, response):
        # City districts listed on a sub-category page.
        item = response.meta["item"]
        location_part_list = response.xpath("//div[@id='J_nt_items']/div[@id='region-nav']")
        for location_part in location_part_list:
            item["location_part_url"] = location_part.xpath("./a/@href").extract_first()
            item["location_part_name"] = location_part.xpath("./a/span/text()").extract_first()

            # print(item)
            yield scrapy.Request(
                item["location_part_url"],
                callback=self.parse_location_sub,
                meta={"item": deepcopy(item)},
            )

    def parse_location_sub(self, response):
        # Sub-districts within a district; a[not(@class)] skips the
        # highlighted/"all" entries.
        item = response.meta["item"]
        location_part_sub_list = response.xpath("//div[@id='J_nt_items']/div[@id='region-nav-sub']/a[not(@class)]")
        for location_part_sub in location_part_sub_list:
            item["location_part_sub_name"] = location_part_sub.xpath("./span/text()").extract_first()
            item["location_part_sub_url"] = location_part_sub.xpath("./@href").extract_first()
            # print(item)

            yield scrapy.Request(
                item["location_part_sub_url"],
                callback=self.parse_shop_list,
                meta={"item": deepcopy(item)},
            )

    def parse_shop_list(self, response):
        # Per-shop fields on a listing page.
        item = response.meta["item"]
        # print(response.url)
        shop_list = response.xpath("//div[@id='shop-all-list']//li")
        for shop in shop_list:
            item["shop_name"] = shop.xpath(".//div[@class='txt']/div[@class='tit']//h4/text()").extract_first()  # shop name
            item["shop_link"] = shop.xpath(".//div[@class='txt']/div[@class='tit']/a/@href").extract_first()  # shop detail URL
            item["shop_pic"] = shop.xpath(".//div[@class='pic']//a/@href").extract_first()  # shop picture link
            # BUG FIX: the XPath started with "//", which searches the WHOLE
            # document, so every shop got the first shop's address. ".//" keeps
            # the query relative to the current shop node.
            item["shop_address"] = shop.xpath(".//div[@class='operate J_operate Hide']/a/@data-address").extract_first()  # shop address

            shop_star_list = shop.xpath(".//div[contains(@class,'star_score')]")  # star rating
            if shop_star_list:
                # NOTE: if several nodes match, the last one wins (preserved
                # from the original logic; usually there is only one).
                for shop_star in shop_star_list:
                    item["shop_star"] = shop_star.xpath("./text()").extract_first()
            # item["shop_tag"] = shop.xpath(".//div[@class='tag-addr']")
            # Some categories have no recommendations.
            recommend_list = shop.xpath(".//div[@class='recommend']/a")  # recommendations
            if recommend_list:
                recommend_all = []
                for recommend in recommend_list:
                    recommend_name = recommend.xpath("./text()").extract_first()
                    recommend_url = recommend.xpath("./@href").extract_first()
                    recommend_all.append({recommend_name: recommend_url})
                item["recommend"] = recommend_all
            else:
                item["recommend"] = None
            # svr_info = shop.xpath(".//div[@class='svr-info']//a[not(@class)]/text()").extract()  # group-buy deals
            group_info = shop.xpath(".//div[@class='svr-info']//a[@class='more J_more']")  # group-buy deals
            if group_info:
                group = []
                for tuan in group_info:
                    # BUG FIX: extract_first() may return None when the sibling
                    # has no @title — guard before .strip() to avoid
                    # AttributeError.
                    group_quan_intro = (tuan.xpath("./following-sibling::a/@title").extract_first() or "").strip()
                    group_quan_url = tuan.xpath("./following-sibling::a/@href").extract_first()
                    print(group_quan_intro,  "=======",  group_quan_url)
                    group_quan_info = {group_quan_intro: group_quan_url}
                    group.append(group_quan_info)
                    print(group)
            #     item["group_quan"] = group
            #     # item["tuan_info"] = [i.strip() for i in group_info if i.isspace() != True]
            # else:
            #     item["group_quan"] = None
            print(item["shop_link"])

    #         yield scrapy.Request(
    #             item["shop_link"],
    #             callback=self.parse_shop_detail,
    #             meta={"item": deepcopy(item)},  # deepcopy: avoid shallow-copy clobbering across iterations
    #             headers={"Referer": response.url}
    #         )
    # # TODO per-capita price is font-obfuscated (recommendations include it)
    # def parse_shop_detail(self, response):
    #     item = response.meta["item"]
    #     # print(response.body.decode())
    #     shop_city_id = re.findall('cityId: "(.*?)",', response.body.decode())
    #     shop_type = re.findall('shopType:(.*?),', response.body.decode())
    #     shop_id = re.findall('shopId: "(.*?)",', response.body.decode())
    #     # print(shop_city_id, shop_id, shop_type)
    #
    #     if shop_id and shop_type and shop_city_id:
    #         shop_comment_url = "http://www.dianping.com/ajax/json/shopDynamic/allReview?shopId={}&cityId={}&shopType={}".format(
    #             shop_id[0], shop_city_id[0], shop_type[0])
    #         # TODO font-obfuscated: phone, taste, ambience, service, opening hours (found in coupons)
    #         item["telephone"] = response.xpath("//*[@id='basic-info']/p").extract_first()
    #         yield scrapy.Request(
    #             shop_comment_url,
    #             callback=self.parse_shop_comment,
    #             meta={"item": item}
    #         )
    #         # print(item)
    #
    # def parse_shop_comment(self, response):
    #     item = response.meta["item"]
    #     # print(item)
    #     resp = json.loads(response.body)
    #     # for comment in resp["reviewCountForMore"]:
    #         # print(comment)
    #     item["shop_comment_counts"] = resp["reviewCountForMore"]
    #     print(item)


