# -*- coding: utf-8 -*-
import json

import scrapy

from .base_spider import BaseSpider
from ..items import ShopInfoItem
from ..utils.config_util import header_html
from dp_crawler.utils.mysql_connect_util.list_shop_info_util import \
    ListShopInfoMysqlConnectUtil as MysqlConnection


class ListPageShopInfoSpider(BaseSpider):
    """Crawl shop information from Dianping list (ranking) pages.

    Only part of each shop's data is available on the list page; the rest
    lives on the shop's detail page.  List and detail pages are deliberately
    crawled by separate spiders: Dianping's anti-scraping measures are
    aggressive, and fetching both in a single crawl would increase the risk
    of the crawler being banned.
    """
    download_delay = 0.2
    need_proxy = True

    name = 'list_page_shop_info'
    allowed_domains = ['www.dianping.com', 's3plus.meituan.net']

    def __init__(self):
        super().__init__()
        # Ranking endpoint; "{}" is filled with the rank id parsed from the
        # URL stored in MySQL (see start_requests).
        self.base_url = "http://www.dianping.com/mylist/ajax/shoprank?rankId={}"
        self.headers = header_html

    def start_requests(self):
        """Read pending (url, category, region) rows from MySQL and yield one
        request per ranking page."""
        # Fetch the set of pages that still need crawling.
        with MysqlConnection() as (connection, cursor, select_sql, _):
            cursor.execute(select_sql)
            result_list = cursor.fetchall()

        for url, category, region in result_list:
            item = {"category": category, "region": region}
            # The rank id is the last "="-separated segment of the stored URL,
            # e.g. ".../shoprank?rankId=123" -> "123".
            task_id = url.split("=")[-1]
            yield scrapy.Request(
                self.base_url.format(task_id),
                callback=self.parse,
                headers=self.headers,
                meta={"item": item},
                dont_filter=True,  # the same rankId may legitimately be re-queued
            )

    def parse(self, response):
        """Delegate JSON parsing to :meth:`handler` and re-yield its items."""
        item = response.meta.pop("item")
        yield from self.handler(response, item)

    @staticmethod
    def handler(response, item):
        """Parse the ranking JSON payload and yield one ShopInfoItem per shop.

        :param response: scrapy response whose body is the ranking JSON.
        :param item: dict carrying the ``category`` and ``region`` of this page.
        """
        content = json.loads(response.text)
        # "shopBeans" is a list of shop dicts; fall back to an empty list.
        # (The original fell back to dict(), which iterates the same when
        # empty but is the wrong type for a sequence of shops.)
        shop_beans = content.get("shopBeans") or []
        for _rank, shop in enumerate(shop_beans, start=1):
            # Drop null-valued fields so the .get() defaults below apply
            # uniformly whether a key is absent or explicitly null.
            shop = {key: value for key, value in shop.items() if value is not None}
            shop_name = shop.get("shopName", "")
            branch_name = shop.get("branchName", "")
            if branch_name:
                # Branch shops are rendered as "Name(Branch)".
                shop_name = f"{shop_name}({branch_name})"

            shop_info_item = ShopInfoItem()
            shop_info_item["_rank"] = _rank
            shop_info_item["shop_name"] = shop_name
            shop_info_item["shop_id"] = shop.get("shopId", "")
            shop_info_item["shop_tags"] = shop.get("shopTags", "")
            shop_info_item["district"] = shop.get("mainRegionName", "")
            shop_info_item["taste"] = shop.get("refinedScore1", "")
            shop_info_item["environment"] = shop.get("refinedScore2", "")
            shop_info_item["serve"] = shop.get("refinedScore3", "")
            shop_info_item["average_price"] = str(shop.get("avgPrice", ""))
            shop_info_item["category"] = item["category"]
            shop_info_item["region"] = item["region"]
            yield shop_info_item
