# -*- coding: utf-8 -*-
import regex
import scrapy
from scrapy import Selector

from .base_spider import BaseSpider
from ..items import ShopInfoItem
from ..utils.config_util import (
    header_html,
    header_css_svg
)
from dp_crawler.utils.mysql_connect_util.detail_shop_info_util import \
    DetailShopInfoMysqlConnectUtil as MysqlConnection


class DetailPageShopInfoSpider(BaseSpider):
    """
    Crawl shop information from dianping.com detail pages.

    Workflow:
      1. ``start_requests`` pulls pending (shop_id, region) rows from MySQL.
      2. ``parse`` fetches the shop detail page and extracts the CSS URL that
         carries the font-obfuscation data.
      3. ``parse_css`` resolves the obfuscated glyph mappings (CSS -> SVG).
      4. ``search_shop_info`` decodes each field via XPath and yields the item.
    """
    download_delay = 0.2
    need_proxy = True

    name = 'detail_page_shop_info'
    allowed_domains = ['www.dianping.com', 's3plus.meituan.net']

    def __init__(self, *args, **kwargs):
        # Accept and forward spider arguments (``scrapy crawl ... -a key=val``,
        # ``from_crawler``); the previous zero-argument signature broke that
        # Scrapy convention.
        super().__init__(*args, **kwargs)
        self.base_url = "http://www.dianping.com/shop/{}"

        # "别名" (alias) may be rendered with NBSP (\xa0) between characters.
        self.pattern_another_name_key = regex.compile(r"别(\xa0)*?名")
        # "营业时间" = business hours.
        self.pattern_business_hours_key = regex.compile(r"营业时间")

        # Item fields that must always be present (filled with "" if missing).
        self.update_keys = ["star_level", "comment_number", "detailed_address",
                            "contact_way", "another_name", "business_hours"]

    def start_requests(self):
        """
        Spider entry point: yield one detail-page request per pending shop.
        """
        # Fetch all pending (shop_id, region) rows in a single query, then
        # release the connection before any network requests start.
        with MysqlConnection() as (connection, cursor, select_sql, _):
            cursor.execute(select_sql)
            result_list = cursor.fetchall()

        for shop_id, region in result_list:
            # One request per shop; the half-filled item travels in meta.
            item = ShopInfoItem()
            item["shop_id"] = shop_id
            item["region"] = region
            yield scrapy.Request(self.base_url.format(shop_id), callback=self.parse, meta={"item": item},
                                 headers=header_html, dont_filter=True)

    def parse(self, response):
        """
        Detail-page callback: locate the obfuscation CSS and request it.
        """
        # ``response.text`` decodes with the encoding Scrapy detected; the
        # former ``str(response.body, encoding="utf-8")`` raised
        # UnicodeDecodeError on non-UTF-8 pages.
        css_url = self.gain_url(self.pattern_css_url, response.text)
        yield scrapy.Request(css_url, callback=self.parse_css,
                             meta={"item": response.meta["item"], "response": response},
                             headers=header_css_svg, dont_filter=True)

    def parse_css(self, response):
        """
        Parse the obfuscation CSS and decode the shop fields.
        """
        # Decode with the detected encoding (see ``parse``).
        css_html = response.text
        # SVG links: each SVG holds the positions of the obfuscated glyphs.
        svg_info_dict = self.search_svg_info(css_html)
        # Pixel offset (px) for every obfuscated glyph.
        css_info_dict = self.handler_css(css_html)

        # Resolve the plaintext of every obfuscated glyph from its SVG payload.
        for (key, svg_html) in self.query_svg_html(svg_info_dict):
            svg_list = self.enter_handler_svg(svg_html)
            # NOTE(review): the same key is used at both nesting levels —
            # presumably ``search_svg_info`` returns {key: {key: ...}}; confirm
            # against the base-class implementation.
            svg_info_dict[key][key] = svg_list

        item = self.search_shop_info(response.meta.pop("response"), response.meta.pop("item"),
                                     css_info_dict, svg_info_dict)
        yield item

    def search_shop_info(self, response, item, css_info_dict, svg_info_dict):
        """
        Extract the shop fields from the detail page with XPath, decoding
        obfuscated text via ``search_info``.

        :param response: the original detail-page response (obfuscated HTML)
        :param item: partially-filled ShopInfoItem (shop_id / region)
        :param css_info_dict: glyph -> pixel-offset mapping from the CSS
        :param svg_info_dict: SVG payload info used to resolve glyphs
        :return: the completed item, including computed longitude/latitude
        """
        region = item.pop("region")
        # Wrap the raw response so XPath can be applied to it.
        response = Selector(response=response)
        # All required data lives under id='basic-info'; narrow the scope.
        response = response.xpath("//div[@id='basic-info']")

        # Star rating.
        star_level = response.xpath(".//div[@class='brief-info']/span/@title").extract_first()
        item["star_level"] = self.handler_text(star_level)

        # Review count (obfuscated, must be decoded).
        review_count = response.xpath(".//div[@class='brief-info']/span[@id='reviewCount']").extract_first()
        review_count = self.search_info(review_count, css_info_dict, svg_info_dict)
        item["comment_number"] = self.handler_text(review_count)

        # Shop address (obfuscated).
        address = response.xpath(".//div[contains(@class, 'address')]/span[@class='item']").extract_first()
        address = self.search_info(address, css_info_dict, svg_info_dict)
        item["detailed_address"] = self.handler_text(address)

        # Phone number(s); multiple numbers are joined with ';'.
        tel = response.xpath(".//p[contains(@class, 'tel')]").extract_first()
        tel = self.search_info(tel, css_info_dict, svg_info_dict)
        tel = self.handler_text(tel)
        tel = ';'.join(tel.split())
        if tel != "无添加":  # page's literal "not provided" marker
            item["contact_way"] = tel

        other_response = response.xpath("//div[contains(@class, 'J-other')]//p[contains(@class, 'info-indent')]")
        for other in other_response:
            text = other.extract()
            other = other.xpath(".//span[@class='item']").extract_first()
            # Alias ("别名"); setdefault keeps only the first match.
            if self.pattern_another_name_key.search(text):
                another_name = self.search_info(other, css_info_dict, svg_info_dict)
                item.setdefault('another_name', self.handler_text(another_name))
            # Business hours ("营业时间").
            elif self.pattern_business_hours_key.search(text):
                business_hours = self.search_info(other, css_info_dict, svg_info_dict)
                item.setdefault('business_hours', self.handler_text(business_hours))

        # Guarantee every expected key exists on the item.
        for key in self.update_keys:
            item.setdefault(key, "")

        # Geocode longitude/latitude from region + detailed address.
        item.update(self.calc_lng_lat(f"{region}{item['detailed_address']}"))
        return item
