# -*- coding: utf-8 -*-
from copy import deepcopy
from urllib import parse

import scrapy

from .base_spider import BaseSpider
from ..items import ListPageItem
from dp_crawler.utils.mysql_connect_util.list_page_util import ListPageMysqlConnectUtil as MysqlConnection


class DPListPageSpider(BaseSpider):
    """
    Spider that crawls product list pages from dianping.com.

    Seed (region, url) pairs are loaded from MySQL; for each seed URL the
    spider stores the entry page itself as a ``ListPageItem`` and then
    follows the "best rank" / "best classification" navigation links,
    emitting one ``ListPageItem`` per link.
    """
    # Delay between consecutive downloads, in seconds.
    download_delay = 0.2
    # Whether requests should be routed through a proxy.
    need_proxy = True
    # Spider name (used as `scrapy crawl list_page`).
    name = 'list_page'
    # Restrict crawling to these domains.
    allowed_domains = ['www.dianping.com', 's3plus.meituan.net']

    def start_requests(self):
        """
        Spider entry point.

        Loads the pending (region, url) rows from MySQL and yields one
        ``scrapy.Request`` per row.
        """
        # Fetch the URLs pending crawl from MySQL.
        with MysqlConnection() as (connection, cursor, select_sql, _):
            cursor.execute(select_sql)
            result_list = cursor.fetchall()
        # Schedule one request per region seed URL.
        for region, url in result_list:
            item = {"region": region}
            _dict = deepcopy(item)
            _dict.update({"detail_url": url, "label": "评论", "detail_describe": "最佳排行"})
            # callback: response handler for this request
            # meta: extra data, retrieved via response.meta in the callback
            # dont_filter=True: bypass the duplicate-request filter so the
            # seed URL is always fetched (it does NOT disable redirects,
            # contrary to an earlier comment here)
            yield scrapy.Request(url, callback=self.parse, meta={"item": item, "_dict": _dict}, dont_filter=True)

    def parse(self, response):
        """
        Handle a seed list page.

        Yields a ``ListPageItem`` for the seed URL itself, then one item
        per link found in the "best rank" and "best classification"
        navigation blocks.
        """
        # Retrieve the per-request metadata describing the seed URL.
        _dict = response.meta.pop("_dict", dict())
        # Store the entry URL itself as a list-page item.
        list_page_item = ListPageItem()
        list_page_item["detail_url"] = _dict["detail_url"]
        list_page_item["detail_describe"] = _dict["detail_describe"]
        list_page_item["label"] = _dict["label"]
        list_page_item["region"] = _dict["region"]
        yield list_page_item

        # Base metadata shared by every link item on this page.
        item = response.meta.pop("item", dict())
        item["url"] = response.url
        # Narrow to the rank-navigation container.  Use a new name instead
        # of rebinding `response`, which would shadow the real response.
        nav = response.xpath("//div[@class='main_w']/div[contains(@class, 'shopRankNav')]")
        # Second-to-last <p>: the "best rank" (最佳排行) block.
        best_rank_selector = nav.xpath("./p[last()-1]")
        yield from self.handler(best_rank_selector, item)
        # Last <p>: the "best classification" (最佳分类) block.
        best_classification_selector = nav.xpath("./p[last()]")
        yield from self.handler(best_classification_selector, item)

    @staticmethod
    def handler(response, item):
        """
        Yield one ``ListPageItem`` per ``<a>`` link under *response*.

        :param response: Selector positioned on a navigation ``<p>`` node.
        :param item: dict with at least ``region`` and ``url`` keys;
            ``detail_describe`` is written back onto it as a side effect.
        """
        # Section label text, e.g. "最佳排行:" — strip the colon.
        detail_describe = response.xpath('./span/text()').extract_first()
        # extract_first() returns None when the <span> is absent; guard so
        # .replace() cannot raise AttributeError.
        detail_describe = (detail_describe or "").replace(":", "")
        # Record the description on the shared item.  (BUGFIX: this line's
        # comment was missing its leading '#' and was a SyntaxError.)
        item["detail_describe"] = detail_describe

        # Emit one item per link in this navigation block.
        for link in response.xpath("./a"):
            # Deep-copy so each yielded item owns independent metadata.
            _dict = deepcopy(item)
            detail_url = link.xpath("@href").extract_first()
            # Resolve a possibly-relative href against the page URL.
            detail_url = parse.urljoin(_dict.pop("url"), detail_url)
            # Link text becomes the item's label.
            label = link.xpath("text()").extract_first()

            list_page_item = ListPageItem()
            list_page_item["detail_url"] = detail_url
            list_page_item["detail_describe"] = detail_describe
            list_page_item["label"] = label
            list_page_item["region"] = item["region"]
            yield list_page_item
