# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from lxml import etree
import requests
import re


class MySpider(CrawlSpider):
    """Crawl dianping.com shop listings and decode the SVG-font obfuscation.

    dianping hides digits/characters behind CSS classes whose background
    offsets point into downloaded SVG "font sheets". At class-definition
    time this spider fetches the obfuscation CSS, builds a map of
    selector -> (glyph width, glyph height, svg url), and saves each SVG
    sheet locally. ``parse_item`` then replaces every obfuscated <span>
    in a shop page with the real character looked up from the sheets.

    NOTE(review): performing network I/O in the class body runs at import
    time and will crash module import when offline — consider moving this
    setup into ``start_requests``. Kept here to preserve original behavior.
    """
    name = 'dzdp_font'

    start_urls = ['http://www.dianping.com/guangzhou/ch25/g136']

    # --- one-time setup: fetch and persist the obfuscation CSS ---
    url = 'http://s3plus.meituan.net/v1/mss_0a06a471f9514fc79c981b5466f56b91/svgtextcss/9dcace94749f744d93b0b5cabd6ed104.css'
    _css_bytes = requests.get(url).content
    with open('dianping.css', 'wb') as f:
        f.write(_css_bytes)
    # Decode once, explicitly as UTF-8, instead of re-reading the file
    # with the platform-default encoding.
    css_str = _css_bytes.decode('utf-8')

    # CSS selectors of the obfuscated glyph classes, e.g. 'span[class^="xxx"]'.
    css_ls = re.findall(r'\w+\[class.*?\]', css_str, re.S)

    # Map: selector -> (glyph width px, glyph height px, svg sheet url).
    tag_font = {}
    # zip pairs each selector with its (width, height, url) rule; it also
    # tolerates a length mismatch instead of raising IndexError.
    for _sel, _dims in zip(
            css_ls,
            re.findall(r'{width:\s?(\d+)px;height:\s?(\d+)px.*?url\((.*?)\).*?}',
                       css_str, re.S)):
        tag_font[_sel] = _dims
        # Download each SVG font sheet and save it under its own basename.
        with open(''.join(re.findall(r'/(\w+\.svg)', _dims[2])), 'wb') as f:
            f.write(requests.get(url='http:' + _dims[2]).content)

    rules = (
        # Follow links into shop detail pages.
        Rule(LinkExtractor(allow=r'/shop/(\d+)', restrict_xpaths='//div[@id="shop-all-list"]'), callback='parse_item'),
    )

    def parse_item(self, response):
        """Parse one shop detail page and yield a dict of shop fields.

        Example entry: http://www.dianping.com/shop/102038273

        Every obfuscated span in the raw HTML is replaced by the real
        character read from the matching local SVG sheet before the page
        is re-parsed for extraction.
        """
        content = response.body.decode()
        # selector -> (glyph width, glyph height, svg sheet url)
        for sel, (width, height, svg_url) in self.tag_font.items():
            for node in response.css(sel):
                # Class attribute of this obfuscated span.
                class_name = node.xpath('./@class').extract_first('')
                # Its background offsets in the CSS, e.g.
                # '.xxx{background:-42.0px -7.0px;}'. Escape the class
                # name — it is interpolated into a regex pattern.
                offsets = re.findall(
                    re.escape(class_name) + r'{background:-(.*?)px\s-(.*?)px;}',
                    self.css_str)
                if not offsets:
                    # No offset rule for this class — leave the span as-is
                    # rather than crashing on offsets[0].
                    continue

                # Offsets are float strings ('42.0'); truncate to ints.
                location_x = int(float(offsets[0][0]))
                location_y = int(float(offsets[0][1]))

                # Integer division by the glyph size gives the glyph's
                # column (x) and row (y) inside the SVG sheet.
                x = location_x // int(width)
                y = location_y // int(height)

                # Local filename of the sheet, e.g. 'abc123.svg'.
                font_file = ''.join(re.findall(r'/(\w+\.svg)', svg_url))
                with open(font_file, 'r', encoding='utf-8') as f:
                    font_rd = f.read()

                # One string of characters per <text> row in the sheet.
                rows = re.findall(r'<text.*?>(.*?)</', font_rd)
                # node.get() is the span's full markup — swap it for the
                # single decoded character.
                content = content.replace(node.get(), rows[y][x])

        # Re-parse the de-obfuscated HTML for field extraction.
        selector = etree.HTML(content)
        item = {}
        # lxml element truthiness is deprecated and reflects child count;
        # test against None explicitly.
        if selector is not None:
            # Shop name
            item['shop_name'] = ''.join(selector.xpath('//h1/text()'))
            # Review count
            item['reviewCount'] = ''.join(selector.xpath('string(//span[@id="reviewCount"])'))
            # Average price
            item['avgPriceTitle'] = ''.join(selector.xpath('string(//span[@id="avgPriceTitle"])'))
            # Rating
            item['comment_score'] = ''.join(selector.xpath('string(//span[@id="comment_score"])'))
            # Street address
            item['info-name'] = ''.join(selector.xpath('string(//div[@itemprop="street-address"])'))
            # Phone number
            item['info-tel'] = ''.join(selector.xpath('string(//p[@class="expand-info tel"])'))
            yield item
