# -*- coding: utf-8 -*-
import scrapy, time, re


class MeituanSpider(scrapy.Spider):
    """Crawl Meituan coupon listings for Shijiazhuang.

    Listing pages are paginated; each list item links either to a food
    ("meishi") deal detail page or to a lifestyle deal page ("...html"),
    which are parsed by separate callbacks into flat dict items handed to
    ``Meituan3Pipeline``.
    """

    name = 'meituan3'
    allowed_domains = ['i.meituan.com', 'meishi.meituan.com', 'g.meituan.com', 'meituan.com']
    start_urls = ['http://i.meituan.com/s/shijiazhuang-%E4%BC%98%E6%83%A0%E5%8A%B5?p=1']

    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": {
            "User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
            'accept-language': 'zh-CN,zh;q=0.8',
            # Session cookie captured from a logged browser session; required
            # to get the city-scoped listing markup. NOTE(review): will expire.
            'Cookie': '__mta=42818970.1563929128321.1564024035169.1564024044777.30; JSESSIONID=1ihmgrm852wri1ld8u7ypguka4; IJSESSIONID=1ihmgrm852wri1ld8u7ypguka4; iuuid=1C3AF6B19385D79C4050A7A44887D28CBB38114CC4751DBFD9EBD07BE157B3BE; ci=76; cityname=%E7%9F%B3%E5%AE%B6%E5%BA%84; _lxsdk_cuid=16c2170acafc8-0a516c9e75316d-c343162-1fa400-16c2170acafc8; _lxsdk=1C3AF6B19385D79C4050A7A44887D28CBB38114CC4751DBFD9EBD07BE157B3BE; webp=1; __utmc=74597006; __utmz=74597006.1563929129.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); uuid=cd45fffe-64a8-4ad7-8eab-f428e565269b; _hc.v=8af2333e-563f-9fd7-700b-47124ca3e0b7.1563932608; idau=1; __utma=74597006.166325479.1563929129.1564017584.1564023855.11; latlng=38.047126,114.472865,1564023855708; i_extend=C225976438606910676283420383189785348993_e3935036082400702098_v1154189475667608253_a%e4%bc%98%e6%83%a0%e5%8a%b5_f192651668E040739386929371680098933883720487349427_e5994459062895443197_v1153902619378056808_a%e4%bc%98%e6%83%a0%e5%8a%b5H__a100005__b11; __utmb=74597006.4.10.1564023855; _lxsdk_s=16c271612a7-1cf-4d1-ec4%7C%7C10'

        },
        'ITEM_PIPELINES': {'tutorial.pipelines.Meituan3Pipeline': 400}
    }

    @staticmethod
    def _split_period(yxq):
        """Split a validity string "start至end[suffix]" into three parts.

        Returns the tuple ``(limited, period, theory)``; empty strings when
        *yxq* is None/empty or does not match the expected pattern (the
        original code crashed on those inputs).
        """
        if yxq:
            m = re.search(r'(.*)至(.*\d)(.*)', yxq)
            if m:
                start, end, suffix = m.groups()
                return xstr(start), xstr(end), xstr(suffix)
        return "", "", ""

    def parse(self, response):
        """Follow every deal link on a listing page, then the next page."""
        links = response.xpath(
            "//*[@class='list bd-deal-list']//a[@gaevent='imt/poi/list/list-item']//@href"
        ).getall()
        for link in links:
            if 'meishi' in link:
                # The tracking id lives on the anchor element itself.
                stid = response.xpath("//*[@href='%s']/@data-stid" % link).get()
                if stid is None:
                    continue  # markup changed or anchor vanished; skip safely
                url = 'http:' + link + '?stid=' + stid + '&cevent=imt%2Fpoi%2Flist%2Flist-item'
                yield scrapy.Request(url, callback=self.parse_fine_food, dont_filter=True)
            elif 'html' in link:
                # Rewrite ".../deal/<id>.html" to the desktop deal URL.
                deal_ids = re.findall(r'deal/(\d+)\.html', link)
                if not deal_ids:
                    continue  # no numeric deal id in the link; skip safely
                url = 'https://www.meituan.com/dz/deal/{}'.format(deal_ids[0])
                yield scrapy.Request(url, callback=self.parse_life)
        # Pagination: the second pager anchor is "next page" (protocol-relative).
        next_href = response.xpath('//*[@class="pager"]/a[2]/@href').get()
        if next_href is not None:
            yield scrapy.Request('http:' + next_href, callback=self.parse)

    def parse_fine_food(self, response):
        """Parse a food ("meishi") deal detail page into an item dict."""
        item = dict()
        item['name'] = xstr(response.xpath("//*[@class='title']/text()").get())
        item['path'] = xstr(response.request.url)
        item['picture'] = xstr("http:%s" % response.xpath("//*[@class='toutu-img']/@src").get())

        # Validity period ("有效期"), e.g. "2019.7.1至2019.12.31...".
        yxq = response.xpath("//*[text()='有效期']/following-sibling::ul/li/text()").get()
        item['limited'], item['period'], item['theory'] = self._split_period(yxq)

        # Usage time ("使用时间").
        item['vakid'] = xstr(response.xpath("//*[text()='使用时间']/following-sibling::ul/li/text()").get())
        # Applicable scope ("适用范围").
        item['scope'] = xstr(response.xpath("//*[text()='适用范围']/following-sibling::ul/li/text()").get())
        # Usage rules ("使用规则"): join the rule lines. The original passed
        # the whole list through xstr(), so ';' was inserted between every
        # *character* of the list's repr instead of between rules.
        explainls = response.xpath("//*[text()='使用规则']/following-sibling::ul/li/text()").getall()
        item['explainl'] = ";".join(xstr(e) for e in explainls)

        yield item

    def parse_life(self, response):
        """Parse a lifestyle deal detail page into an item dict."""
        item = dict()
        item['name'] = xstr(response.xpath("//*[@class='title one-line']/text()").get())
        item['path'] = xstr(response.request.url)
        item['picture'] = xstr(response.xpath("//*[@id='zoomView']/@src").get())

        yxq = response.xpath("//*[text()='有效期']/following-sibling::dd/p/text()").get()
        item['limited'], item['period'], item['theory'] = self._split_period(yxq)
        # These fields are not present on lifestyle deal pages.
        item['vakid'] = ""
        item['scope'] = ""
        item['explainl'] = ""
        yield item


def xstr(value):
    """Coerce *value* to ``str``, mapping ``None`` to the empty string."""
    return "" if value is None else str(value)





