# -*- coding: utf-8 -*-
import scrapy
import re
import  math
from ..items import MeituanchengduItem
import requests
class SpidercdSpider(scrapy.Spider):
    """Crawl Meituan Chengdu food listings: district -> street -> shop.

    Flow:
      * ``parse``  -- scrape the index page's inlined JSON blob for the
        per-district "subAreas" (street) URLs and schedule one request
        per street listing page.
      * ``parse2`` -- scrape a street listing page for shop ids/names,
        emit one item per shop (built synchronously by ``parse4``) and
        follow pagination; on a blocked/empty page, retry.
      * ``parse4`` -- fetch a shop detail page with plain ``requests``
        (Scrapy requests to www.meituan.com were being rejected) and
        return the populated ``MeituanchengduItem``.

    NOTE(review): all extraction is regex-over-raw-HTML of a specific
    script tag (``script:nth-child(12)``) -- brittle against any site
    markup change; verify selectors still match before relying on this.
    """

    name = 'spidercd'
    # allowed_domains = ['cd.meituan.com']
    # start_urls = ['http://cd.meituan.com/']

    def start_requests(self):
        """Entry point: request the Chengdu food index page."""
        start_pages = [
            "http://cd.meituan.com/meishi/",  # change this URL to crawl another city
        ]
        for page in start_pages:
            yield scrapy.Request(url=page, callback=self.parse)

    def parse(self, response):
        """Parse the index page into {district: {street: url}} and schedule
        one ``parse2`` request per street listing page."""
        data = response.css("body > script:nth-child(12)::text").extract()
        # Pull the "subAreas" fragment out of the inlined script tag.
        # Raw strings throughout: the original non-raw patterns ('\[' etc.)
        # emit invalid-escape-sequence warnings on modern Python.
        head_pattern = re.compile(r'"subAreas":(.*)"dinnerCountsAttr"')
        found = head_pattern.findall(str(data))
        # Re-attach the prefix the capture group dropped, then split into
        # one JSON fragment per district.
        district_blobs = ('"subAreas":' + found[0]).split("]},")
        sub_pattern = re.compile(r'"subAreas":\[(.*})')
        area_fragments = [sub_pattern.findall(blob)[0] for blob in district_blobs]
        # District display names, in the same order as the fragments.
        districts = response.css("#app > section > div > div.left > div.filter > div > div > ul > li > span > b::text").extract()
        # {district: {street_name: street_url, ...}, ...}
        district_map = {}
        url_pattern = re.compile(r'(http://.*)"')   # hoisted: loop-invariant
        name_pattern = re.compile(r'"name":"(.*)","')
        for district, fragment in zip(districts, area_fragments):
            street_urls = {}
            # str.split always returns >= 1 element, so the original
            # `len(...) != 0` else-branch was dead code and is removed.
            for chunk in fragment.split("},{"):
                urls = url_pattern.findall(chunk)
                names = name_pattern.findall(chunk)
                street_urls[names[0]] = urls[0]
            # Drop the aggregate "全部" ("All") entry; pop() tolerates its
            # absence, where the original `del` raised KeyError.
            street_urls.pop('全部', None)
            district_map[district] = street_urls
        for district, streets in district_map.items():
            for street, street_url in streets.items():
                yield scrapy.Request(url=street_url, callback=self.parse2,
                                     meta={'district': district, 'road': street, 'url': street_url})

    def parse2(self, response):
        """Parse a street listing page: emit one item per shop, then follow
        the next pagination page, or re-request the page on failure."""
        data = response.css('body > script:nth-child(12)::text').extract()
        poi_pattern = re.compile(r'("poiId":.*)\]\}\]\}\,')
        res = poi_pattern.findall(str(data))
        rooturl = response.meta['url']
        if not res:
            # The Scrapy download came back without the embedded shop JSON
            # (likely blocked) -- retry once with a plain requests GET.
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
            }
            data = requests.get(response.url, headers=headers).text
            res = poi_pattern.findall(str(data))

        road = response.meta['road']
        district = response.meta['district']

        if res:
            id_pattern = re.compile(r'"poiId":(.*),"frontImg"')      # hoisted out of the loop
            title_pattern = re.compile(r'"title":"(.*?)","avgScore')
            # As in parse(): split() never returns an empty list, so the
            # original single-shop else-branch could never run -- removed.
            for shop_chunk in res[0].split(']},'):
                shop_id = id_pattern.findall(shop_chunk)[0]
                shop_url = 'http://www.meituan.com/meishi/' + shop_id
                shop_name = title_pattern.findall(shop_chunk)[0]
                # parse4 fetches the detail page synchronously and returns
                # the finished item, which we yield to the pipeline.
                yield self.parse4(url=shop_url,
                                  meta={'district': district, 'road': road, 'dianming': shop_name})

        # Pagination: current page number and total shop count live in the
        # same inlined script blob.
        page_info = re.compile(r'("pn":.*,"poiInfos")').findall(str(data))
        if page_info:
            current_page = int(re.compile(r'"pn":(.*),"poiLists"').findall(page_info[0])[0])
            total_shops = float(re.compile(r'"totalCounts":(.*),"poiInfos').findall(page_info[0])[0])
            # Listings are currently paginated 32 shops per page.
            total_pages = math.ceil(total_shops / 32)
            if current_page < total_pages:
                next_page = current_page + 1
                next_url = response.meta['url'] + 'pn' + str(next_page) + '/'
                current_url = response.meta['url'] + 'pn' + str(current_page) + '/'
                # Send the current page as Referer so the next-page request
                # looks like in-site navigation.
                headers = {'Referer': current_url}
                yield scrapy.Request(url=next_url, callback=self.parse2, headers=headers,
                                     meta={'district': district, 'road': road, 'url': rooturl})
            print(page_info, '----', road, district)
        else:
            # No shop data and no pagination info: the page failed twice;
            # re-request it from the root URL (dont_filter bypasses dedup).
            print('重新请求', rooturl)
            yield scrapy.Request(url=rooturl, callback=self.parse2,
                                 meta={'district': district, 'road': road, 'url': rooturl},
                                 dont_filter=True)

    def parse4(self, url, meta):
        """Fetch a shop detail page and build the item.

        Not a Scrapy callback: called directly from ``parse2`` with a
        blocking ``requests.get`` (this stalls Scrapy's event loop --
        kept because direct Scrapy requests to www.meituan.com were
        rejected; TODO: move to a proper Request with cookies/headers).

        :param url:  shop detail URL (http://www.meituan.com/meishi/<id>)
        :param meta: dict with 'district', 'road', 'dianming' keys
        :return: a populated ``MeituanchengduItem``
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
        }
        page = requests.get(url, headers=headers)
        recommended = re.compile(r'"recommended":(.*),"crumbNav"').findall(page.text)
        # Recommended-dish names; empty when the blob is missing (blocked page).
        dishes = []
        if recommended:
            name_pattern = re.compile(r'"name":"(.*)","price"')
            for chunk in recommended[0].split("},{"):
                names = name_pattern.findall(chunk)
                if names:
                    dishes.append(names[0])
        print(dishes, url)
        item = MeituanchengduItem()
        item['tuijiancai'] = dishes            # recommended dishes
        item['diqu'] = meta['district']        # district
        item['jiedao'] = meta['road']          # street
        item['dianming'] = meta['dianming']    # shop name
        return item