# -*- coding: utf-8 -*-
import scrapy
import re
import os
from dazhongdianping.items import DazhongdianpingItem
import deal_fonts

class DzdpSpider(scrapy.Spider):
    """Crawl dianping.com shop listings for Loudi city.

    The site obfuscates numeric fields (review counts, prices, scores)
    with a custom @font-face web font; this spider downloads the font,
    builds a glyph-to-digit mapping via ``deal_fonts``, and decodes the
    numbers while scraping every (category, sub-district) listing page.
    """
    name = 'dzdp'
    allowed_domains = ['www.dianping.com']
    start_urls = ['http://www.dianping.com/loudi/ch10']

    def parse(self, response):
        """Entry point: collect category ids/names plus the @font-face CSS
        link, then fan out one request per first-level district."""
        # URL of the CSS file that declares the obfuscated fonts.
        # NOTE(review): assumes the 7th <link> in <head> is the font CSS —
        # fragile positional selector, confirm it still holds.
        link = 'http:' + response.xpath('/html/head/link[7]/@href').get()

        # Map category id (URL path suffix) -> human-readable category name.
        class_dict = {}
        for anchor in response.xpath('//div[@id="classfy"]//a'):
            class_id = anchor.xpath('./@href').get().split('/')[-1]
            class_name = anchor.xpath('./@data-click-name').get().split('_')[-2]
            class_dict[class_id] = class_name

        # Listings are capped at 50 pages, so districts must be subdivided
        # to capture everything; descend into each first-level district page
        # and split further there.
        for region in response.xpath('//div[@id="region-nav"]//a'):
            url = region.xpath('./@href').get()
            yield scrapy.Request(url=url, callback=self.parse_region,
                                 meta={'info': (class_dict, link)})

    def parse_region(self, response):
        """For one district: make sure the font-decoding dicts exist locally,
        then request every (category, sub-district) listing URL."""
        class_dict, link = response.meta.get('info')

        # Resolve the fonts referenced by the CSS; download and convert any
        # font we have not cached under ./fonts_dicts yet.
        code_dict, shopNum = deal_fonts.getFontsInfo(link)
        cached_fonts = {file.split('.')[0] for file in os.listdir('./fonts_dicts')}
        for key, val in code_dict.items():
            if key not in cached_fonts:
                deal_fonts.downloadFonts(key, val)
                deal_fonts.dealFonts(key)

        # Only the digit font is decoded for now (no glyph disambiguation).
        # SECURITY: eval() on file content — acceptable only because these
        # files are generated by deal_fonts itself; prefer ast.literal_eval
        # if the source could ever be untrusted.
        with open(f'./fonts_dicts/{shopNum}.txt', encoding='utf-8') as f:
            num_font_dict = eval(f.read())

        # Base URL up to and including the channel segment, e.g. ".../ch10/".
        url_before = re.findall(r'(.*ch.*/)', response.url)[0]

        # Sub-district id -> name; skip the first entry (the "all" link).
        level2_region = {}
        for node in response.xpath('//div[@id="region-nav-sub"]//a')[1:]:
            region_id = node.xpath('./@href').get().split('/')[-1]
            level2_region[region_id] = node.xpath('./text()').get()

        for c_id, c_name in class_dict.items():
            for l_id, l_name in level2_region.items():
                url = f'{url_before}{c_id}{l_id}'
                print('parse_region', url)
                yield scrapy.Request(url=url, callback=self.parse_getpage,
                                     meta={'info': (c_name, l_name, num_font_dict)})

    def parse_getpage(self, response):
        """Read the pagination widget and request every result page.

        Small districts often have no pager (or serve a captcha page), in
        which case we fall back to a single page.
        """
        class_name, level2_region_name, num_font_dict = response.meta.get('info')
        url_before = response.url
        try:
            # Second-to-last pager link holds the last page number.
            last_page_link = response.xpath('//div[@class="page"]//a')[-2]
            page_count = int(last_page_link.xpath('./text()').get())
        except Exception as e:
            # Missing pager / captcha page: assume a single page of results.
            page_count = 1
            print(e)

        for p in range(1, page_count + 1):
            page_url = f'{url_before}p{p}'
            yield scrapy.Request(url=page_url, callback=self.parse_page,
                                 meta={'info': (class_name, level2_region_name, num_font_dict)})

    # Item fields are fixed, so decoding lives in its own helper.
    def num_decode(self, num_source, num_font_dict):
        """Decode font-obfuscated digits.

        Each obfuscated character is reduced to its 4-hex-digit code point
        (via a unicode-escape round trip) and looked up in *num_font_dict*;
        characters without a mapping (plain ASCII digits, separators) are
        kept as-is.  Returns the decoded string.
        """
        decoded = ''
        for char in num_source:
            try:
                key = char.encode('unicode-escape')[-4:].decode('unicode-escape')
                decoded += num_font_dict[key]
            except (KeyError, UnicodeDecodeError):
                # Not an obfuscated glyph — pass the character through.
                decoded += char
        return decoded

    def parse_page(self, response):
        """Scrape one listing page and yield a DazhongdianpingItem per shop."""
        class_name, level2_region_name, num_font_dict = response.meta.get('info')
        shops = response.xpath('//div[@id="shop-all-list"]/ul//li')
        for s in shops:
            title = s.xpath('.//a[@data-click-name="shop_title_click"]/h4/text()').get()
            detail_href = s.xpath('.//a[@data-click-name="shop_title_click"]/@href').get()
            if detail_href is None:
                # Ad/placeholder rows carry no shop link — skip them instead
                # of crashing on None.split().
                continue
            shop_id = detail_href.split('/')[-1]
            address = s.xpath('.//a[@data-click-name="shop_map_click"]/@data-address').get()
            star = s.xpath('.//div[@class="comment"]/span/@title').get()
            recommend = s.xpath('.//div[@class="recommend"]//a//text()').getall()  # recommended dishes
            # .get() returns None when absent — no exception handling needed.
            groupdeal = s.xpath('.//a[@data-click-name="shop_info_groupdeal_click"]/@title').get()  # group deal

            comment = s.xpath('.//a[@data-click-name="shop_iwant_review_click"]/b//text()').getall()  # review count
            avg = s.xpath('.//a[@data-click-name="shop_avgprice_click"]/b//text()').getall()  # avg price
            taste = s.xpath('.//span[@class="comment-list"]/span[1]/b//text()').getall()
            service = s.xpath('.//span[@class="comment-list"]/span[3]/b//text()').getall()
            env = s.xpath('.//span[@class="comment-list"]/span[2]/b//text()').getall()

            # Decode the font-obfuscated numeric fields.
            num_comment = self.num_decode(comment, num_font_dict)
            num_avg = self.num_decode(avg, num_font_dict)
            taste_score = self.num_decode(taste, num_font_dict)
            service_score = self.num_decode(service, num_font_dict)
            env_score = self.num_decode(env, num_font_dict)

            item = DazhongdianpingItem(class_name=class_name, level2_region_name=level2_region_name,
                                       title=title, shop_id=shop_id, address=address, star=star,
                                       num_comment=num_comment, num_avg=num_avg, groupdeal=groupdeal,
                                       recommend=recommend, taste_score=taste_score, service_score=service_score,
                                       env_score=env_score)

            yield item








