# -*- coding: utf-8 -*-
import scrapy
from douguo2.items import Douguo2Item
import copy
import redis

class FoodSpider(scrapy.Spider):
    """Crawl douguo.com ingredient categories down to individual recipes.

    Crawl flow:
        category index -> sub-category pages -> per-ingredient recipe
        lists (paginated) -> individual recipe pages, yielding one
        ``Douguo2Item`` per recipe.

    A Redis hash ("dup_url_douguo2_1_mongo") is used to de-duplicate
    recipe URLs across runs.
    """
    name = 'food'
    # allowed_domains = ['douguo.com']
    start_urls = ['https://www.douguo.com/shicai']

    # Shared Redis client for URL de-duplication.  redis-py connects
    # lazily on first command, so a single class-level client replaces
    # the per-request construction the spider previously did.
    redis_db = redis.Redis(host='127.0.0.1', port=6379)

    def parse(self, response):
        """Extract top-level category links and follow each one."""
        classification_paths = response.xpath(
            '//div[@class="type clearfix"]//ul[@class="clearfix"]'
            '//li[@class="more"]/a/@href').extract()
        for path in classification_paths:
            if path:
                # Resolve the (possibly relative) link to the
                # sub-category listing page.
                yield scrapy.Request(response.urljoin(path),
                                     callback=self.caixi_parse)

    def caixi_parse(self, response):
        """Parse a category page: follow links to each ingredient's page."""
        # level1 = top-level category label, carried through request meta.
        level1 = response.xpath(
            '//div[@class="type"]//h4//a/text()').extract_first()
        next_food_urls = response.xpath(
            '//ul[@class="clearfix"]//li//a/@href').extract()
        for next_food_url in next_food_urls:
            # urljoin is a no-op for absolute URLs but makes relative
            # ones safe.  level1 is an immutable string, so no deepcopy
            # is needed when passing it through meta.
            yield scrapy.Request(response.urljoin(next_food_url),
                                 meta={'parm1': level1},
                                 callback=self.cai_parse)

    def cai_parse(self, response):
        """Parse an ingredient page: queue unseen recipes, follow pagination."""
        level1 = response.meta['parm1']
        # level2 = ingredient name shown on the detail page.
        level2 = response.xpath(
            '//div[@class="des-material"]/h3/text()').extract_first()
        for url in response.xpath('//li[@class="clearfix"]/a/@href').extract():
            food_url = response.urljoin(url)  # single recipe link
            # De-duplicate via the Redis hash: only request unseen URLs.
            if not self.redis_db.hexists("dup_url_douguo2_1_mongo", food_url):
                self.redis_db.hset("dup_url_douguo2_1_mongo", food_url, 0)
                yield scrapy.Request(food_url,
                                     meta={'parm1': level1, 'parm2': level2},
                                     callback=self.food_parse)
        # Follow the "next page" anchor (class contains "anext") if present.
        next_url = response.xpath(
            '//div[@class="mt20"]//div[@class="pages"]'
            '//a[contains(@class,"anext")]/@href').extract_first()
        if next_url:
            yield scrapy.Request(response.urljoin(next_url),
                                 meta={'parm1': level1},
                                 callback=self.cai_parse)

    def food_parse(self, response):
        """Parse one recipe page into a Douguo2Item and yield it."""
        item = Douguo2Item()
        item['level1'] = response.meta['parm1']
        item['level2'] = response.meta['parm2']
        item['name'] = response.xpath(
            '//div[@class="rinfo relative"]/h2/text()').extract_first()
        item['author'] = response.xpath(
            '//div[@class="rinfo relative"]'
            '//div[@class="clearfix aut-info relative"]/a/@title'
        ).extract_first()
        # The intro paragraph may be absent; guard against None before
        # stripping whitespace and the "参与活动:" (activity) label.
        intro = response.xpath(
            '//div[@class="rinfo relative"]/p/text()').extract_first()
        if intro:
            intro = (intro.strip().replace('参与活动:', '')
                     .replace('\n', '').replace('\r', '')
                     .replace(' ', '').strip())
        item['intro'] = intro
        # Cover image of the recipe.
        item['url'] = response.xpath(
            '//div[@class="relative"]//div/@data-origin').extract_first()
        item['materials'] = response.xpath(
            '//div[@class="metarial"]//span[@class="scname"]/text()').extract()
        item['steps'] = response.xpath(
            '//div[@class="step"]//div[@class="stepinfo"]/text()').extract()
        # Tags joined into a single colon-separated string.
        item['tags'] = ':'.join(
            response.xpath('//div[@class="fenlei"]//a/text()').extract())
        # Step image URLs: imgs_list is stored on the item as-is,
        # steps_imgs drives the image-download pipeline (same source).
        step_imgs = response.xpath(
            '//div[@class="stepcont clearfix"]/a/@href').extract()
        item['imgs_list'] = step_imgs
        item['steps_imgs'] = step_imgs
        yield item  # hand the scraped data to the pipelines