# -*- coding: utf-8 -*-
import scrapy
from scrapy.http.request import Request
import redis
import time

class FoodinfospiderSpider(scrapy.Spider):
    """Collect recipe names from several Chinese cooking sites.

    Each crawled list page is mapped to its source site via
    :meth:`check_domain` and scraped with site-specific XPath rules.
    Every recipe name found is yielded as a fresh ``{"name": ...}`` dict,
    and pagination links are followed where the site exposes them.
    """

    name = 'foodinfospider'
    allowed_domains = ['meishichina.com', 'meishi.cc', 'meishij.net', 'boohee.com',
                       'hiyd.com', 'xiangha.com', 'douguo.com', 'xinshipu.com']
    start_urls = [
        'https://www.meishichina.com/Eat/Menu/',
        'https://www.meishij.net/chufang/diy/?&page=1',
        'https://www.xinshipu.com/家常菜.html',
        'https://www.xiangha.com/caipu/z-jiachangcai/'
    ]

    def __init__(self, *args, **kwargs):
        """Build the domain -> site-name table and open the Redis handle.

        ``*args``/``**kwargs`` are forwarded to ``scrapy.Spider.__init__``
        so Scrapy can pass ``name``/crawler settings as usual (the original
        signature took no extra arguments, so this is backward compatible).
        """
        super().__init__(*args, **kwargs)
        # Domain substring -> human-readable site name; consumed by
        # check_domain() to dispatch parsing rules.
        self.domain = {
            "meishichina.com": "美食天下",
            "meishi.cc": "美食杰",
            "meishij.net": "美食杰",
            "boohee.com": "薄荷",
            "hiyd.com": "Hi运动",
            "xiangha.com": "香哈",
            "douguo.com": "豆果美食",
            "xinshipu.com": "心食谱",
        }
        # Kept for the queue-driven start_requests variant that feeds URLs
        # from the "url:foodinfo:list" Redis list; redis.Redis connects
        # lazily, so constructing it here is cheap even when unused.
        self.conn = redis.Redis(host="127.0.0.1", port=6379)

    def check_domain(self, url):
        """Return the site name whose domain occurs in *url*, or ``""``.

        :param url: absolute URL of the page being parsed.
        :returns: the human-readable site name, or an empty string when no
            known domain matches.
        """
        for domain, webname in self.domain.items():
            if domain in url:
                return webname
        return ""

    def _yield_names(self, response, row_xpath, name_xpath):
        """Yield ``{"name": ...}`` for every row matched by *row_xpath*.

        A new dict is created per result: re-using one mutable dict across
        yields would let later iterations clobber items still held by the
        pipeline.  Rows whose name node is missing are skipped.
        """
        for row in response.xpath(row_xpath):
            name = row.xpath(name_xpath).extract_first()
            if name is None:
                continue
            yield {"name": name}

    def _next_page(self, response, next_xpath):
        """Return a Request for the next list page, or ``None``.

        The extracted href is passed through ``response.urljoin`` because
        the sites emit relative pagination links.
        """
        href = response.xpath(next_xpath).extract_first()
        if href is None:
            return None
        return scrapy.Request(response.urljoin(href), callback=self.parse)

    def parse(self, response):
        """Dispatch on the source site and yield recipe-name items.

        Unknown domains (and the sites whose scraping rules are not yet
        implemented) fall through and yield nothing.
        """
        webname = self.check_domain(response.request.url)
        if webname == "美食天下":
            yield from self._yield_names(
                response, "//div[@class='space_left']/div/ul/li", "./a/p/text()")
        elif webname == "美食杰":
            yield from self._yield_names(
                response, "//div[@class='listtyle1']", "./a/img/@alt")
            # Follow list-page pagination.
            request = self._next_page(response, "//a[@class='next']/@href")
            if request is not None:
                yield request
        elif webname == "心食谱":
            yield from self._yield_names(
                response, "//ul[@class='line-list font12']/li/a", "./text()")
        elif webname == "香哈":
            yield from self._yield_names(
                response, "//div[@class='s_list']/ul/li/a", "./@title")
            # Follow list-page pagination.
            request = self._next_page(response, "//a[@class='nextpage']/@href")
            if request is not None:
                yield request

