import hashlib
import json

import scrapy
from redis import StrictRedis
from scrapy.exceptions import DropItem


class PengpaiSpider(scrapy.Spider):
    """Crawl the ThePaper.cn (澎湃新闻) mobile list pages.

    For every list entry this spider yields a text item (absolute article
    URL + title), deduplicated against Redis via an MD5 fingerprint of the
    serialized item, and schedules a follow-up request that yields the
    entry's thumbnail image bytes as a separate item.
    """

    name = 'pengpai'
    allowed_domains = ['thepaper.cn']
    count = 1  # current list-page index (1-based); bumped after each page
    url = 'https://m.thepaper.cn/list_page.jsp?nodeid=25600&isList=1&pageidx=%s'
    start_urls = [url % count]
    # NOTE(review): connection object is created at import time and assumes a
    # local Redis on the default port — consider wiring this through
    # from_crawler()/settings instead. (redis-py connects lazily, so no I/O
    # happens here.)
    redis_client = StrictRedis(host='localhost', port=6379, db=0)
    # Stop paginating once this page index is reached (pages 1..5 are crawled,
    # matching the original hard-coded `count == 6` check).
    MAX_PAGE = 6

    def parse(self, response):
        """Parse one list page.

        Yields:
            dict: text items not previously seen in Redis.
            scrapy.Request: one image request per entry with a thumbnail,
                plus the request for the next list page (until MAX_PAGE).
        """
        for entry in response.xpath("//div[@class='list_item']"):
            text_url = entry.xpath('./a/@href').extract_first()
            img_src = entry.xpath('./a//img/@src').extract_first()
            title = entry.xpath(".//div[@class='list_item_title']//a/text()").extract_first()
            # Malformed entries (missing href/title) previously crashed the
            # whole callback on `None` concatenation; skip them instead.
            if not text_url or not title:
                continue
            item = {
                'type': 'text',
                # urljoin handles absolute hrefs and hrefs with/without a
                # leading slash; plain '+' produced '//' for rooted paths.
                'text_url': response.urljoin(text_url),
                'title': title,
            }
            # Fingerprint the serialized item. Key order of a dict literal is
            # deterministic (insertion order), so the hash is stable.
            fingerprint_src = json.dumps(item)
            hash_val = hashlib.md5(fingerprint_src.encode()).hexdigest()
            # 1. Check Redis for the fingerprint.
            if self.redis_client.get(hash_val):
                # 2. BUG FIX: the original raised DropItem here. DropItem is
                # an item-pipeline exception; raising it inside a spider
                # callback aborts the generator, silently dropping every
                # remaining entry on the page AND the pagination request.
                # A duplicate should simply be skipped.
                continue
            # 3. Unseen: record the fingerprint and emit the item.
            self.redis_client.set(hash_val, fingerprint_src)
            yield item
            # Some entries have no thumbnail; Request(None) would raise.
            if img_src:
                yield scrapy.Request(img_src, callback=self.parse_img,
                                     cb_kwargs={'title': title})
        self.count += 1
        if self.count < self.MAX_PAGE:
            yield scrapy.Request(url=self.url % self.count, callback=self.parse)

    def parse_img(self, response, title):
        """Yield the downloaded thumbnail bytes as an image item.

        Args:
            response: the image response; body is the raw image bytes.
            title: article title forwarded via cb_kwargs, used as filename.
        """
        yield {
            'type': 'img',
            'info': response.body,
            'title': title + '.jpg',
        }
