import hashlib
import json

import scrapy
from redis import StrictRedis


class PriceSpider(scrapy.Spider):
    """Scrape wholesale price tables from xinfadi.com.cn.

    Crawls the market-analysis listing pages (up to ``MAX_PAGE`` pages) and
    yields one dict per table row.  A Redis connection is opened at startup
    for use by downstream pipelines (e.g. hash-based de-duplication of items;
    an earlier inline md5-against-Redis check was removed as dead code and is
    expected to live in a pipeline instead).
    """

    name = 'price'
    allowed_domains = ['xinfadi.com.cn']
    start_urls = ['http://www.xinfadi.com.cn/marketanalysis/0/list/1.shtml']
    MAX_PAGE = 3     # stop pagination after this many pages (was hard-coded)
    page_number = 1  # page currently being crawled

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Connect to Redis; intended for item de-duplication by pipelines.
        self.redis_client = StrictRedis(host='localhost', port=6379, db=0)

    def closed(self, reason):
        """Scrapy shutdown hook: release the Redis connection.

        Replaces the previous ``__del__`` implementation — ``__del__`` is not
        guaranteed to be called and may run during interpreter teardown, so it
        is unreliable for resource cleanup.
        """
        self.redis_client.close()

    def parse(self, response):
        """Yield one item per table row, then request the next page.

        The table's first <tr> is the header, so it is sliced off; the
        remaining rows (20 per page) are real data.
        """
        self.logger.info("parse callback invoked for %s", response.url)

        tr_list = response.xpath("//table[@class='hq_table']//tr")
        for row in tr_list[1:]:
            # Columns 1-7: title, low, average, high, specs, unit, date.
            cells = [row.xpath(f"./td[{i}]/text()").get() for i in range(1, 8)]
            yield {
                "title": cells[0],
                "price_low": cells[1],
                "price_average": cells[2],
                "price_high": cells[3],
                "specs": cells[4],
                "unit": cells[5],
                "publish_date": cells[6],
            }

        # Follow the next listing page until MAX_PAGE is reached.
        self.page_number += 1
        if self.page_number <= self.MAX_PAGE:
            next_page_url = (
                "http://www.xinfadi.com.cn/marketanalysis/0/list/%d.shtml"
                % self.page_number
            )
            yield scrapy.Request(url=next_page_url, callback=self.parse)

