import json
import redis
from scrapy.exceptions import DropItem


class SdweatherspiderPipeline:
    """Scrapy item pipeline for weather items.

    For each item it:
      1. Skips (drops) items whose city was already stored in Redis.
      2. Appends a human-readable record to ``weather.txt``.
      3. Stores the item as a Redis hash with a 24h TTL.
      4. Tracks processed city codes in a Redis set and pushes the JSON
         payload onto a list for downstream consumers.
    """

    def __init__(self, redis_host, redis_port, redis_db, redis_key):
        self.redis_host = redis_host
        self.redis_port = redis_port
        self.redis_db = redis_db
        self.redis_key = redis_key  # Redis list key for the downstream item queue

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy factory hook: build the pipeline from crawler settings."""
        return cls(
            redis_host=crawler.settings.get('REDIS_HOST'),
            redis_port=crawler.settings.get('REDIS_PORT'),
            redis_db=crawler.settings.get('REDIS_DB', 0),
            redis_key=crawler.settings.get('REDIS_ITEMS_KEY')
        )

    def open_spider(self, spider):
        """Open the Redis connection and the output file when the spider starts."""
        self.redis_conn = redis.Redis(
            host=self.redis_host,
            port=self.redis_port,
            db=self.redis_db,
            decode_responses=True
        )
        self.file = open('weather.txt', 'a', encoding='utf-8')

    def close_spider(self, spider):
        """Release resources when the spider closes.

        try/finally so a failure closing Redis can't leak the file handle
        (the original closed them sequentially and could leak the file).
        """
        try:
            self.redis_conn.close()
        finally:
            self.file.close()

    def process_item(self, item, spider):
        """Persist one weather item; drop it if the city was already processed.

        Raises:
            DropItem: when the city's data already exists in Redis.
        """
        city_key = f'weather:city:{item["city_code"]}'

        # BUG FIX: check for duplicates BEFORE any writes. The original wrote
        # to the file first, so dropped duplicates still leaked into weather.txt.
        if self.redis_conn.exists(city_key):
            spider.logger.debug(f'城市 {item["city"]} 的天气数据已存在，跳过保存')
            raise DropItem(f'城市 {item["city"]} 的天气数据已存在')

        # 保存到文件
        self.file.write(f'=== {item["city"]} (城市编码: {item["city_code"]}) 天气 ===\n')
        self.file.write(f'{item["weather"]}\n\n')

        # 保存到Redis
        item_data = {
            'city': item['city'],
            'city_code': item['city_code'],
            'weather': item['weather']
        }

        # hset(..., mapping=...) replaces hmset, which is deprecated and
        # removed in redis-py 4.x.
        self.redis_conn.hset(city_key, mapping=item_data)
        self.redis_conn.expire(city_key, 86400)  # 24小时过期

        # 添加到处理过的城市集合
        self.redis_conn.sadd('weather:cities:processed', item['city_code'])

        # 添加到数据队列（用于后续处理）
        self.redis_conn.rpush(self.redis_key, json.dumps(item_data))

        return item