import sys
import os

# Make the project root importable (this file sits four directories below it).
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".."))
# Guard against duplicates: repeated imports of this module would otherwise
# keep growing sys.path with the same entry.
if project_root not in sys.path:
    sys.path.append(project_root)


import scrapy

from service.analysis_response_service.analysis_structure_service import AnalysisStructureService
from service.database_service.redis.connect.redis_client import global_redis_client
from service.detail_service.detail_service.items import DetailServiceItem


class DetailCrawlSpider(scrapy.Spider):
    """Spider that fetches detail pages for news links queued in Redis.

    Pending links live in the Redis hash "news_links" (news_id -> href).
    Each entry is removed from the hash once its detail page has been
    fetched, and the parsed detail data is emitted as a DetailServiceItem.
    """

    name = "detail_crawl"

    def __init__(self, *args, **kwargs):
        # Scrapy instantiates spiders with extra args/kwargs (e.g. via
        # `scrapy crawl -a key=value` or Crawler.from_crawler); forward them
        # so the base class can finish its own initialization.
        super().__init__(*args, **kwargs)
        # Shared Redis connection used by start_requests() and parse().
        self.redis = global_redis_client.connect()

    def start_requests(self):
        # 1. Iterate over the links in Redis that are waiting for detail crawling.
        # NOTE(review): hgetall may return bytes keys/values unless the client
        # was created with decode_responses=True — confirm against redis_client.
        links = self.redis.hgetall("news_links")
        self.logger.info("哈希表%s中的所有字段和值:", links)
        for news_id, href in links.items():
            self.logger.debug("%s %s", news_id, href)
            # Carry the news_id through to parse() so the Redis entry can be
            # cleaned up once the page arrives.
            yield scrapy.Request(url=href, callback=self.parse, cb_kwargs={'news_id': news_id})

    def parse(self, response, news_id):
        # 1. Remove the entry from Redis so the link is not crawled again.
        # NOTE(review): deletion happens before the item reaches any pipeline;
        # a downstream failure would lose this link — confirm this is intended.
        self.redis.hdel("news_links", news_id)
        self.logger.info("已删除 Redis 中的新闻 ID: %s", news_id)

        # 2. Extract the detail data and emit it as an item.
        item = DetailServiceItem()
        data = AnalysisStructureService.analysis_detail_first_structure(response, news_id)
        data['detail_page_url'] = response.url
        item['data'] = data
        yield item