import scrapy
from scrapy import Request,signals
import json
from ..commons.utils import generate_headers,get_json_data
from ..commons.parse_data import parse_note_data
from ..redis_service import RedisService
class NoteByIdSpider(scrapy.Spider):
    """Fetch a single XiaoHongShu note by its id via the web feed API.

    Job progress is published to Redis under the key ``job_id``:
    ``{'status': 'initializing'}`` on construction and
    ``{'status': 'finished'}`` when the spider closes.
    """

    name = "Id"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Build the spider and hook spider_closed into the crawler's
        # signal bus so we can record completion in Redis on shutdown.
        instance = cls(*args, **kwargs)
        crawler.signals.connect(instance.spider_closed, signal=signals.spider_closed)
        return instance

    def __init__(self, note_id=None, job_id=None, *args, **kwargs):
        """
        :param note_id: id of the note to fetch (interpolated into the POST payload)
        :param job_id: Redis key under which job status is reported
        """
        super(NoteByIdSpider, self).__init__(*args, **kwargs)
        self.redis_service = RedisService().redis_client
        self.note_id = note_id
        # POST payload for the feed endpoint; reused by start_requests.
        self.data = {
            'source_note_id': f'{self.note_id}',
            'image_formats': ['jpg', 'webp', 'avif'],
            "extra": {"need_body_topic": "1"}
        }
        self.job_id = job_id
        self.final_result = None
        self.api = "/api/sns/web/v1/feed"
        # Mark the job as started so callers polling Redis see progress.
        self.redis_service.set(f'{self.job_id}', json.dumps({'status': 'initializing'}))

    def start_requests(self):
        # The api path and payload travel in meta; a downstream middleware
        # presumably turns them into the signed request body — TODO confirm
        # against the project's middleware. Reuse self.api / self.data
        # instead of duplicating them inline (they were built in __init__).
        yield Request(
            url='https://edith.xiaohongshu.com',
            callback=self.parse,
            method='POST',
            meta={'api': self.api, 'data': self.data},
        )

    def parse(self, response):
        """Extract the first note item from the JSON response and yield it.

        Also stashes the parsed note on ``self.final_result`` so it can be
        read after the crawl finishes.
        """
        raw = response.text
        # get_json_data strips the response down to the JSON document;
        # the note itself lives at data.items[0].
        json_data = json.loads(get_json_data(raw))['data']['items'][0]
        parsed_data = parse_note_data(json_data, for_web=True)
        self.final_result = parsed_data
        yield {'data': [parsed_data]}

    def spider_closed(self, spider, reason):
        # Runs on spider shutdown; 'reason' is e.g. 'finished' or 'shutdown'.
        # Record completion so the Redis job status reflects the real state.
        self.redis_service.set(f'{self.job_id}', json.dumps({'status': 'finished'}))
        print(f'Spider closed due to {reason}')

