import json
from scrapy import Request, signals, Spider
from ..commons.utils import J, get_json_data
from ..commons.parse_data import parse_note_data
from ..redis_service import RedisService
from datetime import datetime
class NoteByKeywordSpider(Spider):
    """Search xiaohongshu notes by keyword and crawl each result's detail feed.

    Flow: start_requests issues the first search page -> parse_initial follows
    pagination until ``has_more`` is false, accumulating items -> one detail
    request per valid note id -> parse_data parses and emits each note.

    Job progress is tracked in Redis under the key ``str(job_id)`` as a JSON
    dict: ``{'total_nums', 'completed_nums', 'failed_nums', 'status'}``.
    """

    name = "note_by_keyword"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Hook spider_closed so the Redis progress record is finalized on shutdown.
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def __init__(self, keyword=None, page_num=1, job_id=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.keyword = keyword
        self.job_id = job_id
        self.page_num = page_num  # may arrive as a string from `-a page_num=...`
        self.redis_service = RedisService().redis_client
        self.api = "/api/sns/web/v1/search/notes"
        self.result = []        # raw search items accumulated across pages
        self.final_result = []  # parsed note payloads
        self.total_pages = 1

        # Initialize the task-progress record for this job.
        self._save_progress(self._fresh_progress())

    @staticmethod
    def _fresh_progress():
        """Return a brand-new progress record (also used as a fallback)."""
        return {'total_nums': 1, 'completed_nums': 0, 'failed_nums': 0, 'status': 'initializing'}

    def _load_progress(self):
        """Read this job's progress dict from Redis.

        Falls back to a fresh record if the key is missing (e.g. expired or
        flushed) so progress updates never crash on ``json.loads(None)``.
        """
        raw = self.redis_service.get(f'{self.job_id}')
        return self._fresh_progress() if raw is None else json.loads(raw)

    def _save_progress(self, progress):
        """Persist the progress dict back to Redis under the job key."""
        self.redis_service.set(f'{self.job_id}', json.dumps(progress))

    def start_requests(self):
        data = {
            'keyword': self.keyword,
            'page': self.page_num,
            'page_size': 20,
            'search_id': J(),
            'sort': 'general',
            'note_type': 0,
            'ext_flags': [],
            'image_formats': ['jpg', 'webp', 'avif'],
        }
        yield Request(
            url='https://edith.xiaohongshu.com',
            callback=self.parse_initial,
            method='POST',
            meta={'api': self.api, 'data': data},
        )

    def parse_initial(self, response):
        """Collect one search page; follow pagination until exhausted, then
        fan out one detail request per collected note."""
        json_data = json.loads(get_json_data(response.text))['data']
        self.result.extend(json_data.get('items', []))
        if json_data['has_more']:
            self.total_pages += 1
            data = response.meta['data']
            # 'page' may still be a CLI-supplied string on the first request;
            # normalize before incrementing.
            next_page_num = int(data['page']) + 1
            data['page'] = next_page_num
            yield Request(
                url='https://edith.xiaohongshu.com' + self.api + f'/{next_page_num}',
                callback=self.parse_initial,
                method='POST',
                meta={'api': self.api, 'data': data},
            )
        else:
            self.logger.info('Search finished: %d notes collected', len(self.result))
            # update_total_nums() returns None — call it, don't yield it into
            # the callback output.
            self.update_total_nums(len(self.result))
            yield from self.execute_after_update_total_pages(self.result)

    def execute_after_update_total_pages(self, items_list):
        """Yield one feed-detail request per note whose id looks valid (24 chars);
        invalid ids are counted as failures and skipped."""
        for index, item in enumerate(items_list):
            if len(item['id']) != 24:
                self.update_failed_nums()
                self.logger.warning(f"{item['id']}====id异常跳过此id爬取")
                continue
            yield Request(
                url=f'https://edith.xiaohongshu.com/{index}',
                callback=self.parse_data,
                method='POST',
                meta={'api': '/api/sns/web/v1/feed', 'data': {
                    'source_note_id': f'{item["id"]}',
                    'image_formats': ['jpg', 'webp', 'avif'],
                    "extra": {"need_body_topic": "1"},
                }},
            )

    def update_total_nums(self, total_nums):
        """Record the final number of notes to crawl and flip status to 'crawling'."""
        progress = self._load_progress()
        progress['total_nums'] = total_nums
        progress['status'] = 'crawling'
        self._save_progress(progress)
        self.logger.info(f'Updated progress: {progress}')

    def update_failed_nums(self):
        """Increment the failed-note counter."""
        progress = self._load_progress()
        progress['failed_nums'] += 1
        self._save_progress(progress)
        self.logger.info(f'Updated failed Nums: {progress}')

    def update_completed_nums(self):
        """Increment the completed-note counter."""
        progress = self._load_progress()
        progress['completed_nums'] += 1
        self._save_progress(progress)
        self.logger.info(f'Updated completed Nums: {progress}')

    def parse_data(self, response):
        """Parse one note's detail feed and emit it as an item."""
        self.update_completed_nums()
        if response.text:
            json_data = json.loads(get_json_data(response.text))['data']['items'][0]
            parsed_data = parse_note_data(json_data, for_web=True)
            self.final_result.append(parsed_data)
            yield {'data': [parsed_data]}

    def spider_closed(self, spider, reason):
        """spider_closed signal handler: mark the job finished in Redis."""
        progress = self._load_progress()
        progress['status'] = 'finished'
        self._save_progress(progress)
        self.logger.info(f'Spider closed due to {reason}')
