# spiders/guba_spider.py

import scrapy
import re


from ..items import MyprojectItem
from ..config.config_reader import get_next_unfinished_target, mark_target_as_finished,get_start_page_num

class GubaSpider(scrapy.Spider):
    """Spider for Eastmoney Guba (股吧) forum post listings.

    Run with: ``scrapy crawl guba_run --nolog``

    On startup it loads the next unfinished target from the config file,
    resumes at that target's start page, yields one item per post row
    (stored by pipelines.py), and walks forward page by page until
    ``end_page_num``, then marks the target finished.
    """
    name = "guba_run"
    allowed_domains = ["guba.eastmoney.com"]

    def __init__(self, *args, **kwargs):
        """Load the next unfinished target and build the resume start URL."""
        super().__init__(*args, **kwargs)

        # Pick the next target not yet marked finished in the config file.
        self.target_idx, self.target = get_next_unfinished_target()
        if not self.target:
            self.logger.info("所有目标已完成，无需爬取。")
            return

        # Per-target parameters from the config entry.
        self.prefix = self.target['prefix']              # URL part before the page infix
        self.suffix = self.target['suffix']              # URL part after the page infix
        self.end_page_num = self.target['end_page_num']  # last page to crawl (inclusive)
        self.code = self.target['code']                  # futures contract code
        self.task_name = self.target['name']             # human-readable futures name
        # Resume point: first page that still needs crawling.
        self.current_page_num = get_start_page_num(self.target)

        # Page 1 has no "_<n>" infix in its URL; later pages do.
        if self.current_page_num == 1:
            self.start_urls = [self.prefix + self.suffix]
        else:
            self.start_urls = [f"{self.prefix}_{self.current_page_num}{self.suffix}"]

    def parse(self, response):
        """Yield one item per post row, then request the next page.

        The page number travels in ``response.meta['current_page']`` so each
        item records the page it actually came from; the first response
        (from ``start_urls``) carries no meta, so it falls back to the resume
        page computed in ``__init__``.
        """
        if not self.target:
            return

        page_num = response.meta.get('current_page', self.current_page_num)

        # Post timestamps live in an embedded JSON blob rather than in the
        # table rows, so extract them with a regex and zip them with the rows.
        pattern = r'"post_last_time":"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})"'
        datetime_matches = re.findall(pattern, response.text)
        nodes = response.xpath('//tr[@class="listitem"]')

        for node, post_date in zip(nodes, datetime_matches):
            item = MyprojectItem()
            item['postdate'] = post_date
            item['title'] = node.xpath('.//td[3]//text()').get(default='').strip()
            item['reads'] = node.xpath('.//td[1]/div/text()').get(default='').strip()
            item['review'] = node.xpath('.//td[2]/div/text()').get(default='').strip()
            item['author'] = node.xpath('.//td[4]/div/a/text()').get(default='').strip()
            item['content'] = response.urljoin(node.xpath('.//td[3]/div/a/@href').get(default=''))
            item['code'] = self.code
            item['name'] = self.task_name
            item['current_page_num'] = page_num
            yield item  # handed to pipelines.py for database storage

        next_page_num = page_num + 1
        # Keep the instance attribute in step for any code that still reads it.
        self.current_page_num = next_page_num

        if next_page_num <= self.end_page_num:
            # Build the next-page URL only when it is actually needed.
            next_url = f"{self.prefix}_{next_page_num}{self.suffix}"
            self.logger.debug(f"请求下一页: {next_url}")
            yield scrapy.Request(
                url=next_url,
                callback=self.parse,
                meta={'current_page': next_page_num},
            )
        else:
            # All pages for this target are done; persist that in the config.
            mark_target_as_finished(self.target_idx)
            # Bug fix: previously logged self.name (the spider name
            # "guba_run") instead of the configured target's name.
            self.logger.info(f"✅ 目标 {self.task_name} ({self.code}) 已完成爬取至第 {self.end_page_num} 页。")