# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
from ..base_parser import BasePageParser


class NewsListParser(BasePageParser):
    """Parser for the IAFF newsroom list page.

    Yields one request per news item (routed back through the main spider
    with ``page_type='iaff_news_detail'``) plus a request for the next
    pagination page when one exists.
    """

    def can_handle(self, response):
        """Return True when *response* is the IAFF news list page.

        Matches either the canonical newsroom URL or a request explicitly
        tagged with ``page_type='iaff_news_list'`` (pagination requests).
        Detail pages are deliberately not handled here.
        """
        return (
            response.url == 'https://www.iaff.org/newsroom/'
            or response.meta.get('page_type') == 'iaff_news_list'
        )

    def parse(self, response):
        """Extract detail-page links and the next-page link from a list page.

        Yields requests built with ``self.make_request``; each carries
        routing metadata (``page_type``, ``category_url``, ``categorys``)
        consumed by the main spider.
        """
        next_page_type = 'iaff_news_detail'

        self.logger.info(f"正在解析列表页: {response.url}")

        soup = BeautifulSoup(response.text, 'html.parser')

        # Selector matches the IAFF site structure; adjust here if the
        # site's markup changes.
        rows = soup.select("main ul > li.news")

        # Use the framework logger (not print) so output is consistent
        # with the rest of the parser.
        self.logger.info(f"找到 {len(rows)} 个链接")

        for row in rows:
            link = row.select_one('a')
            if link is None:
                # Defensive: an <li class="news"> without an anchor would
                # otherwise raise AttributeError on .get('href').
                continue
            href = link.get('href')
            if not href:
                continue
            detail_url = self.urljoin(response.url, href)
            self.logger.info(f"找到链接: {detail_url}")

            # Route back through the main spider, which dispatches on
            # meta['page_type'].
            yield self.make_request(
                detail_url,
                callback=self.spider.parse,
                meta={
                    'category_url': response.url,
                    'page_type': next_page_type,
                    'categorys': ['Home', 'Newsroom'],
                },
            )

        # Pagination: follow the "Next Page" link when present and it
        # actually carries an href (guards urljoin against None).
        next_page = soup.select_one('a[aria-label="Next Page"]')
        if next_page is not None:
            next_href = next_page.get('href')
            if next_href:
                next_page_url = self.urljoin(response.url, next_href)
                yield self.make_request(
                    next_page_url,
                    callback=self.spider.parse,
                    meta={
                        'category_url': response.url,
                        'page_type': 'iaff_news_list',
                    },
                )