# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
from ..base_parser import BasePageParser


class IncidentListParser(BasePageParser):
    """Parser for the incident (news) list page.

    Yields one request per incident detail page found in the list, and a
    follow-up request for the next list page while pagination continues.
    """

    def can_handle(self, response):
        # Handles the incidents index page itself, or any response that
        # was explicitly tagged as a list page via request meta (this is
        # how paginated follow-up requests are routed back here).
        # NOTE(review): the exact-URL compare misses variants such as a
        # missing trailing slash or query parameters — confirm whether
        # those can occur, or rely solely on the meta tag.
        return (
            response.url == 'https://www.london-fire.gov.uk/incidents/'
            or response.meta.get('page_type') == 'incident_list'
        )

    def parse(self, response):
        """Parse an incident list page.

        Yields requests for each incident detail page, then (if found)
        one request for the next page of the list. All requests are
        routed back through the main spider's ``parse`` for dispatch.
        """
        self.logger.info(f"正在解析incident列表页: {response.url}")

        soup = BeautifulSoup(response.text, 'html.parser')
        rows = soup.select(".article-list > li")

        for row in rows:
            # The first anchor inside each row links to the detail page.
            link = row.select_one('a')
            if link and link.get('href'):
                # strip() guards against stray whitespace in the href,
                # matching the treatment of the pagination link below.
                detail_url = self.urljoin(response.url, link['href'].strip())

                yield self.make_request(
                    detail_url,
                    callback=self.spider.parse,  # route back through the main spider
                    meta={
                        'category_url': response.url,
                        'page_type': 'incident_detail',
                    }
                )

        # Pagination: the <li> immediately after the currently active
        # page item carries the link to the next page, when one exists.
        next_page_link = None
        active_item = soup.select_one('.pagination__item.is-active')
        if active_item:
            next_item = active_item.find_next_sibling('li')
            if next_item:
                link_tag = next_item.find('a')
                if link_tag and link_tag.get('href'):
                    next_page_link = link_tag['href'].strip()
                    self.logger.info(f"找到下一页链接: {next_page_link}")

        if next_page_link:
            next_url = self.urljoin(response.url, next_page_link)
            yield self.make_request(
                next_url,
                callback=self.spider.parse,
                meta={'page_type': 'incident_list'}
            )