# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from urllib.parse import urljoin
from .common import extract_menu_info
class AuditCommitteeParser(BasePageParser):
    """Parser for the London Fire Commissioner audit-committee page."""

    def can_handle(self, response):
        """Return True if this response is the audit-committee listing page."""
        # Only handle the listing page, not detail pages.
        return '/about-us/governance-london-fire-commissioner/audit-committee/' in response.url

    def parse(self, response):
        """Parse the audit-committee page into a single webpage item.

        Yields one item with ``category``, ``title``, ``publish_time``,
        ``main_files`` (downloadable resources) and ``remark`` populated.
        """
        self.logger.info(f"开始处理audit_committee {response.url}")

        # Build the base item (presumably pre-populates 'main_files' as a
        # list — the loop below appends without creating it; TODO confirm
        # against create_webpage_item).
        item = self.create_webpage_item(response)

        soup = BeautifulSoup(response.text, 'html.parser')

        # Menu / breadcrumb information becomes the item category.
        item['category'] = extract_menu_info(soup)

        # Fix: select_one() returns None when no <h2> exists, which would
        # raise AttributeError; fall back to an empty title instead.
        heading = soup.select_one('h2')
        item['title'] = heading.get_text(strip=True) if heading else ''

        # The page carries no publish date.
        item['publish_time'] = ''

        # Collect downloadable resources; skip malformed entries that lack
        # an <a href> so one broken block does not abort the whole parse.
        for res in soup.select('.resource-download'):
            link = res.select_one('a')
            if link is None or not link.get('href'):
                continue
            name_tag = res.select_one('h3')
            item['main_files'].append({
                'full_url': urljoin(response.url, link.get('href')),
                'name': name_tag.get_text(strip=True) if name_tag else '',
            })

        # No page-specific extras for this parser.
        item['remark'] = {}

        yield item