# -*- coding: utf-8 -*-

from fire_control_spider.utils import ContentProcessor
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from .common import extract_menu_info

class NewsDetailParser(BasePageParser):
    """Parser for news detail pages of londonfire.gov.uk."""

    def __init__(self, spider):
        super().__init__(spider)
        # Site identifier passed to ContentProcessor when rewriting media URLs.
        self.site_name = 'londonfire_gov_uk'

    def can_handle(self, response):
        """Return True when the response was tagged as a news detail page."""
        return response.meta.get('page_type') == 'news_detail'

    def parse(self, response):
        """Parse a news detail page and yield the populated webpage item.

        Fills category (breadcrumb path), title, main body (with media
        interleaved) and media URL lists on the base item created by
        ``create_webpage_item``.
        """
        self.logger.info(f"开始处理详情页 {response.url}")

        # Base item pre-populated by the shared parser helper.
        item = self.create_webpage_item(response)

        soup = BeautifulSoup(response.text, 'html.parser')

        # Breadcrumb/menu path via the shared helper from .common.
        item['category'] = extract_menu_info(soup)

        # Title: try each selector in order; first match wins.
        title = ''
        for selector in ('.t-h1',):
            title_element = soup.select_one(selector)
            if title_element:
                title = title_element.get_text(strip=True)
                break
        if title:
            item['title'] = title

        # No structured publish time is extracted from this page type.
        item['publish_time'] = ''

        # Main body HTML plus any media URLs discovered while processing it.
        main_content = self._extract_main_content(soup)
        if main_content:
            main_body, media_urls = ContentProcessor.process_content_with_media(
                main_content, response.url, self.site_name
            )
            item['main_body'] = main_body
            self._update_media_urls(item, media_urls)

        # News pages carry no extra remark fields.
        item['remark'] = {}

        yield item

    def _extract_menu_info(self, soup):
        """Extract the breadcrumb menu path, starting from 'HOME'.

        NOTE(review): unused duplicate of ``extract_menu_info`` imported
        from ``.common`` (``parse`` calls the imported helper); kept only
        for backward compatibility — consider removing once confirmed
        nothing else references it.
        """
        result = ['HOME']
        try:
            # Top-level menu: document.querySelector("header .is-current")
            primary_menu = soup.select_one("header .is-current")
            if primary_menu:
                result.append(primary_menu.get_text(strip=True))

            # Sub menus: document.querySelectorAll(".subnav .subnav__bc")
            for sub_menu in soup.select(".subnav .subnav__bc"):
                text = sub_menu.get_text(strip=True)
                if text:
                    result.append(text)
        except Exception as e:
            # Best-effort: a malformed page should not abort parsing.
            self.logger.warning(f"提取菜单信息失败: {e}")
        return result

    def _extract_main_content(self, soup):
        """Return the serialized HTML of ``main.page``, or '' if absent."""
        content_element = soup.select_one('main.page')
        return str(content_element) if content_element else ''

    def _update_media_urls(self, item, media_urls):
        """Extend the item's media URL lists with newly discovered URLs.

        Only keys present and non-empty in ``media_urls`` are applied;
        missing keys leave the corresponding item lists untouched.
        """
        for key in ('images', 'videos', 'audios',
                    'main_files', 'attachment_files'):
            if media_urls.get(key):
                item[key].extend(media_urls[key])