# -*- coding: utf-8 -*-

import scrapy
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from ...utils import ContentProcessor, FileUtils
from .common import update_media_urls, extract_current_page_menu

# Hand-curated mapping of known NFPA page URLs to their breadcrumb
# category trails. NewsDetailParser.can_handle() whitelists these URLs,
# and NewsDetailParser.parse() uses the matching 'categorys' list as the
# item's category; URLs not listed here fall back to a category derived
# from the URL path.
# NOTE: the key is intentionally spelled 'categorys' (not 'categories')
# because parse() reads entry.get('categorys') — do not rename.
page_data = [
    {
        'url': 'https://www.nfpa.org/news-blogs-and-articles/blogs/2024/11/06/history-of-nfpa',
        'categorys': ['Home', 'About NFPA', 'History of NFPA']
    },
    {
        'url': 'https://www.nfpa.org/education-and-research/Research/nfpa-research/fire-statistical-reports/fire-safety-in-the-united-states',
        'categorys': ['Home', 'About NFPA', 'NFPA Research', 'Fire Statistical Reports', 'Fire Safety in the United States']
    },
    {
        'url': 'https://www.nfpa.org/en/about-nfpa/nfpa-fire-and-life-safety-ecosystem',
        'categorys': [ 'Home', 'About NFPA', 'NFPA Fire and Life Safety Ecosystem' ]
    },
    {
        'url': 'https://www.nfpa.org/news-blogs-and-articles/nfpa-journal',
        'categorys': ['Home', 'News and Articles', 'NFPA Journal']
    },
    {
        'url': 'https://www.nfpa.org/news-blogs-and-articles',
        'categorys': ['Home', 'News and Articles']
    },
    {
        'url': 'https://www.nfpa.org/events/fire-prevention-week',
        'categorys': ['Home', 'Events']
    }

]
class NewsDetailParser(BasePageParser):
    """Parser for NFPA news detail pages.

    Handles responses whose URL is explicitly whitelisted in ``page_data``
    or that the spider tagged with ``meta['page_type'] == 'nfpa_news_details'``.
    """

    def can_handle(self, response):
        """Return True if this parser should process *response*.

        A URL listed in ``page_data`` always matches; otherwise the
        spider-supplied ``page_type`` meta flag decides.
        """
        if any(entry['url'] == response.url for entry in page_data):
            return True
        return response.meta.get('page_type') == 'nfpa_news_details'

    def parse(self, response):
        """Parse one news detail page and yield a single webpage item.

        Populates: category (curated breadcrumbs from ``page_data`` or a
        URL-derived fallback), title, publish_time (currently always ''
        — no selectors configured yet), and main_body with media URLs
        registered via ``update_media_urls``.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        item = self.create_webpage_item(response)

        # Prefer the hand-curated breadcrumb trail for known URLs;
        # otherwise derive one from the URL path.
        item['category'] = next(
            (entry.get('categorys', []) for entry in page_data
             if entry['url'] == response.url),
            [],
        )
        if not item['category']:
            item['category'] = self.generate_category_by_url(response.url)

        # Title: first selector that matches wins; <h1> preferred over <title>.
        item['title'] = self._first_text(soup, ['h1', 'title'])

        # Publish time: no selectors are configured yet, so this stays ''.
        # Add CSS selectors below once the page markup is known.
        publish_time = ''
        for selector in []:
            time_elem = soup.select_one(selector)
            if time_elem:
                publish_time = time_elem.get_text(strip=True)
                # Prefer a machine-readable datetime attribute over text.
                if time_elem.get('datetime'):
                    publish_time = time_elem.get('datetime')
                break
        item['publish_time'] = publish_time

        # Main body: process interleaved text/media content.
        # BUGFIX: default to '' so the field is always present on the item,
        # even when no content selector matches.
        item['main_body'] = ''
        for selector in ['#maincontent']:
            content_elem = soup.select_one(selector)
            if content_elem:
                main_body, media_urls = ContentProcessor.process_content_with_media(
                    str(content_elem), response.url, 'nfpa'
                )
                item['main_body'] = main_body
                update_media_urls(item, media_urls)
                break

        yield item

    def _first_text(self, soup, selectors):
        """Return the stripped text of the first matching selector, or ''."""
        for selector in selectors:
            elem = soup.select_one(selector)
            if elem:
                return elem.get_text(strip=True)
        return ''

    def generate_category_by_url(self, url):
        """Derive a breadcrumb category list from *url*'s path.

        Example:
            http://www.nfpa.org/education-and-research/emergency-response/x
            -> ['Home', 'Education And Research', 'Emergency Response']

        Returns [] when the path is empty or has fewer than two segments.
        Locale prefixes ('en', 'es') are dropped from the result.
        """
        from urllib.parse import urlparse

        path = urlparse(url).path.strip('/')
        if not path:
            return []

        parts = path.split('/')
        # A detail page is expected to sit at least two levels deep;
        # shallower paths carry no usable breadcrumb information.
        if len(parts) < 2:
            return []

        # Keep at most the first two segments, minus locale prefixes.
        category_parts = [p for p in parts[:2] if p.lower() not in ('en', 'es')]
        # 'education-and-research' -> 'Education And Research'
        return ['Home'] + [p.replace('-', ' ').title() for p in category_parts]