# -*- coding: utf-8 -*-

import scrapy
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from ...utils import ContentProcessor, FileUtils
from .common import update_media_urls, extract_current_page_menu

# Breadcrumb-style category path attached to every item this parser emits.
categories = ['Home', 'About NFPA', 'Why NFPA Codes and Standards Matter']
class WhyNfpaCodeAndSMParser(BasePageParser):
    """Parser for the NFPA "Why NFPA Codes and Standards Matter" page.

    Extracts the title, the main body, two auxiliary HTML sections
    ("everyday examples" and "standards development process") and an FAQ
    list, and yields a single webpage item.
    """

    def can_handle(self, response):
        """Return True when *response* is the why-codes-matter page."""
        return '/about-nfpa/why-nfpa-codes-and-standards-matter' in response.url

    def parse(self, response):
        """Parse the page and yield one populated webpage item.

        Args:
            response: scrapy Response for the why-codes-matter page.

        Yields:
            The item created by ``create_webpage_item`` with title, body,
            media URLs and a ``remark`` dict of auxiliary sections.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        item = self.create_webpage_item(response)
        item['category'] = categories

        # Title: first <h1> on the page, empty string when absent.
        title_elem = soup.select_one('h1')
        item['title'] = title_elem.get_text(strip=True) if title_elem else ''

        # No publish-time selector is known for this page; keep it empty.
        # (The original iterated an empty selector list — dead code removed.)
        item['publish_time'] = ''

        # The page layout is positional: sections are direct children of
        # #maincontent at fixed indices.
        # NOTE(review): these indices are tied to the current page layout —
        # verify against the live page if extraction breaks.
        box_list = soup.select('#maincontent > div')

        # Main body: 4th direct child (index 3). Guarded against short lists
        # so a layout change degrades gracefully instead of raising IndexError.
        if len(box_list) > 3:
            main_body, media_urls = ContentProcessor.process_content_with_media(
                str(box_list[3]), response.url, 'nfpa'
            )
            item['main_body'] = main_body
            update_media_urls(item, media_urls)

        # "Everyday examples" section: children at indices 4-6, concatenated.
        ee_body = ''
        if len(box_list) > 6:
            ee_box = ''.join(str(div) for div in box_list[4:7])
            ee_body, media_urls = ContentProcessor.process_content_with_media(
                ee_box, response.url, 'nfpa'
            )
            update_media_urls(item, media_urls)

        # "Standards development process" section: children at indices 7-9.
        # BUG FIX: the original checked ``ee_box`` here, clobbered ``ee_body``
        # with this section's content, and then stored the RAW HTML (not the
        # processed body) in the remark. It also left update_media_urls
        # commented out — re-enabled for consistency with the other sections.
        sdp_body = ''
        if len(box_list) > 9:
            sdp_box = ''.join(str(div) for div in box_list[7:10])
            sdp_body, media_urls = ContentProcessor.process_content_with_media(
                sdp_box, response.url, 'nfpa'
            )
            update_media_urls(item, media_urls)

        # FAQ section: second-to-last child holds an accordion of Q&A blocks.
        faq_list = []
        if len(box_list) >= 2:
            aq_box = box_list[-2]
            aq_area = aq_box.select_one("div[class^='content-wrapper_wrappe']")
            if aq_area:
                # Each accordion wrapper is one question/answer pair.
                for block in aq_area.select("div[class^='accordion_wrapper__']"):
                    q_btn = block.select_one('button[class^="accordion_toggle__"]')
                    question = q_btn.get_text(strip=True) if q_btn else ''
                    answer = ''
                    answer_div = block.select_one('.expanderVariant')
                    if answer_div:
                        # Collect paragraph texts, then list-item texts.
                        parts = [p.get_text(strip=True)
                                 for p in answer_div.select('p')]
                        parts.extend(li.get_text(strip=True)
                                     for li in answer_div.select('ul li'))
                        # Fall back to the raw div text when no <p>/<ul> exist.
                        if not parts:
                            parts.append(answer_div.get_text(strip=True))
                        answer = '\n'.join(parts)
                    faq_list.append({'question': question, 'answer': answer})

        item['remark'] = {
            'everyday_examples': ee_body,
            'standards_development_process': sdp_body,
            'frequently_asked_questions': faq_list,
        }

        yield item