# -*- coding: utf-8 -*-

import re

from bs4 import BeautifulSoup

from fire_control_spider.utils import ContentProcessor
from ..base_parser import BasePageParser


class NewsDetailParser(BasePageParser):
    """新闻详情页解析器 — parser for news detail pages.

    Extracts the title, publish time, main body (with interleaved media)
    and document metadata (number / publish date / implement date) from a
    detail-page response and yields a populated webpage item.
    """

    # Timestamp like "2021-07-20 11:15" inside the blog tag list.
    _TIME_PATTERN = re.compile(r'(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2})')
    # Label/value lines inside the styled info divs of #view_content.
    _NUMBER_PATTERN = re.compile(r'编号：(.+?)(?:\n|$)')
    _PUBLISH_PATTERN = re.compile(r'发布日期：(.+?)(?:\n|$)')
    _IMPLEMENT_PATTERN = re.compile(r'实施日期：(.+?)(?:\n|$)')

    def __init__(self, spider):
        super().__init__(spider)
        self.site_name = 'beijingfire_com'
        # Ordered fallback selectors for locating the article body.
        self.selectors = {
            'content': ['#view_content', '.content', '.article-content']
        }

    def can_handle(self, response):
        """判断是否为新闻详情页 — True when the response looks like a news detail page."""
        return ('newsinfo.php' in response.url or
                '/news/' in response.url or
                response.meta.get('page_type') == 'news_detail')

    def parse(self, response):
        """解析新闻详情页 — parse a news detail page and yield one item.

        :param response: Scrapy-style response with ``.text``, ``.url``
            and ``.meta`` attributes.
        :yields: a webpage item produced by ``create_webpage_item`` and
            enriched with title, publish time, body, media and remark info.
        """
        self.logger.info(f"正在解析新闻详情页: {response.url}")

        item = self.create_webpage_item(response)
        item['category'] = response.meta.get("category")

        # Parse the HTML once and share the soup across all helpers
        # (the original re-parsed response.text three times per page).
        soup = BeautifulSoup(response.text, 'html.parser')

        title = self._extract_title(soup)
        if title:
            item['title'] = title

        publication_time = self._extract_publish_time(soup)
        if publication_time:
            item['publish_time'] = publication_time

        # 提取正文内容并处理图文交错 — body HTML with interleaved media.
        main_content = self._extract_main_content(response, soup=soup)
        if main_content:
            main_body, media_urls = ContentProcessor.process_content_with_media(
                main_content, response.url, self.site_name
            )
            item['main_body'] = main_body
            self._update_media_urls(item, media_urls)

        # 添加新闻特有信息 — news-specific metadata lives under 'remark'.
        item['remark'] = {}
        self._extract_document_info(response, item, soup=soup)

        yield item

    def _extract_title(self, soup):
        """Return the title text from the centered <h2>, or '' when absent."""
        element = soup.select_one('h2.text-center')
        return element.get_text(strip=True) if element else ''

    def _extract_publish_time(self, soup):
        """Return the first 'YYYY-MM-DD HH:MM' timestamp found in
        '.blog-post-tags li' entries, or '' when none matches."""
        for tag_item in soup.select('.blog-post-tags li'):
            text = tag_item.get_text(strip=True)
            if not text:
                continue
            match = self._TIME_PATTERN.search(text)
            if match:
                return match.group(1)
        return ''

    def _extract_main_content(self, response, soup=None):
        """提取正文内容 — return the HTML string of the first matching
        content container, or '' when no selector matches.

        :param soup: optional pre-parsed ``BeautifulSoup`` document to
            reuse; when ``None`` the response text is parsed here.
        """
        if soup is None:
            soup = BeautifulSoup(response.text, 'html.parser')
        # '#view_content' is the first entry in the fallback list, so a
        # single pass covers the original primary + backup lookups.
        for selector in self.selectors['content']:
            element = soup.select_one(selector)
            if element:
                return str(element)
        return ''

    def _update_media_urls(self, item, media_urls):
        """更新媒体URLs — append extracted media URL lists onto the item.

        ``media_urls`` is the mapping returned by
        ``ContentProcessor.process_content_with_media``; only keys that
        are present and non-empty are merged.
        """
        if media_urls.get('images'):
            item['images'].extend(media_urls['images'])
        if media_urls.get('videos'):
            item['videos'].extend(media_urls['videos'])
        if media_urls.get('audios'):
            item['audios'].extend(media_urls['audios'])
        if media_urls.get('main_files'):
            item['main_files'].extend(media_urls['main_files'])
        if media_urls.get('attachment_files'):
            item['attachment_files'].extend(media_urls['attachment_files'])

    def _extract_document_info(self, response, item, soup=None):
        """提取文档信息 — pull document number / publish date / implement
        date out of the blue-styled divs inside #view_content into
        ``item['remark']``.

        :param soup: optional pre-parsed ``BeautifulSoup`` document to
            reuse; when ``None`` the response text is parsed here.
        """
        if soup is None:
            soup = BeautifulSoup(response.text, 'html.parser')

        content_div = soup.select_one('#view_content')
        if not content_div:
            return

        # The metadata lines are rendered in blue (#2675de) divs.
        info_divs = content_div.find_all('div', style=lambda x: x and 'color:#2675de' in x)

        for div in info_divs:
            text = div.get_text(strip=True)
            if not text:
                continue

            # One label per div; keep the original first-match-wins order.
            if '编号：' in text:
                match = self._NUMBER_PATTERN.search(text)
                if match:
                    item['remark']['document_number'] = match.group(1).strip()
            elif '发布日期：' in text:
                match = self._PUBLISH_PATTERN.search(text)
                if match:
                    item['remark']['publish_date'] = match.group(1).replace(" ", "")
            elif '实施日期：' in text:
                match = self._IMPLEMENT_PATTERN.search(text)
                if match:
                    item['remark']['implement_date'] = match.group(1).replace(" ", "")
 