# -*- coding: utf-8 -*-

import scrapy
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from ...utils import ContentProcessor, FileUtils
from .common import update_media_urls, extract_current_page_menu

class NewsDetailParser(BasePageParser):
    """Parser for SFPE news detail pages.

    Extracts the title and main body (with inline media) from a news
    detail response and yields a populated web-page item.
    """

    def can_handle(self, response):
        """Return True if this parser should handle *response*.

        Matches either the live-education course-catalog URL or responses
        explicitly tagged as ``sfpe_news_detail`` via request meta.
        """
        if '/events-education/liveeducation/coursecatalog' in response.url:
            return True
        return response.meta.get('page_type') == 'sfpe_news_detail'

    def parse(self, response):
        """Parse a news detail page and yield a populated item.

        Populates category, title, publish_time (always empty here — the
        pages expose no date), main_body, and any media URLs discovered
        while processing the body content.
        """
        soup = BeautifulSoup(response.text, 'lxml')

        item = self.create_webpage_item(response)
        item['category'] = extract_current_page_menu(response)

        # Title: first matching selector wins.
        title_selectors = [
            'h1',
        ]

        title = ''
        for selector in title_selectors:
            title_elem = soup.select_one(selector)
            if title_elem:
                title = title_elem.get_text(strip=True)
                break

        item['title'] = title

        # No publish time is available on these pages; keep the field
        # present with an empty value for downstream consistency.
        item['publish_time'] = ''

        # Main body: first matching selector wins. The original loop had
        # no break, so with multiple selectors the LAST match won and
        # update_media_urls could run once per matching selector; break
        # makes this consistent with the title loop above.
        content_selectors = [
            '#MainCopy_ContentWrapper',
        ]

        main_body = ''
        for selector in content_selectors:
            content_elem = soup.select_one(selector)
            if content_elem:
                # Interleave text and media, rewriting media URLs for
                # the 'sfpe' source.
                main_body, media_urls = ContentProcessor.process_content_with_media(
                    str(content_elem), response.url, 'sfpe'
                )
                update_media_urls(item, media_urls)
                break

        item['main_body'] = main_body

        yield item