# -*- coding: utf-8 -*-

import scrapy
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from ...utils import ContentProcessor, FileUtils
from .common import update_media_urls

class FepExtraListParser(BasePageParser):
    """Parser for SFPE "FPE Extra" list pages.

    Handles the category listing pages (page_type 'sfpe_fpe_extra') and the
    intermediate article listing pages ('sfpe_fpe_extra_article'), scheduling
    follow-up requests that eventually reach the news detail pages.
    """

    def can_handle(self, response):
        """Return True if this parser handles the response's page type.

        The page type is read from ``response.meta['page_type']``, which the
        spider sets when scheduling the request.
        """
        white_list = [
            'sfpe_fpe_extra_article',
            'sfpe_fpe_extra',
        ]
        return response.meta.get('page_type') in white_list

    def extract_menu(self, soup):
        """Extract breadcrumb menu entries from a parsed page.

        Returns a list of category names taken from the ``.current-page``
        breadcrumb links, always prefixed with 'Home'.
        """
        categories = ['Home']
        for row in soup.select('.current-page > a'):
            categories.append(row.get_text(strip=True))
        # BUG FIX: the original built the list but never returned it,
        # so every call produced None.
        return categories

    def parse_fpe_article_list(self, response, page_type):
        """Yield a request for every article link on a listing page.

        Each linked page is scheduled back through the spider's ``parse``
        with the given downstream ``page_type`` so it is routed to the
        appropriate parser, and with the current URL as ``category_url``.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.select('#MainCopy_ContentWrapper ul li a'):
            href = a.get('href')
            if not href:  # skip anchors without a target
                continue
            yield scrapy.Request(
                url=urljoin(response.url, href),
                callback=self.spider.parse,
                meta={
                    'page_type': page_type,
                    'category_url': response.url,
                },
            )

    def parse(self, response):
        """Dispatch a listing response one level down the page hierarchy.

        Category pages ('sfpe_fpe_extra') fan out to article-list pages;
        article-list pages fan out to news detail pages.
        """
        page_type = response.meta.get('page_type')
        if page_type == 'sfpe_fpe_extra':
            yield from self.parse_fpe_article_list(response, 'sfpe_fpe_extra_article')
        elif page_type == 'sfpe_fpe_extra_article':
            yield from self.parse_fpe_article_list(response, 'sfpe_news_detail')

    
    