# -*- coding: utf-8 -*-

import uuid
import logging
from fire_control_spider.items import WebPageItem


class BasePageParser:
    """Base class for page parsers.

    Subclasses implement :meth:`can_handle` and :meth:`parse`; this base
    provides shared helpers for building items and requests.
    """

    def __init__(self, spider):
        # Keep a reference to the owning spider; logger is named after the
        # concrete subclass for easier log filtering.
        self.spider = spider
        self.logger = logging.getLogger(type(self).__name__)

    def can_handle(self, response):
        """Return whether this parser can process *response*.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def parse(self, response):
        """Parse *response* into items/requests.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def create_webpage_item(self, response):
        """Build a WebPageItem pre-populated with empty default fields.

        ``track_id`` is a fresh UUID and ``url`` comes from the response;
        everything else starts empty for subclasses to fill in.
        """
        item = WebPageItem()
        item['track_id'] = str(uuid.uuid4())
        item['url'] = response.url
        # Text fields default to the empty string.
        for text_field in ('category', 'publish_time', 'title', 'main_body'):
            item[text_field] = ''
        item['remark'] = {}
        # Collection fields default to fresh empty lists (one per field,
        # never shared).
        for list_field in ('main_file', 'attachment_file', 'images',
                           'videos', 'audios', 'main_files',
                           'attachment_files'):
            item[list_field] = []
        return item

    def urljoin(self, base_url, url):
        """Resolve *url* against *base_url* and return the absolute URL."""
        from urllib.parse import urljoin as _urljoin
        return _urljoin(base_url, url)

    def make_request(self, url, callback=None, meta=None, use_selenium=False):
        """Build a Scrapy Request for *url*.

        Args:
            url: Target URL.
            callback: Response callback.
            meta: Request metadata dict; mutated in place when provided.
            use_selenium: When True, tag the request so downstream
                middleware renders it with Selenium.
        """
        from scrapy import Request

        if meta is None:
            meta = {}

        # Propagate the spider's proxy setting (if any) into the request.
        proxy = getattr(self.spider, 'proxy', None)
        if proxy:
            meta['proxy'] = proxy

        if use_selenium:
            # Mark the request for Selenium rendering; the middleware
            # needs the original callback to resume normal processing.
            meta['use_selenium'] = True
            meta['original_callback'] = callback

        # Plain Scrapy Request; middleware interprets the meta flags.
        return Request(url, callback=callback, meta=meta)