#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
基础爬虫类
提供通用的爬虫功能，减少代码重复
"""

import requests
from .config import DEFAULT_HEADERS, REQUEST_TIMEOUT
from .utils import LogUtils, NetworkUtils


class BaseScraper:
    """Base scraper providing a shared HTTP session, logging, and download helpers."""

    def __init__(self, gui_callback=None):
        """
        Initialize the base scraper.

        Args:
            gui_callback (function): optional callback used to surface log
                messages in a GUI; when absent, messages go to stdout.
        """
        self.headers = DEFAULT_HEADERS.copy()
        self.gui_callback = gui_callback
        # One shared session so connections are pooled and headers persist
        # across requests.
        self.session = requests.Session()
        self.session.headers.update(self.headers)

    def _log(self, message):
        """
        Route a log message to the GUI callback if set, otherwise print it.

        Args:
            message (str): already-formatted log line
        """
        if self.gui_callback:
            self.gui_callback(message)
        else:
            print(message)

    def _log_info(self, message):
        """Log an informational message."""
        self._log(LogUtils.log_info(message))

    def _log_success(self, message):
        """Log a success message."""
        self._log(LogUtils.log_success(message))

    def _log_warning(self, message):
        """Log a warning message."""
        self._log(LogUtils.log_warning(message))

    def _log_error(self, message):
        """Log an error message."""
        self._log(LogUtils.log_error(message))

    def _log_processing(self, message):
        """Log a progress/processing message."""
        self._log(LogUtils.log_processing(message))

    def make_request(self, url, method='GET', **kwargs):
        """
        Issue an HTTP request through the shared session.

        Args:
            url (str): request URL
            method (str): HTTP method name
            **kwargs: extra keyword arguments forwarded to requests

        Returns:
            requests.Response: response with a successful status
                (raise_for_status has already been checked)

        Raises:
            requests.RequestException: on network failure or an HTTP
                error status
        """
        # Apply the configured default timeout unless the caller overrides it.
        kwargs.setdefault('timeout', REQUEST_TIMEOUT)
        try:
            response = self.session.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except requests.RequestException as e:
            self._log_error(f"请求失败: {url} - {e}")
            raise

    def get(self, url, **kwargs):
        """Issue a GET request."""
        return self.make_request(url, 'GET', **kwargs)

    def post(self, url, **kwargs):
        """Issue a POST request."""
        return self.make_request(url, 'POST', **kwargs)

    def download_file(self, url, file_path, chunk_size=8192):
        """
        Download a file to disk in streaming chunks.

        Args:
            url (str): file URL
            file_path (str): destination path on disk
            chunk_size (int): streaming chunk size in bytes

        Raises:
            requests.RequestException: on download failure
        """
        try:
            # Use the streamed response as a context manager so the
            # underlying connection is released even if writing fails
            # (the original leaked the connection).
            with self.get(url, stream=True) as response:
                with open(file_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=chunk_size):
                        if chunk:  # skip keep-alive chunks
                            f.write(chunk)

            self._log_success(f"文件下载成功: {file_path}")

        except Exception as e:
            self._log_error(f"文件下载失败: {file_path} - {e}")
            raise

    def add_delay(self, delay_seconds=None):
        """Sleep between requests (rate limiting); delegates to NetworkUtils."""
        NetworkUtils.add_request_delay(delay_seconds)

    def close(self):
        """Close the underlying HTTP session."""
        if self.session:
            self.session.close()

    def __enter__(self):
        """Context-manager entry: return the scraper itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the session; exceptions propagate."""
        self.close()


class ScraperError(Exception):
    """Base class for all scraper-related exceptions."""


class NetworkError(ScraperError):
    """Raised when a network operation fails."""


class ParseError(ScraperError):
    """Raised when parsing scraped content fails."""


class ValidationError(ScraperError):
    """Raised when scraped data fails validation."""
