import requests
from bs4 import BeautifulSoup
import redis
import sys
import os
import urllib.parse

# 将项目根目录添加到Python路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 使用绝对导入
from config import REDIS_CONFIG
from storage import MySQLStorage

import logging
from urllib.parse import urljoin, urlparse
import time

class BaseCrawler:
    """Base crawler: fetch a URL list, parse each page, persist the results.

    Subclasses must implement ``get_urls()`` and ``parse_page()``.
    A Redis set (keyed by ``source_config['name']``) tracks URLs that were
    already crawled; parsed records are saved through ``MySQLStorage``.
    """

    def __init__(self, source_config):
        # source_config must provide at least:
        #   'name'       - prefix for the Redis de-dup set key
        #   'table_name' - target MySQL table for parsed records
        self.source_config = source_config
        self.redis_client = redis.Redis(**REDIS_CONFIG)
        self.storage = MySQLStorage(source_config['table_name'])

    def crawl(self):
        """Run the crawl loop.

        Already-seen URLs are skipped; new URLs are parsed and saved.
        A failure on one URL is logged and does not abort the loop.
        """
        logging.info(f"开始获取URL列表: {self.source_config['name']}")
        urls = self.get_urls()
        logging.info(f"获取到 {len(urls)} 个URL")

        for url in urls:
            try:
                if self.is_url_crawled(url):
                    logging.debug(f"URL已存在: {url}")
                    continue
                logging.info(f"正在处理: {url}")
                data = self.parse_page(url)
                if data:
                    self._save_with_retry(data)
                    # Mark the URL only after a successful save, so a URL
                    # whose save failed is retried on the next run.
                    self.mark_url_crawled(url)
                    logging.info(f"成功保存: {data['title']}")
            except Exception as e:
                # Boundary handler: one bad URL must not stop the crawl.
                logging.error(f"处理URL失败 {url}: {str(e)}")

    def _save_with_retry(self, data, max_retries=3, retry_delay=2):
        """Save ``data``, retrying on lost MySQL connections.

        Args:
            data: record dict accepted by ``MySQLStorage.save``.
            max_retries: attempts before giving up.
            retry_delay: seconds to wait between reconnect attempts.

        Returns:
            True on success.

        Raises:
            Exception: immediately for non-connection errors, or after
            ``max_retries`` failed attempts for connection errors.
        """
        retries = 0
        while retries < max_retries:
            try:
                self.storage.save(data)
                return True  # saved successfully
            except Exception as e:
                retries += 1
                error_msg = str(e)

                # Only connection-loss errors are worth retrying.
                if "Lost connection" in error_msg or "MySQL server has gone away" in error_msg:
                    logging.warning(f"MySQL连接丢失，正在重试 ({retries}/{max_retries})...")

                    # Rebuild the storage object to get a fresh connection;
                    # a failure here is logged and the retry loop continues.
                    try:
                        self.storage = MySQLStorage(self.source_config['table_name'])
                    except Exception as conn_err:
                        logging.error(f"重新连接MySQL失败: {str(conn_err)}")

                    time.sleep(retry_delay)
                else:
                    logging.error(f"保存数据失败: {error_msg}")
                    # Bare raise preserves the original traceback
                    # (``raise e`` re-binds it).
                    raise

        # All retries exhausted.
        logging.error(f"在 {max_retries} 次尝试后仍无法保存数据")
        raise Exception(f"保存数据失败，已重试 {max_retries} 次")

    def is_url_crawled(self, url):
        """Return truthy if ``url`` is already in the Redis de-dup set."""
        return self.redis_client.sismember(f"{self.source_config['name']}_urls", url)

    def mark_url_crawled(self, url):
        """Add ``url`` to the Redis de-dup set."""
        self.redis_client.sadd(f"{self.source_config['name']}_urls", url)

    def get_urls(self):
        """Return the list of URLs to crawl; subclasses must implement."""
        raise NotImplementedError

    def parse_page(self, url):
        """Parse one page into a record dict; subclasses must implement."""
        raise NotImplementedError

    def resolve_url(self, base_url, href):
        """Resolve ``href`` against ``base_url`` per RFC 3986.

        Bug fix: the previous implementation called ``href.lstrip('./')``,
        which strips every leading '.' and '/' *character* (so '../x'
        became 'x' and the '../' branch was unreachable), and its manual
        furl path dropped the base path entirely. ``urljoin`` — already
        used by the old ImportError fallback — handles absolute URLs and
        '/', './' and '../' references correctly, so the optional furl
        dependency is no longer needed.
        """
        return urljoin(base_url, href)

    # ... remaining code unchanged ...