# Change note: the core flow of main() was split into standalone functions, with parameter documentation added
import os
import sys
import time
import requests
from datetime import datetime
import pandas as pd
from urllib.parse import urlparse, urlunparse
import fnmatch

def init_directories(work_dir, result_dir, log_dir):
    """Create the crawler's working directory tree.

    Args:
        work_dir (str): Root working directory.
        result_dir (str): Directory where downloaded pages are stored.
        log_dir (str): Directory where the state/log files are stored.
    """
    for dir_path in (work_dir, result_dir, log_dir):
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() pair and is a no-op when present.
        os.makedirs(dir_path, exist_ok=True)

def load_state(state_file_path):
    """Load the crawl state table from Excel, or start a fresh one.

    Args:
        state_file_path (str): Path to the Excel state file.

    Returns:
        DataFrame: The persisted state table, or an empty table with the
        full state schema when no file exists yet.
    """
    if not os.path.exists(state_file_path):
        # First run: return an empty frame carrying the full column schema.
        return pd.DataFrame(columns=[
            'url', 'depth', 'status', 'time', 'error',
            'retry', 'reffrom', 'refnum',
        ])
    return pd.read_excel(state_file_path)

def save_state(state_df, state_file_path):
    """Persist the crawl state table to an Excel file.

    Args:
        state_df (pd.DataFrame): Current state table.
        state_file_path (str): Destination path for the Excel file.

    The index is dropped so the file round-trips through load_state
    without a spurious unnamed index column.
    """
    state_df.to_excel(state_file_path, index=False)

def normalize_url(url):
    """Build a protocol-agnostic key for *url* (scheme is dropped).

    NOTE(review): the URL components are concatenated without their
    separators (';', '?', '#'), so distinct URLs can collide on the
    same key — kept as-is to preserve existing key values; confirm
    whether separators should be retained.
    """
    p = urlparse(url)
    return ''.join((p.netloc, p.path, p.params, p.query, p.fragment))

def get_unique_key(url):
    """Return a scheme-insensitive identifier for *url*: netloc + path.

    Query string, params and fragment are deliberately excluded, so
    'http://a/x?q=1' and 'https://a/x' produce the same key.
    """
    parts = urlparse(url)
    return f"{parts.netloc}{parts.path}"

def process_url(url, depth, reffrom, result_dir):
    """Fetch a single URL and save its body into *result_dir*.

    For a scheme-less URL, https is attempted first, then http.

    Args:
        url (str): URL to fetch.
        depth (int): Current crawl depth (recorded by the caller; not used here).
        reffrom (str): Referring page of the URL (recorded by the caller; not used here).
        result_dir (str): Directory where the response body is written.

    Returns:
        tuple: (status, error_message) — status is 'visited', 'notfound'
        or 'error'; error_message is None on success.
    """
    try:
        parsed = urlparse(url)
        if not parsed.scheme:
            # No scheme given: try https first, then fall back to http.
            test_urls = [urlunparse(('https',) + parsed[1:]),
                         urlunparse(('http',) + parsed[1:])]
        else:
            test_urls = [url]

        for test_url in test_urls:
            try:
                response = requests.get(test_url, timeout=10)
                if response.status_code == 200:
                    # Name the file after the last path segment; fall back
                    # to 'index' so URLs ending in '/' don't produce a
                    # filename that is just the timestamp prefix.
                    basename = url.split('/')[-1] or 'index'
                    filename = f"{datetime.now().strftime('%Y%m%d%H%M%S')}-{basename}"
                    with open(os.path.join(result_dir, filename), 'wb') as f:
                        f.write(response.content)
                    return ('visited', None)
                if response.status_code == 404:
                    return ('notfound', '404 Not Found')
            except requests.exceptions.RequestException:
                # Connection/timeout failure: try the next candidate scheme.
                continue

        return ('error', 'Network error')

    except Exception as e:
        # Catch-all so a single bad URL never kills the crawl loop.
        return ('error', str(e))

def process_pending_urls(state_df, strategy, max_depth, exclude_rules, include_rules, result_dir, state_file_path=None):
    """Pick one pending URL (per strategy), fetch it and record the outcome.

    Args:
        state_df (DataFrame): Current crawl state table.
        strategy (str): Queue order — 'bfs' (take oldest pending) or
            'dfs' (take newest pending).
        max_depth (int): Maximum crawl depth (reserved; not enforced here).
        exclude_rules (list): URL exclusion patterns (reserved; not applied here).
        include_rules (list): URL inclusion patterns (reserved; not applied here).
        result_dir (str): Directory where fetched content is stored.
        state_file_path (str, optional): When given, the updated table is
            persisted after the fetch. Fixes the original bug where
            save_state() was called without its required path argument,
            raising TypeError on every processed URL.

    Returns:
        DataFrame: The updated state table.

    Raises:
        ValueError: If *strategy* is neither 'bfs' nor 'dfs'.
    """
    pending_df = state_df[state_df['status'] == 'pending']
    if pending_df.empty:
        return state_df

    if strategy == 'bfs':
        current = pending_df.iloc[0]
    elif strategy == 'dfs':
        current = pending_df.iloc[-1]
    else:
        raise ValueError("Invalid strategy")

    result = process_url(current['url'], current['depth'], current['reffrom'], result_dir)

    # current.name is the selected row's index label; updating through it
    # avoids hitting a different row that happens to share the same URL.
    row = current.name
    state_df.at[row, 'status'] = result[0]
    state_df.at[row, 'time'] = datetime.now()
    if result[1]:
        state_df.at[row, 'error'] = result[1]

    if state_file_path is not None:
        save_state(state_df, state_file_path)

    return state_df

def process_retry_queue(state_df, retry_limit, backoff_seconds=5):
    """Re-queue failed URLs that still have retries left.

    Fixes a bug where the updates were applied to the filtered copy
    `error_df` instead of *state_df*, so the status flip and retry
    increment never reached the real table and the main loop could
    spin forever on the same errors. Updates now go through .loc on
    the original frame.

    Args:
        state_df (DataFrame): Current crawl state table (mutated in place).
        retry_limit (int): Maximum number of retries per URL.
        backoff_seconds (int, optional): Pause before the next retry
            round; defaults to 5 to preserve the original behavior.

    Returns:
        DataFrame: The updated state table, with retryable rows set back
        to 'pending' and their retry counters incremented.
    """
    retryable = (state_df['status'] == 'error') & (state_df['retry'] < retry_limit)
    if retryable.any():
        state_df.loc[retryable, 'status'] = 'pending'
        state_df.loc[retryable, 'retry'] += 1
        # Brief back-off so re-queued URLs are not hammered immediately.
        time.sleep(backoff_seconds)
    return state_df

def main():
    """Entry point: set up directories, seed the queue and run the crawl loop."""
    work_dir = os.path.join(os.getcwd(), 'work')
    result_dir = os.path.join(work_dir, 'result')
    log_dir = os.path.join(work_dir, 'log')
    state_file = os.path.join(log_dir, 'status.xlsx')

    init_directories(work_dir, result_dir, log_dir)
    state_df = load_state(state_file)

    # Crawl parameters (example values; should come from CLI/config).
    start_url = 'https://example.com'
    max_depth = 2
    strategy = 'bfs'  # bfs/dfs
    exclude_rules = ['*.pdf', '*.jpg']
    include_rules = ['/*']
    retry_limit = 3

    # Seed the queue with the start URL on a fresh run.
    if state_df.empty:
        initial_entry = {
            'url': start_url,
            'depth': 0,
            'status': 'pending',
            'time': None,
            'error': None,
            'retry': 0,
            'reffrom': '',
            'refnum': 1
        }
        # DataFrame.append was removed in pandas 2.0; concat a one-row frame.
        state_df = pd.concat([state_df, pd.DataFrame([initial_entry])],
                             ignore_index=True)

    while True:
        # Process one pending URL per iteration.
        state_df = process_pending_urls(
            state_df, strategy, max_depth, exclude_rules, include_rules, result_dir)

        # Keep draining the queue while anything is still pending.
        if not state_df[state_df['status'] == 'pending'].empty:
            continue

        # Queue drained: give retryable errors another chance.
        state_df = process_retry_queue(state_df, retry_limit)

        # Stop only when nothing is pending AND no error can still be
        # retried. The original check looked only at retryable errors, so
        # it would exit early even if the retry pass had just re-queued
        # rows as 'pending'.
        still_pending = (state_df['status'] == 'pending').any()
        still_retryable = ((state_df['status'] == 'error') &
                           (state_df['retry'] < retry_limit)).any()
        if not (still_pending or still_retryable):
            break

if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    main()