# Main program entry point.
# Starts and coordinates the whole crawler system.
import asyncio
import json
import logging
import os
import re
import random
import string
import time
from datetime import datetime
from sre_parse import SUCCESS
from sys import exception
from typing import List, Dict, Any
from utils.web_crawler import AsyncWebCrawler
from utils.markdown_converter import MarkdownConverter, create_markdown_payload
from utils.markdown_saver import save_markdown_file
from utils.rule_manager import RuleManager
from utils.web_store import WebStore
from utils.web_api import ChromeWebAPI
from config import config

# Logging configuration (level comes from the project config module).
logging.basicConfig(level=config.LOG_LEVEL)
logger = logging.getLogger(__name__)

# Default request headers for the Shenzhen Stock Exchange listing API.
# They mimic a desktop Edge browser; 'host' and 'referer' are pinned to
# listing.szse.cn, so these headers are only suitable for that endpoint.
REQUEST_HEADERS = {
    'accept': 'application/json, text/javascript, */*; q=0.01',
    'accept-encoding': 'gzip, deflate, br, zstd',
    'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'connection': 'keep-alive',
    'content-type': 'application/json;charset=utf-8',
    'host': 'listing.szse.cn',
    'referer': 'https://listing.szse.cn/disclosure/ipo/index.html',
    'sec-ch-ua': '"Not;A=Brand";v="99", "Microsoft Edge";v="139", "Chromium";v="139"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0',
    'x-requested-with': 'XMLHttpRequest'
}

async def main() -> None:
    """Default run mode: crawl every URL defined in the rule files."""
    await crawl_with_rules()


async def list_rules() -> None:
    """Print every crawl rule known to the RuleManager."""
    manager = RuleManager()
    available = manager.list_rules()

    if not available:
        print("没有找到任何抓取规则")
        return

    separator = "-" * 60
    print(f"找到 {len(available)} 个抓取规则:")
    print(separator)
    # One labelled line per field, then a separator between rules.
    fields = (("文件名", 'filename'), ("规则名", 'name'),
              ("URL模式", 'url_pattern'), ("分类", 'category'))
    for entry in available:
        for label, key in fields:
            print(f"{label}: {entry[key]}")
        print(separator)


async def crawl_with_rules() -> None:
    """Crawl every URL defined in the rule files, dispatching by rule type."""
    converter = MarkdownConverter()
    manager = RuleManager()

    all_rules = manager.get_all_rules()
    if not all_rules:
        logger.warning("没有找到任何抓取规则")
        return

    logger.info(f"找到 {len(all_rules)} 个抓取规则，开始批量抓取")

    async with AsyncWebCrawler(headless=True) as crawler, ChromeWebAPI() as api:
        for current_rule in all_rules:
            kind = current_rule.get('type', 'html').lower()
            pattern = current_rule.get('url_pattern', '')

            if kind == 'api':
                # API rules are dispatched to the exchange-specific handler.
                if 'query.sse.com.cn' in pattern:
                    await process_shjs_api_rule(api, current_rule)
                elif 'szse.cn' in pattern:
                    await process_api_rule(api, current_rule)
            else:
                await process_single_rule(crawler, converter, current_rule)

            # Throttle between rules to avoid hammering the servers.
            await asyncio.sleep(2)

def convert_szse_api_data(api_result: Dict[str, Any], url: str) -> Dict[str, Any]:
    """
    Convert a Shenzhen Stock Exchange (SZSE) API response into the
    structured row format consumed by the database-saving helpers.

    Args:
        api_result: raw JSON payload from the API; a top-level 'data' list
            is expected.
        url: base URL prefixed to each document path to build full links.

    Returns:
        {'table_data': [...]} where each row carries stock code, company
        name, announcement date and the list of document links.
    """
    rows = []

    for entry in api_result.get('data', []):
        company = entry.get('cmpnm', '')      # company name
        announced = entry.get('ddt', '')      # last-update date
        project = entry.get('prjid', '')      # internal project id
        # Prefer the real stock code; fall back to a synthetic SZE_<project> id.
        code = entry.get('cmpcode', '') or f"SZE_{project}"

        links = []
        for doc in entry.get('subInfoDisclosureList', []):
            name = doc.get('dfnm', '') or doc.get('configFileName', '')
            extension = doc.get('dfext', '')
            path = doc.get('dfpth', '')
            href = f"{url}{path}" if path else ''

            # Only documents with both a name and a resolvable link are kept.
            if name and href:
                links.append({
                    'text': f"{company}:{name}",
                    'is_pdf': extension == 'pdf',
                    'href': href,
                })

        # A row is emitted only when the company name and at least one
        # document link are present.
        if company and links:
            rows.append({
                'stock_code': code,
                'company_name': company,
                'announcement_date': announced,
                'links': links,
            })

    return {'table_data': rows}


def convert_shse_api_data(api_result: Dict[str, Any], url: str) -> Dict[str, Any]:
    """
    Convert a Shanghai Stock Exchange (SSE) API response into the
    structured row format consumed by the database-saving helpers.

    Args:
        api_result: JSON payload containing a 'data' array (the unwrapped
            'pageHelp' object from the JSONP response).
        url: base URL prefixed to each document path to build full links.

    Returns:
        {'table_data': [...]} rows with stock code, company name,
        announcement date and document links.
    """
    table_data = []

    # Full payloads are large; keep them out of INFO-level logs.
    logger.debug(f"上海证券交易所API响应数据: {api_result}")

    for item in api_result.get('data', []):
        # fileUpdTime arrives as a 14-char timestamp (e.g. 20250901170005);
        # fall back to "now" in the same format when it is missing.
        update_date = item.get('fileUpdTime', '') or datetime.now().strftime('%Y%m%d%H%M%S')
        if len(update_date) == 14:
            # Normalize to YYYY-MM-DD. Slicing a string of known length
            # cannot raise, so the old try/except here was dead code.
            update_date = f"{update_date[:4]}-{update_date[4:6]}-{update_date[6:8]}"

        company_name = item.get('companyName', 'unknown')
        project_id = item.get('auditId', '')
        # Prefer the real stock code; fall back to a synthetic SHE_<audit id>.
        sequence_num = item.get('companyCode', '') or f"SHE_{project_id}"

        document_list = []
        text_name = item.get('fileTitle', '')
        doc_path = item.get('filePath', '')
        full_link = f"{url}{doc_path}" if doc_path else ''
        if text_name and full_link:
            document_list.append({
                'text': f"{company_name}:{text_name}",
                'is_pdf': doc_path.lower().endswith('.pdf'),
                'href': full_link
            })

        # A row is emitted only when the company name and at least one
        # document link are present.
        if company_name and document_list:
            table_data.append({
                'stock_code': sequence_num,
                'company_name': company_name,
                'announcement_date': update_date,
                'links': document_list
            })

    return {'table_data': table_data}

async def process_api_rule(api: ChromeWebAPI, rule: Dict[str, Any]) -> None:
    """
    Handle an API-type rule for the Shenzhen Stock Exchange (szse.cn).

    Reads request type, pagination info and base parameters from the rule's
    'params' section, pages through the API, converts each page with
    convert_szse_api_data() and persists it via save_to_database_api().

    Args:
        api: shared ChromeWebAPI client used for the HTTP requests.
        rule: rule dict; expects 'name', 'url_pattern' and 'params'.
    """
    rule_name = rule.get('name', 'unnamed')
    url = rule.get('url_pattern', '')
    params_config = rule.get('params', {})

    if not url:
        logger.warning(f"API规则 '{rule_name}' 缺少URL")
        return

    try:
        logger.info(f"使用API规则 '{rule_name}' 请求: {url}")

        # Request type, expected total row count and base parameters.
        request_type = params_config.get('request_type', 'get').lower()
        total_size = int(params_config.get('total_size', 0))
        # Work on a copy so the rule's own dict is never mutated.
        request_data = dict(params_config.get('data', {}))

        # Fill in the anti-cache random token when the rule asks for one.
        if request_data.get('random') == '':
            request_data['random'] = str(random.random())
            logger.info(f"生成随机数: {request_data['random']}")

        logger.info(f"请求类型: {request_type}")
        logger.info(f"请求参数: {request_data}")
        logger.info(f"数据库总数据量: {total_size}")

        # Pagination parameters, coerced to int (rules may store strings).
        page_index = int(request_data.get('pageIndex', 0))
        page_size = int(request_data.get('pageSize', 10))

        # Ceiling division; always request at least one page.
        if total_size > 0:
            total_pages = (total_size + page_size - 1) // page_size
        else:
            total_pages = 1

        logger.info(f"分页信息: 每页{page_size}条, 总共{total_pages}页")

        all_saved_count = 0

        # Start from the configured pageIndex (the old code parsed it but
        # always started at 0), mirroring the SSE handler's behavior.
        for current_page in range(page_index, total_pages):
            try:
                # Per-page copy so each request carries only its own page.
                current_request_data = request_data.copy()
                current_request_data['pageIndex'] = current_page

                logger.info(f"正在请求第{current_page}/{total_pages}页...")

                if request_type == 'post':
                    response = await api.post(
                        url=url,
                        json_data=current_request_data,
                        delay_range=(1.0, 3.0)
                    )
                else:
                    response = await api.get(
                        url=url,
                        headers=REQUEST_HEADERS,
                        params=current_request_data,
                        delay_range=(1.0, 3.0)
                    )

                logger.info(f"第{current_page}页响应状态: {response['status_code']}")
                logger.info(f"第{current_page}页响应时间: {response['response_time']:.2f}秒")
                logger.info(f"第{current_page}页响应成功: {response['success']}")

                if response['success']:
                    result = response['json']
                    data_count = len(result.get('data', []))
                    logger.info(f"第{current_page}页返回{data_count}条数据")

                    # An empty page means we are past the end of the data.
                    if data_count == 0:
                        logger.info(f"第{current_page}页无数据，停止请求")
                        break

                    # Documents are hosted on the static CDN, not the API host.
                    if 'szse.cn' in url or 'listing.szse.cn' in url:
                        structured_data = convert_szse_api_data(result, url='https://reportdocs.static.szse.cn/')
                        page_saved_count = await save_to_database_api(structured_data, rule_name, url)
                        if page_saved_count:
                            all_saved_count += page_saved_count
                            logger.info(f"第{current_page}页保存{page_saved_count}条记录")

                    # Inter-page delay; skipped after the final page
                    # (the old guard was always true inside the range).
                    if current_page < total_pages - 1:
                        await asyncio.sleep(1)

                else:
                    logger.error(f"第{current_page}页请求失败: {response.get('error', '未知错误')}")
                    # On failure, move on to the next page.
                    continue

            except Exception as page_error:
                logger.error(f"处理第{current_page}页时出错: {page_error}")
                continue

        logger.info(f"所有页面请求完成，总共保存{all_saved_count}条记录")

    except Exception as e:
        logger.error(f"处理API规则 '{rule_name}' 时出错: {e}")

async def process_shjs_api_rule(api: ChromeWebAPI, rule: Dict[str, Any]) -> None:
    """
    Handle an API-type rule for the Shanghai Stock Exchange (query.sse.com.cn).

    The SSE endpoint returns JSONP, so every request carries a freshly
    generated callback name and millisecond timestamp; the JSONP wrapper is
    stripped before the payload is converted with convert_shse_api_data()
    and saved via save_to_database_api().

    Args:
        api: shared ChromeWebAPI client used for the HTTP requests.
        rule: rule dict; expects 'name', 'url_pattern' and 'params'.
    """
    # NOTE(review): the Cookie below is a captured session value and will
    # expire eventually; confirm whether the endpoint actually requires it.
    SHHS_HEADERS = {
        'accept': '*/*',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'connection': 'keep-alive',
        'host': 'query.sse.com.cn',
        'referer': 'https://www.sse.com.cn/',
        'sec-ch-ua': '"Not;A=Brand";v="99", "Microsoft Edge";v="139", "Chromium";v="139"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'script',
        'sec-fetch-mode': 'no-cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0',
        'Cookie': 'ba17301551dcbaf9_gdp_session_id=b8692094-52e5-4412-83ec-7ec5d9fa5d1e; gdp_user_id=gioenc-6e69ad88%2C4dab%2C5d07%2Cc4ad%2Cc19g615510a9; ba17301551dcbaf9_gdp_session_id_sent=b8692094-52e5-4412-83ec-7ec5d9fa5d1e; ba17301551dcbaf9_gdp_sequence_ids={%22globalKey%22:30%2C%22VISIT%22:2%2C%22PAGE%22:7%2C%22VIEW_CLICK%22:23}'
    }
    rule_name = rule.get('name', 'unnamed')
    url = rule.get('url_pattern', '')
    params_config = rule.get('params', {})

    if not url:
        logger.warning(f"API规则 '{rule_name}' 缺少URL")
        return

    try:
        logger.info(f"使用API规则 '{rule_name}' 请求: {url}")

        request_type = params_config.get('request_type', 'get').lower()
        total_size = int(params_config.get('total_size', 0))
        request_data = params_config.get('data', {})

        # This endpoint is GET-only. Bail out early for any other request
        # type instead of hitting an unbound 'response' later (the old code
        # only built 'response' inside `if request_type == 'get'`).
        if request_type != 'get':
            logger.warning(f"API规则 '{rule_name}' 不支持的请求类型: {request_type}")
            return

        logger.info(f"请求类型: {request_type}")
        logger.info(f"请求参数: {request_data}")
        logger.info(f"数据库总数据量: {total_size}")

        # Static query parameters come from the rule file:
        #   isPagination=true, sqlId=GP_COMMON_FILE_SEARCH, fileTitle=,
        #   pageHelp.pageSize=25, marketType=1,2, searchDateBegin/End=
        #   fileTypeMap: IPO = I0011,I0012,I0013,I3010
        #                refinancing = S0011,S3010,S3020
        #                M&A/restructuring = M0011,M3010,M3020
        # Dynamic parameters are generated per request below:
        #   jsonCallBack=jsonpCallback<random 8 digits>  (cache busting)
        #   _=<13-digit millisecond timestamp>
        #   pageHelp.pageNo / beginPage / endPage = current page number
        #   pageHelp.cacheSize=1 (currently fixed; upstream may change it)
        page_index = int(request_data.get('pageHelp.pageNo', 1))
        page_size = int(request_data.get('pageHelp.pageSize', 25))

        # Ceiling division; always request at least one page.
        if total_size > 0:
            total_pages = (total_size + page_size - 1) // page_size
        else:
            total_pages = 1

        logger.info(f"分页信息: 每页{page_size}条, 总共{total_pages}页")

        all_saved_count = 0

        # Strips the jsonpCallbackNNNNNNNN( ... ) wrapper from the body.
        JSONP_RE = re.compile(r'jsonpCallback\d+\((.*)\)$')

        # SSE pages are 1-based, hence the inclusive upper bound.
        for current_page in range(page_index, total_pages + 1):
            try:
                current_request_data = request_data.copy()
                current_request_data['pageHelp.pageNo'] = current_page
                current_request_data['pageHelp.beginPage'] = current_page
                current_request_data['pageHelp.endPage'] = current_page
                # Cache-busting timestamp and callback name.
                current_request_data['_'] = str(int(time.time() * 1000))
                current_request_data['jsonCallBack'] = f"jsonpCallback{random.randint(10000000, 99999999)}"

                logger.info(f"正在请求第{current_page}/{total_pages}页...")

                response = await api.get(
                    url=url,
                    headers=SHHS_HEADERS,
                    params=current_request_data,
                    delay_range=(1.0, 3.0)
                )

                logger.info(f"第{current_page}页响应状态: {response['status_code']}")
                logger.info(f"第{current_page}页响应时间: {response['response_time']:.2f}秒")
                logger.info(f"第{current_page}页响应成功: {response['success']}")

                if response['success']:
                    logger.info(f"第{current_page}页响应数据: {response}")
                    match = JSONP_RE.search(response['text'])
                    if not match:
                        logger.warning(f"第{current_page}页响应数据不是jsonp格式: {response['text']}")
                        continue
                    data = json.loads(match.group(1))['pageHelp']
                    data_count = len(data.get('data', []))
                    logger.info(f"第{current_page}页返回{data_count}条数据")

                    # An empty page means we are past the end of the data.
                    if data_count == 0:
                        logger.info(f"第{current_page}页无数据，停止请求")
                        break

                    # Documents live on the static CDN, not the API host.
                    structured_data = convert_shse_api_data(data, url='https://static.sse.com.cn/stock')
                    page_saved_count = await save_to_database_api(structured_data, rule_name, url)

                    if page_saved_count:
                        all_saved_count += page_saved_count
                        logger.info(f"第{current_page}页保存{page_saved_count}条记录")

                    # Inter-page delay; skipped after the final page.
                    if current_page < total_pages:
                        await asyncio.sleep(1)

                else:
                    logger.error(f"第{current_page}页请求失败: {response.get('error', '未知错误')}")
                    # On failure, move on to the next page.
                    continue

            except Exception as e:
                logger.error(f"处理第{current_page}页时出错: {e}")
                log_error_to_file(f"处理第{current_page}页时出错: {e}")
                continue

        logger.info(f"所有页面请求完成，总共保存{all_saved_count}条记录")

    except Exception as e:
        logger.error(f"处理上海证券交易所API规则 '{rule_name}' 时出错: {e}")

def log_error_to_file(error_message: str) -> None:
    """Append a timestamped error message to the local error log file."""
    log_path = "./logs/error.log"
    # Make sure the logs directory exists before appending.
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(log_path, "a", encoding="utf-8") as log_file:
        log_file.write(f"[{stamp}] {error_message}\n")

async def process_single_rule(crawler, md_converter: MarkdownConverter, rule: Dict[str, Any]) -> None:
    """Fetch one HTML-type rule's page and persist its structured data."""
    # Strip a trailing wildcard so the pattern becomes a concrete URL.
    url = rule.get('url_pattern', '').rstrip('*')
    if not url.startswith('http'):
        logger.warning(f"跳过无效URL: {url}")
        return

    rule_name = rule.get('name', 'unnamed')

    try:
        logger.info(f"使用规则 '{rule_name}' 处理: {url}")

        title, html_content, metadata, structured_data = await crawler.fetch_page_with_rule(url, rule)

        if not html_content:
            logger.warning(f"抓取失败: {url}")
            return

        logger.info(f"抓取成功: {structured_data}")

        # Markdown conversion and local-file output are currently disabled;
        # the structured data goes straight to the database. This path is
        # shared by the Beijing Stock Exchange and other table-based sites.
        await save_to_database(structured_data, rule_name, url)

        logger.info(f"规则 '{rule_name}' 处理完成")

    except Exception as e:
        logger.error(f"处理规则 '{rule_name}' 时出错: {e}")


async def save_to_file(markdown_content: str, filename: str, title: str, url: str, 
                      metadata: Dict[str, Any], structured_data: Dict[str, Any], rule_name: str) -> None:
    """
    Persist converted markdown to ./output via save_markdown_file.

    The structured data and rule name are folded into the metadata so they
    travel alongside the file. Errors are logged, never raised.
    """
    try:
        save_markdown_file(
            content=markdown_content,
            output_path="./output",
            filename=filename,
            title=title,
            url=url,
            metadata={
                **(metadata or {}),
                'structured_data': structured_data,
                'rule_name': rule_name
            }
        )
        # Fix: log the actual filename instead of a literal placeholder.
        logger.info(f"本地文件保存成功: {filename}")
    except Exception as e:
        logger.error(f"保存本地文件失败: {e}")


async def save_to_database_api(structured_data: Dict[str, Any], rule_name: str, source_url: str) -> int:
    """
    Persist exchange-API structured data, one announcement per document.

    Args:
        structured_data: {'table_data': [...]} as produced by the
            convert_*_api_data helpers.
        rule_name: name of the rule that produced the data (for logging).
        source_url: the API URL; used to derive the exchange name.

    Returns:
        Number of announcement records saved (0 on empty input or failure;
        the old signature said -> None but already returned these counts).
    """
    if not structured_data or 'table_data' not in structured_data:
        logger.info("没有表格数据需要保存到数据库")
        return 0

    # Map the source host to a human-readable exchange name.
    if 'szse.cn' in source_url:
        exchange_name = '深圳证券交易所'
    elif 'sse.com.cn' in source_url:
        exchange_name = '上海证券交易所'
    elif 'bse.cn' in source_url:
        exchange_name = '北京证券交易所'
    else:
        exchange_name = '其他证券交易所'

    try:
        with WebStore() as store:
            table_data = structured_data['table_data']
            saved_count = 0

            logger.info(f"开始保存API数据，共{len(table_data)}条记录")
            logger.info(f"保存数据: {source_url}")

            for row in table_data:
                if not isinstance(row, dict):
                    continue

                stock_code = row.get('stock_code', '')
                company_name = row.get('company_name', '')
                announcement_date = row.get('announcement_date', '')
                links = row.get('links', [])

                # Required fields: skip rows missing any of them.
                if not stock_code or not company_name or not links:
                    logger.warning(f"跳过不完整的记录: stock_code={stock_code}, company_name={company_name}, links_count={len(links)}")
                    continue

                # Fall back to today's date when none was supplied.
                if not announcement_date:
                    announcement_date = datetime.now().strftime('%Y-%m-%d')
                    logger.info(f"使用当前日期替代空日期: {announcement_date}")

                # One announcement record per document link.
                for link in links:
                    if not isinstance(link, dict):
                        continue

                    doc_title = link.get('text', '')
                    doc_href = link.get('href', '')
                    is_pdf = link.get('is_pdf', False)

                    if not (doc_title and doc_href):
                        logger.warning(f"跳过无效文档链接: title={doc_title}, href={doc_href}")
                        continue

                    # Same link-list shape the HTML table path uses.
                    announcement_links = [{
                        'text': doc_title,
                        'href': doc_href,
                        'is_pdf': is_pdf,
                        'original_href': '',
                    }]

                    # The document title doubles as the announcement title.
                    announcement_id = store.insert_announcement_with_links(
                        stock_code=stock_code,
                        company_name=company_name,
                        announcement_title=doc_title,
                        announcement_date=announcement_date,
                        exchange=exchange_name,
                        links=announcement_links
                    )

                    if announcement_id:
                        saved_count += 1
                        logger.info(f"成功保存公告: {stock_code} - {company_name} - {doc_title}")
                    else:
                        logger.warning(f"保存公告失败: {stock_code} - {company_name} - {doc_title}")

            logger.info(f"API数据库保存完成: {saved_count} 条记录")
            return saved_count

    except Exception as e:
        logger.error(f"保存数据库失败: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return 0


async def save_to_database(structured_data: Dict[str, Any], rule_name: str, source_url: str) -> None:
    """Persist HTML-table structured data (cells + links rows) to the database."""
    if not structured_data or 'table_data' not in structured_data:
        logger.info("没有表格数据需要保存到数据库")
        return

    try:
        with WebStore() as store:
            saved_count = 0

            for row in structured_data['table_data']:
                if not (isinstance(row, dict) and 'cells' in row and 'links' in row):
                    continue

                cells = row['cells']
                links = row['links']

                # Expected column layout: code, company, title, (blank), date.
                if len(cells) < 5:
                    continue

                stock_code = cells[0].strip() if cells[0] else ''
                company_name = cells[1].strip() if cells[1] else ''
                announcement_title = cells[2].strip() if cells[2] else ''
                # cells[3] is an empty column and is skipped.
                announcement_date = cells[4].strip() if cells[4] else None

                # Required fields: skip incomplete rows.
                if not stock_code or not company_name or not announcement_title:
                    logger.warning(f"跳过不完整的记录: {cells}")
                    continue

                # Fall back to today's date when none was supplied.
                if not announcement_date:
                    announcement_date = datetime.now().strftime('%Y-%m-%d')
                    logger.info(f"使用当前日期替代空日期: {announcement_date}")

                if not links:
                    logger.warning(f"跳过无有效链接的记录: {stock_code} - {company_name}")
                    continue

                announcement_id = store.insert_announcement_with_links(
                    stock_code=stock_code,
                    company_name=company_name,
                    announcement_title=announcement_title,
                    announcement_date=announcement_date,
                    links=links
                )

                if announcement_id:
                    saved_count += 1
                    logger.info(f"成功保存公告: {stock_code} - {company_name}")

            logger.info(f"数据库保存完成: {saved_count} 条记录")

    except Exception as e:
        logger.error(f"保存数据库失败: {e}")
        import traceback
        logger.error(traceback.format_exc())


if __name__ == "__main__":
    import sys
    
    # Simple CLI dispatch: "list" prints the available rules, "help" prints
    # usage, any other argument is rejected; no argument runs the full crawl.
    if len(sys.argv) > 1:
        command = sys.argv[1]
        
        if command == "list":
            # List every available crawl rule.
            asyncio.run(list_rules())
        elif command == "help":
            print("使用方法:")
            print("  python main.py          # 使用规则文件中定义的所有URL进行抓取")
            print("  python main.py list     # 列出所有可用的抓取规则")
            print("  python main.py help     # 显示此帮助信息")
        else:
            print(f"未知命令: {command}")
            print("使用 'python main.py help' 查看帮助")
    else:
        # Default mode: crawl straight from the rule files.
        asyncio.run(main())


