import re
import requests
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup

def extract_all_urls(url):
    """
    Extract all URL links from the given web page.

    Parameters
    ----------
    url : str
        Absolute URL of the page to fetch.

    Returns
    -------
    list[str]
        Sorted list of unique absolute http/https URLs taken from every
        tag carrying an ``href`` attribute. Returns an empty list if the
        request or the parse fails (the error is printed, not raised).
    """
    try:
        # Present a browser-like User-Agent; some sites reject the
        # default requests UA.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        # Parse the HTML with the stdlib-backed parser.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Any tag with an href attribute (a, link, area, base, ...).
        links = soup.find_all(href=True)

        urls = set()
        for link in links:
            href = link['href'].strip()
            # Skip non-navigational schemes and pure in-page anchors.
            if href and not href.startswith(('javascript:', 'mailto:', 'tel:', '#')):
                # BUGFIX: resolve relative hrefs against the FULL page
                # URL, not just scheme://netloc. With the bare origin as
                # base, a path-relative link such as "page.html" on
                # ".../docs/" wrongly resolved to the site root instead
                # of the current directory.
                absolute_url = urljoin(url, href)
                # Keep only well-formed web URLs.
                parsed = urlparse(absolute_url)
                if parsed.scheme in ('http', 'https') and parsed.netloc:
                    urls.add(absolute_url)

        return sorted(urls)

    except requests.RequestException as e:
        print(f"请求错误: {e}")
        return []
    except Exception as e:
        print(f"解析错误: {e}")
        return []

def urls_to_markdown(urls, title="提取的URL列表"):
    """
    Render a list of URLs as a Markdown document.

    Parameters
    ----------
    urls : list[str]
        URLs to list, one numbered Markdown link per entry.
    title : str
        Heading placed at the top of the document.

    Returns
    -------
    str
        The complete Markdown text, including a count line.
    """
    # Collect the pieces and join once instead of repeated +=.
    pieces = [
        f"# {title}\n\n",
        f"共找到 {len(urls)} 个URL链接\n\n",
    ]
    pieces.extend(
        f"{index}. [{link}]({link})\n"
        for index, link in enumerate(urls, 1)
    )
    return "".join(pieces)

def main():
    """Fetch the target doc page, extract its links, print and save them as Markdown."""
    target_url = "https://doc.crmeb.com/zsff/crmeb_zsff_v2/"
    print(f"正在从 {target_url} 提取URL链接...")

    urls = extract_all_urls(target_url)
    if not urls:
        # Nothing extracted (empty page or request/parse failure).
        print("未找到任何URL链接")
        return

    markdown_content = urls_to_markdown(urls, "CRMEB ZSFF v2文档页面URL列表")

    # Echo the result to the console between separator rules.
    separator = "=" * 50
    print("\n" + separator)
    print(markdown_content)
    print(separator)

    # Persist the Markdown next to the script.
    with open("extracted_urls.md", "w", encoding="utf-8") as f:
        f.write(markdown_content)
    print("\n结果已保存到 extracted_urls.md 文件")

if __name__ == "__main__":
    main()