import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import chardet
import time
import sys


def crawl_website(start_url, output_file="output.txt"):
    """Crawl the sidebar links of *start_url* and dump each page's content.

    The start page's element with id='sidebar' is scanned for links; each
    linked page's sidebar <h2>, main <h1>, and id='content' text are
    appended to *output_file* in a simple Markdown layout.

    Parameters:
        start_url: index page URL whose sidebar links will be followed.
        output_file: path of the file to (over)write, UTF-8 encoded.

    Returns:
        None. Errors on individual sub-pages are logged and skipped.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }

    def _fetch_soup(url):
        """GET *url*, detect its charset, and return the parsed soup.

        Raises on network failures / non-2xx responses (requests exceptions).
        """
        response = requests.get(url, headers=headers, timeout=10)
        # Fail before doing any decoding work on an error page.
        response.raise_for_status()
        # chardet may return None when detection fails; fall back to
        # requests' own heuristic so .text is not decoded with a bad default.
        detected = chardet.detect(response.content)['encoding']
        response.encoding = detected or response.apparent_encoding
        return BeautifulSoup(response.text, 'html.parser')

    try:
        soup = _fetch_soup(start_url)
    except Exception as e:
        print(f"初始请求失败: {e}")
        return

    sidebar = soup.find(id='sidebar')
    if not sidebar:
        print("未找到侧边栏内容")
        return

    # Collect sidebar links, resolving relative hrefs and de-duplicating
    # while preserving first-seen order (set gives O(1) membership tests
    # instead of the original O(n) list scan per link).
    links = []
    seen = set()
    for link in sidebar.find_all('a', href=True):
        full_url = urljoin(start_url, link['href'])
        if full_url not in seen:
            seen.add(full_url)
            links.append(full_url)

    # BUG FIX: the output holds Chinese text, so force UTF-8 instead of the
    # platform-dependent default encoding (breaks on Windows/GBK locales).
    with open(output_file, 'w', encoding='utf-8') as f:
        for idx, url in enumerate(links, 1):
            try:
                print(f"正在处理({idx}/{len(links)}): {url}")
                sub_soup = _fetch_soup(url)

                # Sub-page's sidebar <h2> becomes the section heading.
                title_block = sub_soup.find(id='sidebar')
                if title_block:
                    h2_tag = title_block.find('h2')
                    h2 = h2_tag.get_text(strip=True) if h2_tag else "无主题"
                    print(h2)
                    f.write(f"# {h2}\n")

                # Target content lives under id='main' -> id='content'.
                content_block = sub_soup.find(id='main')
                if content_block:
                    h1_tag = content_block.find('h1')
                    h1 = h1_tag.get_text(strip=True) if h1_tag else "无标题"
                    print(h1)
                    content_tag = content_block.find(id='content')
                    content = (content_tag.get_text(strip=True, separator='\n')
                               if content_tag else "无内容")

                    f.write(f"## {h1}")
                    f.write(f"\n{content}\n")
                    f.write("-" * 50 + "\n")

                time.sleep(1)  # polite delay between requests

            except Exception as e:
                # Best-effort crawl: log the failure and move on to the
                # next link rather than aborting the whole run.
                print(f"处理 {url} 时出错: {e}")
                continue


if __name__ == "__main__":
    # Require the item name as the sole command-line argument; previously a
    # missing argument crashed with a raw IndexError traceback.
    if len(sys.argv) < 2:
        print(f"Usage: python {sys.argv[0]} <name>")
        sys.exit(1)
    title = sys.argv[1]
    # The target site organizes entries as .../zhongyaocai/<name>/index.html.
    crawl_website(
        start_url="http://www.zysj.com.cn/zhongyaocai/" + title + "/index.html",
        output_file=title + ".md"
    )
    print("数据抓取完成！")