import requests
from bs4 import BeautifulSoup
import json
import time
from urllib.parse import urljoin
import warnings

# Suppress the InsecureRequestWarning emitted because requests below use verify=False.
warnings.filterwarnings("ignore")

# Configuration
BASE_URL = "http://m.m.hanyupinyin.cn"  # base domain (used to absolutize relative paths)
ENTRY_URL = "http://m.m.hanyupinyin.cn/biao/zhengtirenduyinjie/yun.html"  # entry page listing all pinyin links
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Referer": BASE_URL
}
DELAY = 1  # delay between requests (seconds), to avoid anti-scraping measures
RESULT = []  # accumulates the final scraped records


def get_all_pinyin_urls(entry_url):
    """Step 1: collect the URL of every pinyin detail page from the entry page.

    Args:
        entry_url: URL of the index page that links to all pinyin pages.

    Returns:
        A list of dicts ``{"name": <pinyin text>, "url": <absolute URL>}``;
        an empty list on any failure.
    """
    pinyin_urls = []
    try:
        response = requests.get(entry_url, headers=HEADERS, verify=False, timeout=10)
        # Fail fast on HTTP errors: previously a 404/500 page was silently
        # parsed and the function returned [] with no hint of the real cause.
        response.raise_for_status()
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, "html.parser")

        # Every pinyin button (initials / finals / whole-syllable readings)
        # uses this CSS class combination.
        for btn in soup.select("a.button.hanyu06.pinyin06.large"):
            relative_url = btn.get("href")
            if relative_url:
                pinyin_urls.append({
                    "name": btn.get_text(strip=True),       # pinyin name, e.g. b, a, yun
                    "url": urljoin(BASE_URL, relative_url),  # absolutize relative href
                })

        print(f"成功提取到 {len(pinyin_urls)} 个拼音的详情页URL")
        return pinyin_urls
    except Exception as e:
        print(f"提取拼音URL失败：{str(e)}")
        return []


def crawl_pinyin_detail(pinyin_info):
    """Step 2: scrape the target data from one pinyin detail page.

    Args:
        pinyin_info: dict with keys ``"name"`` (pinyin text) and ``"url"``
            (absolute detail-page URL), as produced by get_all_pinyin_urls.

    Returns:
        A dict with keys ``pinyin``, ``page_url``, ``audio_url``,
        ``image_url`` and ``pronunciation_method``. On any failure the
        partially-filled dict (with the defaults below) is still returned,
        so one bad page does not abort the whole crawl.
    """
    pinyin_name = pinyin_info["name"]
    detail_url = pinyin_info["url"]
    data = {
        "pinyin": pinyin_name,
        "page_url": detail_url,
        "audio_url": None,
        "image_url": None,
        "pronunciation_method": "未获取到"  # default when no pronunciation text is found
    }

    try:
        # Throttle before each request to avoid anti-scraping measures.
        time.sleep(DELAY)
        response = requests.get(detail_url, headers=HEADERS, verify=False, timeout=10)
        # Fail fast on HTTP errors: previously an error page was parsed and
        # looked identical to a page that simply lacked audio/image data.
        response.raise_for_status()
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, "html.parser")

        # 1. Audio URL: the play button carries the mp3 filename in a custom
        #    "mp3" attribute; join it onto the site's /mp3/ directory.
        audio_btn = soup.select_one("a.button.hanyusm.pinyinsm.large")
        mp3_filename = audio_btn.get("mp3") if audio_btn else None
        if mp3_filename:
            data["audio_url"] = urljoin(BASE_URL + "/mp3/", mp3_filename)

        # 2. Stroke-order image: matched by its alt text containing both
        #    "拼音" and "写法".
        image_tag = soup.select_one("img[alt*='拼音'][alt*='写法']")
        if image_tag and image_tag.get("src"):
            data["image_url"] = urljoin(BASE_URL, image_tag.get("src"))

        # 3. Pronunciation method: first <p> whose (sole) text contains
        #    "发音". NOTE: string= only matches <p> tags whose single child
        #    is a text node — adjust if the site nests markup inside <p>.
        pronunciation_paragraphs = soup.find_all("p", string=lambda text: text and "发音" in text)
        if pronunciation_paragraphs:
            data["pronunciation_method"] = pronunciation_paragraphs[0].text.strip()

        print(f"成功爬取：{pinyin_name}（音频：{bool(data['audio_url'])}, 图片：{bool(data['image_url'])}）")
    except Exception as e:
        print(f"爬取 {pinyin_name} 失败：{str(e)}")

    return data


def save_to_json(data_list, filename="pinyin_data.json"):
    """Step 3: persist the scraped records as a UTF-8 JSON file.

    Args:
        data_list: list of per-pinyin dicts produced by crawl_pinyin_detail.
        filename: output path (default "pinyin_data.json").
    """
    try:
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(data_list, f, ensure_ascii=False, indent=2)
        # Bug fix: the message previously printed the literal "(unknown)"
        # instead of the actual output filename.
        print(f"\n数据已保存到 {filename}，共 {len(data_list)} 条拼音数据")
    except Exception as e:
        print(f"保存JSON失败：{str(e)}")


# Main execution flow
if __name__ == "__main__":
    print("开始爬取拼音数据...")

    # Step 1: gather every pinyin detail-page URL from the entry page.
    url_entries = get_all_pinyin_urls(ENTRY_URL)
    if not url_entries:
        print("无拼音URL可爬，程序退出")
        exit()

    # Step 2: visit each detail page and accumulate the scraped records.
    RESULT.extend(crawl_pinyin_detail(entry) for entry in url_entries)

    # Step 3: dump everything to a JSON file.
    save_to_json(RESULT)