import requests
from bs4 import BeautifulSoup
import re
from urllib.parse import urljoin
import time
import os

# Root URL of the slang dictionary site being scraped.
BASE_URL = "https://tools.enfamily.cn/slang/"
# Browser-like User-Agent so the site does not reject the scraper as a bot.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}


def get_page_urls():
    """Return absolute URLs for every letter page of the dictionary.

    The bare base URL (empty path) serves the letter-A entries; B through Z
    each have their own page, followed by a final OTHER page.
    """
    letters = [chr(code) for code in range(ord('B'), ord('Z') + 1)]
    names = [''] + [f"{letter}.html" for letter in letters] + ["OTHER.html"]
    return [urljoin(BASE_URL, name) for name in names]


def parse_entry(entry):
    """Parse one slang-entry HTML fragment into a dict.

    Args:
        entry: a BeautifulSoup fragment holding one entry — a
            ``<font size="3"><b>term: definition</b></font>`` headword plus
            example text nodes separated by ``<br>`` tags.

    Returns:
        ``{'term': str, 'definition': str, 'examples': list[str]}`` on
        success, or ``None`` when the fragment does not match the layout.
    """
    try:
        # Headword and meaning live together in <font size="3"><b>term: def</b>.
        term_part = entry.find('font', size="3").find('b').text.strip()
        # Split only on the first colon so definitions may themselves
        # contain colons (positional maxsplit is deprecated since 3.13).
        term, definition = re.split(r":\s*", term_part, maxsplit=1)

        # Example sentences follow a bold "Example:" label as bare text
        # nodes, terminated by the entry's closing <hr>.
        example_tag = entry.find('b', string=re.compile(r'\bExample:\b'))
        examples = []
        if example_tag:
            current = example_tag.next_sibling
            while current is not None and current.name != 'hr':
                # Collect non-empty text nodes only. <br> tags are skipped:
                # their following text node is visited on the next loop
                # iteration anyway, so handling <br> specially (as the old
                # code did) appended every example line twice.
                if isinstance(current, str) and current.strip():
                    examples.append(current.strip())
                current = current.next_sibling

        return {
            'term': term.strip(),
            'definition': definition.strip(),
            'examples': [ex for ex in ' / '.join(examples).split(' / ') if ex]
        }
    except (AttributeError, ValueError) as e:
        # AttributeError: an expected tag is missing (find() returned None);
        # ValueError: the headword has no colon to split on.
        print(f"解析词条失败: {str(e)}")
        return None


def process_page(url, session):
    """Fetch one letter page and parse every slang entry on it.

    Args:
        url: absolute URL of the page to scrape.
        session: requests.Session carrying the shared headers.

    Returns:
        List of parsed entry dicts; empty list on any failure.
    """
    def _flush(fragments, out):
        """Parse accumulated tag fragments into one entry; return a fresh buffer."""
        if fragments:
            html = ''.join(str(e) for e in fragments)
            parsed = parse_entry(BeautifulSoup(html, 'html.parser'))
            if parsed:
                out.append(parsed)
        return []

    try:
        response = session.get(url, timeout=10)
        response.encoding = 'utf-8'  # force utf-8 decoding of the page body
        if response.status_code != 200:
            print(f"无法获取页面: {url}")
            return []

        soup = BeautifulSoup(response.text, 'html.parser')
        entries = []

        # Entries start at a <font size="3"> headword and are terminated by
        # <hr>; everything in between belongs to the current entry.
        current_entry = []
        for element in soup.find_all(['font', 'hr']):
            if element.name == 'font' and element.get('size') == "3":
                # New headword: flush whatever was accumulated so far.
                current_entry = _flush(current_entry, entries)
                current_entry.append(element)
            elif element.name == 'hr' and current_entry:
                current_entry = _flush(current_entry, entries)
            else:
                current_entry.append(element)

        # Bug fix: a page whose last entry is not followed by <hr> used to
        # silently drop that entry — flush the trailing buffer too.
        _flush(current_entry, entries)

        return entries
    except Exception as e:
        # Boundary handler: network errors, parser crashes, etc. are logged
        # and the scrape continues with the next page.
        print(f"处理页面失败: {url} - {str(e)}")
        return []


def save_to_markdown(data, filename):
    """Write parsed entries to *filename* as one Markdown letter section.

    Args:
        data: iterable of entry dicts with 'term', 'definition', 'examples'.
        filename: output path; its stem becomes the section heading
            (an 'index' file maps to the letter A).
    """
    # Section heading is derived from the file name itself.
    letter = os.path.splitext(os.path.basename(filename))[0].upper()
    if letter == "INDEX":
        letter = "A"

    # Assemble the whole document in memory, then write it in one call.
    parts = [f"# {letter}\n\n"]
    for entry in data:
        parts.append(f"## {entry['term']}\n")
        parts.append(f"**词义**: {entry['definition']}  \n")
        if entry['examples']:
            parts.append(f"**例句**: {' / '.join(entry['examples'])}\n")
        parts.append("\n")

    with open(filename, 'w', encoding='utf-8') as f:
        f.write(''.join(parts))


def main():
    """Scrape every letter page and write one Markdown file per page."""
    # Output directory for the generated Markdown dictionary.
    os.makedirs('slang_dictionary', exist_ok=True)

    # One session so headers and connections are shared across requests.
    with requests.Session() as session:
        session.headers.update(HEADERS)

        for url in get_page_urls():
            print(f"正在抓取: {url}")
            entries = process_page(url, session)

            if entries:
                # The first (letter A) page is the bare directory URL whose
                # basename is empty; falling back to 'index.md' avoids
                # building a path that collapses into the output directory
                # (open() would fail), and matches save_to_markdown's
                # existing INDEX -> A heading mapping.
                base = os.path.basename(url).replace('.html', '.md') or 'index.md'
                filename = os.path.join('slang_dictionary', base)
                save_to_markdown(entries, filename)
                # Bug fix: this message printed a literal "(unknown)"
                # placeholder instead of the saved file's name.
                print(f"已保存: {filename} ({len(entries)} 个词条)")

            # Be polite to the server between requests.
            time.sleep(1)


# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()