from turtle import title

import requests
from IPython.testing.tools import full_path
from bs4 import BeautifulSoup
import os
import time
from urllib.parse import urljoin
import hashlib

from isort import sections
from jupyterlab_widgets import data

from PythonData import get_page_content, save_data, parse_sub_page, generate_filename, parse_main_page

# Crawler configuration
URL_SOURCE = 'target_urls.txt'  # text file listing target URLs, one per line
OUTPUT_DIR = 'crawled_data'  # root directory for all saved pages
HEADERS = {
    # Desktop Chrome user agent so the target site serves its normal HTML
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}


def load_target_urls():
    """Read target URLs from URL_SOURCE, one per line.

    Blank lines and lines beginning with '#' (comments) are skipped.
    Returns a built-in default URL list when the file is missing or
    yields no usable entries.
    """
    urls = []

    if os.path.exists(URL_SOURCE):
        with open(URL_SOURCE, 'r', encoding='utf-8') as f:
            stripped = (line.strip() for line in f)
            urls = [entry for entry in stripped
                    if entry and not entry.startswith('#')]

    # Empty list is falsy, so `or` supplies the fallback URL.
    return urls or ['https://www.runoob.com/java/java-modifier-types.html']


def init_directory(base_path):
    """Create base_path and any missing parent directories.

    Existing directories are fine (exist_ok=True). On any other failure
    the error is printed for diagnostics and then re-raised.
    """
    try:
        os.makedirs(base_path, exist_ok=True)
    except Exception as err:
        print(f"创建目录失败: {base_path}")
        print(f"错误详情: {str(err)}")
        raise

def get_url_hash(url):
    """Return a stable 8-character id for url (MD5 hex digest prefix)."""
    digest = hashlib.md5(url.encode())
    return digest.hexdigest()[:8]


def process_single_site(base_url):
    """Crawl one site: save its main page, then every linked sub-page.

    Output goes under OUTPUT_DIR/sites/<hash-of-base_url>/ so that
    different sites never collide on disk. Returns None; skips silently
    if the main page cannot be fetched.
    """
    site_hash = get_url_hash(base_url)
    site_dir = os.path.join(OUTPUT_DIR, "sites", site_hash)  # per-site subdirectory

    # Make sure the destination directory exists before any saving.
    init_directory(site_dir)

    # Main page: abandon the whole site if it cannot be fetched.
    main_html = get_page_content(base_url)
    if not main_html:
        return

    main_data = parse_main_page(main_html, base_url)
    index_path = os.path.join(site_dir, "index.txt")
    save_data(main_data, index_path)  # pass the full path

    # Sub-pages: polite 1s delay between requests; skip failed fetches/parses.
    for idx, sub_url in enumerate(main_data['sub_links'], 1):
        time.sleep(1)
        sub_html = get_page_content(sub_url)
        if not sub_html:
            continue
        sub_data = parse_sub_page(sub_html)
        if sub_data:
            # Hash the sub-URL once and reuse it (was computed twice before);
            # the zero-padded index keeps files sorted in crawl order.
            url_hash = get_url_hash(sub_url)
            filename = f"sub_{idx:02d}_{url_hash}.txt"
            full_path = os.path.join(site_dir, filename)
            save_data(sub_data, full_path)  # pass a string path
# The remaining functions keep their original implementations; only
# parse_main_page needs to change, as sketched below:
# def parse_main_page(html, base_url):
#     """Parse the main page content (adds a base_url parameter)."""
#     soup = BeautifulSoup(html, 'lxml')
#     main_content = soup.find('div', class_='article-body')
#
#     # ... original parsing logic ...
#
#     # Updated link-handling section
#     sub_links = []
#     for link in main_content.find_all('a', href=True):
#         full_url = urljoin(base_url, link['href'])  # use the passed-in base_url
#         if full_url != base_url:
#             sub_links.append(full_url)
#
#     return {
#         'title': title,
#         'sections': sections,
#         'sub_links': list(set(sub_links))
#     }

# def save_data(data, full_path):
#     """Safely save data to full_path after validating the path type."""
#     if not isinstance(full_path, (str, bytes, os.PathLike)):
#         raise TypeError(f"无效路径类型: {type(full_path)}")
#     try:
#         with open(full_path, 'w', encoding='utf-8') as f:
#             # original save logic unchanged
#             if isinstance(data, dict):
#                 f.write(f"标题: {data.get('title', '')}\n\n")
#                 # ... other save logic ...
#         print(f"成功保存文件: {full_path}")
#     except IOError as e:
#         print(f"文件保存失败: {full_path}")
#         print(f"错误详情: {str(e)}")

def main():
    """Entry point: load the target URL list and crawl each site in turn.

    Delegates the per-site work to process_single_site (which creates the
    output directory, saves the main page, and walks the sub-links). A
    failure on one site is reported and skipped so the remaining sites
    are still processed — the previous inline version returned from main
    on the first fetch failure, aborting every URL after it.
    """
    target_urls = load_target_urls()

    for url in target_urls:
        print(f"\n{'=' * 40}")
        print(f"开始处理站点: {url}")
        try:
            process_single_site(url)
        except Exception as e:
            # One broken site must not abort the whole run.
            print(f"处理站点失败: {url} ({e})")
            continue
        print(f"完成处理站点: {url}")
        print(f"{'=' * 40}\n")

# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()