# -*- coding: utf-8 -*-
# @Time : 2025/8/19 15:40
# @File : crawl4ai_simple.py
# @Software : PyCharm

import asyncio
import os
import re

import markdown_to_json
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig

from File_Output_Function import output_file


async def job(title, url_):
    """Crawl *url_* with crawl4ai and export the page as Markdown and HTML.

    Args:
        title: Human-readable page title; a sanitized copy is used as the
            output file name.
        url_: The URL to crawl.

    Returns:
        A ``(json_data, filename, status)`` tuple. ``status`` is one of
        ``'finish'`` (new page saved), ``'repeat'`` (markdown file already
        exists), ``'abnormal'`` (crawl succeeded but no markdown), or
        ``'error'`` (crawl failed). ``json_data`` is the markdown converted
        to JSON on ``'finish'``, otherwise an empty dict.

    Raises:
        Exception: any unexpected error is logged and re-raised.
    """
    try:
        print('start spider ' + '='*50)
        print(title)
        print(url_)

        # Browser settings
        browser_conf = BrowserConfig(
            headless=True
        )

        # Crawler settings
        run_conf = CrawlerRunConfig()
        async with AsyncWebCrawler(config=browser_conf) as crawler:
            # Run the crawler on a URL
            result = await crawler.arun(
                url=url_,
                config=run_conf
            )

            # Replace characters that are illegal in Windows file names
            # (including ASCII control characters) with spaces.
            illegal_chars = r'[\\/:*?"<>|\x00-\x1f]'
            filename = re.sub(illegal_chars, ' ', title)

            # Guard clauses: bail out early on failure / missing content.
            if not result.success:
                print(f"Crawl failed: {result.error_message}")
                print(f"Status code: {result.status_code}")
                return {}, filename, 'error'

            if not result.markdown:
                print("no markdown")
                return {}, filename, 'abnormal'

            # Export markdown
            save_dir = './output/markdown'
            full_path = os.path.join(save_dir, filename + '.md')
            data = result.markdown

            if os.path.exists(full_path):
                # Already crawled in an earlier run — skip re-writing.
                return {}, filename, 'repeat'

            output_file(save_dir, full_path, data)

            # Export html.
            # Bug fix: this section previously sat AFTER the return
            # statements (unreachable dead code) and called an undefined
            # `output_md` instead of the imported `output_file`.
            save_dir_html = './output/html'
            full_path_html = os.path.join(save_dir_html, filename + '.html')
            data_html = result.html
            if not os.path.exists(full_path_html):
                output_file(save_dir_html, full_path_html, data_html)

            json_data = markdown_to_json.jsonify(data)
            return json_data, filename, 'finish'

    except Exception as e:
        print(f"爬虫过程出错：{str(e)}")
        raise


if __name__ == '__main__':

    # Sample article to crawl.
    title = r"国家防总对京津冀启动防汛四级应急响应"
    url = r"http://society.people.com.cn/n1/2025/0819/c1008-40545509.html"

    # Run the async crawl4ai job and export markdown.
    # Bug fix: `job` returns a (json_data, filename, status) tuple, not
    # just the JSON data — previously the whole tuple was bound to a
    # single variable named `json_data`, which was misleading.
    json_data, filename, status = asyncio.run(job(title, url))

