import asyncio
import json

from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

from d_utils.db_entity import LawDataProduce
from d_utils.db_init import DbInit
from d_utils.log_utils import g_log


async def async_crawl(url):
    """Crawl *url* with crawl4ai and persist the page into LawDataProduce.

    Skips the crawl entirely when the url is already stored. Otherwise the
    fetched html / markdown / metadata / links are inserted into the database,
    and the html and markdown are also dumped to local debug files
    (``html.html`` / ``markdown.md``) in the working directory.

    :param url: the page url to fetch and store.
    """
    DbInit.init_db()
    # Check the record first: bail out early if this url was already crawled.
    if _url_is_stored(url):
        g_log.info(f"该url已经存在: {url}")
        return

    browser_config = BrowserConfig(
        headless=True,
        verbose=True,
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.ENABLED,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
        ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url=url,
            config=run_config
        )

        html = result.html
        markdown = result.markdown
        metadata = json.dumps(result.metadata)
        links = json.dumps(result.links)

        # Local debug dumps of the fetched page.
        with open("html.html", "w", encoding="utf-8") as f:
            f.write(html)
        with open("markdown.md", "w", encoding="utf-8") as f:
            f.write(markdown)

        record = LawDataProduce(
            src_url=url,
            src_metadata=metadata,
            src_markdown=markdown,
            src_html=html,
            src_links=links,
        )
        # Re-check before inserting: the crawl takes time and another worker
        # may have stored the same url in the meantime.
        with DbInit.g_session_local() as session:
            if session.query(LawDataProduce).filter(LawDataProduce.src_url == url).first():
                g_log.info(f"该url已经存在: {url}")
            else:
                session.add(record)
                session.commit()


def _url_is_stored(url):
    """Return True when *url* already exists in the LawDataProduce table."""
    with DbInit.g_session_local() as session:
        return session.query(LawDataProduce).filter(LawDataProduce.src_url == url).first() is not None


def do_crawl(url):
    """Blocking entry point: drive the async crawl of *url* to completion."""
    asyncio.run(async_crawl(url))


if __name__ == '__main__':
    # Manual smoke test: crawl the Hungarian legal portal once and store it.
    do_crawl("https://net.jogtar.hu/")
