# import asyncio
# import json
# from datetime import datetime
#
# from crawl4ai import *
# from crawl4ai.models import CrawlResultContainer, StringCompatibleMarkdown
#
# from d_utils.excel_parser import GrapExcelData
# from d_utils.log_utils import g_log
# from d_utils.redis_utils import g_redis
#
#
# async def download_excel(url):
#     g_log.info(f"begin download {url}")
#     async with AsyncWebCrawler() as crawler:
#         result:CrawlResultContainer = await crawler.arun(
#             url=url,
#             # url="https://www.csdn.com",
#         )
#
#         markdown:StringCompatibleMarkdown
#         markdown = result.markdown
#         markdown = markdown.strip()
#         redis_data = {
#             "url":url,
#             "markdown":markdown,
#             "create_time":datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
#             "type":"excel"
#         }
#         redis_value = json.dumps(redis_data, ensure_ascii=False)
#         g_redis.lpush("raw:agent:data_produce", redis_value)
#         g_log.info(f"finish {url}")
#
#
#
#
# if __name__ == "__main__":
#     parse = GrapExcelData()
#     parse.parse_excel("data/匈牙利法律法规汇总.xlsx")
#     for url in parse.urls:
#
#         asyncio.run(download_excel(url))
from pathlib import Path

from config import settings, Settings
from d_utils.craw_utils import do_crawl
from d_utils.db_init import DbInit
from d_utils.excel_parser import GrapExcelData

if __name__ == '__main__':
    # Entry point: load configuration, ensure the database schema exists,
    # then crawl every URL listed in the bundled Excel sheet of Hungarian
    # laws/regulations.
    settings = Settings()
    # Sanity-check that configuration loaded correctly (was an unlabeled
    # "setttsfsdf" debug print).
    print("DB_NAME:", settings.DB_NAME)
    DbInit.init_db()

    # Parse the Excel file shipped next to this script into a list of URLs.
    # Path(__file__) directly — no need to wrap __file__ in an f-string.
    excel_parser = GrapExcelData()
    excel_path = Path(__file__).parent.joinpath("data/匈牙利法律法规汇总.xlsx").absolute()
    excel_parser.parse_excel(excel_path)

    # Crawl each extracted URL sequentially.
    for url in excel_parser.urls:
        do_crawl(url)
