import asyncio
import json
import os
import re
from datetime import date
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from File_Output_Function import output_file, analyze_DefenseNews, analyze_TWZ
import sys
from kafka import KafkaProducer
from datetime import datetime

import pytz
# All log timestamps emitted by this module are in China Standard Time.
tz = pytz.timezone('Asia/Shanghai')



# Module-wide Kafka producer shared by every send_message* helper below.
producer = KafkaProducer(
    bootstrap_servers=['172.16.1.184:9092'],  # single-broker cluster address
    value_serializer=lambda x: json.dumps(x).encode('utf-8'),  # payloads are JSON, UTF-8 encoded
   
)


def send_message(topic, message):
    """Publish *message* (JSON-serializable) to the Kafka *topic*.

    Flushes the producer so the record is handed to the broker before
    this function returns.
    """
    producer.send(topic, value=message)
    producer.flush()
def send_message2(topic, times, loglev, workid, exeid, node, msg):
    """Publish a structured log record to the Kafka *topic*.

    Builds the log payload expected by the downstream consumer and
    delegates the actual send+flush to :func:`send_message` instead of
    duplicating that logic here.

    :param times:  formatted timestamp string
    :param loglev: log level string, e.g. "INFO" / "ERROR"
    :param workid: workflow id
    :param exeid:  execution id
    :param node:   logical node name producing the log
    :param msg:    human-readable message text
    """
    send_message(topic, {
        "time": times,
        "logLevel": loglev,
        "workflowId": workid,
        "executionId": exeid,
        "node": node,
        "msg": msg,
    })

async def job(title, url_, mediaType, workid, exeid):
    """Crawl one article page, parse it into a JSON record and publish it to Kafka.

    :param title:     article title; a sanitized copy is used as the output file name
    :param url_:      article URL to crawl
    :param mediaType: category folder under the spider data root
    :param workid:    workflow id, forwarded into log messages and the result JSON
    :param exeid:     execution id, forwarded into log messages and the result JSON
    :returns: ``(json_dic, status)`` where status is one of
              ``'finish'``, ``'repeat'``, ``'error'``, ``'abnormal'``
    :raises: re-raises any unexpected exception after logging it to Kafka
    """
    try:
        print('start spider ' + '=' * 50)
        print('title: ' + title)
        print('url: ' + url_)
        send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                      "INFO", workid, exeid, "爬取节点", f"开始爬取:{url_}")

        # 1. De-duplication bookkeeping ==============================================================================
        # Replace characters that are illegal in Windows file names (incl. control chars) with spaces.
        illegal_chars = r'[\\/:*?"<>|\x00-\x1f]'
        filename = re.sub(illegal_chars, ' ', title)

        # Absolute paths on 10.1.113.61.
        # BUG FIX: the sanitized `filename` was computed but never used — every
        # path contained a literal placeholder, so the exists-check below always
        # collided on the same file. Use `filename` in each output path.
        save_source = f'/mnt/vos-tfmfsko9/spider_data/{mediaType}'
        today = date.today()
        # html
        save_html = f'{save_source}/html/{today}'
        full_html = f'{save_html}/{filename}.html'
        # raw markdown
        save_dir = f'{save_source}/markdown/{today}'
        full_path = f'{save_dir}/{filename}.md'
        # clean markdown
        save_clear = f'{save_source}/md/{today}'
        full_clear = f'{save_clear}/{filename}.md'
        # json
        json_dir = f'{save_source}/json/{today}'
        json_path = f'{json_dir}/{filename}.json'

        # Guard clause: skip pages whose outputs already exist on disk.
        if os.path.exists(full_html) and os.path.exists(full_clear) and os.path.exists(json_path):
            print(f"The file already exists: {title} ")
            return {}, 'repeat'

        # 2. Crawl ===================================================================================================
        # 2.1 Browser settings
        browser_conf = BrowserConfig(
            headless=True,
            user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36"
        )
        # 2.2 Crawler settings: scroll to the bottom and wait 2s so lazy content loads.
        js_code = """
        window.scrollTo(0, document.body.scrollHeight);
        await new Promise(resolve => setTimeout(resolve, 2000));
        """
        run_conf = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            exclude_external_links=True,
            remove_overlay_elements=True,
            js_code=js_code,
            page_timeout=300000
        )
        # 2.3 Run the crawl
        async with AsyncWebCrawler(config=browser_conf) as crawler:
            result = await crawler.arun(
                url=url_,
                config=run_conf
            )
            if not result.success:
                # Crawl failed outright.
                print(f"Crawl failed: {result.error_message}")
                send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                              "ERROR", workid, exeid, "爬取节点", f"Crawl failed: {result.error_message}")
                return {}, 'error'

            if not result.html:
                # Crawl "succeeded" but returned no HTML.
                print("Crawl failed: no html")
                send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                              "ERROR", workid, exeid, "爬取节点", "Crawl failed: no html")
                return {}, 'abnormal'

            # 3. Extract file contents =================================================================================
            # 3.1 raw html
            data_html = result.html
            # 3.2 raw markdown (persisting it is currently disabled below)
            data_md = result.markdown
            # 3.3 Parse the html into a clean markdown + json record, per site.
            if "www.defensenews.com" in url_:
                json_dic, data_clear = analyze_DefenseNews(data_html, title, url_, mediaType)
            elif "www.twz.com" in url_:
                json_dic, data_clear = analyze_TWZ(data_html, title, url_, mediaType)
            else:
                # No parser registered for this site.
                print("Crawl failed: no analyze model!")
                send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                              "ERROR", workid, exeid, "爬取节点", "Crawl failed: no analyze model")
                return {}, 'abnormal'

            json_dic['htmlPath'] = full_html
            json_dic['mdPath'] = full_clear
            json_dic['workid'] = workid
            json_dic['exeid'] = exeid

            # 3.4 Pretty-printed JSON (feeds the disabled save step below).
            new_json = json.dumps(json_dic, indent=4, ensure_ascii=False, sort_keys=False)

            # 4. Persist to disk (currently disabled) ================================================================
            # # 4.1 raw html
            # output_file(save_html, full_html, data_html)
            # # raw markdown
            # # output_file(save_dir, full_path, data_md)
            # # 4.2 clean markdown
            # output_file(save_clear, full_clear, data_clear)
            # # 4.3 json
            # output_file(json_dir, json_path, new_json)

            # 5. Database import (currently disabled) ================================================================
            # es_input(json_dic)
            send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                          "INFO", workid, exeid, "爬取节点", f"The file download complete: {url_}")
            print(f"The file download complete: {title} ")
            send_message("aili_test", json_dic)
            return json_dic, 'finish'

    except Exception as e:
        print(f"爬虫过程出错：{str(e)}")
        # BUG FIX: this previously called send_message() with send_message2()'s
        # seven-argument signature, raising a TypeError that masked the real error.
        send_message2("spider_log", datetime.now(tz=tz).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],
                      "ERROR", workid, exeid, "爬取节点", f"爬虫过程出错：{str(e)}")
        raise


if __name__ == '__main__':
    # CLI usage: python <script> <title> <url> <mediaType> <workid> <exeid>
    argv = sys.argv
    title = argv[1]
    url = argv[2]
    mediaType = argv[3]
    workid = argv[4]
    exeid = argv[5]

    # Drive the async crawl to completion and report the parsed payload + status.
    json_data, status = asyncio.run(job(title, url, mediaType, workid, exeid))
    print(json_data)
    print(status)