# -*- coding: utf-8 -*-
# @Time : 2025/7/23 15:17
# @File : File_Output_Function.py
# @Software : PyCharm


import os
from datetime import datetime

from bs4 import BeautifulSoup
from elasticsearch import Elasticsearch
from zoneinfo import ZoneInfo


# 保存文件
# Save text content to a file.
def output_file(save_dir, full_path, file_str):
    """Write *file_str* (UTF-8) to *full_path*, creating *save_dir* first if needed."""
    os.makedirs(save_dir, exist_ok=True)
    with open(full_path, mode='w', encoding='utf-8') as handle:
        handle.write(file_str)
    # Log the destination so the caller can locate the saved file.
    print(f"已保存到： {full_path}")


# 解析HTML: 人民网
# Parse HTML: people.cn (人民网)
def analyze_rmw(data_html):
    """Extract article fields from a people.cn article page.

    Parameters
    ----------
    data_html : str
        Raw HTML of the article page.

    Returns
    -------
    tuple[dict, str]
        (json_dic, markdown_template) where json_dic holds headline,
        source, content, updateTime (datetime) and channel.

    Raises
    ------
    IndexError
        If the page layout does not match the expected selectors.
    """
    soup = BeautifulSoup(data_html, 'html.parser')

    headline = soup.select('div.col.col-1.fl > h1')[0].get_text()
    source = soup.select('div.col.col-1.fl div.col-1-1.fl a[target]')[0].get_text()
    # Join paragraph texts in one pass instead of quadratic += concatenation.
    content_list = soup.select('div.rm_txt_con.cf > p[style]')
    content = "".join(p.get_text() for p in content_list)
    # The byline reads like "2025年07月23日15:17 | 来源:..."; keep only the timestamp part.
    time_str = soup.select('div.col.col-1.fl div.col-1-1.fl')[0].get_text().strip().split(' |')[0]
    updateTime = datetime.strptime(time_str, "%Y年%m月%d日%H:%M")
    # Second breadcrumb entry names the channel (e.g. 财经).
    channel = soup.select('#rwb_navpath > a:nth-child(2)')[0].get_text()

    json_dic = {
        'headline': headline,
        'source': source,
        'content': content,
        'updateTime': updateTime,
        'channel': channel
    }

    markdown_template = f"""# {headline}
    source： {source} 
    channel： {channel} 
    updateTime： {updateTime}
    content: {content}
    """

    return json_dic, markdown_template


# 解析HTML: 中国新闻网
# Parse HTML: chinanews.com (中国新闻网)
def analyze_zgxww(data_html, url='http://www.chinanews.com/cj/2025/09-19/10485045.shtml',
                  crawl_time='2025-09-19 10:00:02'):
    """Extract article fields from a chinanews.com article page.

    Parameters
    ----------
    data_html : str
        Raw HTML of the article page.
    url : str, optional
        Source URL of the article. Defaults to the original hard-coded
        placeholder for backward compatibility; callers should pass the
        real URL.
    crawl_time : str, optional
        Crawl timestamp ('%Y-%m-%d %H:%M:%S'). Defaults to the original
        hard-coded placeholder; callers should pass the actual time.

    Returns
    -------
    tuple[dict, str]
        (json_dic, markdown_template).

    Raises
    ------
    IndexError
        If the page layout does not match the expected selectors.
    """
    soup = BeautifulSoup(data_html, 'html.parser')

    mediaType = '新闻'
    mediaCode = '000001'
    mediaName = '中国新闻网'
    # Timestamp line looks like "2025年09月19日 10:00　来源:..." (fullwidth-space separated).
    time_str = soup.select('div.content_maincontent_more > div.content_left_time')[0].get_text().strip().split('　')[0]
    updateTime = str(datetime.strptime(time_str, "%Y年%m月%d日 %H:%M"))
    crawlTime = crawl_time
    headline = soup.select('div.content_maincontent_more > h1')[0].get_text()

    # Join stripped paragraph texts in one pass instead of quadratic += concatenation.
    content_list = soup.select('div.content_maincontent_content > div.left_zw > p')
    content = "".join(p.get_text().strip() for p in content_list)

    # Abstract: first 100 chars with ellipsis, or the full text when short enough.
    if len(content) <= 100:
        abstract = content
    else:
        abstract = content[:100] + '......'

    channel = '财经'

    json_dic = {
        'mediaType': mediaType,
        'mediaCode': mediaCode,
        'mediaName': mediaName,
        'updateTime': updateTime,
        'crawlTime': crawlTime,
        'url': url,
        'headline': headline,
        'abstract': abstract,
        'channel': channel
    }

    markdown_template = f"""# {headline}
    mediaType: {mediaType}
    mediaCode: {mediaCode}
    mediaName: {mediaName}
    updateTime: {updateTime}
    crawlTime: {crawlTime}
    url: {url}
    abstract: {abstract}
    content: {content}
    channel: {channel}
    """

    return json_dic, markdown_template


# 解析HTML: Defense News
# Parse HTML: Defense News
def analyze_DefenseNews(data_html, title, url_, mediaType_):
    """Extract article fields from a Defense News article page.

    Parameters
    ----------
    data_html : str
        Raw HTML of the article page.
    title : str
        Article headline (already known from the feed).
    url_ : str
        Article URL.
    mediaType_ : str
        Media type label to record.

    Returns
    -------
    tuple[dict, str]
        (json_dic, markdown_template).

    Raises
    ------
    IndexError
        If the page layout does not match the expected selectors.
    """
    soup = BeautifulSoup(data_html, 'html.parser')

    mediaType = mediaType_
    mediaCode = '000001'
    mediaName = 'Defense News'
    sourceType = '新闻'

    # NOTE(review): the id "#c0fM802uPEen8qp" looks auto-generated and may be
    # page-specific — confirm it is stable across articles.
    update_selector = "#c0fM802uPEen8qp > div > div.ArticleHeader__Meta-sc-1dhqito-4.AyFNL.c-articleHeader__meta > div.ArticleHeader__BylineWrapper-sc-1dhqito-1.hjxgAe.c-articleHeader__byline > div.c-articleHeader__date > time"
    updateTime_raw = soup.select(update_selector)[0].get('datetime')
    # Convert the ISO timestamp to Beijing time for consistency with other sources.
    beijing_tz = ZoneInfo("Asia/Shanghai")
    updateTime = datetime.fromisoformat(updateTime_raw).astimezone(beijing_tz).strftime('%Y-%m-%d %H:%M:%S')
    crawlTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    url = url_
    headline = title

    # Join stripped paragraph texts in one pass instead of quadratic += concatenation.
    content_list = soup.select('#c0fFtVRmQEen8mE > article > p')
    content = "".join(p.get_text().strip() for p in content_list)

    # Bug fix: threshold now matches the 150-char slice (was <= 100, which made
    # 101-150-char articles get their full text plus a spurious '......').
    if len(content) <= 150:
        abstract = content
    else:
        abstract = content[:150] + '......'

    json_dic = {
        'mediaType': mediaType,
        'mediaCode': mediaCode,
        'mediaName': mediaName,
        'sourceType': sourceType,
        'updateTime': updateTime,
        'crawlTime': crawlTime,
        'url': url,
        'headline': headline,
        'abstract': abstract
    }

    markdown_template = f"""# {headline}
    mediaType: {mediaType}
    mediaCode: {mediaCode}
    mediaName: {mediaName}
    sourceType: {sourceType}
    updateTime: {updateTime}
    crawlTime: {crawlTime}
    url: {url}
    abstract: {abstract}
    content: {content}
    """
    return json_dic, markdown_template


# 解析HTML: The War Zone
# Parse HTML: The War Zone
def analyze_TWZ(data_html, title, url_, mediaType_):
    """Extract article fields from a The War Zone article page.

    Parameters
    ----------
    data_html : str
        Raw HTML of the article page.
    title : str
        Article headline (already known from the feed).
    url_ : str
        Article URL.
    mediaType_ : str
        Media type label to record.

    Returns
    -------
    tuple[dict, str]
        (json_dic, markdown_template).

    Raises
    ------
    IndexError
        If the page layout does not match the expected selectors.
    """
    soup = BeautifulSoup(data_html, 'html.parser')

    mediaType = mediaType_
    mediaCode = '000001'
    mediaName = 'The War Zone'
    sourceType = '新闻'

    update_selector = "header > div.container.flex.flex-col.items-center > div.featured-template-byline-wrapper > div > div.item-wrapper--date.item-wrapper > div > p > time"
    updateTime_raw = soup.select(update_selector)[0].get('datetime')
    # Convert the ISO timestamp to Beijing time for consistency with other sources.
    beijing_tz = ZoneInfo("Asia/Shanghai")
    updateTime = datetime.fromisoformat(updateTime_raw).astimezone(beijing_tz).strftime('%Y-%m-%d %H:%M:%S')
    crawlTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    url = url_
    headline = title

    # Join stripped paragraph texts in one pass instead of quadratic += concatenation.
    content_list = soup.select('section > div.entry-content.Article-bodyText.paywall.border-b-2.w-full.mb-6 > div > p')
    content = "".join(p.get_text().strip() for p in content_list)

    # Bug fix: threshold now matches the 150-char slice (was <= 100, which made
    # 101-150-char articles get their full text plus a spurious '......').
    if len(content) <= 150:
        abstract = content
    else:
        abstract = content[:150] + '......'

    json_dic = {
        'mediaType': mediaType,
        'mediaCode': mediaCode,
        'mediaName': mediaName,
        'sourceType': sourceType,
        'updateTime': updateTime,
        'crawlTime': crawlTime,
        'url': url,
        'headline': headline,
        'abstract': abstract
    }

    markdown_template = f"""# {headline}
    mediaType: {mediaType}
    mediaCode: {mediaCode}
    mediaName: {mediaName}
    sourceType: {sourceType}
    updateTime: {updateTime}
    crawlTime: {crawlTime}
    url: {url}
    abstract: {abstract}
    content: {content}
    """
    return json_dic, markdown_template


def es_input(json_data, host='http://10.1.110.57:9200', auth=('elastic', '123456'),
             index_name='spider_news'):
    """Index a JSON document into Elasticsearch.

    Parameters
    ----------
    json_data : dict
        Document to index.
    host : str, optional
        Elasticsearch endpoint. Defaults to the original hard-coded address
        for backward compatibility.
    auth : tuple[str, str], optional
        (username, password) for basic auth.
    index_name : str, optional
        Target index name.

    # SECURITY: credentials are embedded in the source as defaults — move them
    # to environment variables or a config file before deploying.
    """
    es = Elasticsearch(host, basic_auth=auth)
    # Index the JSON document into Elasticsearch (auto-generated document id).
    response = es.index(index=index_name, document=json_data)
    # Log the outcome ('created' / 'updated').
    print(response['result'])


# if __name__ == '__main__':
#     title = "It’s crunch time for Poland to pick a new submarine design"
#     url_ = "https://www.defensenews.com/global/europe/2025/10/14/its-crunch-time-for-poland-to-pick-a-new-submarine-design/"
#     mediaType = "military"
#     json_dic, markdown_template = analyze_TWZ(title, url_, mediaType)
#     print(json_dic)
#     print(markdown_template)

