# -*- coding: utf-8 -*-
# time: 2025/11/6 16:30
# file: news_zaobao_main.py
# author: Cardinal

import asyncio
import os
import json
import time
from datetime import datetime
from playwright.async_api import async_playwright
import requests
# import feedparser
import re
import functools
from kafka_deal import send_data, send_log
import pytz
import base64
import sys

tz = pytz.timezone('Asia/Shanghai')

# ===== Retry decorator =====
def retry_on_network_error(max_retries=5, delay=5, exceptions=(requests.exceptions.RequestException,)):
    """Decorator that retries a function when a network-related error occurs.

    The wait between attempts grows linearly: ``delay * attempt_number``
    (attempt 1 waits ``delay`` seconds, attempt 2 waits ``2 * delay``, ...).
    After ``max_retries`` failed attempts the last exception is re-raised.

    :param max_retries: maximum number of attempts before giving up.
    :param delay: base delay in seconds between retries.
    :param exceptions: tuple of exception types that trigger a retry.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            last_error = None
            for attempt in range(1, max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except exceptions as err:
                    last_error = err
                    if attempt >= max_retries:
                        # Retries exhausted: report and re-raise the last error.
                        print(f"❌ 函数 {func.__name__} 在 {max_retries} 次重试后仍然失败。错误: {err}")
                        raise
                    # Linear backoff: later attempts wait longer.
                    wait_time = delay * attempt
                    print(f"⚠️ 网络错误: {err}。将在 {wait_time} 秒后进行第 {attempt}/{max_retries} 次重试...")
                    time.sleep(wait_time)
            # Defensive: unreachable in practice (the loop raises on the
            # final attempt), kept as a safety net.
            raise last_error

        return wrapper

    return decorator

def sanitize_folder_name(name: str) -> str:
    """Sanitize a string so it is safe to use as a Windows folder/file name.

    - If the name contains a URL-encoded slash ('%2F'), keep only the last
      segment (handles names extracted from encoded URLs).
    - Replaces characters Windows forbids (\\ / : * ? " < > |) with '_'.
    - Strips leading/trailing whitespace.
    - Truncates the result to at most 150 characters.

    :param name: raw name, e.g. an article title or URL fragment.
    :return: sanitized name safe for filesystem use.
    """
    # Keep only the part after the last URL-encoded slash.
    if '%2F' in name:
        name = name.split('%2F')[-1]
    # Replace characters that are illegal in Windows file names.
    sanitized = re.sub(r'[\\/:*?"<>|]', "_", name)
    sanitized = sanitized.strip()
    # NOTE(review): the original comment claimed a 250-char limit, but the
    # code has always truncated at 150; keep 150 to preserve behavior.
    if len(sanitized) > 150:
        sanitized = sanitized[:150]

    return sanitized

@retry_on_network_error(max_retries=5, delay=10)
def image_scraping(images, output_dir, article_folder, headers):
    """Download each article image and return its metadata plus base64 payload.

    Retried as a whole on network errors (decorator): one failed download
    causes ALL images to be re-fetched on the next attempt.

    :param images: list of dicts with keys 'src' (image URL) and 'img_title'.
    :param output_dir: root output directory (kept for interface
        compatibility; local file saving is currently disabled).
    :param article_folder: sanitized per-article folder name (same note).
    :param headers: HTTP headers (User-Agent) used for the image requests.
    :return: list of dicts, one per image: name, base64 data, URL, HTTP
        status, empty 'name' field, and the caption as 'alt'.
    :raises requests.exceptions.RequestException: on HTTP errors or timeouts,
        once the decorator's retries are exhausted.
    """
    images_message = []
    image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tiff')
    for image in images:
        image_src = image['src']
        # Derive a filesystem-safe file name from the URL's last segment.
        file_name = sanitize_folder_name(image_src.split('/')[-1])
        # Ensure the file name carries an image extension.
        if not file_name.lower().endswith(image_extensions):
            file_name = file_name + '.jpg'

        # Fetch the image. The timeout (connect, read) keeps a stalled
        # connection from hanging forever — without it the retry decorator
        # would never get a chance to fire.
        response = requests.get(image_src, headers=headers, timeout=(10, 60))
        # Raise on 4xx/5xx so the retry decorator can kick in.
        response.raise_for_status()
        if response.status_code == 200:
            # Encode the binary payload as base64 text for the JSON message.
            image_data = base64.b64encode(response.content).decode('utf-8')
        else:
            # Only reachable for non-200 success codes (e.g. 204): errors
            # already raised above. Such responses carry no usable body.
            image_data = ""

        # Local saving of the image file is currently disabled; the payload
        # is only embedded into the Kafka message.
        message = {
            "image_name": file_name,
            "image_data": image_data,
            "url": image_src,
            "status": response.status_code,
            "name": "",
            "alt": image['img_title']
        }
        images_message.append(message)
    return images_message

def html_scraping(byline_html, images, body_html, tags, article_folder):
    """Assemble the scraped article into a standalone HTML document.

    NOTE(review): reads the module-level global ``title`` set in
    ``__main__`` — confirm before reusing this function elsewhere.

    :param byline_html: HTML of the publish-time / source byline.
    :param images: list of dicts with keys 'src' and 'img_title'.
    :param body_html: HTML of the article body.
    :param tags: list of keyword strings.
    :param article_folder: per-article folder name (kept for interface
        compatibility; local saving is currently disabled).
    :return: the complete HTML document as a string.
    """
    # Bug fix: the original interpolated the whole image dict into the src
    # attribute (f'<img src="{img}">'); use the actual URL and put the
    # caption into alt.
    image_markup = '<br>'.join(
        '<img src="{}" alt="{}">'.format(img['src'], img['img_title'])
        for img in images
    )
    html_content = f"""
       <html lang="en">
       <head>
           <meta charset="UTF-8">
           <title>{title}</title>
       </head>
       <body>
           <h1>{title}</h1>
           <div>{byline_html}</div>
           {image_markup}
           <article>{body_html}</article>
           <p><b>Tags:</b> {', '.join(tags)}</p>
       </body>
       </html>
       """
    # Local saving of the HTML file is currently disabled; the content is
    # only embedded into the JSON message sent to Kafka.
    return html_content

def json_scraping(title, body_text, tags, images_message, article_folder, spider_data, pub_date, mediaType, workid, exeid, html_content, region):
    """Build the final article JSON message and push it to Kafka.

    NOTE(review): reads the module-level global ``link`` set in
    ``__main__`` — confirm before reusing this function elsewhere.

    :param title: article headline.
    :param body_text: plain-text article body.
    :param tags: list of keyword strings.
    :param images_message: per-image dicts produced by ``image_scraping``.
    :param article_folder: per-article folder name (kept for interface
        compatibility; local JSON saving is currently disabled).
    :param spider_data: Kafka topic for article data.
    :param pub_date: RSS-style publish date, e.g. 'Thu, 06 Nov 2025 08:00:00 GMT'.
    :param mediaType: media category; falls back to '实时政治' when falsy.
    :param workid: job id for tracing.
    :param exeid: execution id for tracing.
    :param html_content: assembled HTML document string.
    :param region: region label for the article.
    """
    # Crawl time in Asia/Shanghai. The module-level ``tz`` was defined for
    # this purpose but previously unused — a bare datetime.now() would use
    # whatever timezone the host happens to run in.
    timestamp = datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
    mediaType = mediaType or '实时政治'

    # Normalize the RSS publish date to 'YYYY-MM-DD HH:MM:SS'.
    dt_obj = datetime.strptime(pub_date, "%a, %d %b %Y %H:%M:%S %Z")
    formatted = dt_obj.strftime("%Y-%m-%d %H:%M:%S")

    content = body_text.strip()
    result = {
        'workid': workid,
        'exeid': exeid,
        'mediaType': mediaType,
        'mediaCode': '7',
        'mediaName': 'Lianhe Zaobao',
        'region': region,
        'sourceType': '新闻',
        'updateTime': formatted,
        'crawlTime': timestamp,
        'url': link,
        'headline': title,
        'abstract': content[:150],
        'content': content,
        "tags": tags,
        "images": images_message,
        "html": html_content
    }
    # Send the article message to Kafka.
    send_data(spider_data, result)

    # Local saving of the JSON file is currently disabled; the result only
    # goes out via Kafka.

async def scrape_article(title, link, pub_date, mediaType, workid, exeid, output_dir, spider_log, spider_data, region):
    """Scrape one Zaobao article page and publish the results to Kafka.

    Renders the page with headless Chromium (Playwright), extracts the body
    text/HTML, image captions+URLs, keywords and the byline, then downloads
    the images, assembles an HTML document and sends the combined JSON to
    Kafka. Progress is logged to ``spider_log`` at each stage.

    :param title: article headline (also used to derive the output folder name).
    :param link: article URL.
    :param pub_date: RSS-style publish date string.
    :param mediaType: media category (may be empty).
    :param workid: job id for tracing.
    :param exeid: execution id for tracing.
    :param output_dir: root output directory (local saving currently disabled).
    :param spider_log: Kafka topic for log messages.
    :param spider_data: Kafka topic for article data.
    :param region: region label forwarded into the JSON message.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 Chrome/141.0.0.0 Safari/537.36"
    }
    # Log: crawling started.
    send_log(spider_log, workid, exeid, link, "Crawling start", '')

    # Render the page in a headless browser.
    async with async_playwright() as p:
        # Open the article URL.
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()
        await page.goto(link, timeout=120000)

        # Wait for the main content to load ===================================
        await page.wait_for_selector('#article-body', timeout=120000)
        send_log(spider_log, workid, exeid, link, "Simulate browser scraping", '')

        # ===== Extract body paragraphs =====
        # Plain text
        paragraphs = await page.locator('#article-body').all_inner_texts()
        # Raw HTML of the body element(s)
        body_html_parts = await page.locator('#article-body').evaluate_all(
            "els => els.map(e => e.outerHTML)"
        )
        body_text = "\n".join(paragraphs)
        body_html = "\n".join(body_html_parts)

        # ===== Extract image captions and URLs =====
        image_elements = page.locator('div.rounded.aspect-picture.overflow-hidden > img[title]')
        images = []
        for i in range(await image_elements.count()):
            img_title = await image_elements.nth(i).get_attribute('title')
            src = await image_elements.nth(i).get_attribute('src')
            images.append({'img_title': img_title, 'src': src})

        # ===== Extract keywords =====
        tags_elements = page.locator('a[href$="article-bottom-keyword"]')
        tags = await tags_elements.evaluate_all("els => els.map(e => e.innerText.trim())")

        # ===== Extract publish time and source byline =====
        byline_element = page.locator('#publish_time')
        byline_html = await byline_element.inner_html() if await byline_element.count() > 0 else ""
        # NOTE(review): byline_text is extracted but currently unused.
        byline_text = await byline_element.inner_text() if await byline_element.count() > 0 else ""

        await browser.close()

    send_log(spider_log, workid, exeid, link, "Analyze webpage", '')

    # Organize the scraped results
    # ===== Output directory =====
    # Per-article folder name derived from the headline
    article_folder = sanitize_folder_name(title)
    # os.makedirs(output_dir + os.sep + article_folder, exist_ok=True)

    # 1. Images
    # 1.1 produce per-image JSON dicts (base64 payloads)
    # 1.2 local download is currently disabled inside image_scraping
    images_message = image_scraping(images, output_dir, article_folder, headers)
    send_log(spider_log, workid, exeid, link,"Image download", '')

    # 3. HTML
    # 3.1 assemble the HTML document (local saving disabled)
    html_content = html_scraping(byline_html, images, body_html, tags, article_folder)
    send_log(spider_log, workid, exeid, link,"Create HTML", '')

    # 2. JSON
    # 2.1 build the overall JSON message
    # 2.2 merge image info into the full JSON and send it to Kafka
    # 2.3 local JSON saving is currently disabled
    json_scraping(title, body_text, tags, images_message, article_folder, spider_data,
                  pub_date, mediaType, workid, exeid, html_content, region)
    send_log(spider_log, workid, exeid, link, "Create JSON", '')

    # Log: crawling finished.
    send_log(spider_log, workid, exeid, link, "Crawling finished", '')


if __name__ == "__main__":
    # Tech stack:
    #   - playwright: renders the article page (see scrape_article)
    #   - kafka_deal: pushes data/log messages to Kafka
    # (RSS parsing via feedparser is presumably handled upstream — its
    # import is commented out at the top of the file.)

    # CLI contract: title link pub_date mediaType workid exeid region.
    # Validate up front instead of crashing with an IndexError.
    if len(sys.argv) < 8:
        print("Usage: python news_zaobao_main.py "
              "<title> <link> <pub_date> <mediaType> <workid> <exeid> <region>")
        sys.exit(1)

    title = sys.argv[1]
    link = sys.argv[2]
    pub_date = sys.argv[3]
    mediaType = sys.argv[4]
    workid = sys.argv[5]
    exeid = sys.argv[6]
    region = sys.argv[7]

    # Local output folder (saving currently disabled) and Kafka topic names.
    output_dir = 'zaobao_news'
    spider_log = "spider_log"
    spider_data = "spider_data"

    try:
        asyncio.run(scrape_article(title, link, pub_date, mediaType, workid, exeid, output_dir, spider_log, spider_data, region))
    except Exception as e:
        print(f"爬虫过程出错：{str(e)}")
        # Pass a string like every other send_log call (the original passed
        # the raw exception object here).
        send_log(spider_log, workid, exeid, link, "Error", str(e))
        raise



