# -*- coding: utf-8 -*-
# @Time : 2025-02-01
# @Author : wxp
# @File : __init__.py
# @Project : wxp-python

import requests
from bs4 import BeautifulSoup
from googletrans import Translator
import time


def translate_html_content(url, title="Grafana Loki 中文文档"):
    """Fetch the page at *url*, translate its English text nodes to
    Simplified Chinese in place, and return a standalone HTML document.

    Args:
        url: Address of the page to download and translate.
        title: ``<title>`` of the generated document. Defaults to the
            value that was previously hard-coded, so existing callers
            are unaffected.

    Returns:
        str: A complete HTML document containing the translated content.

    Raises:
        requests.HTTPError: If the download returns a non-2xx status.
        ValueError: If the page contains neither <article> nor <body>.
    """
    # Static fetch only — pages that render their content with JavaScript
    # would need Selenium or similar instead.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    response = requests.get(url, headers=headers, timeout=30)
    # Fail loudly rather than silently translating an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')

    # Prefer the main <article>; fall back to <body>. Adjust the selector
    # to match the structure of the target site if needed.
    main_content = soup.find('article') or soup.body
    if main_content is None:
        raise ValueError("页面中未找到 <article> 或 <body> 内容")

    # Initialize the translator.
    # NOTE(review): `service_urls` support varies across googletrans
    # versions — confirm against the installed release.
    translator = Translator(service_urls=['translate.google.cn'])

    # Walk every text node and translate in place so the surrounding HTML
    # structure (tags, attributes) is preserved. `string=True` is the
    # modern bs4 spelling of the deprecated `text=True`.
    for element in main_content.find_all(string=True):
        # Never translate code blocks or embedded scripts/styles.
        if element.parent.name in ('script', 'style', 'code', 'pre'):
            continue
        # Skip whitespace-only nodes: saves API calls and avoids
        # needless rate-limit pressure.
        if not element.strip():
            continue
        try:
            translated = translator.translate(element, src='en', dest='zh-cn').text
            element.replace_with(translated)
            time.sleep(0.5)  # throttle to avoid the service's rate limit
        except Exception as e:
            # Best effort: leave this node untranslated and move on.
            print(f"翻译失败: {str(e)}")
            continue

    # Rebuild a complete standalone document. `soup.head` may be None for
    # fragmentary pages; the `or ''` keeps the literal string "None" out
    # of the output.
    head_html = str(soup.head or '')
    translated_html = f"""<!DOCTYPE html>
    <html lang="zh-CN">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>{title}</title>
        {head_html}
    </head>
    <body>
        {str(main_content)}
    </body>
    </html>"""

    return translated_html


# Example usage: translate the Loki docs landing page and save it locally.
if __name__ == "__main__":
    source_url = "https://grafana.com/docs/loki/latest/"

    print("开始翻译文档...")
    result_html = translate_html_content(source_url)

    # Persist the translated document next to the script.
    output_name = "loki_docs_zh_CN.html"
    with open(output_name, "w", encoding="utf-8") as out_file:
        out_file.write(result_html)

    print("翻译完成！已保存为 loki_docs_zh_CN.html")