import requests
from bs4 import BeautifulSoup
from ...config import Config

def fetch_webpage(url, timeout=10):
    """Fetch the raw HTML content of a webpage.

    Args:
        url: The URL to fetch.
        timeout: Seconds to wait for the server before giving up.
            Without a timeout, requests.get can block forever on an
            unresponsive host; 10s is a safe default for callers that
            did not previously pass one.

    Returns:
        The response body as text, or None if the request failed.
    """
    try:
        # timeout guards against hanging on dead/slow servers
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # raise HTTPError on 4xx/5xx
        return response.text
    except requests.exceptions.RequestException as e:
        # Best-effort: report and signal failure with None rather than raising
        print(f"Error fetching webpage: {e}")
        return None

def extract_text_from_html(html_content):
    """Extract the visible text from an HTML document.

    Script and style elements are removed first, then all remaining
    text is collected with one node per line; blank lines and
    surrounding whitespace are stripped from the result.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # Drop tags whose contents are never user-visible text
    for tag in soup(['script', 'style']):
        tag.decompose()

    raw_text = soup.get_text(separator='\n')

    # Normalize: strip each line and discard the empty ones
    cleaned = [stripped for line in raw_text.splitlines()
               if (stripped := line.strip())]
    return '\n'.join(cleaned)

def summarize_text_with_large_model(text, timeout=60):
    """Summarize text by calling a large-model chat-completions API.

    Args:
        text: The article/blog text to summarize.
        timeout: Seconds to wait for the model endpoint. LLM responses
            can be slow, but without a timeout requests.post may hang
            forever on an unreachable endpoint.

    Returns:
        The parsed JSON response from the API (full payload, not just
        the message content — callers depend on this shape), or None
        if the request failed.
    """
    # API_ENDPOINT = "http://175.6.21.106:31061/v1/chat/completions"
    API_ENDPOINT = Config.XIAOZHI_URL
    # Prompt tailored for blog/news-article summarization
    prompt = (
        "请基于以下内容生成一篇博客或新闻文章的简洁总结。"
        "总结应包括：\n"
        "1. 标题和主要内容：简要描述文章的主题和核心信息。\n"
        "2. 重要细节：突出文章中的关键事实或数据。\n"
        "3. 背景和上下文：提供必要的背景信息，使读者理解文章的内容。\n"
        "4. 结论和观点：总结文章的主要观点或结论。\n\n"
        f"内容如下：\n{text}"
    )

    payload = {
        "model": "llama3",
        "messages": [{"role": "user", "content": prompt}]
    }

    try:
        # timeout keeps a dead endpoint from blocking the caller indefinitely
        response = requests.post(API_ENDPOINT, json=payload, timeout=timeout)
        response.raise_for_status()  # raise HTTPError on 4xx/5xx
        summary = response.json()
        return summary
    except requests.exceptions.RequestException as e:
        # Best-effort: report and signal failure with None rather than raising
        print(f"Error summarizing text with large model: {e}")
        return None


# 主流程
# Main pipeline
def summarize_webpage(url):
    """Fetch a webpage, extract its text, and return a model-generated summary.

    Returns None if the page could not be fetched; otherwise returns
    whatever summarize_text_with_large_model produces (possibly None
    on API failure).
    """
    html = fetch_webpage(url)
    if not html:
        # Fetch failed — nothing to summarize
        return None
    page_text = extract_text_from_html(html)
    return summarize_text_with_large_model(page_text)
