import json
import logging

from crewai import Agent
from crewai_tools import ScrapeWebsiteTool
from pydantic import BaseModel, Field

from conn import llms

# Model for the page content and its translated counterpart
class PageContent(BaseModel):
    """Structured agent response: an English summary of the page, its
    Chinese translation, and a flag for whether the page is protected."""

    # An annotated field with no default is required in pydantic, so the
    # explicit `...` sentinel is redundant and omitted here.
    summary_en: str = Field(description="页面摘要")
    summary_zh: str = Field(description="翻译后的摘要")
    is_protected: bool = Field(description="页面是否受保护的")

# Fetch page content and translate it
def get_page(url: str) -> dict:
    """Scrape *url*, summarize it, and translate the summary to Chinese.

    Builds a one-off crewAI agent equipped with ``ScrapeWebsiteTool``,
    asks it for an English summary, a Chinese translation, and a flag
    telling whether the page is access-protected, then parses the
    structured JSON response.

    Args:
        url: The page URL to scrape and summarize.

    Returns:
        A dict with keys ``summary_en``, ``summary_zh`` and
        ``is_protected``. On any failure, a best-effort fallback dict
        with empty summaries and ``is_protected=True`` is returned
        instead of raising, so callers never see an exception.
    """
    agent = Agent(
        role="内容获取与翻译助手",
        # BUG FIX: the original prompt ended with "保护标识为flask" — "flask"
        # is an obvious typo for the boolean literal "false" and misled the
        # model about what to emit for unprotected pages.
        goal="从url获取页面内容摘要，并翻译成中文。如果页面受到保护就给个标识，否则保护标识为false",
        llm=llms.get_llm(),
        backstory="非常专业的内容获取与翻译助手",
        tools=[ScrapeWebsiteTool()],  # scrapes the raw page content
        verbose=True,
    )
    try:
        result = agent.kickoff(url, response_format=PageContent)
        return json.loads(result.raw)
    except Exception:
        # Boundary-level catch: log the failure instead of silently
        # swallowing it, but keep the original best-effort fallback.
        logging.getLogger(__name__).exception("get_page failed for url=%s", url)
        return {"summary_en": "", "summary_zh": "", "is_protected": True}




if __name__ == "__main__":
    # Quick manual smoke run against a sample FFmpeg commit page.
    target_url = "https://code.ffmpeg.org/FFmpeg/FFmpeg/commit/13ce36fef98a3f4e6d8360c24d6b8434cbb8869b"
    print(get_page(target_url))