import os
import requests
from bs4 import BeautifulSoup
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from config_reader import get_serpapi_api_key, get_deepseek_api_key


# ==== Utility functions ====
def google_search(query: str, num_results: int = 3) -> list:
    """Search for the latest news links via SerpAPI.

    Args:
        query: Search query string.
        num_results: Maximum number of results requested from SerpAPI.

    Returns:
        A list of result URLs whose link or title matches one of the
        Twitter-related keywords; empty on any API or network error.

    Raises:
        ValueError: If the SerpAPI key cannot be read from the config file.
    """
    try:
        serpapi_key = get_serpapi_api_key()
    except ValueError as e:
        raise ValueError(f"配置文件错误: {e}")

    params = {
        "q": query,
        "tbm": "nws",  # "nws" restricts the search to Google News results
        "api_key": serpapi_key,
        "num": num_results
    }
    # Fix: never leak the API key into logs — print a redacted copy instead.
    print(f"🔍 搜索参数: {dict(params, api_key='***')}")

    # Fix: a network failure or non-JSON body previously crashed the whole
    # agent; treat it like any other failed search and return no links.
    try:
        response = requests.get("https://serpapi.com/search", params=params, timeout=15)
        data = response.json()
    except (requests.RequestException, ValueError) as e:
        print(f"❌ SerpAPI错误: {e}")
        return []

    # Debug info
    print(f"📊 SerpAPI响应状态: {response.status_code}")
    print(f"📊 搜索结果数量: {len(data.get('news_results', []))}")

    if 'error' in data:
        print(f"❌ SerpAPI错误: {data['error']}")
        return []

    # Keep only results whose link or title mentions Twitter/X.
    # Keywords are already lowercase, so lowering the haystack suffices;
    # the list is hoisted out of the loop (it is loop-invariant).
    twitter_keywords = ["twitter", "x.com", "elon musk", "马斯克", "推特"]

    links = []
    for item in data.get("news_results", []):
        link = item.get("link", "")
        title = item.get("title", "")
        print(f"🔗 找到链接: {link}")
        print(f"📰 标题: {title}")

        haystack = (link + " " + title).lower()
        if any(keyword in haystack for keyword in twitter_keywords):
            links.append(link)
            print(f"✅ Twitter相关链接: {link}")

    return links


def read_links(links: list) -> list:
    """Download each URL and extract its visible page text.

    Best-effort: pages that fail to download or parse are logged and
    skipped, so the returned list may be shorter than the input.
    """
    articles = []
    for url in links:
        try:
            page = requests.get(url, timeout=10)
            dom = BeautifulSoup(page.text, "html.parser")
            articles.append(dom.get_text(separator=" ", strip=True))
        except Exception as err:
            print(f"❌ 读取失败 {url}：{err}")
    return articles


def generate_summary(texts: list) -> str:
    """Produce a short summary of the given article texts with DeepSeek v3.

    Args:
        texts: Article bodies to summarize; joined into a single prompt.

    Returns:
        The stripped summary text from the model's first choice.

    Raises:
        ValueError: If the DeepSeek API key is missing from the config file.
        Exception: If the API responds with a non-200 status code.
    """
    try:
        deepseek_api_key = get_deepseek_api_key()
    except ValueError as e:
        raise ValueError(f"配置文件错误: {e}")

    joined = "\n\n".join(texts)
    prompt = f"请把以下关于 Twitter 的新闻总结成一段简洁摘要（不超过300字）：\n{joined}"

    response = requests.post(
        "https://api.deepseek.com/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {deepseek_api_key}",
            "Content-Type": "application/json",
        },
        json={
            "model": "deepseek-chat",
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.3,
            "max_tokens": 300,
        },
        timeout=30,
    )

    if response.status_code != 200:
        raise Exception(f"DeepSeek API 请求失败: {response.status_code} - {response.text}")

    body = response.json()
    return body["choices"][0]["message"]["content"].strip()


# ==== Agent main flow ====
def run_agent():
    """Agent entry point: search for, read, and summarize Twitter news.

    Runs a fixed three-step pipeline; later steps are skipped with a
    warning when an earlier step produced no data.
    """
    goal = "获取 Twitter 最新新闻摘要"
    print(f"🎯 目标：{goal}")

    tasks = ["搜索 Twitter 新闻链接", "阅读文章内容", "生成摘要"]
    search_results = None
    articles = None
    summary = None

    for task in tasks:
        if task == "搜索 Twitter 新闻链接":
            print("🔍 正在搜索 Twitter 新闻...")
            # Try several queries until enough links are collected.
            search_queries = ["Elon Musk Twitter news", "Twitter X news", "马斯克 推特 新闻", "Twitter latest news"]
            search_results = []
            # Fix: different queries can return the same URL; deduplicate
            # (order-preserving) so each article is fetched only once.
            seen = set()
            for query in search_queries:
                print(f"🔍 尝试搜索: {query}")
                for link in google_search(query, 5):
                    if link not in seen:
                        seen.add(link)
                        search_results.append(link)
                if len(search_results) >= 3:  # Stop once we have enough links
                    break
            print(f"✅ 找到 {len(search_results)} 条链接")

        elif task == "阅读文章内容":
            if not search_results:
                print("⚠️ 没有搜索结果，跳过阅读")
                continue
            print("📖 正在读取文章内容...")
            articles = read_links(search_results)
            print(f"✅ 成功读取 {len(articles)} 篇文章")

        elif task == "生成摘要":
            if not articles:
                print("⚠️ 没有文章内容，无法生成摘要")
                continue
            print("✍️ 正在生成摘要...")
            summary = generate_summary(articles)
            print("\n📰 最终摘要：")
            print(summary)


if __name__ == "__main__":
    run_agent()