from openai import OpenAI
from zhipuai import ZhipuAI
import os
import requests
from bs4 import BeautifulSoup
import re
import json

def call_llm(messages):
    """Send a chat-completion request to the DeepSeek API and return the reply text.

    Args:
        messages: List of chat messages in OpenAI format,
            e.g. ``[{"role": "user", "content": "..."}]``.

    Returns:
        str: The assistant's reply content.

    Raises:
        RuntimeError: If the ``OPENAI_API_KEY`` environment variable is unset.
    """
    # SECURITY: an API key used to be hard-coded here as the .get() fallback.
    # Never commit secrets — require the environment variable instead
    # (and rotate the previously exposed key).
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY environment variable is not set")

    # DeepSeek exposes an OpenAI-compatible endpoint, hence the OpenAI client.
    client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com/v1")

    response = client.chat.completions.create(
        model="deepseek-chat",
        messages=messages,
        temperature=0.7,
    )

    return response.choices[0].message.content

def call_zhipuai(messages):
    """Send a chat-completion request to the ZhipuAI API and return the reply text.

    Args:
        messages: List of chat messages in OpenAI format,
            e.g. ``[{"role": "user", "content": "..."}]``.

    Returns:
        str: The assistant's reply content.

    Raises:
        RuntimeError: If the ``ZHIPUAI_API_KEY`` environment variable is unset.
    """
    # SECURITY: an API key used to be hard-coded here as the .get() fallback.
    # Never commit secrets — require the environment variable instead
    # (and rotate the previously exposed key).
    api_key = os.environ.get("ZHIPUAI_API_KEY")
    if not api_key:
        raise RuntimeError("ZHIPUAI_API_KEY environment variable is not set")

    client = ZhipuAI(api_key=api_key)
    response = client.chat.completions.create(
        model="glm-4-flash",  # model code to invoke
        messages=messages,
    )
    return response.choices[0].message.content

def search_chinanews(query, num_results=10):
    """Search chinanews.com and return the top news results.

    The search page embeds its results in a JavaScript ``docArr`` array,
    which is extracted with a regex and parsed as JSON.

    Args:
        query: Search keyword(s).
        num_results: Maximum number of results to return (default 10).

    Returns:
        list[dict]: Items of the form ``{"title": ..., "url": ...}``;
        empty list on any failure (network error, layout change, bad JSON).
    """
    search_url = "https://sou.chinanews.com/search/news"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }

    try:
        print(f"正在访问URL: {search_url}?q={query}")
        # params= percent-encodes the query (the old f-string interpolation
        # broke on spaces / special characters); timeout prevents an
        # indefinite hang on an unresponsive server.
        response = requests.get(
            search_url, params={"q": query}, headers=headers, timeout=10
        )
        response.raise_for_status()

        print(f"HTTP状态码: {response.status_code}")

        # Extract the docArr variable from the page's JavaScript.
        html_content = response.text
        doc_arr_pattern = re.compile(r'var docArr = (\[.*?\]);', re.DOTALL)
        match = doc_arr_pattern.search(html_content)

        if not match:
            print("未找到docArr变量")
            return []

        # Parse the captured JSON array.
        doc_arr_json = match.group(1)
        try:
            doc_arr = json.loads(doc_arr_json)
            print(f"成功解析到 {len(doc_arr)} 条新闻")
        except json.JSONDecodeError as e:
            print(f"JSON解析错误: {e}")
            return []

        # Collect up to num_results items.
        results = []
        for i, item in enumerate(doc_arr):
            if i >= num_results:
                break

            title = item.get("title", "")
            # NOTE: renamed from `url` — the old code shadowed the request
            # URL used in the except-branch diagnostics.
            news_url = item.get("url", "")

            # The title field is sometimes a list; guard against it being empty.
            if isinstance(title, list):
                title = title[0] if title else ""

            # Strip HTML markup (e.g. <em> highlight tags) from the title.
            title = re.sub(r'<[^>]+>', '', title)

            if news_url:
                results.append({"title": title, "url": news_url})
                print(f"提取到: {title} - {news_url}")

        return results
    except Exception as e:
        # Best-effort scraper: log the failure and return an empty result set
        # rather than propagating (site layout/network issues are expected).
        print(f"搜索中新网时出错: {e}")
        import traceback
        traceback.print_exc()
        return []

if __name__ == "__main__":
    # Test the LLM call
    # print("Testing LLM call...")
    # messages = [{"role": "user", "content": "简短的回答，生活的意义是什么？"}]
    # response = call_llm(messages)
    # print(f"Prompt: {messages[0]['content']}")
    # print(f"Response: {response}")

    # print("Testing ZhipuAI call...")
    # messages = [{"role": "user", "content": "简短的回答，生活的意义是什么？"}]
    # response = call_zhipuai(messages)
    # print(f"Prompt: {messages[0]['content']}")
    # print(f"Response: {response}")

    # print("Testing China News search...")
    # results = search_chinanews("科技")
    # print(f"Found {len(results)} news items:")
    # for i, result in enumerate(results, 1):
    #     print(f"{i}. {result['title']}: {result['url']}")

    pass