from datetime import datetime
from math import log
from agents import function_tool
import os
import httpx
import logging
import json
import random


DEFAULT_SEARXNG_INSTANCE = "http://192.168.0.10:8080/"
# Get httpx's logger object
httpx_logger = logging.getLogger("httpx")

# Raise the level to ERROR to silence httpx's per-request INFO logging
httpx_logger.setLevel(logging.ERROR)

import json
from typing import Any, Union
def decode_json(
    json_str: str,
    strict: bool = True,
    default: Any = None
) -> Union[dict, list, None]:
    """Safely parse a JSON string, tolerating Markdown code-fence wrappers.

    Handles all of the following input shapes:
    ```json
    {"key": "value"}
    ```
    ```
    {"key": "value"}
    ```
    as well as plain JSON strings without fence markers.

    Args:
        json_str: JSON string to parse.
        strict: Strict mode (True: invalid JSON raises an exception).
        default: Value returned on failure in non-strict mode.

    Returns:
        dict/list: Parsed Python object on success.
        None (or `default`): On failure in non-strict mode.

    Raises:
        ValueError: Malformed JSON in strict mode.
        TypeError: Input is not a string (strict mode only).
    """
    if not isinstance(json_str, str):
        if strict:
            raise TypeError(f"Expected string, got {type(json_str).__name__}")
        return default

    cleaned_str = json_str.strip()
    # Strip opening and closing fences independently: the original
    # `elif endswith` branch never ran when the string also started with a
    # fence, and relied on rstrip('`') over-stripping trailing backticks.
    if cleaned_str.startswith('```json'):
        cleaned_str = cleaned_str[7:]
    elif cleaned_str.startswith('```'):
        cleaned_str = cleaned_str[3:]
    if cleaned_str.endswith('```'):
        cleaned_str = cleaned_str[:-3]
    # Remove any whitespace left over after dropping the fences.
    cleaned_str = cleaned_str.strip()

    try:
        return json.loads(cleaned_str)
    except json.JSONDecodeError as e:
        if strict:
            raise ValueError(f"decode_json Invalid JSON: {e}\njson_str:{json_str}") from None
        return default


def get_searchxng_instances():
    """Return the base URL of the SearXNG instance to use for searches.

    Currently always returns DEFAULT_SEARXNG_INSTANCE via the early return
    below. The remaining code is a disabled discovery path that fetches the
    public instance list from searx.space, keeps healthy (HTTP 200),
    non-onion instances with a known latency, sorts them by median search
    latency and picks one at random from the fastest 30.

    Returns:
        str: Base URL of a SearXNG instance (ends with '/').
    """
    # NOTE: deliberate early return — the public-instance discovery code
    # below is disabled; delete this line to re-enable it.
    return DEFAULT_SEARXNG_INSTANCE

    # searx.space publishes health/latency data for public instances.
    url = "https://searx.space/data/instances.json"

    try:
        # The GET itself belongs inside the try block so that network errors
        # (timeouts, DNS failures) also fall back to the default instance
        # instead of propagating to the caller.
        response = httpx.get(url, timeout=10)

        # Fail on non-2xx responses.
        response.raise_for_status()

        # Parse the response body as JSON.
        data = response.json()

    except Exception as e:
        logging.error(f"get_searchxng_instances请求过程中发生错误: {e}")
        return DEFAULT_SEARXNG_INSTANCE

    instances = data.get('instances', {})

    instances_ok = []

    for url, instance_info in instances.items():
        # Skip Tor hidden services.
        if '.onion' in url:
            continue

        # Health status derived from the recorded HTTP status code.
        status = '未知'
        if 'http' in instance_info and 'status_code' in instance_info['http']:
            status_code = instance_info['http']['status_code']
            status = f'正常 (HTTP {status_code})' if status_code == 200 else f'异常 (HTTP {status_code})'

        # Median search latency in seconds, when available.
        speed = '未知'
        if 'timing' in instance_info and 'search' in instance_info['timing'] and 'all' in instance_info['timing']['search']:
            search_timing = instance_info['timing']['search']['all']
            if 'median' in search_timing:
                speed = f'{search_timing["median"]:.3f}'

        # Keep only healthy instances with a known latency.
        if status == '正常 (HTTP 200)' and speed != '未知':
            instances_ok.append({'url': url, 'speed': speed})

    # Sort by latency numerically: the previous string sort ranked
    # "10.000" before "2.000".
    instances_ok.sort(key=lambda x: float(x['speed']))

    # Randomly pick one of the 30 fastest instances (or from all of them
    # when fewer than 30 qualify).
    if len(instances_ok) > 0:
        return random.choice(instances_ok[:30])['url']
    else:
        return DEFAULT_SEARXNG_INSTANCE
        
@function_tool()
async def search_searxng(query: str) -> str:
    """SearXNG search engine: search the internet for information.

    Args:
        query (str): Search query.

    Returns:
        On success, the parsed JSON search results (a dict); on failure, a
        human-readable error string.
    """
    # Base endpoint of the selected instance (instance URL ends with '/').
    instance = get_searchxng_instances()
    url = f"{instance}search"

    logging.info(f"function_tool:[search_searxng]->[{instance}] 搜索查询[{query}]")

    try:
        # Pass the query via `params` so httpx URL-encodes it; interpolating
        # it into the URL broke queries containing spaces, '&', '#', etc.
        async with httpx.AsyncClient() as client:
            response = await client.get(
                url,
                params={"q": query, "format": "json"},
                timeout=90,
            )

        # Fail on non-2xx responses.
        response.raise_for_status()

        # Parse the response body as JSON.
        result = response.json()
        return result

    except httpx.HTTPStatusError as e:
        logging.error(f"search_searxng:[{instance}]请求[{query}]失败，状态码: {e.response.status_code}")
        return f"search_searxng请求[{query}]失败，状态码: {e.response.status_code}"
    except Exception as e:
        logging.error(f"search_searxng:[{instance}]请求[{query}]过程中发生错误: [{e}]")
        return f"search_searxng请求[{query}]过程中发生错误: [{e}]"
    

@function_tool()
def get_current_time():
    """Return the current local time.

    Returns:
        dict: {"time": "<YYYY-MM-DD HH:MM:SS>"} formatted timestamp.
    """
    now = datetime.now()
    formatted = now.strftime("%Y-%m-%d %H:%M:%S")
    return {"time": formatted}

    
@function_tool()
async def search_google(query: str, num_results: int = 10):
    """Google Custom Search engine: search the internet for information.

    Args:
        query (str): Search query.
        num_results (int): Number of results requested, default 10.
            Clamped to the 1-10 range accepted by the API's `num` parameter.

    Returns:
        dict: {"success": bool, "results": [...], ...} — on success includes
        "total_results"; on failure includes "error".
    """
    # Read the API key and CSE ID from environment variables.
    api_key = os.getenv('GOOGLE_API_KEY')
    cse_id = os.getenv('GOOGLE_CSE_ID')

    # Both credentials are required.
    if not api_key or not cse_id:
        return {
            "success": False,
            "error": "GOOGLE_API_KEY or GOOGLE_CSE_ID environment variable is not set",
            "results": []
        }

    try:
        # Build the API URL and parameters.
        url = "https://www.googleapis.com/customsearch/v1"
        params = {
            "key": api_key,
            "cx": cse_id,
            "q": query,
            # The Custom Search JSON API rejects num outside 1-10, so clamp
            # instead of letting the request fail with HTTP 400.
            "num": min(max(num_results, 1), 10)
        }
        logging.info(f"function_tool:[search_google]->搜索查询[{query}]")

        # Async request, consistent with the async function definition.
        # Timeout added: the request previously had none and could hang.
        async with httpx.AsyncClient() as client:
            response = await client.get(url, params=params, timeout=30)

        # Fail on non-2xx responses.
        response.raise_for_status()

        # Parse the response body as JSON.
        result = response.json()

        # Keep only the fields useful to the caller.
        formatted_results = [
            {
                "title": item.get("title", ""),
                "link": item.get("link", ""),
                "snippet": item.get("snippet", "")
            }
            for item in result.get("items", [])
        ]

        return {
            "success": True,
            "results": formatted_results,
            "total_results": result.get("searchInformation", {}).get("totalResults", "0")
        }

    except httpx.HTTPStatusError as e:
        return {
            "success": False,
            "error": f"API request failed with status code {e.response.status_code}",
            "results": []
        }
    except ValueError:
        return {
            "success": False,
            "error": "Invalid JSON response from API",
            "results": []
        }
    except Exception as error:
        return {
            "success": False,
            "error": str(error),
            "results": []
        }


from typing import Tuple
import markdownify
import readabilipy.simple_json

def extract_content_from_html(html: str) -> str:
    """Extract and convert HTML content to Markdown format.

    Args:
        html: Raw HTML content to process

    Returns:
        Simplified markdown version of the content, or an `<error>...</error>`
        string when extraction or conversion fails.
    """
    try:
        # Simplify the page down to its main readable content.
        simplified = readabilipy.simple_json.simple_json_from_html_string(
            html,
            use_readability=False,  # False avoids the Node.js dependency
        )
        body = simplified.get("content", "")
        if not body:
            return "<error>Page failed to be simplified from HTML</error>"
        # Render the simplified HTML as ATX-heading Markdown.
        return markdownify.markdownify(body, heading_style=markdownify.ATX)
    except Exception as e:
        # Surface any failure as an error string rather than raising.
        return f"<error>An error occurred while processing HTML: {str(e)}</error>"


@function_tool()
async def fetch_url(
    url: str, force_raw: bool = False
) -> Tuple[str, str]:
    """
    Fetch a URL and return its page content in a form ready for the LLM.
    If you want the raw page (URLs, HTML tags), set force_raw=True.

    Args:
        url: URL to fetch
        force_raw: If True, return raw HTML content instead of markdown.
    Returns:
        Tuple[str, str]: First element is the page content, second is an
        error message or informational note (empty string when none).
    """
    from httpx import AsyncClient, HTTPError

    # Constants kept local for maintainability.
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    TIMEOUT = 90

    logging.info(f"fetch_url:{url}")

    async with AsyncClient() as client:
        try:
            response = await client.get(
                url,
                follow_redirects=True,
                headers={"User-Agent": USER_AGENT},
                timeout=TIMEOUT,
            )
        except HTTPError as e:
            error_msg = f"Failed to fetch {url}: {e!r}"
            return "", error_msg

        if response.status_code >= 400:
            error_msg = f"Failed to fetch {url} - status code {response.status_code}"
            return "", error_msg

        page_raw = response.text
        content_type = response.headers.get("content-type", "")

        # Treat the page as HTML when the body starts with an <html> tag,
        # the content type says so, or no content type was provided.
        # The tag sniff is case-insensitive: pages using "<HTML" previously
        # fell through to the raw-content branch.
        is_page_html = (
            "<html" in page_raw[:100].lower() or
            "text/html" in content_type.lower() or
            not content_type
        )

        if is_page_html and not force_raw:
            return extract_content_from_html(page_raw), ""

        info_msg = f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n"
        return page_raw, info_msg


async def main():
    """Demo entry point: fetch one page raw, then again simplified."""
    target_url = "https://www.cnblogs.com/qiao39gs/p/18880001"

    raw_result = await fetch_url(target_url, force_raw=True)
    print(raw_result)
    print('*' * 100)
    simplified_result = await fetch_url(target_url)
    print(simplified_result)




if __name__ == "__main__":
    import asyncio

    # Configure root logging before running the async demo entry point.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s - %(message)s')
    asyncio.run(main())


