import json
import os

import requests
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from typing import List, Optional, Dict, Any

from .logger import request_logger
from .query_rewrite import query_rewriter

# Bocha API configuration.
# SECURITY(review): the API key was hard-coded in source. Prefer the
# BOCHA_API_KEY environment variable; the literal fallback keeps existing
# deployments working but should be rotated and removed from the repo.
BOCHA_API_KEY = os.environ.get("BOCHA_API_KEY", "sk-22074fb1ea844b729fe7a26677cfcf95")
BOCHA_AI_SEARCH_API_URL = "https://api.bochaai.com/v1/web-search"

def parse_bocha_response(response: dict) -> Dict[str, Any]:
    """
    Parse the raw JSON response of the Bocha AI web-search API.

    Args:
        response: JSON-decoded response body returned by the API.

    Returns:
        A dict with optional keys:
          - "webpage": list of normalized web-page result dicts
          - "image":   list of normalized image result dicts
        An empty dict when the response carries no usable "data" payload.
    """
    result: Dict[str, Any] = {}

    if not response or "data" not in response:
        return result

    data = response.get("data", {})

    # Log the raw structure returned by the API to ease schema debugging.
    request_logger.log("博查API原始响应", level="debug", data={
        "response_structure": {k: type(v).__name__ for k, v in response.items()},
        "data_structure": {k: type(v).__name__ for k, v in data.items()} if isinstance(data, dict) else f"data is {type(data).__name__}"
    })

    # Defensive: the API may return a non-dict "data" payload.
    if not isinstance(data, dict):
        request_logger.log(f"博查API返回的data不是字典类型: {type(data).__name__}", level="warning")
        return result

    # Parse web-page results; non-dict entries are skipped.
    webpages_raw = data.get("webPages")
    if isinstance(webpages_raw, dict) and "value" in webpages_raw:
        webpages = [
            {
                "id": item.get("id", ""),
                "name": item.get("name", ""),
                "url": item.get("url", ""),
                "snippet": item.get("snippet", ""),
                "siteName": item.get("siteName", ""),
                "siteIcon": item.get("siteIcon", ""),
                # Prefer the published date; fall back to the crawl date.
                "datePublished": item.get("datePublished", "") or item.get("dateLastCrawled", ""),
            }
            for item in webpages_raw["value"]
            if isinstance(item, dict)
        ]
        result["webpage"] = webpages

        # Record how many pages were found and a short sample.
        # (Fixed: these log messages were placeholder-less f-strings.)
        request_logger.log("博查API找到网页结果", level="debug", data={
            "count": len(webpages),
            "samples": [{"title": item["name"], "url": item["url"]} for item in webpages[:2]]
        })

    # Parse image results with the same defensive checks.
    images_raw = data.get("images")
    if isinstance(images_raw, dict) and "value" in images_raw:
        images = [
            {
                "contentUrl": item.get("contentUrl", ""),
                "hostPageUrl": item.get("hostPageUrl", ""),
                "width": item.get("width", 0),
                "height": item.get("height", 0),
            }
            for item in images_raw["value"]
            if isinstance(item, dict)
        ]
        result["image"] = images

        request_logger.log("博查API找到图片结果", level="debug", data={
            "count": len(images)
        })
    else:
        request_logger.log("博查API没有返回图片结果", level="debug")

    return result

# Request timeout (seconds) for the Bocha API; without it a stalled server
# would block the caller forever.
_BOCHA_REQUEST_TIMEOUT = 30

def fetch_online_search_results(query: str, count: int = 10, use_query_rewrite: bool = True) -> Optional[Dict[str, Any]]:
    """
    向 Bocha AI 发起联网搜索请求，并返回结构化的搜索结果。

    参数:
        query: 查询内容字符串
        count: 每个查询返回搜索结果数量（默认10）
        use_query_rewrite: 是否使用查询改写

    返回:
        成功时返回结构化字典，否则返回 None

    Raises:
        ValueError: when ``query`` is empty.
    """
    if not query:
        raise ValueError("query 参数不能为空")

    request_logger.log("开始联网搜索", data={
        "query": query,
        "count": count,
        "use_query_rewrite": use_query_rewrite
    })

    print(f"\n===== 开始联网搜索 =====")
    print(f"原始查询: '{query}'")
    print(f"是否使用查询改写: {use_query_rewrite}")

    # Decide which queries to run.
    if use_query_rewrite:
        # Rewrite the query into several variants.
        queries = query_rewriter.rewrite_query(query)
        # Guard: an empty rewrite result would cause a ZeroDivisionError
        # below — fall back to the original query.
        if not queries:
            queries = [query]
        # Limit results per rewritten query so the total stays near `count`.
        items_per_query = max(2, min(3, count // len(queries)))

        request_logger.log("查询改写配置", data={
            "queries_count": len(queries),
            "items_per_query": items_per_query
        })

        print(f"查询改写完成, 共 {len(queries)} 个查询")
        print(f"每个查询返回数量限制: {items_per_query}")
    else:
        # No rewriting: run the original query as-is.
        queries = [query]
        items_per_query = count

        request_logger.log("不使用查询改写", data={
            "original_query": query,
            "count": count
        })

        print(f"不使用查询改写，直接使用原始查询，返回数量限制: {items_per_query}")

    # Container for merged results across all queries.
    merged_result = {"webpage": [], "image": []}

    # Run each query in turn; failures on one query do not abort the rest.
    for idx, q in enumerate(queries):
        print(f"\n----- 执行查询 {idx+1}/{len(queries)}: '{q}' -----")
        request_logger.log(f"执行查询 {idx+1}/{len(queries)}", data={"query": q})

        try:
            payload = json.dumps({
                "query": q,
                "freshness": "noLimit",
                "count": items_per_query
            })

            headers = {
                "Authorization": f"Bearer {BOCHA_API_KEY}",
                "Content-Type": "application/json",
            }

            request_logger.log("发送请求到博查API", level="debug", data={
                "url": BOCHA_AI_SEARCH_API_URL,
                "payload": json.loads(payload)
            })

            print(f"发送请求到博查API...")
            # timeout added: previously a hung server would block forever.
            response = requests.post(
                url=BOCHA_AI_SEARCH_API_URL,
                headers=headers,
                data=payload,
                timeout=_BOCHA_REQUEST_TIMEOUT,
            )

            if response.ok:
                status_msg = f"博查API响应成功，状态码: {response.status_code}"
                request_logger.log(status_msg, level="debug")
                print(status_msg)

                try:
                    # Log the raw JSON keys before parsing, so a parse
                    # failure still leaves evidence of what came back.
                    response_json = response.json()
                    request_logger.log("博查API返回的原始JSON", level="debug", data={
                        "response_keys": list(response_json.keys())
                    })

                    parsed = parse_bocha_response(response_json)
                except Exception as e:
                    error_msg = f"解析博查API响应失败: {str(e)}"
                    request_logger.exception(error_msg)
                    print(error_msg)
                    continue

                # Merge web-page results.
                if "webpage" in parsed and parsed["webpage"]:
                    webpage_count = len(parsed["webpage"])

                    request_logger.log(f"查询 '{q}' 返回了网页结果", data={
                        "count": webpage_count,
                        "sample": [
                            {"title": item.get("name"), "url": item.get("url")}
                            for item in parsed["webpage"][:2]
                        ]
                    })

                    merged_result["webpage"].extend(parsed["webpage"])
                    print(f"查询 '{q}' 返回了 {webpage_count} 条网页结果")

                    # Print a short preview of the first couple of results.
                    if webpage_count > 0:
                        print(f"结果示例:")
                        for i, item in enumerate(parsed["webpage"][:2], 1):
                            print(f"  {i}. {item.get('name', '无标题')} - {item.get('siteName', '无来源')}")
                            snippet = item.get('snippet', '无内容')
                            if len(snippet) > 100:
                                snippet = snippet[:100] + "..."
                            print(f"     {snippet}")
                else:
                    request_logger.log(f"查询 '{q}' 未返回网页结果", level="warning")
                    print(f"查询 '{q}' 未返回网页结果")

                # Merge image results.
                if "image" in parsed and parsed["image"]:
                    image_count = len(parsed["image"])
                    merged_result["image"].extend(parsed["image"])

                    request_logger.log(f"查询 '{q}' 返回了图片结果", data={"count": image_count})
                    print(f"查询 '{q}' 返回了 {image_count} 条图片结果")
            else:
                error_msg = f"查询 '{q}' 失败: 状态码 {response.status_code}"
                request_logger.log(error_msg, level="error", data={"response_text": response.text})
                print(error_msg)
                print(f"错误详情: {response.text}")

        except Exception as e:
            error_msg = f"查询 '{q}' 异常: {str(e)}"
            request_logger.exception(error_msg)
            print(error_msg)

    # Final summary of the merged results.
    print(f"\n===== 联网搜索结果汇总 =====")

    result_summary = {}

    if "webpage" in merged_result and merged_result["webpage"]:
        webpage_count = len(merged_result["webpage"])
        unique_urls = set(item.get('url', '') for item in merged_result["webpage"] if item.get('url'))

        result_summary["webpage"] = {
            "total_count": webpage_count,
            "unique_urls": len(unique_urls)
        }

        print(f"合并后总共获取了 {webpage_count} 条网页结果")
        print(f"其中包含 {len(unique_urls)} 个不同的URL")
    else:
        result_summary["webpage"] = {"total_count": 0, "unique_urls": 0}
        print("未获取到任何网页结果")

    if "image" in merged_result and merged_result["image"]:
        image_count = len(merged_result["image"])
        result_summary["image"] = {"total_count": image_count}
        print(f"合并后总共获取了 {image_count} 条图片结果")
    else:
        result_summary["image"] = {"total_count": 0}

    request_logger.log("联网搜索结果汇总", data=result_summary)

    # Return None when nothing at all was retrieved.
    if (not merged_result.get("webpage") and not merged_result.get("image")):
        request_logger.log("联网搜索未获取到任何结果", level="warning")
        print("联网搜索未获取到任何结果")
        return None

    print(f"===== 联网搜索完成 =====\n")
    request_logger.log("联网搜索完成")

    return merged_result

def chunk_web_search_results(web_results: dict, chunk_size: int = 500, chunk_overlap: int = 50) -> List[Any]:
    """
    将 Bocha 搜索返回的网页内容分割成标准大小的文本块。

    参数:
        web_results: 结构化搜索结果，必须包含 "webpage" 字段
        chunk_size: 每个块的最大长度（字符数）
        chunk_overlap: 块之间的重叠字符数

    返回:
        分割后的文本块列表，每个文本块都是带有元数据的Document对象
    """
    if not web_results or "webpage" not in web_results:
        request_logger.log("没有网页结果可供处理", level="warning")
        print("没有网页结果可供处理")
        return []

    request_logger.log("开始处理网页内容", data={
        "webpage_count": len(web_results.get("webpage", [])),
        "chunk_size": chunk_size,
        "chunk_overlap": chunk_overlap
    })

    print(f"\n===== 开始处理网页内容 =====")
    print(f"收到 {len(web_results['webpage'])} 条网页内容")
    print(f"切块设置: 大小={chunk_size}, 重叠={chunk_overlap}")

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    chunks = []

    # Instructional/meta keywords used to strip prompt-like text from snippets.
    filter_keywords = [
        "回复要求", "答题要求", "注意事项", "禁止虚假", "禁止夸大", "回答必须",
        "回答要求", "请用中文回答", "请勿抄袭", "回答格式", "答案格式"
    ]

    processed_urls = set()  # dedup: skip repeated URLs
    duplicate_count = 0
    filtered_count = 0
    short_content_count = 0

    for item in web_results["webpage"]:
        url = item.get('url', '')
        # Only deduplicate items that actually carry a URL.
        # Bug fix: previously the first URL-less item put '' into the set,
        # causing every later URL-less item to be dropped as a "duplicate".
        if url:
            if url in processed_urls:
                duplicate_count += 1
                continue
            processed_urls.add(url)

        # Combine title and snippet into one passage.
        title = item.get('name', '').strip()
        snippet = item.get('snippet', '').strip()
        content = f"{title}。{snippet}" if title else snippet

        # Strip instructional/meta text; keep only the part before the
        # first matching keyword.
        should_filter = False
        for keyword in filter_keywords:
            if keyword in content:
                parts = content.split(keyword, 1)
                content = parts[0].strip()
                if not content:  # nothing useful left — drop the item
                    should_filter = True
                    filtered_count += 1
                break

        if should_filter:
            continue

        # Skip contents too short to be useful.
        if len(content) < 20:
            short_content_count += 1
            continue

        # Metadata attached to every chunk produced from this item.
        metadata = {
            "title": title,
            "url": url,
            "siteName": item.get('siteName', '网络搜索'),
            "source": "网络搜索",
            "imageUrl": item.get('imageUrl', '')
        }

        # Split the passage and wrap each piece in a Document.
        split_texts = text_splitter.split_text(content)
        for split_text in split_texts:
            doc = Document(page_content=split_text, metadata=metadata)
            chunks.append(doc)

    filter_stats = {
        "filtered_instructional": filtered_count,
        "filtered_short": short_content_count,
        "filtered_duplicate_url": duplicate_count,
        "final_chunks": len(chunks)
    }

    request_logger.log("文本过滤统计", data=filter_stats)

    print(f"过滤情况统计:")
    print(f"  - 包含指导性文本被过滤: {filtered_count} 条")
    print(f"  - 内容过短被过滤: {short_content_count} 条")
    print(f"  - URL去重过滤: {duplicate_count} 条")
    print(f"最终生成 {len(chunks)} 个文本块")

    # Show a couple of sample chunks for debugging.
    if chunks:
        chunk_samples = []
        print(f"\n文本块示例:")
        for i, chunk in enumerate(chunks[:2], 1):
            preview = chunk.page_content[:100] + "..." if len(chunk.page_content) > 100 else chunk.page_content
            metadata_preview = f"(来源: {chunk.metadata.get('siteName')}, URL: {chunk.metadata.get('url')[:30]}...)"
            chunk_samples.append({"preview": preview, "metadata": chunk.metadata})
            print(f"  块 {i}: {preview} {metadata_preview}")

        request_logger.log("文本块示例", level="debug", data={"samples": chunk_samples})

    print(f"===== 网页内容处理完成 =====\n")
    request_logger.log("网页内容处理完成", data={"chunk_count": len(chunks)})

    return chunks

def get_chunks_text_content(chunks: List[Document]) -> List[str]:
    """
    从Document对象列表中提取纯文本内容

    参数:
        chunks: Document对象列表

    返回:
        纯文本内容列表
    """
    texts = []
    for document in chunks:
        texts.append(document.page_content)
    return texts