import json
import re
import asyncio
import random
from duckduckgo_search import DDGS
from time import time, sleep
from uuid import uuid4
from typing import Dict, List, Optional, Any, Literal, Union
from pydantic import BaseModel, Field
from fastapi import APIRouter
from ktransformers.server.utils.create_interface import get_interface
from ktransformers.server.schemas.assistants.streaming import chat_stream_response
from ktransformers.server.schemas.endpoints.chat import ChatCompletionCreate
from ktransformers.server.schemas.endpoints.chat import RawUsage, Role
from ktransformers.server.backend.base import BackendInterfaceBase
from ktransformers.server.config.config import Config
from server.config.log import logger
from ktransformers.server.schemas.endpoints.chat import ChatCompletionChunk, CompletionUsage

class DeepSeekInternalClient:

    def __init__(self):
        """Initialize the client with default sampling parameters."""
        # NOTE(review): "temprature" is a misspelling of "temperature", but
        # chat() reads self.temprature, so renaming it requires a coordinated
        # change across both methods.
        self.temprature: float = 0.7  # sampling temperature sent to the backend
        self.top_p: float = 0.8  # nucleus-sampling cutoff sent to the backend
    
    def _remove_think_tags(self, content: str) -> str:
        """Strip every <think>...</think> section from *content* and trim
        surrounding whitespace.

        DOTALL lets the non-greedy match span newlines, so multi-line
        reasoning blocks are removed as well.
        """
        without_think = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL)
        return without_think.strip()

    def _extract_search_keywords(self, prompt: str) -> str:
        """Ask the LLM to distill *prompt* into search-engine keywords.

        Returns the keywords as a single space-separated string. If the LLM
        call fails, falls back to the first 50 characters of the prompt.
        """
        # Build the instruction without source-level indentation: the old
        # triple-quoted literal embedded ~28 leading spaces on every line,
        # which were sent verbatim to the model.
        extract_prompt = (
            "请从以下用户问题中提取最重要的搜索关键词，用于在搜索引擎中查找相关信息。\n"
            "请只返回关键词，多个关键词用空格分隔，不要包含其他解释。\n\n"
            f"用户问题：{prompt}\n\n"
            "搜索关键词："
        )

        result = self.chat(extract_prompt)
        if result["status"] == "success":
            return result["content"].strip()
        # Extraction failed: degrade gracefully to a prefix of the raw prompt.
        return prompt[:50]


    def _search_duckduckgo(self, keywords: str, max_results: int = 5, max_retries: int = 3) -> List[Dict[str, str]]:
        """Search DuckDuckGo for *keywords* with retries and rate-limit handling.

        Returns up to *max_results* dicts with "title", "body" and "href"
        keys; returns an empty list when every attempt fails.
        """
        
        # NOTE(review): hard-coded LAN proxy — environment-specific and should
        # likely live in configuration. The entries also lack a scheme prefix
        # (e.g. "http://") — confirm the DDGS client accepts this form.
        proxies = {
            "https": "192.168.2.18:20171",
            "http": "192.168.2.18:20171"
        }

        headers={"User-Agent": "Mozilla/5.0"}

        for attempt in range(max_retries):
            try:
                if attempt > 0:
                    # Randomized, linearly growing delay before each retry to
                    # avoid hammering the endpoint.
                    delay = random.uniform(2, 5) * (attempt + 1)
                    print(f"第{attempt + 1}次重试，等待{delay:.1f}秒...")
                    sleep(delay)
                
                with DDGS(proxies=proxies, headers=headers) as ddgs:
                    results = []
                    # Fetch the result iterator for this query.
                    search_results = ddgs.text(keywords, max_results=max_results)
                    
                    for i, result in enumerate(search_results):
                        # Small pause between consuming results; if the DDGS
                        # iterator pages lazily this also throttles follow-up
                        # requests — verify against the installed version.
                        if i > 0:
                            sleep(0.5)
                        
                        results.append({
                            "title": result.get("title", ""),
                            "body": result.get("body", ""),
                            "href": result.get("href", "")
                        })
                    
                    print(f"搜索成功，找到{len(results)}条结果")
                    return results
                    
            except Exception as e:
                error_msg = str(e).lower()
                
                # Heuristic rate-limit detection on the error text; "202"
                # presumably matches an HTTP 202 throttling response from
                # DuckDuckGo — verify against the DDGS client's error strings.
                if "rate" in error_msg or "limit" in error_msg or "202" in error_msg:
                    print(f"遇到速率限制错误 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                    
                    if attempt < max_retries - 1:
                        # Rate limits get a longer, escalating back-off.
                        wait_time = random.uniform(10, 20) * (attempt + 1)
                        print(f"等待{wait_time:.1f}秒后重试...")
                        sleep(wait_time)
                        continue
                    else:
                        print("已达到最大重试次数，搜索失败")
                        return []
                    
                else:
                    print(f"搜索错误 (尝试 {attempt + 1}/{max_retries}): {str(e)}")
                    
                    if attempt < max_retries - 1:
                        # Other errors get a short back-off before retrying.
                        wait_time = random.uniform(1, 3)
                        sleep(wait_time)
                        continue
                    else:
                        print("搜索失败，返回空结果")
                        return []
        
        # Defensive fallback: every loop path above returns, so this is only
        # reachable when max_retries <= 0.
        return []

    def _format_search_results(self, search_results: List[Dict[str, str]]) -> str:
        """Render search results as a text section to append to an LLM prompt.

        Each result becomes a numbered entry with its title, body and link.
        Returns a "no results" notice when the list is empty.
        """
        if not search_results:
            return "\n\n[搜索结果]: 未找到相关信息。"

        # Join clean, unindented entries: the old triple-quoted literal
        # leaked ~36 spaces of source indentation into every prompt line,
        # and repeated `+=` built the string quadratically.
        sections = ["\n\n[搜索结果]:\n"]
        for i, result in enumerate(search_results, 1):
            sections.append(
                f"\n{i}. 标题: {result['title']}\n"
                f"内容: {result['body']}\n"
                f"链接: {result['href']}\n"
            )
        return "".join(sections)

    def chat_with_search(self, prompt: str, enable_search: bool = False, max_search_results: int = 5):
        """Chat with optional web-search augmentation.

        When *enable_search* is true: extract keywords from *prompt* with the
        LLM, search DuckDuckGo, append the formatted results to the prompt,
        and ask the model to answer using that context. On any error in the
        search pipeline (or when search is disabled) fall back to a plain
        chat() call, so the caller always gets a chat() result dict.
        """
        if not enable_search:
            # Search disabled: plain chat.
            return self.chat(prompt)

        try:
            # 1. Use the LLM to distill search keywords from the prompt.
            # Logging goes through the module logger for consistency with
            # chat(); previously these were bare print() calls.
            logger.info("正在提取搜索关键词...")
            search_keywords = self._extract_search_keywords(prompt)
            logger.info(f"提取的搜索关键词: {search_keywords}")

            # 2. Query DuckDuckGo.
            logger.info("正在进行网络搜索...")
            search_results = self._search_duckduckgo(search_keywords, max_search_results)
            logger.info(f"找到 {len(search_results)} 条搜索结果")

            # 3. Render the results as prompt text.
            formatted_results = self._format_search_results(search_results)

            # 4. Augment the original prompt with the search context.
            enhanced_prompt = prompt + formatted_results + "\n\n请基于以上搜索结果和你的知识来回答用户的问题。"

            # 5. Generate the final answer from the augmented prompt.
            logger.info("正在生成最终回答...")
            return self.chat(enhanced_prompt)

        except Exception as e:
            # Best-effort design: any failure in the search pipeline degrades
            # to a plain chat rather than surfacing the error to the caller.
            logger.info(f"搜索聊天过程中发生错误: {str(e)}")
            logger.info("回退到普通聊天模式...")
            return self.chat(prompt)


    def chat(self, prompt: str):
        """Run a single-turn, non-streaming completion for *prompt*.

        Returns {"status": "success", "content": ..., "reasoning_content":
        None, "code": 200} on success, or {"status": "error", "message": ...}
        when the backend call fails. <think>...</think> sections are stripped
        from the model output before it is returned.
        """
        # Build an OpenAI-style request body.
        # NOTE(review): "temprature" is the (misspelled) attribute set in
        # __init__; the request key itself is spelled correctly.
        create = {
            "messages": [
                {"role": "user", "content": prompt}
            ],
            "temperature": self.temprature,
            "top_p": self.top_p,
        }

        try:
            # chat_completion is async; drive it to completion synchronously.
            result = asyncio.run(self.chat_completion(create))

            # Strip any <think> reasoning block from the raw model output.
            raw_content = result["choices"][0]["message"]["content"]
            cleaned_content = self._remove_think_tags(raw_content)

            return {
                "status": "success",
                "content": cleaned_content,
                "reasoning_content": None,
                "code": 200,
            }

        except Exception as e:
            # Log with traceback at error severity: the previous logger.info
            # hid failures at default log levels and dropped the stack trace.
            logger.exception(f"发生错误: {str(e)}")
            return {
                "status": "error",
                "message": str(e)
            }


    async def chat_completion(self, create):
        """Run a non-streaming chat completion against the backend interface.

        *create* is a dict with "messages" plus optional keys ("temperature",
        "top_p", "max_tokens", "max_completion_tokens", "return_speed").
        Consumes the backend token stream, detects the model's special
        tool-call markup, and returns an OpenAI-style "chat.completion"
        response dict.
        """
        id = str(uuid4().hex)  # completion id, also passed to the backend

        # Process messages with tool functionality if needed
        input_message = create["messages"]

        # Process request
        interface: BackendInterfaceBase = get_interface()

        # non streaming response processing
        full_content = ""        # assistant text accumulated outside tool-call markup
        finish_reason = None
        tool_calls = []
        buffer = ""              # sliding window of recent text, used to spot markers
        tool_call_mode = False   # True while between begin/end tool-call markers

        # Custom model special markers
        tool_calls_begin_marker = "<｜tool▁calls▁begin｜>"
        tool_call_begin_marker = "<｜tool▁call▁begin｜>"  # NOTE(review): unused in this method
        tool_sep_marker = "<｜tool▁sep｜>"  # NOTE(review): unused in this method
        tool_call_end_marker = "<｜tool▁call▁end｜>"  # NOTE(review): unused in this method
        tool_calls_end_marker = "<｜tool▁calls▁end｜>"
        # Maps alternate marker spellings the model may emit onto the
        # canonical marker strings above.
        too_calls_dict = {
            "<tools▁begin>":"<｜tool▁calls▁begin｜>",
            "<tool▁begin>":"<｜tool▁call▁begin｜>",
            "<tool▁sep>":"<｜tool▁sep｜>",
            "<tool▁end>":"<｜tool▁call▁end｜>",
            "<tools▁end>":"<｜tool▁calls▁end｜>"
        }
        # The backend yields either (token, finish_reason) tuples or a final
        # RawUsage object carrying token counts.
        async for res in interface.inference(input_message, id, create.get("temperature"), create.get("top_p"), create.get("max_tokens"), create.get("max_completion_tokens")):
            if isinstance(res, RawUsage):
                raw_usage = res
                usage = CompletionUsage(
                    prompt_tokens=raw_usage.prefill_count,
                    completion_tokens=raw_usage.decode_count,
                    total_tokens=raw_usage.prefill_count + raw_usage.decode_count,
                )
                if create.get("return_speed"):
                    usage.prefill_time = res.prefill_time
                    usage.decode_time = res.decode_time
                else:
                    # Drop timing fields from the model's attribute dict so
                    # they do not appear in the serialized usage below.
                    usage.__dict__.pop('prefill_time', None)
                    usage.__dict__.pop('decode_time', None)

            elif isinstance(res, tuple) and len(res) == 2:
                token, finish_reason = res
                # Normalize alternate marker spellings to the canonical forms.
                token = re.sub('|'.join(map(re.escape, too_calls_dict.keys())), lambda m: too_calls_dict[m.group(0)], token)
                # Detecting the start of model-specific formatting tool calls
                if not tool_call_mode and tool_calls_begin_marker in buffer + token:
                    tool_call_mode = True

                    # Adjust full_content to remove tool call section
                    if buffer.endswith(tool_calls_begin_marker):
                        full_content = full_content[:-len(tool_calls_begin_marker)]
                    elif tool_calls_begin_marker in (buffer + token):
                        # NOTE(review): if the marker begins inside `token`
                        # (idx >= len(buffer)), len(buffer) - idx is <= 0 and
                        # this slice truncates full_content incorrectly —
                        # confirm against a marker split across tokens.
                        idx = (buffer + token).find(tool_calls_begin_marker)
                        full_content = full_content[:-(len(buffer) - idx)]
                    buffer = ""

                # Accumulation of content in non-tool call mode
                if not tool_call_mode:
                    full_content += token
                    buffer += token
                    # Keep the buffer at a reasonable size
                    if len(buffer) > 200:
                        buffer = buffer[-200:]
                else:
                    # In tool call mode, continue to collect tool call related text
                    buffer += token

                    # If the tool call end marker is found
                    if tool_calls_end_marker in buffer:
                        # Extract tool calls
                        # NOTE(review): getTools is defined elsewhere in this
                        # file/module — not visible in this chunk.
                        tool_calls = getTools(buffer)
                        if tool_calls:
                            finish_reason = "tool_calls"

                        # Reset state
                        tool_call_mode = False
                        buffer = ""

        # Build Response
        # Per OpenAI convention, content is null when tool calls are present.
        message = {
            "role": "assistant",
            "content": None if tool_calls else full_content
        }

        if tool_calls:
            message["tool_calls"] = tool_calls

        response = {
            "id": id,
            "object": "chat.completion",
            "created": int(time()),
            "model": Config().model_name,
            "choices": [{
                "index": 0,
                "message": message,
                "finish_reason": finish_reason or "stop"
            }],
            # NOTE(review): `usage` is only bound if the backend yielded a
            # RawUsage; the locals() check guards that, but an explicit
            # `usage = None` before the loop would be clearer.
            "usage": usage.__dict__ if 'usage' in locals() else None,
            "system_fingerprint": f"fp_{uuid4().hex[:12]}"
        }

        return response