import os
import logging
import json
import sys
from typing import List, Dict, Any, Optional
from openai import OpenAI
from dotenv import load_dotenv

# Ensure the console can emit UTF-8 (e.g. Chinese log/print output on Windows).
if (sys.stdout.encoding or "").lower() != "utf-8":
    try:
        sys.stdout.reconfigure(encoding="utf-8")
    except AttributeError:
        # reconfigure() only exists on Python 3.7+ TextIOWrapper; older
        # interpreters keep their default encoding.
        pass

# Configure logging
logger = logging.getLogger(__name__)

# Force UTF-8 on any file handlers already attached to this logger.
# BUG FIX: logging.FileHandler has no setEncoding() method — the previous
# handler.setEncoding("utf-8") call would raise AttributeError for any
# attached FileHandler. Instead, close the handler and update its
# `encoding` attribute; FileHandler reopens its stream lazily on the next
# emit, honoring the new encoding.
for handler in logger.handlers:
    if isinstance(handler, logging.FileHandler) and handler.encoding != "utf-8":
        handler.close()
        handler.encoding = "utf-8"

# Load environment variables
load_dotenv(".env.local")


class DeepSeekClient:
    """
    DeepSeek client for generating responses with retrieval augmentation.

    Wraps the OpenAI-compatible DeepSeek chat API. When no API key is
    configured (``DEEPSEEK_API_KEY``), or any call fails, the client
    degrades gracefully and serves simplified fallback answers built
    directly from the retrieved context instead of raising.
    """

    def __init__(self):
        """Initialize the DeepSeek client from environment configuration."""
        try:
            # Try to get DeepSeek configuration from environment variables
            self.api_key = os.getenv("DEEPSEEK_API_KEY", "")

            # Check if API key is provided
            if not self.api_key:
                logger.warning(
                    "DeepSeek API key not configured, using simplified responses"
                )
                self.client = None
                self.is_available = False
            else:
                # Initialize DeepSeek client (OpenAI-compatible endpoint)
                self.client = OpenAI(
                    api_key=self.api_key, base_url="https://api.deepseek.com"
                )
                self.is_available = True
                logger.info("DeepSeek client initialized successfully")
        except Exception as e:
            # Any failure (bad key, missing dependency, network) downgrades
            # the client to fallback mode instead of crashing the caller.
            logger.error(f"Error initializing DeepSeek client: {e}")
            self.client = None
            self.is_available = False

    @staticmethod
    def _truncate(text: str, limit: int) -> str:
        """Return *text* cut to *limit* chars, appending '...' only when cut."""
        return text if len(text) <= limit else text[:limit] + "..."

    def _get_fallback_response(self, context: str) -> str:
        """
        Build the simplified response used when DeepSeek is unavailable.

        Args:
            context: Relevant content retrieved from the knowledge base.

        Returns:
            A short answer built from the context, or a "nothing found"
            message when the context is empty.
        """
        if context:
            # BUG FIX: previously "..." was appended even when the context
            # was shorter than the 500-char limit, implying truncation
            # that never happened.
            return f"根据检索到的信息，我可以提供以下回答：\n\n{self._truncate(context, 500)}"
        return "我没有找到与您问题相关的信息。请尝试用其他方式描述您的问题，或者上传更多相关文档。"

    def _create_system_message(self, context: str) -> Dict[str, str]:
        """
        Create the system message embedding the retrieved context.

        Args:
            context: Relevant content retrieved from the knowledge base.

        Returns:
            A ``{"role": "system", "content": ...}`` message dict.
        """
        return {
            "role": "system",
            "content": f"""你是一个智能助手，请严格基于以下上下文回答问题：
            {context}
            若信息不足请明确说明，禁止编造信息。
答案需满足：
1. 严格基于提供的上下文的内容，不要编造或添加未在文档中出现的信息
2. 如果检索的内容不足以回答问题，请诚实地告知用户无法回答，而不是猜测或编造
3. 始终使用中文回答，即使用户用其他语言提问
4. 如有必要，可以适当引用文档原文的关键部分，但不要大篇幅照抄
5. 即使上下文的内容是错误的，你也要基于上下文回答问题
6. 如果用户的问题与上下文无关，请诚实地告知无法回答，而不是猜测或编造""",
        }

    def _process_messages(self, messages: List[Dict[str, Any]], context: str) -> List[Dict[str, Any]]:
        """
        Combine the system message with the (possibly enhanced) history.

        The final user message is rewrapped with an instruction to answer
        strictly from context; the context itself travels in the system
        message created by :meth:`_create_system_message`.

        Args:
            messages: Message history list.
            context: Relevant content retrieved from the knowledge base.

        Returns:
            Processed message list with only ``role``/``content`` keys.
        """
        processed_messages = [self._create_system_message(context)]

        for i, msg in enumerate(messages):
            if i == len(messages) - 1 and msg["role"] == "user" and context:
                # NOTE(review): the "enhanced" content repeats only the
                # question plus an instruction; the retrieved context lives
                # in the system message above — confirm this is intended.
                enhanced_content = f"""
用户问题: {msg["content"]}。
即使上下文的内容是错误的，你也要基于上下文回答问题
"""
                processed_messages.append(
                    {"role": "user", "content": enhanced_content}
                )
            else:
                processed_messages.append(msg)

        # Strip any extra keys so only role/content reach the API.
        return [
            {"role": msg["role"], "content": msg["content"]}
            for msg in processed_messages
        ]

    def _chunk_text(self, text: str, chunk_size: int = 10) -> List[str]:
        """
        Split text into small pieces to simulate a streaming response.

        Args:
            text: Text to split.
            chunk_size: Size of each chunk.

        Returns:
            List of text chunks.
        """
        return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]

    def generate_response(
        self,
        messages: List[Dict[str, Any]],
        context: str,
        max_tokens: int = 800,
        temperature: float = 0.7,
    ) -> str:
        """
        Generate a response using the DeepSeek API.

        Args:
            messages: List of message history
            context: Relevant context retrieved from the knowledge base
            max_tokens: Maximum number of tokens to generate
            temperature: Temperature parameter controlling creativity

        Returns:
            Generated response text
        """
        if not self.is_available or self.client is None:
            # DeepSeek not configured — serve the simplified fallback.
            return self._get_fallback_response(context)

        try:
            enhanced_messages = self._process_messages(messages, context)

            logger.info(f"非流式请求的处理消息数: {len(enhanced_messages)}")

            # Call DeepSeek API
            try:
                response = self.client.chat.completions.create(
                    model="deepseek-chat",  # Using DeepSeek's chat model
                    messages=enhanced_messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    n=1,
                )
                return response.choices[0].message.content
            except Exception as e:
                logger.error(f"Error calling DeepSeek API: {str(e)}")
                logger.error(
                    f"请求参数: model=deepseek-chat, temperature={temperature}, max_tokens={max_tokens}"
                )
                # Return simplified response on error
                return f"抱歉，生成回答时出现了问题。以下是我在知识库中找到的相关信息：\n\n{self._truncate(context, 300)}"

        except Exception as e:
            logger.error(f"Error preparing DeepSeek request: {e}")
            # Return simplified response on error
            return f"抱歉，准备请求时出现了问题。以下是我在知识库中找到的相关信息：\n\n{self._truncate(context, 300)}"

    def generate_response_stream(
        self,
        messages: List[Dict[str, Any]],
        context: str,
        temperature: float = 0.5,
        max_tokens: Optional[int] = None,
    ):
        """
        Stream a response using the DeepSeek API.

        Args:
            messages: List of message history
            context: Relevant context retrieved from the knowledge base
            temperature: Temperature parameter controlling creativity
            max_tokens: Optional cap on generated tokens; omitted from the
                request when ``None`` (previous behavior).

        Yields:
            Chunks of the generated response text.
        """
        if not self.is_available or self.client is None:
            # DeepSeek not configured — stream the simplified fallback in
            # small chunks to mimic a real streaming response.
            response_text = self._get_fallback_response(context)
            for chunk in self._chunk_text(response_text):
                yield chunk
            return

        try:
            enhanced_messages = self._process_messages(messages, context)

            # Log only the count (consistent with generate_response and
            # avoids dumping user content / retrieved documents into logs).
            logger.info(f"流式请求的处理消息数: {len(enhanced_messages)}")

            # Call DeepSeek API streaming interface
            try:
                request_kwargs: Dict[str, Any] = {
                    "model": "deepseek-chat",  # Using DeepSeek's chat model
                    "messages": enhanced_messages,
                    "temperature": temperature,
                    "n": 1,
                    "stream": True,
                }
                # Forward max_tokens only when explicitly requested so the
                # default request payload matches the previous behavior.
                if max_tokens is not None:
                    request_kwargs["max_tokens"] = max_tokens
                response_stream = self.client.chat.completions.create(
                    **request_kwargs
                )

                # Yield each piece of the streaming response
                for chunk in response_stream:
                    if chunk.choices and chunk.choices[0].delta.content:
                        yield chunk.choices[0].delta.content

            except Exception as e:
                logger.error(
                    f"Error calling DeepSeek API streaming interface: {str(e)}"
                )
                logger.error(
                    f"请求参数: model=deepseek-chat, temperature={temperature}"
                )
                # Return simplified response on error
                error_msg = f"抱歉，生成流式回答时出现了问题。以下是我在知识库中找到的相关信息：\n\n{self._truncate(context, 300)}"
                for chunk in self._chunk_text(error_msg):
                    yield chunk

        except Exception as e:
            logger.error(f"Error preparing DeepSeek streaming request: {e}")
            # Return simplified response on error
            error_msg = f"抱歉，准备流式请求时出现了问题。以下是我在知识库中找到的相关信息：\n\n{self._truncate(context, 300)}"
            for chunk in self._chunk_text(error_msg):
                yield chunk