import os
import logging
import json
from typing import List, Dict, Any, Optional
import openai
from dotenv import load_dotenv

# Configure logging
# Module-level logger; handlers/levels are expected to be configured by the host app.
logger = logging.getLogger(__name__)

# Load environment variables
# Populates the process environment from .env.local so the os.getenv calls in
# AzureOpenAIClient.__init__ can pick up the AZURE_OPENAI_* settings.
load_dotenv(".env.local")

class AzureOpenAIClient:
    """
    Azure OpenAI client for generating responses with retrieval augmentation.

    When the Azure OpenAI service is not configured (missing environment
    variables) or an API call fails, the client degrades gracefully and
    returns simplified responses that echo the retrieved context instead
    of raising.
    """

    def __init__(self):
        """Initialize the Azure OpenAI client from environment variables.

        Reads AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT,
        AZURE_OPENAI_DEPLOYMENT_NAME and AZURE_OPENAI_API_VERSION.
        Never raises: on missing configuration or any initialization error,
        ``self.client`` is set to None and ``self.is_available`` to False so
        callers transparently get the simplified fallback responses.
        """
        try:
            # Try to get Azure OpenAI configuration from environment variables
            self.api_key = os.getenv("AZURE_OPENAI_API_KEY", "")
            self.endpoint = os.getenv("AZURE_OPENAI_ENDPOINT", "")
            self.deployment_name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "")
            self.api_version = os.getenv("AZURE_OPENAI_API_VERSION", "2023-05-15")

            # Check if all required parameters are provided
            if not all([self.api_key, self.endpoint, self.deployment_name]):
                # Missing required settings: run in degraded (fallback) mode.
                logger.warning("Azure OpenAI parameters not configured, using simplified responses")
                self.client = None
                self.is_available = False
            else:
                # Initialize Azure OpenAI client
                self.client = openai.AzureOpenAI(
                    api_key=self.api_key,
                    api_version=self.api_version,
                    azure_endpoint=self.endpoint
                )
                self.is_available = True
                logger.info("Azure OpenAI client initialized successfully")
        except Exception as e:
            logger.error(f"Error initializing Azure OpenAI client: {e}")
            self.client = None
            self.is_available = False

    # ------------------------------------------------------------------
    # Internal helpers (extracted to remove the duplication between the
    # blocking and streaming entry points)
    # ------------------------------------------------------------------

    def _fallback_response(self, context: str) -> str:
        """Return the simplified reply used when the API cannot be called."""
        if context:
            # Echo (up to) the first 500 characters of the retrieved context.
            return f"Based on the retrieved information, I can provide the following answer:\n\n{context[:500]}..."
        return "I couldn't find information related to your question. Please try rephrasing your question or upload more relevant documents."

    def _build_messages(
        self,
        messages: List[Dict[str, Any]],
        context: str
    ) -> List[Dict[str, str]]:
        """Prepend the system prompt and merge *context* into the last user message.

        Args:
            messages: List of message history dicts with "role" and "content".
            context: Relevant context retrieved from the knowledge base; when
                non-empty it is appended to the final user message.

        Returns:
            A normalized list of {"role", "content"} dicts ready to send to
            the chat completions API.
        """
        # Build system message
        system_message = {
            "role": "system",
            "content": "You are an AI assistant based on RAG (Retrieval Augmented Generation) technology. Answer the user's questions based on the retrieved documents. If the retrieved content doesn't answer the user's question, be honest and don't make up information. Always answer in Chinese."
        }

        # Reorganize message list
        processed_messages = [system_message]

        # Process user messages, combining the last user message with retrieval results
        for i, msg in enumerate(messages):
            if i == len(messages) - 1 and msg["role"] == "user" and context:
                # Combine retrieval results with the user's question
                enhanced_content = f"""
User question: {msg["content"]}

Retrieved relevant documents:
{context}

Please answer the user's question based on the above retrieved documents.
"""
                processed_messages.append({
                    "role": "user",
                    "content": enhanced_content
                })
            else:
                processed_messages.append(msg)

        # Normalize: keep only the keys the API expects.
        return [{
            "role": msg["role"],
            "content": msg["content"]
        } for msg in processed_messages]

    @staticmethod
    def _chunk_text(text: str, size: int = 10):
        """Yield *text* in fixed-size pieces to simulate a token stream."""
        for i in range(0, len(text), size):
            yield text[i:i + size]

    def generate_response(
        self, 
        messages: List[Dict[str, Any]], 
        context: str,
        max_tokens: int = 800,
        temperature: float = 0.7
    ) -> str:
        """
        Generate a response using Azure OpenAI API

        Args:
            messages: List of message history
            context: Relevant context retrieved from the knowledge base
            max_tokens: Maximum number of tokens to generate
            temperature: Temperature parameter controlling creativity

        Returns:
            Generated response text
        """
        if not self.is_available or self.client is None:
            # If Azure OpenAI is not available, return simplified response
            return self._fallback_response(context)

        try:
            # Call Azure OpenAI API
            response = self.client.chat.completions.create(
                model=self.deployment_name,
                messages=self._build_messages(messages, context),
                temperature=temperature,
                max_tokens=max_tokens,
                n=1
            )

            # content may be None in edge cases; honor the declared -> str.
            return response.choices[0].message.content or ""

        except Exception as e:
            logger.error(f"Error calling Azure OpenAI API: {e}")
            # Return simplified response on error
            return f"Sorry, I encountered an issue generating a response. Here's the relevant information I found in the knowledge base:\n\n{context[:300]}..."

    def generate_response_stream(
        self, 
        messages: List[Dict[str, Any]], 
        context: str,
        max_tokens: int = 800,
        temperature: float = 0.5
    ):
        """
        Stream a response using Azure OpenAI API

        Args:
            messages: List of message history
            context: Relevant context retrieved from the knowledge base
            max_tokens: Maximum number of tokens to generate
            temperature: Temperature parameter controlling creativity

        Yields:
            Chunks of generated response text
        """
        if not self.is_available or self.client is None:
            # If Azure OpenAI is not available, simulate streaming by
            # chunking the simplified fallback response.
            yield from self._chunk_text(self._fallback_response(context))
            return

        try:
            # Call Azure OpenAI API streaming interface
            response_stream = self.client.chat.completions.create(
                model=self.deployment_name,
                messages=self._build_messages(messages, context),
                temperature=temperature,
                max_tokens=max_tokens,
                n=1,
                stream=True
            )

            # Yield each piece of the streaming response; some stream chunks
            # carry no content (e.g. role-only deltas) and are skipped.
            for chunk in response_stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except Exception as e:
            logger.error(f"Error calling Azure OpenAI API streaming interface: {e}")
            # Return simplified response on error
            error_msg = f"Sorry, I encountered an issue generating a response. Here's the relevant information I found in the knowledge base:\n\n{context[:300]}..."
            yield from self._chunk_text(error_msg)