import json
import logging
import time

import openai
import requests

from app.config import settings


def call_knowledge_base(query: str, knowledge: dict) -> str:
    """Query the knowledge-base HTTP API and return its "answer" field.

    Args:
        query: Natural-language question to send to the knowledge base.
        knowledge: Config dict; must contain "api_url" and "api_key".

    Returns:
        The "answer" string from the API response, or "" on any request or
        parsing error (errors are logged, never raised).
    """
    start_time = time.time()
    headers = {
        "Authorization": f"Bearer {knowledge['api_key']}",
        "Content-Type": "application/json",
    }
    data = {
        "inputs": {},
        "query": query,
        "response_mode": "blocking",
        "conversation_id": "",
        "user": "excel-automation-script",
    }
    try:
        # json= lets requests serialize the body itself (equivalent to the
        # previous data=json.dumps(data), but idiomatic).
        response = requests.post(
            knowledge["api_url"], headers=headers, json=data, timeout=60
        )
        response.raise_for_status()
        response_json = response.json()
    except json.JSONDecodeError as e:
        # Must come BEFORE RequestException: requests>=2.27 raises
        # requests.exceptions.JSONDecodeError from .json(), which subclasses
        # both json.JSONDecodeError and RequestException -- with the old
        # ordering this handler was unreachable and parse failures were
        # misreported as API-call failures.
        elapsed_time = time.time() - start_time
        logging.error(f"Error occurred when parsing knowledge base response (took {elapsed_time:.2f}s): {e}")
        return ""
    except requests.exceptions.RequestException as e:
        elapsed_time = time.time() - start_time
        logging.error(f"Error occurred when calling knowledge base API (took {elapsed_time:.2f}s): {e}")
        return ""
    elapsed_time = time.time() - start_time
    logging.info(f"Knowledge base API call completed in {elapsed_time:.2f} seconds for query: '{query[:50]}...'")
    return response_json.get("answer", "")


def _strip_code_fences(text: str) -> str:
    """Remove a markdown code fence (```json ... ```) some models wrap around JSON output."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence whether or not it carries the "json" tag.
        cleaned = cleaned.removeprefix("```json").removeprefix("```").strip()
        # Only drop a closing fence if one is actually present. The previous
        # implementation sliced [7:-3] unconditionally and corrupted responses
        # that lacked the trailing ```.
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3].strip()
    return cleaned


def _format_sources(source_docs_list: list, kb_answer: str) -> str:
    """Render LLM-cited documents, pulling full content from the raw KB answer when titles match."""
    try:
        kb_answer_list = json.loads(kb_answer)
        if not isinstance(kb_answer_list, list):
            kb_answer_list = []
    except Exception:
        # kb_answer may be plain text rather than a JSON list; enrichment is
        # best-effort, so just skip it.
        kb_answer_list = []
    if not source_docs_list:
        return "No specific source document identified."
    formatted_sources = []
    for doc in source_docs_list:
        file_name = doc.get('filename', 'N/A')
        content = doc.get('content', 'N/A')
        # The prompt only asks the model for filenames, so content normally
        # comes from the KB item whose 'title' matches the cited filename.
        for kb_item in kb_answer_list:
            if isinstance(kb_item, dict) and file_name == kb_item.get('title'):
                content = kb_item.get('content', content)
        formatted_sources.append(f"File: {file_name}\nContent: {content}")
    return "\n\n---\n\n".join(formatted_sources)


def analyze_with_llm(description: str, kb_answer: str, language: str) -> tuple[str, float, str, str]:
    """Ask the configured OpenAI model whether the KB answer supports a scenario.

    Args:
        description: The business scenario under evaluation.
        kb_answer: Raw answer text from the knowledge base (possibly JSON).
        language: Language the model should answer in.

    Returns:
        Tuple (support_status, confidence, source, summary). support_status is
        "Yes"/"No" on success or "Error" on failure; confidence is 0.0-1.0;
        source lists the supporting documents; summary is the model's answer
        (or the configured no-support text when status is "No").
    """
    start_time = time.time()
    if not kb_answer:
        elapsed_time = time.time() - start_time
        logging.info(f"LLM analysis skipped (took {elapsed_time:.2f}s) due to empty knowledge base answer")
        return "No", 0.0, "", settings.no_support_summary
    # Pretty-print structured KB answers for the prompt; fall back to raw text.
    try:
        context_str = json.dumps(json.loads(kb_answer), indent=2)
    except json.JSONDecodeError:
        context_str = kb_answer
    # Guard against a missing or placeholder API key before spending a request.
    if not settings.openai_api_key or "your-openai-api-key" in settings.openai_api_key:
        elapsed_time = time.time() - start_time
        logging.error(f"OpenAI API key is not configured. LLM analysis failed (took {elapsed_time:.2f}s)")
        return "Error", 0.0, "OpenAI API key not configured", "Configuration error."
    response_content = ""
    try:
        client = openai.OpenAI(api_key=settings.openai_api_key, base_url=settings.openai_base_url, timeout=180)
        system_prompt = """
        You are a helpful RAG user guideline assistant, You are an expert business analyst. Your task is to determine if a business scenario is supported by the provided knowledge base context. 
        Respond ONLY with a valid JSON object in the specified format.
        """
        user_prompt = f"""
        Business Scenario: \"{description}\"

        Knowledge Base Context:
        {context_str}

        Based *only* on the provided context, please perform the following analysis:
        1. Does the knowledge base context adequately address and support the business scenario?
        2. What is your confidence level (from 0.0 to 1.0) that the context supports the scenario?
        3. From the context, identify up to the top 3 most relevant source documents that support your decision. For each document, only provide its filename.
        4. If your answer to question 1 is \"Yes\", formulate a two-part response:
           a. First, write a single, concise sentence that directly confirms how the system supports the business scenario.
           b. On a new line, write \"The solution involves:\", and then on subsequent new lines, extract all relevant details from the 'Knowledge Base Context' as a comprehensive, bulleted list. Each bullet point should represent a key feature or capability that answers the scenario.
        5. Language: {language}.

        Provide your answer in the following JSON format and nothing else. The \"source_documents\" field should be an array of objects.
        {{
          "support_status": "Yes" or "No",
          "confidence": <float between 0.0 and 1.0>,
          "source_documents": [
            {{
              "filename": "<The name of the first source document>"
            }}
          ],
          "answer": "<If support_status is 'Yes', a one-sentence summary, followed by 'The solution involves:', and then a detailed bulleted list of facts from the context.>"
        }}
        If no specific documents are relevant, the \"source_documents\" array should be empty.
        """
        completion = client.chat.completions.create(
            model=settings.openai_model,
            response_format={"type": "json_object"},
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        )
        response_content = completion.choices[0].message.content
        if response_content is None:
            elapsed_time = time.time() - start_time
            logging.error(f"LLM analysis failed: No content returned (took {elapsed_time:.2f}s)")
            return "Error", 0.0, "LLM response is None", "No content returned."
        analysis_result = json.loads(_strip_code_fences(response_content))
        support = analysis_result.get("support_status", "Error")
        confidence = float(analysis_result.get("confidence", 0.0))
        summary = analysis_result.get("answer", "No answer provided.")
        source = _format_sources(analysis_result.get("source_documents", []), kb_answer)
        if support == "No":
            # Business rule: unsupported scenarios get the canned summary and
            # no source listing.
            summary = settings.no_support_summary
            source = ""
        elapsed_time = time.time() - start_time
        logging.info(f"LLM analysis completed successfully (took {elapsed_time:.2f}s) with support status: {support}")
        return support, confidence, source, summary
    except Exception as e:
        # Broad catch is the top-level boundary for the LLM call: callers only
        # ever see the "Error" tuple; the raw response is logged for debugging.
        elapsed_time = time.time() - start_time
        logging.error(f"LLM analysis failed (took {elapsed_time:.2f}s): {e}. Raw response: '{response_content}'")
        return "Error", 0.0, "LLM processing error", str(e)
