#!/usr/bin/env python3
import argparse
import re
import shlex
import subprocess
import sys

# LlamaIndex 0.10.x and later requires these specific imports
from llama_index.llms.ollama import Ollama
from llama_index.core import PromptTemplate

def get_context_from_rag(query: str) -> str:
    """
    Calls the query_rag.py script to get context for a given query.

    Args:
        query: The natural-language question forwarded to query_rag.py.

    Returns:
        The extracted context text, or an empty string when no relevant
        context was found, the output could not be parsed, or the
        subprocess failed for any reason.
    """
    print("INFO: Retrieving context from RAG knowledge base...", file=sys.stderr)
    try:
        # The shell wrapper is required to activate the virtual environment
        # for the subprocess.  shlex.quote() makes the query safe to embed:
        # the previous plain \"{query}\" interpolation broke (or executed
        # arbitrary shell) on queries containing quotes, $, or backticks.
        command = f"source .venv/bin/activate && python3 query_rag.py {shlex.quote(query)}"

        process = subprocess.run(
            ["bash", "-c", command],
            capture_output=True,
            text=True,
            check=True,  # non-zero exit raises CalledProcessError below
        )

        # The actual output is on stdout, info logs are on stderr
        stdout = process.stdout

        # Strip the header and footer that query_rag.py wraps around the
        # snippets so only the context itself remains.
        header_match = re.search(r"# Top \d+ most relevant snippets for question:.*?\n\n", stdout)
        footer_match = re.search(r"======================================================================================", stdout)

        if header_match:
            start_index = header_match.end()
            # If the footer is missing, fall back to the end of the output.
            end_index = footer_match.start() if footer_match else len(stdout)
            context = stdout[start_index:end_index].strip()
            if not context or context.startswith("# --- No relevant snippets found. ---"):
                 print("WARNING: No relevant context found from RAG.", file=sys.stderr)
                 return ""
            print("INFO: Context retrieved successfully.", file=sys.stderr)
            return context
        else:
            print("WARNING: Could not parse context from RAG output.", file=sys.stderr)
            return ""

    except subprocess.CalledProcessError as e:
        print(f"ERROR: Failed to run query_rag.py: {e}", file=sys.stderr)
        print(f"Stderr: {e.stderr}", file=sys.stderr)
        return ""
    except Exception as e:
        # Catch-all boundary: this helper must never crash the caller;
        # an empty string signals "no context available".
        print(f"ERROR: An unexpected error occurred while getting context: {e}", file=sys.stderr)
        return ""

def main():
    """
    CLI entry point: retrieve context from the RAG knowledge base, then
    generate an answer with a local Ollama-served LLM.

    Exits with status 1 when no context is found or generation fails.
    """
    parser = argparse.ArgumentParser(
        description="Generates an answer by first querying a RAG knowledge base and then using a local LLM.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("query", type=str, help="The question or query string to answer.")
    args = parser.parse_args()

    query = args.query

    # 1. Retrieve context
    context = get_context_from_rag(query)
    if not context:
        # Failure path: report on stderr (consistent with the other error
        # branches in this script) and exit non-zero.
        print("\nCould not generate an answer because no relevant context was found.", file=sys.stderr)
        sys.exit(1)

    # 2. Prepare for generation.
    # Single source of truth for the model name, so the log message and
    # the Ollama call cannot drift apart.
    model_name = "qwen3:1.7b"
    print(f"\nINFO: Generating answer with local LLM ({model_name})...", file=sys.stderr)

    # Using the LlamaIndex PromptTemplate for consistency
    qa_template_str = (
        "You are an expert OpenHarmony software engineer assistant. Your task is to answer the user's question based *only* on the provided code context.\n"
        "Be concise and accurate. If the context does not contain the answer, state that the information is not available in the provided context.\n"
        "---------------------\n"
        "CONTEXT:\n{context_str}\n"
        "---------------------\n"
        "QUESTION: {query_str}\n"
        "---------------------\n"
        "ANSWER:"
    )
    qa_template = PromptTemplate(qa_template_str)
    prompt = qa_template.format(context_str=context, query_str=query)

    # 3. Generate answer with Ollama
    try:
        # Generous timeout: local generation on small hardware can be slow.
        llm = Ollama(model=model_name, request_timeout=300.0)

        print("\n--- Generated Answer ---", file=sys.stdout)
        # Stream the response token-by-token so the user sees output early.
        response = llm.stream_complete(prompt)
        for delta in response:
            print(delta.delta, end="", flush=True)
        print("\n------------------------\n", file=sys.stdout)

    except Exception as e:
        print(f"\nERROR: Failed to generate answer with Ollama: {e}", file=sys.stderr)
        print("Please ensure the Ollama service is running (`ollama serve`) and the model is available (`ollama list`).", file=sys.stderr)
        sys.exit(1)

if __name__ == "__main__":
    # Run the CLI only when executed as a script, not on import.
    main()
