import os
import dspy
from dotenv import load_dotenv
from openai import AzureOpenAI

# Deployment name used when AZURE_OPENAI_DEPLOYMENT is not set in the environment.
DEFAULT_DEPLOYMENT = "gpt-4o-mini"

# Load environment variables: prefer .env.local, fall back to .env if absent.
if os.path.exists(".env.local"):
    load_dotenv(".env.local")
elif os.path.exists(".env"):
    load_dotenv(".env")

# Azure OpenAI connection settings, sourced from the environment.
api_key = os.getenv("AZURE_OPENAI_API_KEY")
endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
api_version = os.getenv("AZURE_OPENAI_API_VERSION", "2023-05-15")
deployment = os.getenv("AZURE_OPENAI_DEPLOYMENT", DEFAULT_DEPLOYMENT)

# Bundle the settings in one place for convenience / external consumers.
azure_config = {
    "api_key": api_key,
    "azure_endpoint": endpoint,
    "api_version": api_version,
    "azure_deployment": deployment,
}

# Create the DSPy language model.
# dspy.LM routes requests through LiteLLM, which has no `provider=` or
# `config=` parameters: Azure deployments are addressed with the
# "azure/<deployment>" model string, and connection settings are passed as
# keyword arguments (LiteLLM expects `api_base` for the endpoint URL).
# Without the "azure/" prefix the request would be routed to the default
# OpenAI provider and the Azure endpoint would never be used.
azure_llm = dspy.LM(
    f"azure/{deployment}",
    api_key=api_key,
    api_base=endpoint,
    api_version=api_version,
)
dspy.settings.configure(lm=azure_llm)

# Define a simple information retrieval dataset
class Document:
    """A minimal text document with an optional identifier.

    NOTE: the ``id`` parameter shadows the builtin, but the name is kept
    for backward compatibility with existing callers.
    """

    def __init__(self, text, id=None):
        self.text = text  # raw document text, scanned by keyword search
        self.id = id      # optional external identifier (e.g. "doc1")

    def __repr__(self):
        # Added for debuggability; does not affect retrieval behavior.
        return f"{type(self).__name__}(id={self.id!r}, text={self.text!r})"

# Sample knowledge base: (text, id) pairs materialized as Document objects.
_KB_ENTRIES = [
    ("DSPy is a framework for programming with foundation models. It helps developers solve complex tasks with LLMs.", "doc1"),
    ("Azure OpenAI Service provides REST API access to OpenAI's models with the security capabilities of Azure.", "doc2"),
    ("RAG stands for Retrieval Augmented Generation. It combines retrieval systems with LLM generation to produce more factual responses.", "doc3"),
    ("DSPy can be integrated with Azure OpenAI to create powerful AI applications with sophisticated prompting patterns.", "doc4"),
    ("Prompt engineering involves designing and optimizing text prompts for language models to produce desired outputs.", "doc5"),
]
knowledge_base = [Document(text, doc_id) for text, doc_id in _KB_ENTRIES]

# Simulate a vector database
class SimpleVectorDB:
    """Toy document store with keyword-overlap search.

    Stands in for a real vector database: instead of embeddings it counts
    how many query terms appear (as substrings) in each document's text.
    """

    def __init__(self, documents):
        self.documents = documents  # any objects exposing a .text attribute

    def search(self, query, k=2):
        """Return up to *k* documents ranked by query-term overlap.

        Documents with no matching terms are excluded; ties keep the
        original insertion order (stable sort).
        """
        terms = query.lower().split()

        # Score each document by the number of query terms it contains.
        matches = []
        for doc in self.documents:
            hits = sum(term in doc.text.lower() for term in terms)
            if hits:
                matches.append((hits, doc))

        matches.sort(key=lambda pair: pair[0], reverse=True)
        return [doc for _, doc in matches[:k]]

# Initialize the vector database over the sample knowledge base
vector_db = SimpleVectorDB(knowledge_base)

# Define the RAG modules
class Retriever(dspy.Module):
    """DSPy module that fetches supporting documents from a vector store."""

    def __init__(self, vector_db):
        super().__init__()
        self.vector_db = vector_db  # backing store providing .search(query)

    def forward(self, query):
        """Look up documents relevant to *query*, wrapped in a Prediction."""
        return dspy.Prediction(retrieved_docs=self.vector_db.search(query))

class RAGModel(dspy.Module):
    """Retrieval-augmented generation pipeline: retrieve context, then answer."""

    def __init__(self, retriever):
        super().__init__()
        self.retriever = retriever
        # Chain-of-thought generator conditioned on the retrieved context.
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")

    def forward(self, question):
        """Answer *question* using documents pulled from the retriever."""
        docs = self.retriever(question).retrieved_docs

        # Present each retrieved document on its own numbered line.
        context = "\n".join(
            f"Document {position}: {doc.text}"
            for position, doc in enumerate(docs, start=1)
        )

        response = self.generate_answer(context=context, question=question)

        return dspy.Prediction(
            question=question,
            context=context,
            answer=response.answer,
        )

# Example usage
def main():
    """Run the demo: validate configuration, then answer the sample questions."""
    # Fail fast with setup guidance if Azure credentials are not configured.
    required = (("AZURE_OPENAI_API_KEY", api_key), ("AZURE_OPENAI_ENDPOINT", endpoint))
    missing_vars = [name for name, value in required if not value]

    if missing_vars:
        print("\n" + "=" * 50)
        print("ERROR: Missing required environment variables:")
        for var in missing_vars:
            print(f"  - {var}")
        print("\nPlease create a .env or .env.local file with these variables.")
        print("See README.md for setup instructions.")
        print("=" * 50 + "\n")
        return

    # Assemble the retrieval-augmented pipeline.
    rag_model = RAGModel(Retriever(vector_db))

    questions = [
        "What is DSPy and how does it work?",
        "How can DSPy be used with Azure OpenAI?",
        "What is RAG and why is it useful?",
    ]

    separator = "=" * 50
    print("\n" + separator)
    print("DSPy + Azure OpenAI RAG Example")
    print(separator)
    print(f"Using deployment: {deployment}")
    print(separator + "\n")

    # Answer each question; stop on the first failure since later calls
    # would hit the same misconfigured or unreachable service.
    for question in questions:
        print(f"\nQuestion: {question}")
        try:
            response = rag_model(question)
            print(f"Context used:\n{response.context}")
            print(f"Answer: {response.answer}")
        except Exception as e:
            print(f"Error: {str(e)}")
            print("Check your credentials and connection to Azure OpenAI.")
            return

# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()