import os
import dspy
from dotenv import load_dotenv

# Deployment name used for the Azure OpenAI model.
# NOTE(review): despite the original comment ("if not specified in environment
# variables"), no environment variable is consulted anywhere for the deployment
# name -- this constant is always used as-is. Confirm whether an env override
# (e.g. AZURE_OPENAI_DEPLOYMENT) was intended.
DEFAULT_DEPLOYMENT = "gpt-4o-mini"

# Load environment variables (Azure credentials etc.) from .env.local first.
# load_dotenv is a no-op when the file is absent.
load_dotenv(".env.local")
# Fallback to .env if .env.local doesn't exist
if not os.path.exists(".env.local") and os.path.exists(".env"):
    load_dotenv(".env")

# Create the DSPy language model bound to the Azure deployment and register it
# as the process-wide default LM for all DSPy modules.
azure_llm = dspy.LM(f'azure/{DEFAULT_DEPLOYMENT}')
dspy.settings.configure(lm=azure_llm)

# Define a simple information retrieval dataset
class Document:
    """Minimal container for a retrievable text passage.

    Attributes:
        text: The document's raw text content.
        id: Optional identifier (parameter name kept as ``id`` for backward
            compatibility even though it shadows the builtin).
    """

    def __init__(self, text, id=None):
        self.text = text
        self.id = id

    def __repr__(self):
        # Added for debuggability; does not affect any existing caller.
        return f"Document(id={self.id!r}, text={self.text!r})"

# Sample in-memory knowledge base: short passages about DSPy, Azure OpenAI,
# and RAG that SimpleVectorDB searches over. Stands in for a real corpus.
knowledge_base = [
    Document("DSPy is a framework for programming with foundation models. It helps developers solve complex tasks with LLMs.", "doc1"),
    Document("Azure OpenAI Service provides REST API access to OpenAI's models with the security capabilities of Azure.", "doc2"),
    Document("RAG stands for Retrieval Augmented Generation. It combines retrieval systems with LLM generation to produce more factual responses.", "doc3"),
    Document("DSPy can be integrated with Azure OpenAI to create powerful AI applications with sophisticated prompting patterns.", "doc4"),
    Document("Prompt engineering involves designing and optimizing text prompts for language models to produce desired outputs.", "doc5"),
    Document("The DSPy Teleprompter automatically optimizes prompts for specific tasks using few-shot examples.", "doc6"),
]

# Simulate a vector database
class SimpleVectorDB:
    """Toy stand-in for a vector store over a fixed list of documents.

    Relevance is a naive case-insensitive substring count of query terms;
    a real implementation would embed documents and run vector search.
    """

    def __init__(self, documents):
        self.documents = documents

    def search(self, query, k=2):
        """Return up to ``k`` documents matching at least one query term."""
        terms = query.lower().split()

        # Score every document by how many query terms it contains.
        scored = [
            (doc, sum(term in doc.text.lower() for term in terms))
            for doc in self.documents
        ]

        # Drop non-matches, then order best-first (stable on ties).
        ranked = sorted(
            (pair for pair in scored if pair[1] > 0),
            key=lambda pair: pair[1],
            reverse=True,
        )
        return [doc for doc, _ in ranked[:k]]

# Module-level search index over the sample knowledge base; used by main().
vector_db = SimpleVectorDB(knowledge_base)

# Define input/output schema for the RAG task
class RAGExample(dspy.Example):
    """Example schema for the RAG task.

    NOTE(review): dspy.Example stores whatever fields are passed to its
    constructor; these class-level annotations appear to serve as schema
    documentation only -- confirm they are not expected to enforce types.
    """
    question: str
    answer: str

# Define the RAG module
class RAGQA(dspy.Module):
    """Retrieval-augmented QA: retrieve context, then generate an answer."""

    def __init__(self, retriever):
        super().__init__()
        # Any object exposing .search(question) -> docs with a .text attribute.
        self.retriever = retriever
        # Chain-of-thought generation conditioned on the retrieved context.
        self.generate_answer = dspy.ChainOfThought("context, question -> answer")

    def forward(self, question):
        # Fetch the top documents for this question.
        retrieved = self.retriever.search(question)

        # Build a numbered context string from the retrieved passages.
        numbered = [
            f"Document {i+1}: {doc.text}" for i, doc in enumerate(retrieved)
        ]
        context = "\n".join(numbered)

        # Ask the LM for an answer grounded in that context.
        result = self.generate_answer(context=context, question=question)

        return dspy.Prediction(
            question=question,
            context=context,
            answer=result.answer,
        )

# Define a metric for evaluating answers
def answer_correctness_metric(example, pred, trace=None):
    """Score a prediction by word overlap with the reference answer.

    Returns the fraction of words in ``example.answer`` that also appear in
    ``pred.answer`` (case-insensitive), capped at 1.0. A simple stand-in for
    a real answer-quality metric.

    Args:
        example: Object carrying the reference ``answer`` string.
        pred: Prediction object expected to expose ``answer``.
        trace: Unused; accepted for DSPy metric-signature compatibility.

    Returns:
        float in [0.0, 1.0]; 0.0 when either answer is missing or empty.
    """
    # Robustness fix: the original crashed with AttributeError on
    # ``None.lower()`` when pred.answer existed but was None; guard both sides.
    example_answer = getattr(example, "answer", None)
    pred_answer = getattr(pred, "answer", None)
    if not example_answer or not pred_answer:
        return 0.0

    example_words = set(example_answer.lower().split())
    pred_words = set(pred_answer.lower().split())
    if not example_words:
        # Whitespace-only reference: nothing to match against.
        return 0.0

    score = len(example_words & pred_words) / len(example_words)
    return min(1.0, score)

# Create training examples (in a real app, this would be from real data)
def get_training_examples():
    """Build the small hand-written trainset for teleprompter optimization.

    Returns:
        list[RAGExample]: QA pairs with ``question`` marked as the input
        field via ``with_inputs`` (DSPy needs inputs declared explicitly).
    """
    # (question, answer) pairs; in a real app these would come from real data.
    qa_pairs = [
        (
            "What is DSPy?",
            "DSPy is a framework for programming with foundation models that helps developers solve complex tasks with LLMs.",
        ),
        (
            "How is RAG useful?",
            "RAG combines retrieval systems with LLM generation to produce more factual responses.",
        ),
        (
            "What is prompt engineering?",
            "Prompt engineering involves designing and optimizing text prompts for language models to produce desired outputs.",
        ),
    ]
    # Fix: dropped the dead ``examples = []`` assignment the original
    # immediately overwrote; build the list in one pass instead.
    return [
        RAGExample(question=q, answer=a).with_inputs("question")
        for q, a in qa_pairs
    ]

# Example usage
def main():
    """Run the RAG demo: basic RAG, then teleprompter-optimized RAG.

    Prints each question, the retrieved context, and the generated answer.
    Falls back to the unoptimized model if teleprompter compilation fails,
    and reports a credentials/connection hint on any top-level error.
    """
    print("\n" + "="*50)
    print("DSPy + Azure OpenAI RAG Example")
    print("="*50)
    print(f"Using deployment: {DEFAULT_DEPLOYMENT}")
    print("="*50 + "\n")

    # Fix: this list was duplicated verbatim in the optimized and fallback
    # branches below; define it once.
    questions = [
        "What is DSPy and how does it work with Azure?",
        "Can you explain RAG and its benefits?",
        "What is the DSPy Teleprompter used for?"
    ]

    try:
        # Create the RAG model
        rag_model = RAGQA(vector_db)

        # Get training examples
        train_examples = get_training_examples()

        # Basic RAG without optimization
        print("==== Basic RAG ====")
        question = "What is DSPy and how does it work with Azure?"
        response = rag_model(question)
        print(f"Question: {question}")
        print(f"Context used:\n{response.context}")
        print(f"Answer: {response.answer}\n")

        # Try to optimize with Teleprompter
        print("==== Optimizing with Teleprompter ====")
        try:
            # Create a teleprompter with our metric
            teleprompter = dspy.teleprompt.BootstrapFewShot(
                metric=answer_correctness_metric
            )

            # Compile the optimized model
            print("Compiling optimized model...")
            optimized_model = teleprompter.compile(
                rag_model,
                trainset=train_examples
            )

            # Test the optimized model
            print("\n==== Optimized RAG ====")
            for question in questions:
                print(f"\nQuestion: {question}")
                response = optimized_model(question)
                print(f"Context used:\n{response.context}")
                print(f"Answer: {response.answer}")

        except Exception as e:
            # Optimization is best-effort: report the error and fall back to
            # the unoptimized model rather than aborting the demo.
            print(f"Optimization error: {str(e)}")
            print("Falling back to basic model...")

            for question in questions:
                print(f"\nQuestion: {question}")
                response = rag_model(question)
                print(f"Context used:\n{response.context}")
                print(f"Answer: {response.answer}")

    except Exception as e:
        # Top-level boundary: most failures here are Azure auth/connectivity.
        print(f"Error: {str(e)}")
        print("Check your credentials and connection to Azure OpenAI.")

# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main() 