#!/usr/bin/env python3
"""
LangChain integration example - Using vLLM and Ollama with LangChain.

This example demonstrates how to integrate vLLM and Ollama backends
with LangChain for building LLM applications in the hydrology domain.
"""

import asyncio
import sys
sys.path.insert(0, '..')

from langchain_openai import ChatOpenAI
from langchain_community.llms import Ollama
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
from langchain_core.output_parsers import StrOutputParser


# ==================== Configuration ====================

# vLLM configuration — vLLM serves an OpenAI-compatible REST API, so these
# keys map directly onto ChatOpenAI constructor arguments.
VLLM_CONFIG = {
    "base_url": "http://localhost:8000/v1",
    "model_name": "Qwen/Qwen2.5-3B-Instruct",
    "temperature": 0.7,
    "max_tokens": 512,
}

# Ollama configuration — keys map onto the community Ollama LLM wrapper
# (`num_predict` is Ollama's name for the max-token limit).
OLLAMA_CONFIG = {
    "model": "qwen3:4b-instruct",
    "temperature": 0.7,
    "num_predict": 512,
}


# ==================== Hydrology Domain Examples ====================

# Sample hydrology questions (in Chinese): watershed time of concentration,
# the Xinanjiang model's three-layer evapotranspiration structure, and
# common objective functions for hydrological model calibration.
HYDROLOGY_QUESTIONS = [
    "什么是流域汇流时间？如何计算？",
    "解释新安江模型的三层蒸散发结构",
    "水文模型率定的常用目标函数有哪些？",
]

# Chat prompt shared by both backends: a system message framing the model as
# a senior hydrology expert, plus the user's question via the {question} slot.
HYDROLOGY_PROMPT = ChatPromptTemplate.from_messages([
    ("system", "你是一位资深的水文学专家。请用专业、准确的语言回答用户的水文学问题。"),
    ("human", "{question}")
])


# ==================== vLLM Integration ====================

def create_vllm_chain():
    """Create a LangChain chain backed by a local vLLM server.

    Returns:
        A runnable pipeline: hydrology prompt -> ChatOpenAI -> str output.
    """
    llm = ChatOpenAI(
        base_url=VLLM_CONFIG["base_url"],
        model_name=VLLM_CONFIG["model_name"],
        temperature=VLLM_CONFIG["temperature"],
        max_tokens=VLLM_CONFIG["max_tokens"],
        # vLLM ignores the key, but ChatOpenAI refuses to construct without
        # one unless OPENAI_API_KEY is set — pass a placeholder explicitly.
        api_key="EMPTY",
    )

    chain = HYDROLOGY_PROMPT | llm | StrOutputParser()
    return chain


# ==================== Ollama Integration ====================

def create_ollama_chain():
    """Create a LangChain chain backed by a local Ollama daemon.

    Returns:
        A runnable pipeline: hydrology prompt -> Ollama -> str output.
    """
    # OLLAMA_CONFIG keys match the Ollama wrapper's constructor arguments
    # one-to-one, so the config dict can be splatted directly.
    llm = Ollama(**OLLAMA_CONFIG)
    return HYDROLOGY_PROMPT | llm | StrOutputParser()


# ==================== RAG Example ====================

async def rag_example():
    """
    Demonstrate the RAG (Retrieval-Augmented Generation) pattern.

    In a real application, you would:
    1. Retrieve relevant documents from a vector store
    2. Inject them into the prompt context
    3. Generate a response using the LLM
    """
    print("\n" + "="*80)
    print("📚 RAG Pattern Example")
    print("="*80)
    
    # Simulated retrieved context (stands in for a vector-store hit).
    context = """
    新安江模型是一个经典的流域水文模型，由中国水文学家创建。
    该模型采用三层蒸散发结构：
    1. 上层：主要考虑植被截留和地表蒸发
    2. 中层：土壤水分蒸散发
    3. 下层：地下水蒸散发
    """
    
    rag_prompt = ChatPromptTemplate.from_messages([
        ("system", "你是水文学专家。基于以下上下文回答问题。\n\n上下文：{context}"),
        ("human", "{question}")
    ])
    
    # Use vLLM (ensure it's running). api_key is a placeholder: vLLM does
    # not validate it, but ChatOpenAI requires one when OPENAI_API_KEY is
    # unset, so omitting it makes construction fail.
    llm = ChatOpenAI(
        base_url=VLLM_CONFIG["base_url"],
        model_name=VLLM_CONFIG["model_name"],
        temperature=0.7,
        max_tokens=256,
        api_key="EMPTY",
    )
    
    chain = rag_prompt | llm | StrOutputParser()
    
    question = "请详细说明新安江模型的蒸散发结构"
    response = await chain.ainvoke({
        "context": context,
        "question": question
    })
    
    print(f"\n❓ Question: {question}")
    print(f"\n💡 Response:\n{response}")


# ==================== Streaming Example ====================

async def streaming_example():
    """Stream a response token-by-token from the vLLM backend."""
    print("\n" + "="*80)
    print("🌊 Streaming Response Example")
    print("="*80)
    
    llm = ChatOpenAI(
        base_url=VLLM_CONFIG["base_url"],
        model_name=VLLM_CONFIG["model_name"],
        temperature=0.7,
        streaming=True,
        # Placeholder key: vLLM ignores it, but ChatOpenAI construction
        # fails without one if OPENAI_API_KEY is not set in the environment.
        api_key="EMPTY",
    )
    
    chain = HYDROLOGY_PROMPT | llm | StrOutputParser()
    
    question = "什么是Nash单位线？"
    print(f"\n❓ Question: {question}")
    print("\n💡 Response (streaming):")
    
    # astream yields incremental string chunks; flush so they appear live.
    async for chunk in chain.astream({"question": question}):
        print(chunk, end="", flush=True)
    print("\n")


# ==================== Batch Processing ====================

async def batch_processing_example(backend="vllm"):
    """Run every sample question through the chosen backend in one batch.

    Args:
        backend: "vllm" selects the vLLM chain; anything else selects Ollama.
    """
    print("\n" + "="*80)
    print(f"⚡ Batch Processing Example - {backend.upper()}")
    print("="*80)
    
    chain = create_vllm_chain() if backend == "vllm" else create_ollama_chain()
    
    # abatch submits all questions together and awaits the full result set.
    responses = await chain.abatch([{"question": q} for q in HYDROLOGY_QUESTIONS])
    
    for idx, (q, resp) in enumerate(zip(HYDROLOGY_QUESTIONS, responses), start=1):
        print(f"\n{idx}. ❓ {q}")
        print(f"   💡 {resp[:200]}...")


# ==================== Main Function ====================

async def main():
    """Run all LangChain integration examples, one after another.

    Each example is wrapped so that a failure (e.g. a backend service that
    is not running) is reported and the remaining examples still execute.
    """
    print("="*80)
    print("🔗 LangChain Integration Examples - Hydrology Domain")
    print("="*80)
    print("\nThis example demonstrates:")
    print("  1. Basic vLLM integration with LangChain")
    print("  2. Basic Ollama integration with LangChain")
    print("  3. RAG (Retrieval-Augmented Generation) pattern")
    print("  4. Streaming responses")
    print("  5. Batch processing")
    
    async def _simple_demo(make_chain, question):
        # Shared body for examples 1 and 2: build a chain, ask one question.
        chain = make_chain()
        response = await chain.ainvoke({"question": question})
        print(f"\n❓ {question}")
        print(f"\n💡 {response}")
    
    async def _run_safe(coro, label, hint=None):
        # Await an example coroutine, turning any failure into a printed
        # report so the remaining examples still run.
        try:
            await coro
        except Exception as e:
            print(f"\n❌ {label} failed: {e}")
            if hint:
                print(f"   {hint}")
    
    # Example 1: Simple question with vLLM
    print("\n" + "="*80)
    print("Example 1: vLLM Integration")
    print("="*80)
    await _run_safe(
        _simple_demo(create_vllm_chain, HYDROLOGY_QUESTIONS[0]),
        "vLLM example",
        "Make sure vLLM service is running!",
    )
    
    # Example 2: Simple question with Ollama
    print("\n" + "="*80)
    print("Example 2: Ollama Integration")
    print("="*80)
    await _run_safe(
        _simple_demo(create_ollama_chain, HYDROLOGY_QUESTIONS[1]),
        "Ollama example",
        "Make sure Ollama service is running!",
    )
    
    # Examples 3-5: RAG pattern, streaming, batch processing
    await _run_safe(rag_example(), "RAG example")
    await _run_safe(streaming_example(), "Streaming example")
    await _run_safe(batch_processing_example(backend="vllm"), "Batch processing example")
    
    print("\n" + "="*80)
    print("✅ Examples Complete!")
    print("="*80)
    print("\n💡 Tips:")
    print("  - For production use, add error handling and retries")
    print("  - Consider caching responses for repeated questions")
    print("  - Use async patterns for better performance")
    print("  - Monitor token usage and costs")


if __name__ == "__main__":
    # Entry point: drive all examples through a single asyncio event loop.
    asyncio.run(main())

