"""
Example of using custom LangChain LLMs with Local Deep Research.

This example shows how to integrate your own LLM implementations or wrappers
with LDR's research functions.
"""

from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult

from local_deep_research.api import detailed_research, quick_summary


class CustomLLM(BaseChatModel):
    """Example custom LLM built on LangChain's BaseChatModel."""

    # BaseChatModel is a Pydantic model, so configuration is declared as
    # class-level fields; assigning undeclared attributes in a custom
    # __init__ would raise a Pydantic error.
    model_name: str = "custom-model"
    temperature: float = 0.7

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate a response. This is where you'd call your custom model."""
        # This is a mock implementation - replace with your actual model call
        response_text = f"This is a response from {self.model_name} to: {messages[-1].content}"

        # Create the response
        message = AIMessage(content=response_text)
        generation = ChatGeneration(message=message)

        return ChatResult(generations=[generation])

    @property
    def _llm_type(self) -> str:
        """Return identifier for this LLM."""
        return "custom"


def custom_llm_factory(
    model_name: str = "factory-model", temperature: float = 0.5, **kwargs
):
    """Factory function that creates a custom LLM instance."""
    return CustomLLM(model_name=model_name, temperature=temperature)
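

# A factory can also wrap an existing LangChain chat model rather than a
# hand-written one. Sketch, assuming the optional langchain-openai package
# is installed:
#
#   from langchain_openai import ChatOpenAI
#
#   def openai_llm_factory(model_name: str = "gpt-4o-mini", **kwargs):
#       return ChatOpenAI(model=model_name, temperature=kwargs.get("temperature", 0.5))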


def main():
    # Example 1: Using a custom LLM instance
    custom_llm = CustomLLM(model_name="my-custom-model", temperature=0.8)

    result = quick_summary(
        query="What are the latest advances in quantum computing?",
        llms={"my_custom": custom_llm},
        provider="my_custom",  # Use our registered LLM
        search_tool="wikipedia",  # Use Wikipedia as search engine
    )

    print("Summary with custom LLM instance:")
    print(result["summary"])
    print("-" * 80)

    # Example 2: Using a factory function
    result = quick_summary(
        query="Explain the benefits of renewable energy",
        llms={"factory_llm": custom_llm_factory},
        provider="factory_llm",  # Use our factory-created LLM
        model_name="renewable-expert",  # This gets passed to the factory
        temperature=0.3,
        search_tool="auto",
    )

    print("\nSummary with factory-created LLM:")
    print(result["summary"])
    print("-" * 80)

    # Example 3: Multiple custom LLMs
    llms = {
        "technical": CustomLLM(model_name="technical-writer", temperature=0.2),
        "creative": CustomLLM(model_name="creative-writer", temperature=0.9),
    }

    # Technical analysis
    technical_result = detailed_research(
        query="How do neural networks work?",
        llms=llms,
        provider="technical",
        search_tool="arxiv",
    )

    print("\nTechnical analysis:")
    print(technical_result["summary"])
    print("-" * 80)

    # Creative exploration
    creative_result = quick_summary(
        query="What are the philosophical implications of AI?",
        llms=llms,
        provider="creative",
        search_tool="wikipedia",
    )

    print("\nCreative exploration:")
    print(creative_result["summary"])


if __name__ == "__main__":
    main()
