"""Question answering over a graph.""" | |
from __future__ import annotations | |
from typing import Any, Dict, List, Optional | |
from langchain_core.language_models import BaseLanguageModel | |
from langchain_core.prompts import BasePromptTemplate | |
from langchain_core.pydantic_v1 import Field | |
from langchain.callbacks.manager import CallbackManagerForChainRun | |
from langchain.chains.base import Chain | |
from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, GRAPH_QA_PROMPT | |
from langchain.chains.llm import LLMChain | |
from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities | |
class GraphQAChain(Chain):
    """Chain for question-answering against a graph.

    *Security note*: Make sure that the database connection uses credentials
    that are narrowly-scoped to only include necessary permissions.
    Failure to do so may result in data corruption or loss, since the calling
    code may attempt commands that would result in deletion, mutation
    of data if appropriately prompted or reading sensitive data if such
    data is present in the database.
    The best way to guard against such negative outcomes is to (as appropriate)
    limit the permissions granted to the credentials used with this tool.

    See https://python.langchain.com/docs/security for more information.
    """

    # Excluded from serialization: the graph can hold large in-memory state.
    graph: NetworkxEntityGraph = Field(exclude=True)
    # LLM chain that pulls entity names out of the user's question.
    entity_extraction_chain: LLMChain
    # LLM chain that answers the question given the retrieved graph context.
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    # NOTE: `Chain` declares `input_keys`/`output_keys` as abstract
    # properties, so the `@property` decorators are required here.
    @property
    def input_keys(self) -> List[str]:
        """Input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Output keys.

        :meta private:
        """
        return [self.output_key]

    # `@classmethod` is required: without it, calling
    # `GraphQAChain.from_llm(llm)` would bind `llm` to `cls`.
    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        qa_prompt: BasePromptTemplate = GRAPH_QA_PROMPT,
        entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT,
        **kwargs: Any,
    ) -> GraphQAChain:
        """Initialize from LLM.

        Args:
            llm: Language model used for both entity extraction and QA.
            qa_prompt: Prompt for answering from the graph context.
            entity_prompt: Prompt for extracting entities from the question.
            **kwargs: Extra fields forwarded to the chain constructor
                (must include ``graph``).
        """
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        entity_chain = LLMChain(llm=llm, prompt=entity_prompt)
        return cls(
            qa_chain=qa_chain,
            entity_extraction_chain=entity_chain,
            **kwargs,
        )

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Extract entities, look up info and answer question.

        Args:
            inputs: Mapping containing the question under ``self.input_key``.
            run_manager: Optional callback manager; a no-op one is used when
                absent.

        Returns:
            Mapping with the QA chain's answer under ``self.output_key``.
        """
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs[self.input_key]
        entity_string = self.entity_extraction_chain.run(question)
        _run_manager.on_text("Entities Extracted:", end="\n", verbose=self.verbose)
        _run_manager.on_text(
            entity_string, color="green", end="\n", verbose=self.verbose
        )
        entities = get_entities(entity_string)
        # Gather every triplet the graph knows about each extracted entity;
        # the joined triplets become the context for the QA prompt.
        all_triplets = []
        for entity in entities:
            all_triplets.extend(self.graph.get_entity_knowledge(entity))
        context = "\n".join(all_triplets)
        _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
        _run_manager.on_text(context, color="green", end="\n", verbose=self.verbose)
        result = self.qa_chain(
            {"question": question, "context": context},
            callbacks=_run_manager.get_child(),
        )
        return {self.output_key: result[self.qa_chain.output_key]}