momegas committed on
Commit
4978dd5
1 Parent(s): ece814c

✍️ Add sources option in `bot`

Browse files
Files changed (2) hide show
  1. README.md +1 -0
  2. megabots/__init__.py +8 -1
README.md CHANGED
@@ -83,6 +83,7 @@ The `bot` function should serve as the starting point for creating and customisi
83
  | model | The name of the model to use for the bot. You can specify a different model by providing its name, like "text-davinci-003". Supported models: `gpt-3.5-turbo` (default),`text-davinci-003` More coming soon. |
84
  | prompt_template | A string template for the prompt, which defines the format of the question and context passed to the model. The template should include placeholders for the variables specified in `prompt_variables`. |
85
  | prompt_variables | A list of variables to be used in the prompt template. These variables are replaced with actual values when the bot processes a query. |
 
86
 
87
  ### How QnA bot works
88
 
 
83
  | model | The name of the model to use for the bot. You can specify a different model by providing its name, like "text-davinci-003". Supported models: `gpt-3.5-turbo` (default),`text-davinci-003` More coming soon. |
84
  | prompt_template | A string template for the prompt, which defines the format of the question and context passed to the model. The template should include placeholders for the variables specified in `prompt_variables`. |
85
  | prompt_variables | A list of variables to be used in the prompt template. These variables are replaced with actual values when the bot processes a query. |
86
+ | sources | When `sources` is `True`, the bot will also include sources in the response. A known [issue](https://github.com/hwchase17/langchain/issues/2858) exists where, if you pass a custom prompt with sources, the code breaks. |
87
 
88
  ### How QnA bot works
89
 
megabots/__init__.py CHANGED
@@ -37,13 +37,14 @@ class Bot:
37
 
38
  # Load the question-answering chain for the selected model
39
  self.chain = self.create_chain(
40
- prompt_template, prompt_variables, verbose=verbose
41
  )
42
 
43
  def create_chain(
44
  self,
45
  prompt_template: str | None = None,
46
  prompt_variables: list[str] | None = None,
 
47
  verbose: bool = False,
48
  ):
49
  prompt = (
@@ -51,6 +52,12 @@ class Bot:
51
  if prompt_template is not None and prompt_variables is not None
52
  else QA_PROMPT
53
  )
 
 
 
 
 
 
54
  return load_qa_chain(
55
  self.llm, chain_type="stuff", verbose=verbose, prompt=prompt
56
  )
 
37
 
38
  # Load the question-answering chain for the selected model
39
  self.chain = self.create_chain(
40
+ prompt_template, prompt_variables, sources=sources, verbose=verbose
41
  )
42
 
43
  def create_chain(
44
  self,
45
  prompt_template: str | None = None,
46
  prompt_variables: list[str] | None = None,
47
+ sources: bool | None = False,
48
  verbose: bool = False,
49
  ):
50
  prompt = (
 
52
  if prompt_template is not None and prompt_variables is not None
53
  else QA_PROMPT
54
  )
55
+ # TODO: Changing the prompt here is not working. Leave it as is for now.
56
+ # Reference: https://github.com/hwchase17/langchain/issues/2858
57
+ if sources:
58
+ return load_qa_with_sources_chain(
59
+ self.llm, chain_type="stuff", verbose=verbose
60
+ )
61
  return load_qa_chain(
62
  self.llm, chain_type="stuff", verbose=verbose, prompt=prompt
63
  )