ryanrwatkins committed on
Commit
23b9158
1 Parent(s): fd55ba5

Update app.py

Files changed (1)
  1. app.py +3 -63
app.py CHANGED
@@ -712,7 +712,7 @@ chain = ConversationalRetrievalChain.from_llm(
         LLM_provider="Google",api_key=google_api_key,temperature=0.5,
         model_name="gemini-pro"),
     chain_type= "stuff",
-    verbose= False,
+    verbose= True,
     return_source_documents=True
     )
 
@@ -730,66 +730,6 @@ follow_up_question = "plaese give more details about it, including its use cases
 chain.invoke({"question":follow_up_question})['answer']
 """
 
-from langchain.chains import ConversationalRetrievalChain
-
-def create_ConversationalRetrievalChain(
-    llm,condense_question_llm,
-    retriever,
-    chain_type= 'stuff',
-    language="english",
-    model_name='gemini-pro'
-    #model_name='gpt-3.5-turbo'
-    ):
-    """Create a ConversationalRetrievalChain.
-    First, it passes the follow-up question along with the chat history to an LLM which rephrases
-    the question and generates a standalone query.
-    This query is then sent to the retriever, which fetches relevant documents (context)
-    and passes them along with the standalone question and chat history to an LLM to answer.
-    """
-
-    # 1. Define the standalone_question prompt.
-    # Pass the follow-up question along with the chat history to the `condense_question_llm`
-    # which rephrases the question and generates a standalone question.
-
-    standalone_question_prompt = PromptTemplate(
-        input_variables=['chat_history', 'question'],
-        template="""Given the following conversation and a follow up question,
-        rephrase the follow up question to be a standalone question, in its original language.\n\n
-        Chat History:\n{chat_history}\n
-        Follow Up Input: {question}\n
-        Standalone question:""")
-
-    # 2. Define the answer_prompt
-    # Pass the standalone question + the chat history + the context (retrieved documents) to the `LLM` wihch will answer
-
-    answer_prompt = ChatPromptTemplate.from_template(answer_template(language=language))
-
-    # 3. Add ConversationSummaryBufferMemory for gpt-3.5, and ConversationBufferMemory for the other models
-
-    memory = create_memory(model_name)
-
-    # 4. Create the ConversationalRetrievalChain
-
-    chain = ConversationalRetrievalChain.from_llm(
-        condense_question_prompt=standalone_question_prompt,
-        combine_docs_chain_kwargs={'prompt': answer_prompt},
-        condense_question_llm=condense_question_llm,
-
-        memory=memory,
-        retriever = retriever,
-        #llm=llm,
-        llm=instantiate_LLM(
-            LLM_provider="Google",api_key=google_api_key,temperature=0.5,
-            model_name="gemini-pro"),
-        chain_type= chain_type,
-        verbose= False,
-        return_source_documents=True
-    )
-
-    print("Conversational retriever chain created successfully!")
-
-    return chain,memory
-
 
 
 """
@@ -814,9 +754,9 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
     # return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], state
 
     #prompt_template = prompt_templates[prompt_template]
-
+    print(prompt_template)
     print(prompt_templates[prompt_template])
-    print(prompt_templates)
+
 
 
     completion = chain.invoke({"question":prompt})
 
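For context, a minimal usage sketch (not part of the commit) of the two-step flow the removed helper's docstring describes: the chain first condenses the follow-up question and the chat history into a standalone query, retrieves documents with it, and then answers from that context. It assumes `chain` is the ConversationalRetrievalChain that app.py still builds inline, with memory, a retriever, verbose=True, and return_source_documents=True; the sample questions are illustrative only.

# `chain` is assumed to be the ConversationalRetrievalChain built earlier in app.py.
first = chain.invoke({"question": "What is the document about?"})
print(first["answer"])

# The follow-up leans on the chat memory: the condense-question step rewrites "it"
# into a standalone query before retrieval.
follow_up = chain.invoke({"question": "please give more details about it, including its use cases"})
print(follow_up["answer"])

# With return_source_documents=True the retrieved context comes back alongside the answer.
for doc in follow_up["source_documents"]:
    print(doc.metadata.get("source"), doc.page_content[:120])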