ALVHB95 committed on
Commit 4083e28 · 1 Parent(s): c630f6a
Files changed (1)
  1. app.py +44 -23
app.py CHANGED
@@ -24,14 +24,15 @@ from langchain.prompts.prompt import PromptTemplate
from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, MessagesPlaceholder
from langchain.document_loaders import PyPDFDirectoryLoader
-
+ from pydantic import BaseModel, Field
+ from langchain.output_parsers import PydanticOutputParser
from langchain_community.llms import HuggingFaceHub

from pydantic import BaseModel
import shutil

# Cell 1: Image Classification Model
- image_pipeline = pipeline(task="image-classification", model="rocioadlc/EfficientNetV2L_TFM")
+ image_pipeline = pipeline(task="image-classification", model="microsoft/resnet-50")

def predict_image(input_img):
    predictions = image_pipeline(input_img)
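Note on the image-classification swap above: a transformers image-classification pipeline returns a list of {"label": ..., "score": ...} dicts, which maps directly onto the dict that gr.Label expects. A minimal standalone sketch under that assumption (the return-value handling is illustrative, not the exact body of predict_image in this commit):

from transformers import pipeline

# Same model as in the new version of the file
image_pipeline = pipeline(task="image-classification", model="microsoft/resnet-50")

def predict_image(input_img):
    # e.g. [{"label": "carton", "score": 0.93}, ...]
    predictions = image_pipeline(input_img)
    # gr.Label accepts a {label: confidence} dict
    return {p["label"]: p["score"] for p in predictions}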
@@ -69,26 +70,32 @@ vectordb = Chroma.from_documents(
)
# define retriever
retriever = vectordb.as_retriever(search_kwargs={"k": 1}, search_type="mmr")
- prompt_template = """
- Your name is AngryGreta and you are a recycling chatbot with the objective to anwer questions from user in English or Spanish /
- Use the following pieces of context to answer the question if the question is related with recycling /
- Answer in the same language of the question /
- Always say "thanks for asking!" at the end of the answer /
- If the context is not relevant, please answer the question by using your own knowledge about the topic.
-
- context: {context}
- question: {question}

+ class FinalAnswer(BaseModel):
+     question: str = Field(description="the original question")
+     answer: str = Field(description="the extracted answer")
+
+ # Parser that turns the model output into a FinalAnswer object
+ parser = PydanticOutputParser(pydantic_object=FinalAnswer)
+
+ template = """
+ Your name is AngryGreta and you are a recycling chatbot with the objective to answer queries from the user in English or Spanish /
+ Use the following pieces of context to answer the question /
+ Answer in the same language as the question /
+ Context: {context}
+ Chat history: {chat_history}
+ User: {query}
+ {format_instructions}
"""

# Create the chat prompt templates
- system_prompt = SystemMessagePromptTemplate.from_template(prompt_template)
+ sys_prompt = SystemMessagePromptTemplate.from_template(template)
qa_prompt = ChatPromptTemplate(
- messages=[
-     system_prompt,
-     MessagesPlaceholder(variable_name="chat_history"),
-     HumanMessagePromptTemplate.from_template("{question}")
- ]
+ messages=[
+     sys_prompt,
+     MessagesPlaceholder(variable_name="chat_history"),
+     HumanMessagePromptTemplate.from_template("{question}")],
+ partial_variables={"format_instructions": parser.get_format_instructions()}
)
llm = HuggingFaceHub(
    repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
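Note on the output parser introduced above: parser.get_format_instructions() produces the JSON-schema instructions that the partial_variables entry substitutes for {format_instructions} in the template, and parser.parse() converts a conforming model reply back into a FinalAnswer object. A self-contained sketch under those assumptions (the sample reply string is invented for illustration):

from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser

class FinalAnswer(BaseModel):
    question: str = Field(description="the original question")
    answer: str = Field(description="the extracted answer")

parser = PydanticOutputParser(pydantic_object=FinalAnswer)

# The text substituted for {format_instructions} in the prompt template
print(parser.get_format_instructions())

# A reply that follows those instructions parses straight into the model
reply = '{"question": "How do I recycle glass?", "answer": "Rinse it and drop it in the glass container."}'
print(parser.parse(reply).answer)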
@@ -101,23 +108,37 @@ llm = HuggingFaceHub(
    },
)

- memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", input_key='question', output_key='answer', return_messages=True)
+ memory = ConversationBufferMemory(llm=llm, memory_key="chat_history", input_key='query', output_key='output', return_messages=True)

qa_chain = ConversationalRetrievalChain.from_llm(
    llm = llm,
+     condense_question_prompt = qa_prompt,
    memory = memory,
    retriever = retriever,
    verbose = True,
    combine_docs_chain_kwargs={'prompt': qa_prompt},
    get_chat_history = lambda h : h,
-     rephrase_question = False,
-     output_key = 'answer'
+     rephrase_question = True,
+     output_key = 'output',
)

- def chat_interface(question,history):
-     result = qa_chain.invoke({"question": question})
-     return result['answer']
+ def chat_interface(question):
+     result = qa_chain.invoke({'query': question, 'context': retriever})
+     output_string = result['output']
+
+     # Find the index of the last occurrence of "answer": in the string
+     answer_index = output_string.rfind('"answer":')
+
+     # Extract the substring starting from the "answer": index
+     answer_part = output_string[answer_index + len('"answer":'):].strip()
+
+     # Find the next occurrence of a double quote to get the start of the answer value
+     quote_index = answer_part.find('"')
+
+     # Extract the answer value between double quotes
+     answer_value = answer_part[quote_index + 1:answer_part.find('"', quote_index + 1)]
+
+     return answer_value

chatbot_gradio_app = gr.ChatInterface(
    fn=chat_interface,
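Note on the new chat_interface: because the chain is now prompted to emit FinalAnswer-shaped JSON, the answer can also be recovered by reusing the parser defined earlier instead of slicing the string by hand. A hedged alternative sketch, not part of this commit; it also accepts the history argument that gr.ChatInterface passes to its fn:

def chat_interface(question, history):
    # gr.ChatInterface calls fn(message, history)
    result = qa_chain.invoke({'query': question, 'context': retriever})
    output_string = result['output']
    try:
        # Reuse the PydanticOutputParser to read the JSON reply
        return parser.parse(output_string).answer
    except Exception:
        # Fall back to the raw model output if it is not valid JSON
        return output_string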
 