cnmoro committed on
Commit
1c04199
1 Parent(s): 90f8e12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -127,6 +127,7 @@ def retrieval_pipeline(query, question):
127
 
128
  return retrieved_contents, websearch_time, webcrawl_time, embedding_time, retrieval_time, links
129
 
 
130
  async def predict(message, history):
131
  # message is in format: "Search: <query>; Question: <question>"
132
  # we need to parse both parts into variables
@@ -141,12 +142,12 @@ async def predict(message, history):
141
  context = ""
142
  for title, content in retrieved_contents.items():
143
  context += f'Artigo "{title}"\nConteúdo:\n{content}\n\n'
144
- prompt = f'{context.strip()}\n\nBaseado nos conteúdos dos artigos, responda: "{message}"\n\nPor favor, mencione a fonte da sua resposta.'
145
  else:
146
  context = ""
147
  for title, content in retrieved_contents.items():
148
  context += f'Article "{title}"\nContent:\n{content}\n\n'
149
- prompt = f'{context.strip()}\n\nBased on the article\'s contents, answer: "{message}"\n\nPlease, mention the source of your answer.'
150
 
151
  print(prompt)
152
 
@@ -156,7 +157,8 @@ async def predict(message, history):
156
  body = { "stream": True,
157
  "models": [
158
  "mistralai/mistral-7b-instruct:free",
159
- "openchat/openchat-7b:free"
 
160
  ],
161
  "route": "fallback",
162
  "max_tokens": 1024,
 
127
 
128
  return retrieved_contents, websearch_time, webcrawl_time, embedding_time, retrieval_time, links
129
 
130
+
131
  async def predict(message, history):
132
  # message is in format: "Search: <query>; Question: <question>"
133
  # we need to parse both parts into variables
 
142
  context = ""
143
  for title, content in retrieved_contents.items():
144
  context += f'Artigo "{title}"\nConteúdo:\n{content}\n\n'
145
+ prompt = f'{context.strip()}\n\nBaseado nos conteúdos dos artigos, responda: "{question}"\n\nPor favor, mencione a fonte da sua resposta.'
146
  else:
147
  context = ""
148
  for title, content in retrieved_contents.items():
149
  context += f'Article "{title}"\nContent:\n{content}\n\n'
150
+ prompt = f'{context.strip()}\n\nBased on the article\'s contents, answer: "{question}"\n\nPlease, mention the source of your answer.'
151
 
152
  print(prompt)
153
 
 
157
  body = { "stream": True,
158
  "models": [
159
  "mistralai/mistral-7b-instruct:free",
160
+ "openchat/openchat-7b:free",
161
+ "huggingfaceh4/zephyr-7b-beta:free"
162
  ],
163
  "route": "fallback",
164
  "max_tokens": 1024,