added gemma
app.py CHANGED
```diff
@@ -9,6 +9,7 @@ import json
 
 retrieve_results = 10
 show_examples = False
+llm_models_to_choose = ['mistralai/Mixtral-8x7B-Instruct-v0.1','mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None']
 
 generate_kwargs = dict(
     temperature = None,
@@ -56,11 +57,16 @@ def rag_cleaner(inp):
     date = inp['document_metadata']['_time']
     return f"{rank}. <b> {title} </b> \n Date : {date} \n Abstract: {content}"
 
-def get_prompt_text(question, context, formatted = True):
+def get_prompt_text(question, context, formatted = True, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
     if formatted:
         sys_instruction = f"Context:\n {context} \n Given the following scientific paper abstracts, take a deep breath and lets think step by step to answer the question. Cite the titles of your sources when answering, do not cite links or dates."
         message = f"Question: {question}"
-        return f"<s>" + f"[INST] {sys_instruction}" + f" {message}[/INST]"
+        if 'mistralai' in llm_model_picked:
+            return f"<s>" + f"[INST] {sys_instruction}" + f" {message}[/INST]"
+
+        elif 'gemma' in llm_model_picked:
+            return f"<bos><start_of_turn>user\n{sys_instruction}" + f" {message}<end_of_turn>\n"
+
     return f"Context:\n {context} \n Given the following info, take a deep breath and lets think step by step to answer the question: {question}. Cite the titles of your sources when answering.\n\n"
 
 def get_references(question, retriever, k = retrieve_results):
```
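The new `llm_model_picked` argument routes the same instruction through each model family's chat template: Mistral-Instruct wraps the turn in `<s>[INST] ... [/INST]`, while Gemma-IT marks explicit turns with `<bos><start_of_turn>user ... <end_of_turn>`. A minimal standalone sketch of that routing is below; `build_prompt` is a hypothetical helper for illustration, not part of app.py:

```python
# Minimal sketch of the template routing added in this commit.
# build_prompt is a hypothetical helper, not part of app.py.
def build_prompt(question: str, context: str, model_id: str) -> str:
    sys_instruction = f"Context:\n {context} \n Answer from the abstracts and cite the titles of your sources."
    message = f"Question: {question}"
    if 'mistralai' in model_id:
        # Mistral-Instruct format: a single turn wrapped in [INST] tags.
        return f"<s>[INST] {sys_instruction} {message}[/INST]"
    elif 'gemma' in model_id:
        # Gemma-IT format: explicit user-turn markers instead of [INST] tags.
        return f"<bos><start_of_turn>user\n{sys_instruction} {message}<end_of_turn>\n"
    # Fallback (e.g. the 'None' choice): plain unformatted prompt.
    return f"{sys_instruction}\n{message}\n"

print(build_prompt("What is Mistral?", "some abstracts", "google/gemma-7b-it"))
```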
```diff
@@ -76,7 +82,7 @@ with gr.Blocks(theme = gr.themes.Soft()) as demo:
     msg = gr.Textbox(label = 'Search', placeholder = 'What is Mistral?')
     with gr.Accordion("Advanced Settings", open=False):
         with gr.Row(equal_height = True):
-            llm_model = gr.Dropdown(choices =
+            llm_model = gr.Dropdown(choices = llm_models_to_choose, value = 'mistralai/Mistral-7B-Instruct-v0.2', label = 'LLM Model')
             llm_results = gr.Slider(minimum=4, maximum=10, value=5, step=1, interactive=True, label="Top n results as context")
             stream_results = gr.Checkbox(value = True, label = "Stream output")
 
```
```diff
@@ -84,7 +90,7 @@ with gr.Blocks(theme = gr.themes.Soft()) as demo:
     input = gr.Textbox(show_label = False, visible = False)
     gr_md = gr.Markdown(mark_text + md_text_initial)
 
-    def update_with_rag_md(message, llm_results_use = 5):
+    def update_with_rag_md(message, llm_results_use = 5, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2'):
         rag_out = get_rag(message)
         md_text_updated = mark_text
         for i in range(retrieve_results):
@@ -99,7 +105,7 @@ with gr.Blocks(theme = gr.themes.Soft()) as demo:
             authors_formatted = f'*{authors}*' + ' \n\n'
 
             md_text_updated += paper_title + authors_formatted + paper_abs + '\n---------------\n'+ '\n'
-        prompt = get_prompt_text(message, '\n\n'.join(rag_cleaner(out) for out in rag_out[:llm_results_use]))
+        prompt = get_prompt_text(message, '\n\n'.join(rag_cleaner(out) for out in rag_out[:llm_results_use]), llm_model_picked = llm_model_picked)
         return md_text_updated, prompt
 
     def ask_llm(prompt, llm_model_picked = 'mistralai/Mistral-7B-Instruct-v0.2', stream_outputs = False):
@@ -131,6 +137,6 @@ with gr.Blocks(theme = gr.themes.Soft()) as demo:
         return stream
 
 
-    msg.submit(update_with_rag_md, [msg, llm_results], [gr_md, input]).success(ask_llm, [input, llm_model, stream_results], output_text)
+    msg.submit(update_with_rag_md, [msg, llm_results, llm_model], [gr_md, input]).success(ask_llm, [input, llm_model, stream_results], output_text)
 
 demo.queue(default_concurrency_limit=10).launch()
```
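With the dropdown added to the `msg.submit` inputs, the selected model ID flows into `update_with_rag_md`, and `.success(...)` chains `ask_llm` on the hidden `input` textbox only once the prompt has been built. A minimal sketch of the same chaining pattern follows; the component names and callbacks are illustrative stand-ins, not the app's real ones:

```python
# Minimal sketch of the submit/success chaining pattern used above.
# Callbacks here are illustrative stand-ins for update_with_rag_md / ask_llm.
import gradio as gr

def build(query, model_id):
    # Stand-in for update_with_rag_md: returns (markdown, prompt).
    return f"**Results for:** {query}", f"[{model_id}] {query}"

def answer(prompt):
    # Stand-in for ask_llm: would call the selected model here.
    return f"LLM answer for: {prompt}"

with gr.Blocks() as demo:
    msg = gr.Textbox(label='Search')
    model = gr.Dropdown(choices=['mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it'],
                        value='mistralai/Mistral-7B-Instruct-v0.2', label='LLM Model')
    md = gr.Markdown()
    hidden_prompt = gr.Textbox(visible=False)
    out = gr.Textbox(label='Answer')
    # The first event fills the markdown and the hidden prompt; .success()
    # fires the second event only if the first completed without error.
    msg.submit(build, [msg, model], [md, hidden_prompt]).success(answer, [hidden_prompt], out)

demo.launch()
```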