Spaces:
Running
Running
sivan22
committed on
Commit
·
956d65c
1
Parent(s):
47f72d8
updated prompt
Browse files
app.py
CHANGED
@@ -46,20 +46,30 @@ def get_results(embeddings_model,input,df,num_of_results) -> pd.DataFrame:
|
|
46 |
def get_llm_results(query,chat,results):
|
47 |
|
48 |
prompt_template = PromptTemplate.from_template(
|
49 |
-
|
|
|
|
|
|
|
|
|
50 |
the question is: {query}
|
|
|
51 |
the possible answers are:
|
52 |
{answers}
|
53 |
|
54 |
""" )
|
55 |
|
56 |
messages = [
|
57 |
-
SystemMessage(content="
|
|
|
|
|
|
|
58 |
HumanMessage(content=prompt_template.format(query=query, answers=str.join('\n', results['text'].head(10).tolist()))),
|
59 |
]
|
60 |
|
61 |
response = chat.invoke(messages)
|
62 |
llm_results_df = pd.read_json(response.content, orient='index')
|
|
|
|
|
63 |
return llm_results_df
|
64 |
|
65 |
|
@@ -88,9 +98,13 @@ def run():
|
|
88 |
results = get_results(embeddings_model,user_input,df,num_of_results)
|
89 |
|
90 |
if use_llm:
|
91 |
-
|
92 |
-
|
93 |
-
|
|
|
|
|
|
|
|
|
94 |
|
95 |
else:
|
96 |
st.write(results[['siman','sek','text']].head(10))
|
|
|
46 |
def get_llm_results(query,chat,results):
|
47 |
|
48 |
prompt_template = PromptTemplate.from_template(
|
49 |
+
"""
|
50 |
+
your mission is to rank the given answers based on their relevance to the given question.
|
51 |
+
Provide a relevancy score between 0 (not relevant) and 1 (highly relevant) for each possible answer.
|
52 |
+
the results should be in the following JSON format: "answer": "score", "answer": "score" while answer is the possible answer's text and score is the relevancy score.
|
53 |
+
|
54 |
the question is: {query}
|
55 |
+
|
56 |
the possible answers are:
|
57 |
{answers}
|
58 |
|
59 |
""" )
|
60 |
|
61 |
messages = [
|
62 |
+
SystemMessage(content="""
|
63 |
+
You're a helpful assistant.
|
64 |
+
Return a JSON formatted string.
|
65 |
+
"""),
|
66 |
HumanMessage(content=prompt_template.format(query=query, answers=str.join('\n', results['text'].head(10).tolist()))),
|
67 |
]
|
68 |
|
69 |
response = chat.invoke(messages)
|
70 |
llm_results_df = pd.read_json(response.content, orient='index')
|
71 |
+
llm_results_df.rename(columns={0: 'score'}, inplace=True)
|
72 |
+
llm_results_df.sort_values(by='score', ascending=False, inplace=True)
|
73 |
return llm_results_df
|
74 |
|
75 |
|
|
|
98 |
results = get_results(embeddings_model,user_input,df,num_of_results)
|
99 |
|
100 |
if use_llm:
|
101 |
+
if openAikey == None or openAikey=="":
|
102 |
+
st.write("לא הוכנס מפתח של OpenAI")
|
103 |
+
|
104 |
+
else:
|
105 |
+
chat = get_chat_api(openAikey)
|
106 |
+
llm_results = get_llm_results(user_input,chat,results)
|
107 |
+
st.write(llm_results)
|
108 |
|
109 |
else:
|
110 |
st.write(results[['siman','sek','text']].head(10))
|