mtyrrell committed on
Commit
ffd98eb
1 Parent(s): 5d23ddc

tweak to prompt

Browse files
appStore/__pycache__/rag.cpython-310.pyc CHANGED
Binary files a/appStore/__pycache__/rag.cpython-310.pyc and b/appStore/__pycache__/rag.cpython-310.pyc differ
 
appStore/__pycache__/target.cpython-310.pyc CHANGED
Binary files a/appStore/__pycache__/target.cpython-310.pyc and b/appStore/__pycache__/target.cpython-310.pyc differ
 
appStore/rag.py CHANGED
@@ -14,9 +14,9 @@ model_select = "gpt-3.5-turbo-1106"
14
 
15
 
16
  # define a special function for putting the prompt together (as we can't use haystack)
17
- def get_prompt(context):
18
  base_prompt="Summarize the following context efficiently in bullet points, the less the better. \
19
- Summarize only activities that address the vulnerability of the given context to climate change. \
20
  Formatting example: \
21
  - Collect and utilize gender-disaggregated data to inform and improve climate change adaptation efforts. \
22
  - Prioritize gender sensitivity in adaptation options, ensuring participation and benefits for women, who are more vulnerable to climate impacts. \
@@ -53,9 +53,7 @@ def completion_with_backoff(**kwargs):
53
 
54
 
55
  # construct RAG query, send to openai and process response
56
- def run_query(df):
57
- docs = df
58
-
59
  '''
60
  For non-streamed completion, enable the following 2 lines and comment out the code below
61
  '''
@@ -63,7 +61,7 @@ def run_query(df):
63
  # result = res.choices[0].message.content
64
 
65
  # instantiate ChatCompletion as a generator object (stream is set to True)
66
- response = completion_with_backoff(model=model_select, messages=[{"role": "user", "content": get_prompt(docs)}], stream=True)
67
  # iterate through the streamed output
68
  report = []
69
  res_box = st.empty()
 
14
 
15
 
16
  # define a special function for putting the prompt together (as we can't use haystack)
17
+ def get_prompt(context, label):
18
  base_prompt="Summarize the following context efficiently in bullet points, the less the better. \
19
+ Summarize only activities that address the vulnerability of "+label+" to climate change. \
20
  Formatting example: \
21
  - Collect and utilize gender-disaggregated data to inform and improve climate change adaptation efforts. \
22
  - Prioritize gender sensitivity in adaptation options, ensuring participation and benefits for women, who are more vulnerable to climate impacts. \
 
53
 
54
 
55
  # construct RAG query, send to openai and process response
56
+ def run_query(context, label):
 
 
57
  '''
58
  For non-streamed completion, enable the following 2 lines and comment out the code below
59
  '''
 
61
  # result = res.choices[0].message.content
62
 
63
  # instantiate ChatCompletion as a generator object (stream is set to True)
64
+ response = completion_with_backoff(model=model_select, messages=[{"role": "user", "content": get_prompt(context, label)}], stream=True)
65
  # iterate through the streamed output
66
  report = []
67
  res_box = st.empty()
appStore/target.py CHANGED
@@ -102,7 +102,7 @@ def target_display():
102
  # construct RAG query for each label, send to openai and process response
103
  for i in range(0,len(df_agg)):
104
  st.write(df_agg['Vulnerability Label'].iloc[i])
105
- run_query(df_agg['text'].iloc[i])
106
  # st.write(df_agg['text'].iloc[i])
107
 
108
 
 
102
  # construct RAG query for each label, send to openai and process response
103
  for i in range(0,len(df_agg)):
104
  st.write(df_agg['Vulnerability Label'].iloc[i])
105
+ run_query(context = df_agg['text'].iloc[i], label = df_agg['Vulnerability Label'].iloc[i])
106
  # st.write(df_agg['text'].iloc[i])
107
 
108