aliss77777 committed on
Commit
21804d2
1 Parent(s): 62b34c2

Deploy Chainlit app to Hugging Face Spaces

Files changed (10)
  1. .DS_Store +0 -0
  2. Dockerfile +17 -0
  3. README.md +35 -11
  4. app.py +346 -0
  5. assistant_tools.py +81 -0
  6. chainlit.md +16 -0
  7. helper_functions.py +217 -0
  8. prompts.py +217 -0
  9. requirements.txt +231 -0
  10. utils.py +79 -0
.DS_Store ADDED
Binary file (6.15 kB)
 
Dockerfile ADDED
@@ -0,0 +1,17 @@
1
+ # Use an official Python runtime as a parent image
2
+ FROM python:3.10-slim
3
+
4
+ # Set the working directory in the container
5
+ WORKDIR /climate-change-assistant
6
+
7
+ # Copy the current directory contents into the container
8
+ COPY . /climate-change-assistant
9
+
10
+ # Install any needed packages specified in requirements.txt
11
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
12
+
13
+ # Following instructions within HF docker file setup
14
+ COPY . .
15
+
16
+ # Run app.py when the container launches
17
+ CMD ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]
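A quick local test of this image might look like `docker build -t climate-change-assistant .` followed by `docker run -p 7860:7860 --env-file .env climate-change-assistant` (the image name is arbitrary; port 7860 matches the `CMD` above).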
README.md CHANGED
@@ -1,11 +1,35 @@
1
- ---
2
- title: Scenesfromtomorrow
3
- emoji: 🐢
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
+ This Chainlit app uses an [OpenAI Assistant](https://platform.openai.com/docs/assistants/overview) and the amazing [Probable Futures](https://probablefutures.org/) API to provide
2
+ climate change information for a location, along with some helpful resources for how to prepare.
3
+
4
+ The Chainlit app is based on the cookbook example [here](https://github.com/Chainlit/cookbook/tree/main/openai-assistant).
5
+
6
+ ![screenshot](./images/screenshot.png)
7
+
8
+ # Setup
9
+
10
+ You will need a Probable Futures API key; see [here](https://docs.probablefutures.org/api-access/) for more details. You will also need an [OpenAI key](https://platform.openai.com/docs/quickstart?context=python).
11
+
12
+ Set up a Conda environment ...
13
+
14
+ 1. Install [miniconda](https://docs.conda.io/en/latest/miniconda.html) by selecting the installer that fits your OS version. Once it is installed, you may have to restart your terminal (close it and open it again)
15
+ 2. Open a terminal in this directory
16
+ 3. `conda env create -f environment.yml`
17
+ 4. `conda activate climate-env`
18
+
19
+ Once you have these, then ...
20
+
21
+ 1. Copy `.env.example` to `.env`
22
+ 2. Set the OpenAI key and the Probable Futures API user and secret (see the example `.env` below)
23
+ 3. Create the assistant with `python3 create_assistant.py`
24
+ 4. Copy the assistant id from the output and put it into `.env`
25
+
26
+ If you make changes to the assistant, rerun `create_assistant.py`, which will update the existing assistant.
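For reference, these are the variable names the app reads from the environment (see `app.py` and `helper_functions.py`); the values below are placeholders:

```
OPENAI_API_KEY=sk-...
ASSISTANT_ID=asst_...
CLIENT_ID=<probable-futures-client-id>
CLIENT_SECRET=<probable-futures-client-secret>
```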
27
+
28
+ # To run chainlit app
29
+
30
+ `chainlit run app.py`
31
+
32
+ # To view assistant on OpenAI
33
+
34
+ Go [here](https://platform.openai.com/assistants)
35
+
app.py ADDED
@@ -0,0 +1,346 @@
1
+ import os
2
+ import json
3
+ from typing import Dict
4
+
5
+ from openai import AsyncOpenAI
6
+ from openai.types.beta.threads.run import Run
7
+
8
+ from openai.types.beta import Thread
9
+ from openai.types.beta.threads import (
10
+ ImageFileContentBlock,
11
+ TextContentBlock,
12
+ Message,
13
+ )
14
+
15
+ import chainlit as cl
16
+ from typing import Optional
17
+ from chainlit.context import context
18
+
19
+ import assistant_tools as at
20
+ import prompts as pr
21
+ import helper_functions as hf
22
+ import datetime
23
+ import csv
24
+
25
+ from utils import DictToObject, stream_message, ask_to_continue, process_thread_message
26
+
27
+ api_key = os.environ.get("OPENAI_API_KEY")
28
+ client = AsyncOpenAI(api_key=api_key)
29
+ assistant_id = os.environ.get("ASSISTANT_ID")
30
+
31
+
32
+ @cl.on_chat_start
33
+ async def start_chat():
34
+ thread = await client.beta.threads.create()
35
+ cl.user_session.set("thread", thread)
36
+ await cl.Message(author="Climate Change Assistant", content=pr.welcome_message).send()
37
+
38
+
39
+ @cl.on_message
40
+ async def run_conversation(message_from_ui: cl.Message):
41
+ count = 0
42
+ thread = cl.user_session.get("thread") # type: Thread
43
+ # Add the message to the thread
44
+
45
+ init_message = await client.beta.threads.messages.create(
46
+ thread_id=thread.id, role="user", content=message_from_ui.content
47
+ )
48
+
49
+ # Send empty message to display the loader
50
+ loader_msg = cl.Message(author="Climate Change Assistant", content="")
51
+ await loader_msg.send()
52
+
53
+ # Create the run
54
+ run = await client.beta.threads.runs.create_and_poll(
55
+ thread_id=thread.id, assistant_id=assistant_id
56
+ )
57
+
58
+ message_references = {} # type: Dict[str, cl.Message]
59
+
60
+ # Periodically check for updates
61
+ #running = True
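+ # Poll the run in a loop: message_creation steps are streamed to the UI, and tool_calls steps
+ # trigger the Probable Futures lookups, image generation, and follow-up Q&A below before the
+ # tool outputs are submitted back to the run. The loop exits once the run reaches a terminal status.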
62
+ while True:
63
+ print('starting while True loop')
64
+ print(run)
65
+ run = await client.beta.threads.runs.retrieve(
66
+ thread_id=thread.id, run_id=run.id
67
+ )
68
+
69
+ # Fetch the run steps
70
+ run_steps = await client.beta.threads.runs.steps.list(
71
+ thread_id=thread.id, run_id=run.id, order="asc"
72
+ )
73
+
74
+ for step in run_steps.data:
75
+ # Fetch step details
76
+ run_step = await client.beta.threads.runs.steps.retrieve(
77
+ thread_id=thread.id, run_id=run.id, step_id=step.id
78
+ )
79
+ step_details = run_step.step_details
80
+ # Update step content in the Chainlit UI
81
+ if step_details.type == "message_creation":
82
+ thread_message = await client.beta.threads.messages.retrieve(
83
+ message_id=step_details.message_creation.message_id,
84
+ thread_id=thread.id,
85
+ )
86
+ await process_thread_message(message_references, thread_message)
87
+
88
+ print("line 116 about the call the tools call loop")
89
+ count += 1
90
+ print(str(count))
91
+
92
+ if step_details.type == "tool_calls":
93
+ loading_message = "Retrieving information, please stand by."
94
+ loading_message_to_assistant = cl.Message(author="Climate Change Assistant", content=loading_message)
95
+ await loading_message_to_assistant.send() # output_message_to_assistant.send()
96
+
97
+ for tool_call in step_details.tool_calls:
98
+ print('top of tool call loop')
99
+
100
+ # If the tool call is a dictionary, convert it to an object
101
+ if isinstance(tool_call, dict):
102
+ print("here is a tool call at line 120")
103
+ print(tool_call)
104
+ tool_call = DictToObject(tool_call)
105
+ if tool_call.type == "function":
106
+ function = DictToObject(tool_call.function)
107
+ tool_call.function = function
108
+ if tool_call.type == "code_interpreter":
109
+ code_interpreter = DictToObject(tool_call.code_interpreter)
110
+ tool_call.code_interpreter = code_interpreter
111
+
112
+ print("here are step details at line 130")
113
+ print(step_details)
114
+ print("here is tool call at line 132")
115
+ print(tool_call)
116
+
117
+ if (
118
+ tool_call.type == "function"
119
+ and len(tool_call.function.arguments) > 0
120
+ ):
121
+ function_name = tool_call.function.name
122
+ function_args = json.loads(tool_call.function.arguments)
123
+
124
+ if not tool_call.id in message_references:
125
+ message_references[tool_call.id] = cl.Message(
126
+ author=function_name,
127
+ content=function_args,
128
+ language="json",
129
+ #parent_id=context.session.root_message.id,
130
+ )
131
+ #await message_references[tool_call.id].send()
132
+
133
+ function_mappings = {
134
+ #"get_pf_data_handbook": at.get_pf_data_handbook,
135
+ "get_pf_data_timeline": at.get_pf_data_timeline,
136
+ }
137
+
138
+ # Not sure why, but sometimes this is returned rather than name
139
+ function_name = function_name.replace("_schema", "")
140
+
141
+ print(f"FUNCTION NAME: {function_name}")
142
+ print(function_args)
143
+
144
+ if function_name == "get_pf_data_timeline":
145
+
146
+ # Extract 'address' and 'country' values
147
+ address = function_args['address']
148
+ country = function_args['country']
149
+ units = function_args.get('units', 'C') #returns the specific value for 'units' else C if blank
150
+
151
+ print(f"Address: {address}, Country: {country}, Units: {units}")
152
+
153
+ parsed_output = at.get_pf_data_timeline(address, country, '1.5', units)
154
+
155
+ if parsed_output is not None:
156
+
157
+ print(f"RUN STATUS: {run.status} from first timeline scene")
158
+ print(run)
159
+
160
+
161
+ # creating an initial output of what life is like today in that place
162
+ output = ""
163
+
164
+ loading_message_to_assistant = cl.Message(author="Climate Change Assistant", content=pr.timeline_message)
165
+ await loading_message_to_assistant.send()
166
+
167
+ # filtering the results to just show results describing average / baseline temperatures
168
+ summary = hf.story_completion(pr.one_five_degree_prompt, units, parsed_output[parsed_output.name.str.contains("10 hottest") | parsed_output.name.str.contains("Days above 35")])
169
+
170
+ next_output = await stream_message(summary, cl)
171
+
172
+ output += next_output
173
+
174
+ print(next_output) # hf.summarizer(output)
175
+ img_content, image_bytes = hf.get_image_response_SDXL(pr.image_prompt_SDXL + address + ' ' + country) #hf.summarizer(output)
176
+ #with open('feedback_logs/73ee4d67-4857-47ec-b835-5b1cfb570b20.png', 'rb') as file:
177
+ # img_content = file.read()
178
+ img = cl.Image(content=image_bytes, name="image1", display="inline", size="large") # img_content
179
+ print('\n Generating image, complete')
180
+ image_message_to_assistant = cl.Message(author="Climate Change Assistant", content=' ', elements=[img])
181
+ await image_message_to_assistant.send()
182
+
183
+ #adding button to allow user to paginate the content
184
+ res = await ask_to_continue()
185
+
186
+ while res and res.get("value") == "question":
187
+
188
+ question = await cl.AskUserMessage(content='How can I help?', timeout=600).send()
189
+
190
+ # Use this to send the output of completion request into the next OpenAI API call.
191
+ question_response = hf.summary_completion(address, country, output, question['output'])
192
+
193
+ next_output = await stream_message(question_response, cl)
194
+
195
+ output += next_output
196
+
197
+
198
+ # Call the function again instead of duplicating the code block
199
+ res = await ask_to_continue()
200
+
201
+
202
+ warming_scenario = ['2.0', '3.0']
203
+
204
+ #inpainting_keywords = ''
205
+
206
+ for i in range(len(warming_scenario)):
207
+ print(f"RUN STATUS: {run.status} from timeline scene # {i}")
208
+ print(run)
209
+
210
+ # force units to C here, because otherwise it breaks the logic for how the 2.0/3.0 image gets displayed
211
+ parsed_output = at.get_pf_data_timeline(address, country, warming_scenario[i], 'C') #units
212
+
213
+ # filtering results to talk about the change from baseline
214
+ summary = hf.story_completion(pr.timeline_prompts[i], units, parsed_output[parsed_output.name.str.contains('Change') | parsed_output.name.str.contains('Likelihood')])
215
+ next_output = await stream_message(summary, cl)
216
+
217
+ output += next_output
218
+
219
+ data_changes = parsed_output[parsed_output['name'].str.contains('Change') | parsed_output['name'].str.contains('Likelihood')].copy()
220
+ #print(data_changes)
221
+ inpainting_keywords = hf.generate_inpainting_keywords(data_changes)
222
+
223
+ img_content, image_bytes = hf.get_image_response_SDXL(prompt=pr.image_prompt_SDXL + address + ' ' + country, image_path = img_content, filtered_keywords=inpainting_keywords) #str(hf.summarizer(output))
224
+ #with open('feedback_logs/73ee4d67-4857-47ec-b835-5b1cfb570b20.png', 'rb') as file:
225
+ # img_content = file.read()
226
+ img = cl.Image(content=image_bytes, name="image1", display="inline", size="large") #img_content
227
+ print('\n generating image, complete')
228
+ image_message_to_assistant = cl.Message(author="Climate Change Assistant", content=' ', elements=[img])
229
+ await image_message_to_assistant.send()
230
+
231
+ #adding button to allow user to paginate the content
232
+ res = await ask_to_continue()
233
+
234
+ while res and res.get("value") == "question":
235
+
236
+ question = await cl.AskUserMessage(content='How can I help?', timeout=600).send()
237
+
238
+ # Use this to send the output of completion request into the next OpenAI API call.
239
+ question_response = hf.summary_completion(address, country, output, question['output'])
240
+
241
+ next_output = await stream_message(question_response, cl)
242
+
243
+ output += next_output
244
+
245
+
246
+ # Call the function again instead of duplicating the code block
247
+ res = await ask_to_continue()
248
+
249
+ #else:
250
+ # run.status = "completed"
251
+
252
+
253
+ final_message_content = hf.summary_completion(address, country, output, "Please give the user a personalized set of recommendations for how to adapt to climate change for their location and the questions they have asked (if any).")
254
+ next_output = await stream_message(final_message_content, cl)
255
+ output += next_output
256
+
257
+ # Step 1: Ask users if they'd like to offer feedback
258
+ res_want_feedback = await cl.AskActionMessage(content="Would you like to offer feedback?",
259
+ actions=[
260
+ cl.Action(name="yes", value="yes", label="✅ Yes"),
261
+ cl.Action(name="no", value="no", label="🚫 No")],
262
+ timeout=600).send()
263
+
264
+ # Only proceed if they want to give feedback
265
+ if res_want_feedback.get("value") == "yes":
266
+ # Step 2: Ask "How was your experience?"
267
+ res_feedback = await cl.AskActionMessage(content="How was your experience?",
268
+ actions=[
269
+ cl.Action(name="good", value="good", label="😀 Good. I'm ready to take action"),
270
+ cl.Action(name="IDK", value="IDK", label="😐 Not sure"),
271
+ cl.Action(name="no_good", value="no_good", label="🙁 Not good"),],
272
+ timeout=600).send()
273
+
274
+ if res_feedback.get("value") == "good":
275
+ thank_you_message = cl.Message(author="Climate Change Assistant", content="Thanks for your feedback!")
276
+ await thank_you_message.send()
277
+
278
+ # Step 3: If "no good" or "not sure," ask why
279
+ elif res_feedback.get("value") in ["no_good", "IDK"]:
280
+ res_reason = await cl.AskUserMessage(content="Could you please tell us why?").send()
281
+
282
+ # Step 4: Capture user open-ended comments and write to a CSV file
283
+ filename = f"feedback_logs/feedback_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
284
+
285
+ with open(filename, "a", newline='') as csvfile:
286
+ feedback_writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
287
+ # Write column headers if the file is new/empty
288
+ if csvfile.tell() == 0:
289
+ feedback_writer.writerow(["Thread ID", "Feedback Value", "Reason Output"])
290
+ # Assuming thread_id is available from earlier in your session
291
+ thread = cl.user_session.get("thread")
292
+ feedback_writer.writerow([thread.id, res_feedback.get('value'), res_reason['output'] if res_reason and res_reason.get('output') is not None else ''])
293
+
294
+ thank_you_message = cl.Message(author="Climate Change Assistant", content="Thanks for your feedback!")
295
+ await thank_you_message.send()
296
+
297
+ next_steps = cl.Message(author="Climate Change Assistant", content=pr.next_steps)
298
+ await next_steps.send()
299
+
300
+ print('here is the bottom of the if feedback block')
301
+ print(run.status)
302
+ #run.status = "completed"
303
+
304
+ print('here is the bottom of the if pf.function is not none block')
305
+ print(run.status)
306
+ #run.status = "completed"
307
+
308
+ run = await client.beta.threads.runs.submit_tool_outputs_and_poll(
309
+ thread_id=thread.id,
310
+ run_id=run.id,
311
+ tool_outputs=[
312
+ {
313
+ "tool_call_id": tool_call.id,
314
+ "output": str(parsed_output),
315
+ },
316
+ ],
317
+ )
318
+
319
+ print('here is the bottom of the IF tool call is function block')
320
+ #run.status = "completed"
321
+ print(run.status)
322
+
323
+
324
+ #await cl.sleep(1) # Refresh every second
325
+
326
+ if run.status == "completed":
327
+ print(f"RUN STATUS: {run.status} from the bottom of the code")
328
+ #running = False
329
+ #run = await client.beta.threads.runs.cancel(
330
+ # thread_id=thread.id,
331
+ # run_id=run.id
332
+ # )
333
+ print(run)
334
+ break
335
+
336
+
337
+
338
+
339
+
340
+
341
+ if run.status in ["cancelled", "failed", "completed", "expired"]:
342
+
343
+ if run.status == "failed":
344
+ print('here is the failed run: ', run)
345
+ break
346
+ print('completed')
assistant_tools.py ADDED
@@ -0,0 +1,81 @@
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+
5
+ import requests
6
+ from openai import OpenAI
7
+ from datetime import date
8
+ from datetime import datetime
9
+
10
+ from dotenv import load_dotenv
11
+ import sys
12
+
13
+ import prompts as pr
14
+ import helper_functions as hf
15
+
16
+ pf_api_url = "https://graphql.probablefutures.org"
17
+ #pf_token_audience = "https://graphql.probablefutures.com"
18
+ #pf_token_url = "https://probablefutures.us.auth0.com/oauth/token"
19
+
20
+ load_dotenv()
21
+ #client = OpenAI()
22
+
23
+ def get_pf_data_timeline(address, country, warming_scenario='1.5', units='C'):
24
+ variables = {}
25
+
26
+ location = f"""
27
+ country: "{country}"
28
+ address: "{address}"
29
+ """
30
+
31
+ query = (
32
+ """
33
+ mutation {
34
+ getDatasetStatistics(input: { """
35
+ + location
36
+ + """ \
37
+ warmingScenario: \"""" + warming_scenario + """\"
38
+ }) {
39
+ datasetStatisticsResponses{
40
+ datasetId
41
+ name
42
+ unit
43
+ warmingScenario
44
+ midValue
45
+ highValue
46
+ mapCategory
47
+ }
48
+ }
49
+ }
50
+ """
51
+ )
52
+ #print(query)
53
+
54
+ access_token = hf.get_pf_token()
55
+ url = pf_api_url + "/graphql"
56
+ headers = {
57
+ "Content-Type": "application/json",
58
+ "Authorization": "Bearer " + access_token
59
+ }
60
+ response = requests.post(
61
+ url, json={"query": query, "variables": variables}, headers=headers
62
+ )
63
+
64
+ response = str(response.json()).replace("'", '"')
65
+ response = response.replace("None", "null")
66
+
67
+ parsed_output = hf.json_to_dataframe(response, address=address, country=country)
68
+
69
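+ # If the user asked for Fahrenheit, convert temperature values from °C to °F and precipitation from mm to inches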
+ if units.lower() in ('f', 'fahrenheit'):
70
+ parsed_output.loc[parsed_output.unit == '°C', 'midValue'] = (((parsed_output.loc[parsed_output.unit == '°C', 'midValue'].astype('float') * 9) / 5) + 32).round()
71
+ parsed_output.loc[parsed_output.unit == '°C', 'highValue'] = (((parsed_output.loc[parsed_output.unit == '°C', 'highValue'].astype('float') * 9) / 5) + 32).round()
72
+ parsed_output.loc[parsed_output.unit == 'mm', 'midValue'] = (parsed_output.loc[parsed_output.unit == 'mm', 'midValue'].astype('float') / 25.4).round(1)
73
+ parsed_output.loc[parsed_output.unit == 'mm', 'highValue'] = (parsed_output.loc[parsed_output.unit == 'mm', 'highValue'].astype('float') / 25.4).round(1)
74
+ parsed_output.loc[parsed_output.unit == '°C', 'unit'] = '°F'
75
+ parsed_output.loc[parsed_output.unit == 'mm', 'unit'] = 'inches'
76
+
77
+ print("got output of pf_data_new")
78
+
79
+ #summary = summary_completion(str(address) + " " + str(country))
80
+
81
+ return parsed_output
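As a quick sanity check of this lookup outside the app, something like the sketch below should work. The location strings are illustrative, valid `CLIENT_ID`/`CLIENT_SECRET` values must be set in `.env`, and note that importing `assistant_tools` pulls in `helper_functions`, which loads the SDXL pipelines at import time, so it assumes the same GPU environment as the app.

```python
# Illustrative check of the Probable Futures lookup (not part of the app's flow)
import assistant_tools as at

df = at.get_pf_data_timeline("Denver", "United States", warming_scenario="1.5", units="F")
print(df[["name", "midValue", "highValue", "unit"]])
```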
chainlit.md ADDED
@@ -0,0 +1,16 @@
1
+ # Welcome to Scenes From Tomorrow
2
+
3
+ Overcoming climate procrastination by building empathy with your future self.
4
+
5
+ ## How to Use this App
6
+ The goal of this app is to provide you with a localized view of the impact of climate change and actionable steps to prepare. The data is based on climate models that have been [accurate](http://www.climatehubs.usda.gov/hubs/northwest/topic/basics-global-climate-models#:~:text=Despite%20the%20level%20of%20uncertainty,of%20the%20past%20100%20years.) in predicting global temperature changes since the 1970s.
7
+
8
+ To use the app, enter a location to see the predicted climate and talk with the AI assistant about how to prepare.
9
+
10
+ ## Data Sources & Methodology
11
+ This application is based on data and other content made available by Probable Futures, a Project of SouthCoast Community Foundation, and certain of that data may have been provided to Probable Futures by Woodwell Climate Research Center, Inc. or The Coordinated Regional Climate Downscaling Experiment (CORDEX). See the [Probable Futures API](https://docs.probablefutures.org/api/) and [Probable Futures climate models](https://probablefutures.org/science/climate-models/) for more information.
12
+
13
+ ## Version
14
+ 0.1.1
15
+
16
+
helper_functions.py ADDED
@@ -0,0 +1,217 @@
1
+ import os
2
+ import json
3
+ import pandas as pd
4
+
5
+ import requests
6
+ from openai import OpenAI
7
+ from datetime import date
8
+ from datetime import datetime
9
+
10
+ from dotenv import load_dotenv
11
+ import sys
12
+
13
+ import io
14
+ import base64
15
+ import urllib
16
+ from PIL import Image
17
18
+ import cv2
19
+
20
+ from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image # DiffusionPipeline
21
+ import torch
22
+ #import matplotlib.pyplot as plt
23
+
24
+ import prompts as pr
25
+
26
+ pf_api_url = "https://graphql.probablefutures.org"
27
+ #pf_token_audience = "https://graphql.probablefutures.com"
28
+ #pf_token_url = "https://probablefutures.us.auth0.com/oauth/token"
29
+
30
+ load_dotenv()
31
+ client = OpenAI()
32
+
33
+ model = "gpt-4o" #"gpt-4-0125-preview" # gpt-4 #gpt-3.5-turbo-16k
34
+
35
+
36
+ pipeline_text2image = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16") # digiplay/Landscape_PhotoReal_v1
37
+ pipeline_image2image = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16") # digiplay/Landscape_PhotoReal_v1
38
+ pipeline_text2image.to("cuda")
39
+ pipeline_image2image.to("cuda")
40
+
41
+
42
+ def convert_to_iso8601(date_str):
43
+ try:
44
+ # Parse the date string to a datetime object
45
+ date_obj = datetime.strptime(date_str, "%Y-%m-%d")
46
+ # Format the datetime object to ISO 8601 format with timezone offset
47
+ iso8601_date = date_obj.strftime("%Y-%m-%dT%H:%M:%S+00:00")
48
+ return iso8601_date
49
+ except ValueError:
50
+ # Return the original string if it's not in the expected date format
51
+ return date_str
52
+
53
+
54
+ def get_pf_token():
55
+ client_id = os.getenv("CLIENT_ID")
56
+ client_secret = os.getenv("CLIENT_SECRET")
57
+ url = 'https://graphql.probablefutures.org/auth/token'
58
+
59
+ # Encode the client credentials
60
+ encoded_credentials = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
61
+
62
+ headers = {
63
+ 'Authorization': 'Basic ' + encoded_credentials
64
+ }
65
+
66
+ try:
67
+ response = requests.get(url, headers=headers)
68
+ response.raise_for_status() # This will raise an exception for HTTP errors
69
+ data = response.json()
70
+ access_token = data['access_token']
71
+ return access_token
72
+
73
+ except requests.exceptions.RequestException as e:
74
+ print('There was a problem with your request:', e)
75
+ return None
76
+
77
+
78
+ def json_to_dataframe(json_data, address, country):
79
+ # Extract the relevant part of the JSON data
80
+ json_data = json.loads(json_data)
81
+ data = json_data['data']['getDatasetStatistics']['datasetStatisticsResponses']
82
+ # Convert to a DataFrame
83
+ df = pd.DataFrame(data)
84
+ # Normalize the 'info' column if needed
85
+ #if not df['info'].apply(lambda x: x == {}).all():
86
+ # info_df = pd.json_normalize(df['info'])
87
+ # df = df.drop(columns=['info']).join(info_df)
88
+ df['address'] = address
89
+ df['country'] = country
90
+ df = df[['address', 'country', 'name', 'midValue', 'highValue', 'unit', 'mapCategory']]
91
+ #df = df[df['name'].str.contains('Change')]
92
+ df = df[~((df['midValue'] == '0.0') & (df['highValue'] == '0.0'))]
93
+ df.reset_index(drop=True, inplace=True)
94
+
95
+ return df
96
+
97
+
98
+ def summary_completion(address, country, output, user_question):
99
+ content = f"Please answer the user question {user_question} for the location of {address} {country}. Use the information that was just provided previously to the user: {output}"
100
+ print(content)
101
+ completion = client.chat.completions.create(
102
+ model=model, #"gpt-4-0125-preview", # gpt-4 #gpt-3.5-turbo-16k
103
+ messages=[
104
+ {"role": "system", "content": pr.user_question_prompt},
105
+ {"role": "user", "content": content}
106
+ ],
107
+ stream=True
108
+ )
109
+
110
+ return completion#.choices[0].message.content
111
+
112
+ # the 'content' object is a dataframe so it's wrapped in a str(to_json()) call
113
+ def story_completion(story_system_prompt, units, content):
114
+ completion = client.chat.completions.create(
115
+ model=model, #"gpt-4-0125-preview", # gpt-4 #gpt-3.5-turbo-16k
116
+ messages=[
117
+ {"role": "system", "content": str(story_system_prompt + ". Be sure to describe the result using the following temperature scale: " + units)},
118
+ {"role": "user", "content": str(content.to_json())}
119
+ ],
120
+ stream=True
121
+ )
122
+
123
+ return completion#.choices[0].message.content
124
+
125
+ # need GPU to run this part; uncomment lines 31 & 32
126
+ def get_image_response_SDXL(prompt, image_path=None, filtered_keywords=None):  # image_path holds a previously generated image when doing image-to-image (for now)
127
+ print('starting SDXL') # Check here for prompt language tips: https://stable-diffusion-art.com/sdxl-prompts/
128
+
129
+ if image_path is None:
130
+ # Generate image from text
131
+ # using flash attention for memory optimization
132
+ # https://huggingface.co/docs/diffusers/en/optimization/memory#memory-efficient-attention
133
+ #with torch.inference_mode():
134
+ result_image = pipeline_text2image(
135
+ prompt=prompt, num_inference_steps=2, guidance_scale=0.0).images[0] # Assuming default image dimensions or specify if required
136
+ else:
137
+ # Load the image from the path
138
+ #img = Image.open(image_path)
139
+
140
+ #plt.imshow(img)
141
+ #plt.title("Loaded Image")
142
+ #plt.show()
143
+
144
+ #if strength == None:
145
+ # strength = 0.51
146
+
147
+ # adding inpainting keywords for the 2.0 and 3.0 warming scenarios
148
+ modified_prompt = filtered_keywords if filtered_keywords else prompt
149
+ print(modified_prompt)
150
+
151
+ # Modify existing image based on new prompt
152
+ # using flash attention https://huggingface.co/docs/diffusers/en/optimization/memory#memory-efficient-attention
153
+ #with torch.inference_mode():
154
+ result_image = pipeline_image2image(
155
+ prompt=modified_prompt, image=image_path, strength=0.55, guidance_scale=0.0, num_inference_steps=2).images[0] # negative_prompt="deformed faces, distorted faces, mangled hands, extra legs",
156
+
157
+ # Save the image to a byte buffer
158
+ buffer = io.BytesIO()
159
+ result_image.save(buffer, format='PNG')
160
+ image_bytes = buffer.getvalue()
161
+ return result_image, image_bytes
162
+
163
+
164
+ def summarizer(content, inpainting=None):
165
+ if inpainting is None:
166
+ system_prompt = pr.summarizer_prompt
167
+ else:
168
+ system_prompt = pr.summarizer_prompt_inpainting
169
+ completion = client.chat.completions.create(
170
+ model=model, #"gpt-3.5-turbo-16k", # gpt-4 #gpt-4-0125-preview
171
+ messages=[
172
+ {"role": "system", "content": system_prompt},
173
+ {"role": "user", "content": content}
174
+ ],
175
+ stream=False
176
+ )
177
+
178
+ print(str(completion.choices[0].message.content))# + " Photorealism, Sharp Image, wide shot")
179
+ return str(completion.choices[0].message.content)# + " realistic humans " #" long exposure, blurred motion, streaks of light, surreal, dreamy, ghosting effect, highly detailed"
180
+
181
+
182
+ def generate_inpainting_keywords(data_changes):
183
+ # Check if the DataFrame is empty or missing the necessary columns
184
+ if data_changes.empty or 'midValue' not in data_changes.columns:
185
+ return ["no significant change"]
186
+ data_changes['name'] = data_changes['name'].str.replace('“', '', regex=False).str.replace('”', '', regex=False).str.replace('"', '', regex=False)
187
+ print(data_changes)
188
+
189
+ # Example: Select the change with the highest 'midValue' as the most significant
190
+ # Find the index of the row with the highest 'midValue'
191
+ idx_max_midValue = data_changes['midValue'].astype('float').abs().idxmax()
192
+
193
+ # Retrieve the 'name' from the row with the highest 'midValue'
194
+ most_significant_change_name = data_changes.loc[idx_max_midValue, 'name']
195
+ print(most_significant_change_name)
196
+
197
+ #change_name = most_significant_change['name'] # Assuming the name of the change is in the 'name' column
198
+ #impact = 'increase' if most_significant_change['midValue'] > 0 else 'decrease'
199
+
200
+ # Mapping of change types to potential keywords
201
+ climate_change_qualifiers = {
202
+ 'Change in total annual precipitation': 'heavy rain, flooding, gloomy skies',
203
+ 'Change in wettest 90 days': 'increased rainfall, frequent storms, saturated grounds',
204
+ 'Change in dry hot days': 'intense sunlight, heat haze, dry atmosphere',
205
+ 'Change in frequency of 1-in-100-year storm': 'severe storms, extreme weather events, damaged infrastructure',
206
+ 'Change in precipitation 1-in-100-year storm': 'torrential rain, flash floods, overflowing rivers',
207
+ 'Likelihood of year-plus extreme drought': 'faded colors, heat mirages, stark shadows',
208
+ 'Likelihood of year-plus drought': 'heat haze, dusty air, sun-bleached surfaces',
209
+ 'Change in wildfire danger days': 'smoky haze, distant glow of fires, ash particles in air'
210
+ }
211
+
212
+ # Retrieve qualifiers for the most significant change category
213
+ qualifiers = climate_change_qualifiers.get(most_significant_change_name, ["change not specified"])
214
+ #qualifiers_string = ", ".join([str(qualifier) for qualifier in qualifiers])
215
+
216
+ print(qualifiers)
217
+ return qualifiers
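For reference, `generate_inpainting_keywords` keeps the row whose `midValue` has the largest absolute value and looks up its `name` in the qualifier map above. A minimal sketch with made-up rows (importing `helper_functions` loads the SDXL pipelines, so this assumes the same GPU environment as the app):

```python
import pandas as pd
import helper_functions as hf

# Hypothetical rows shaped like the filtered Probable Futures output ('Change'/'Likelihood' rows)
data_changes = pd.DataFrame({
    "name": ["Change in dry hot days", "Change in wildfire danger days"],
    "midValue": ["33", "17"],
})
print(hf.generate_inpainting_keywords(data_changes))
# -> 'intense sunlight, heat haze, dry atmosphere' (the 33-day change dominates)
```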
prompts.py ADDED
@@ -0,0 +1,217 @@
1
+
2
+ # defining inputs to prompting functions
3
+ welcome_message = '''
4
+ Please enter a location (city and country) to get started. By default I'll use Metric units but if you want Fahrenheit please say that (or 'F').'''
5
+
6
+ user_question_prompt = '''
7
+ Please answer the user's question on the impact of climate change on their location and lifestyle being as specific and relevant to the information they've provided as possible.
8
+ If you aren't sure how to help, say 'I'm not sure how to help with that.'
9
+ '''
10
+
11
+ storyboard_prompt = """
12
+ I am making a magazine about the impact of climate change and I want you to create storyboards.
13
+ I will share a story with you and want you to create an image based on it.
14
+ I will break the story into smaller parts and want you to create an image for each part.
15
+ Go for photorealistic quality like you are showing a picture.
16
+ Make it look like a picture of a real place not an unrealistic mash-up of different things that don't exist together in the real world.
17
+ Do not add text and callout boxes on the image.
18
+ The next story chunk is below.
19
+ """
20
+
21
+ summarizer_prompt = '''
22
+ Take the input paragraph and condense it down into a single sentence like the example below.
23
+
24
+ STORYTELLING TIPS
25
+ - always include the words cinematic, realistic, and dynamic
26
+ - describe a single image in the output, not a combination of scenes
27
+ - describe what's being shown in the image in vivid and exceptionally detailed terms
28
+ - always include the name of the place and the country
29
+
30
+ -----
31
+ INPUT EXAMPLE:
32
+ In Jakarta, Indonesia, the future holds a stark increase in climate volatility, with a 15% likelihood of experiencing extreme droughts lasting a year or more,
33
+ and a general likelihood of 36% for year-plus droughts. This drastic shift towards drier conditions is accompanied by a significant rise in wildfire danger days,
34
+ expected to increase by 15 to 29 days. These changes underscore a new era where climate instability becomes the norm, profoundly disrupting
35
+ land use, commerce, and even geopolitical stability.
36
+
37
+ Jakarta's landscapes and ecosystems, like many others, may not be fully prepared to withstand the stress of alternating extreme droughts and unpredictable
38
+ heavy precipitation events. This evolving climate scenario threatens to reduce habitable land areas permanently while placing unprecedented strain on infrastructure
39
+ and urban areas with challenges of managing too little or too much water.
40
+
41
+ ----
42
+ OUTPUT EXAMPLE:
43
+ A cinematic, realistic, and dynamic image of Jakarta, Indonesia facing the impact of climate change: rushing water through flooded streets in a bustling and eclectic southeast Asian metropolis.
44
+ '''
45
+
46
+ summarizer_prompt_inpainting = '''
47
+ Take the input paragraph and condense it down to a series of keywords to feed to an inpainting image generation model. do NOT mention the city, country, or warming scenario. A couple examples are shown below.
48
+
49
+ STORYTELLING TIPS
50
+ - describe a single series of image descriptors in the output
51
+ - describe what's being shown in the image in vivid and exceptionally detailed terms
52
+ - describe one feature of what's included in the input prompt in several keyword descriptors.
53
+ - If the input example describes multiple different possible scenarios please only describe one
54
+ - do NOT describe any abstract concepts such as management, planning, resilience, adaptation
55
+ - ONLY describe adjectives for what you are literally seeing in the landscape scene
56
+ - do NOT describe any non-literal adjectives such as innovative, courageous, resourceful
57
+
58
+ -----
59
+ INPUT EXAMPLE 1:
60
+ In Auckland, New Zealand, known for its beautiful harbors and vibrant city life, the future under a 3.0°C warming scenario presents a mixed bag of climate challenges.
61
+ The city could see an unpredictable swing in its weather patterns, with some predictions suggesting less total annual rain but heavier rainfalls during the wettest days, and an increase in dry hot days.
62
+ This could lead to more frequent and severe drought conditions, impacting Auckland's water supplies, increasing wildfire risks, and putting a strain on both urban and rural communities.
63
+ Adapting to these changes will be crucial for maintaining the quality of life and the natural beauty that Auckland is famous for.
64
+
65
+ ----
66
+ OUTPUT EXAMPLE 1:
67
+ heavy rain, flooding, storms, wet days, storm clouds, extreme weather
68
+
69
+ -----
70
+ INPUT EXAMPLE 2:
71
+ In Jakarta, Indonesia, the future holds a stark increase in climate volatility, with a 15% likelihood of experiencing extreme droughts lasting a year or more,
72
+ and a general likelihood of 36% for year-plus droughts. This drastic shift towards drier conditions is accompanied by a significant rise in wildfire danger days,
73
+ expected to increase by 15 to 29 days. These changes underscore a new era where climate instability becomes the norm, profoundly disrupting
74
+ land use, commerce, and even geopolitical stability.
75
+
76
+ Jakarta's landscapes and ecosystems, like many others, may not be fully prepared to withstand the stress of alternating extreme droughts and unpredictable
77
+ heavy precipitation events. This evolving climate scenario threatens to reduce habitable land areas permanently while placing unprecedented strain on infrastructure
78
+ and urban areas with challenges of managing too little or too much water.
79
+
80
+ ----
81
+ OUTPUT EXAMPLE 2:
82
+ drought, parched land, hazy air, wildfire smoke, sunset, high detail
83
+ '''
84
+
85
+
86
+ timeline_message = '''We'll take a look at 3 points in time. First we'll look at today, when temperatures will soon reach the 1.5°C warming level. Then we'll look at the 2.0°C scenario, which could arrive in the 2040s, and finally the extreme 3.0°C scenario, which could arrive by 2075.'''
87
+
88
+
89
+ one_five_degree_prompt = """
90
+ Hello, Climate Change Assistant. You help people understand how climate change will affect their life.
91
+ You will use Probable Futures API to get data that describes the impact of climate change on a specific location.
92
+ You are describing a 1.5°C warming scenario which is already here today. The changes you will describe below have already happened.
93
+ Create no more than 2-3 sentences, at the 9th or 10th grade reading level, based on the Data and Storytelling tips, and Example Story below.
94
+ Please talk about how climate change is going to have a negative impact on daily life as it exists today.
95
+ Don't be too negative, but don't be overly optimistic either. Just be direct and clear. Don't use technical language.
96
+
97
+ -----
98
+ DATA FROM EXAMPLE LOCATION: DENVER COLORADO
99
+ name unit midValue highValue mapCategory
100
+ 10 hottest nights °C 19.0 21.0 heat
101
+ 10 hottest days °C 34.0 36.0 heat
102
+ Days above 35°C (95°F) days 2.0 8.0 heat
103
+ 10 hottest wet-bulb days °C 18.0 20.0 heat
104
+
105
+ -----
106
+ STORYTELLING TIPS
107
+ - give clear and specific details about that location
108
+ - talk about changes that could impact notable activities and characteristics of that location
109
+ - don't end with vague optimistic statements
110
+ - talk about the trade-offs and hard decisions people will have to make to adapt
111
+ - describe scenes in detail as though describing a picture
112
+ - provide additional details when strongly implied by the data, for example a rise in wildfire danger in dry climates or sea level rise in coastal ones
113
+ - Make sure to describe what the area is known for today
114
+ - Create no more than 2-3 sentences, at the 9th or 10th grade reading level
115
+ - Make sure to mention the 1.5°C warming scenario which has already happened.
116
+
117
+ -----
118
+ EXAMPLE OUTPUT
119
+ In Denver, a city known for its sunny days and beautiful, mild seasons, the impact of climate change is becoming more noticeable with a warming scenario of 1.5°C.
120
+ The 10 hottest days of the year have reached a temperature of 34.0°C, and the number of days above 35°C has risen to 2 to 8 per year, making for a hotter and more extreme summer environment than residents are used to.
121
+ This change could make outdoor activities like hiking in the Rocky Mountains much harder during the hotter months, and could even affect the city's lush parks and gardens, making it a bit tougher for people to enjoy the natural beauty Denver is famous for."""
122
+
123
+ two_degree_prompt = '''
124
+ Hello, Climate Change Assistant. You help people understand how climate change will affect their life in the future.
125
+ You will use Probable Futures API to get data that describes predicted climate change indicators for a specific location in the future.
126
+ Once you have the data, create no more than 2-3 sentences, at the 9th or 10th grade reading level, telling about how climate change is going to impact life in that location based on the Data and Storytelling tips, and Example Story below.
127
+ Please talk about how climate change is going to have a negative impact on daily life as it exists today.
128
+ Don't be too negative, but don't be overly optimistic either. Just be direct and clear. Don't use technical language.
129
+ Make sure to mention the 2.0°C warming scenario.
130
+
131
+ ----
132
+ DATA FROM EXAMPLE LOCATION: SAINT TROPEZ, FRANCE
133
+ name unit midValue highValue mapCategory
134
+ Change in total annual precipitation mm 18 67 water
135
+ Change in wettest 90 days mm 14 93 water
136
+ Change in dry hot days days 32 28 water
137
+ Change in frequency of “1-in-100-year” storm x as frequent 3 None water
138
+ Change in precipitation “1-in-100-year” storm mm 44 None water
139
+ Likelihood of year-plus extreme drought % 12 None land
140
+ Likelihood of year-plus drought % 33 None land
141
+ Change in wildfire danger days days 10 15 land
142
+ Change in water balance z-score -0.2 0.2 land
143
+
144
+
145
+ ---
146
+ STORYTELLING TIPS
147
+ - give clear and specific details about that location
148
+ - talk about changes that could impact notable activities and characteristics of that location
149
+ - don't end with vague optimistic statements
150
+ - talk about the trade-offs and hard decisions people will have to make to adapt
151
+ - describe scenes in detail as though describing a picture
152
+ - provide additional details when strongly implied by the data, for example a rise in wildfire danger in dry climates or sea level rise in coastal ones
153
+ - Make sure to describe what the area is known for today
154
+ - Make sure to mention the 2.0°C warming scenario.
155
+ - Create no more than 2-3 sentences, at the 9th or 10th grade reading level, including data points in the final output
156
+
157
+ ---
158
+ EXAMPLE OUTPUT
159
+ In a climate getting warmer by +2.0°C, Saint Tropez, an area known for its beautiful beaches and scenic coast, may experience more extreme weather.
160
+ While there's more rain, there are also likely to be more scorching hot days and unexpected, risky events like storms and droughts.
161
+ To handle longer heatwaves and more days when wildfires could start, it's crucial to act fast to adjust to these changes, especially since coastal places like Saint Tropez are at risk of rising sea levels.'''
162
+
163
+
164
+ three_degree_prompt = '''
165
+ Hello, Climate Change Assistant. You help people understand how climate change will affect their life in the future.
166
+ You will use Probable Futures API to get data that describes predicted climate change indicators for a specific location in the future.
167
+ Once you have the data, create no more than 2-3 sentences, at the 9th or 10th grade reading level, telling about how climate change is going to impact life in that location based on the Data and Storytelling tips, and Example Story below.
168
+ Please talk about how climate change is going to have a negative impact on daily life as it exists today.
169
+ Don't be too negative, but don't be overly optimistic either. Just be direct and clear. Don't use technical language.
170
+ Make sure to mention the 3.0°C warming scenario.
171
+
172
+ ----
173
+ DATA FROM EXAMPLE LOCATION: PATAGONIA, ARGENTINA
174
+ name unit midValue highValue mapCategory
175
+ Change in total annual precipitation mm 66 121 water
176
+ Change in wettest 90 days mm 18 176 water
177
+ Change in dry hot days days 33 55 water
178
+ Change in frequency of “1-in-100-year” storm x as frequent 2 None water
179
+ Change in precipitation “1-in-100-year” storm mm 67 None water
180
+ Change in wildfire danger days days 17 36 land
181
+ Change in water balance z-score -0.2 0.1 land
182
+
183
+
184
+ ---
185
+ STORYTELLING TIPS
186
+ - give clear and specific details about that location
187
+ - talk about changes that could impact notable activities and characteristics of that location
188
+ - don't end with vague optimistic statements
189
+ - talk about the trade-offs and hard decisions people will have to make to adapt
190
+ - describe scenes in detail as though describing a picture
191
+ - provide additional details when strongly implied by the data, for example a rise in wildfire danger in dry climates or sea level rise in coastal ones
192
+ - Make sure to describe what the area is known for today
193
+ - Make sure to mention the 3.0°C warming scenario.
194
+ - Create no more than 2-3 sentences, at the 9th or 10th grade reading level, including data points in the final output
195
+
196
+ ---
197
+ EXAMPLE OUTPUT
198
+ Patagonia, Argentina, renowned for its vast landscapes and Andean peaks, could face profound shifts under a 3.0°C warming scenario.
199
+ With an expected increase in annual precipitation and more frequent dry hot days, the region could grapple with climatic uncertainty that threatens its ecosystems and unique wildlife.
200
+ These changes could strain water resources, agriculture, and local economies, prompting urgent action to adapt to an uncertain future.'''
201
+
202
+ timeline_prompts = [two_degree_prompt, three_degree_prompt]
203
+
204
+ final_message = '''
205
+ The future you just saw is likely but not certain. We can overcome climate procrastination by identifying specific actions to take, and work together with like-minded individuals to make it a habit. Here are some resources to help you get started:
206
+ 1. Project Drawdown: Science-based solutions to help the world slow down and reverse climate change: as quickly, safely, and equitably as possible. (https://drawdown.org/)
207
+ 2. Probable Futures: Data, tools, and a climate adaptation handbook to help you adapt to the coming disruptions. (https://probablefutures.org/)
208
+ 3. All We Can Save Project: Join a community of like-minded individuals to scale your efforts through collective action (https://www.allwecansave.earth/)
209
+ '''
210
+
211
+ next_steps = '''
212
+ What would you like to do next? You can ask me questions about the scenes you just saw or enter a new location.
213
+
214
+ Also, please give me a few seconds to process things before I can respond to your question. 🤖
215
+ '''
216
+
217
+ image_prompt_SDXL = '''landscape, crisp background, masterpiece, ultra high res, best quality, kkw-ph1, color, '''
requirements.txt ADDED
@@ -0,0 +1,231 @@
1
+ absl-py==2.1.0
2
+ accelerate==0.27.0
3
+ aiofiles==23.2.1
4
+ aiohttp==3.9.3
5
+ aiosignal==1.3.1
6
+ annotated-types==0.6.0
7
+ anthropic==0.21.3
8
+ anyio==3.7.1
9
+ argon2-cffi==23.1.0
10
+ argon2-cffi-bindings==21.2.0
11
+ arize==7.14.0
12
+ arrow==1.3.0
13
+ asttokens==2.4.1
14
+ async-lru==2.0.4
15
+ async-timeout==4.0.3
16
+ asyncer==0.0.2
17
+ attrs==23.2.0
18
+ Babel==2.14.0
19
+ backoff==2.2.1
20
+ beautifulsoup4==4.12.3
21
+ bidict==0.22.1
22
+ bleach==6.1.0
23
+ boto3==1.34.37
24
+ botocore==1.34.37
25
+ cachetools==5.3.2
26
+ certifi==2024.2.2
27
+ cffi==1.16.0
28
+ chainlit==1.1.101
29
+ charset-normalizer==3.3.2
30
+ chevron==0.14.0
31
+ click==8.1.7
32
+ comm==0.2.1
33
+ contourpy==1.2.0
34
+ cycler==0.12.1
35
+ dataclasses-json==0.5.14
36
+ debugpy==1.8.0
37
+ decorator==5.1.1
38
+ defusedxml==0.7.1
39
+ Deprecated==1.2.14
40
+ diffusers==0.26.2
41
+ distro==1.9.0
42
+ exceptiongroup==1.2.0
43
+ executing==2.0.1
44
+ fastapi==0.110.1
45
+ fastapi-socketio==0.0.10
46
+ fastjsonschema==2.19.1
47
+ filelock==3.13.1
48
+ filetype==1.2.0
49
+ fire==0.5.0
50
+ fonttools==4.48.1
51
+ fqdn==1.5.1
52
+ frozenlist==1.4.1
53
+ fsspec==2024.2.0
54
+ google-auth==2.27.0
55
+ google-auth-oauthlib==1.2.0
56
+ google-cloud==0.34.0
57
+ googleapis-common-protos==1.62.0
58
+ grpcio==1.60.1
59
+ h11==0.14.0
60
+ httpcore==0.17.3
61
+ httpx==0.24.1
62
+ huggingface-hub==0.20.3
63
+ idna==3.6
64
+ importlib-metadata==6.11.0
65
+ ipykernel==6.26.0
66
+ ipython==8.17.2
67
+ ipywidgets==8.1.1
68
+ isoduration==20.11.0
69
+ jedi==0.19.1
70
+ Jinja2==3.1.3
71
+ jmespath==1.0.1
72
+ joblib==1.3.2
73
+ json5==0.9.14
74
+ jsonpointer==2.4
75
+ jsonschema==4.21.1
76
+ jsonschema-specifications==2023.12.1
77
+ jupyter-events==0.9.0
78
+ jupyter-lsp==2.2.2
79
+ jupyter_client==8.6.0
80
+ jupyter_core==5.7.1
81
+ jupyter_server==2.12.5
82
+ jupyter_server_terminals==0.5.2
83
+ jupyterlab==4.0.6
84
+ jupyterlab-widgets==3.0.9
85
+ jupyterlab_pygments==0.3.0
86
+ jupyterlab_server==2.25.2
87
+ kiwisolver==1.4.5
88
+ Lazify==0.4.0
89
+ lightning==2.1.4
90
+ lightning-cloud==0.5.64
91
+ lightning-utilities==0.10.1
92
+ lightning_sdk==0.0.17a0
93
+ literalai==0.0.601
94
+ Markdown==3.5.2
95
+ markdown-it-py==3.0.0
96
+ MarkupSafe==2.1.5
97
+ marshmallow==3.20.2
98
+ matplotlib==3.8.2
99
+ matplotlib-inline==0.1.6
100
+ mdurl==0.1.2
101
+ mistune==3.0.2
102
+ mpmath==1.3.0
103
+ multidict==6.0.5
104
+ mypy-extensions==1.0.0
105
+ nbclient==0.9.0
106
+ nbconvert==7.16.0
107
+ nbformat==5.9.2
108
+ nest-asyncio==1.6.0
109
+ networkx==3.2.1
110
+ notebook_shim==0.2.3
111
+ numpy==1.26.2
112
+ nvidia-cublas-cu12==12.1.3.1
113
+ nvidia-cuda-cupti-cu12==12.1.105
114
+ nvidia-cuda-nvrtc-cu12==12.1.105
115
+ nvidia-cuda-runtime-cu12==12.1.105
116
+ nvidia-cudnn-cu12==8.9.2.26
117
+ nvidia-cufft-cu12==11.0.2.54
118
+ nvidia-curand-cu12==10.3.2.106
119
+ nvidia-cusolver-cu12==11.4.5.107
120
+ nvidia-cusparse-cu12==12.1.0.106
121
+ nvidia-nccl-cu12==2.19.3
122
+ nvidia-nvjitlink-cu12==12.4.127
123
+ nvidia-nvtx-cu12==12.1.105
124
+ oauthlib==3.2.2
125
+ openai==1.23.2
126
+ opencv-python==4.9.0.80
127
+ opentelemetry-api==1.22.0
128
+ opentelemetry-exporter-otlp==1.22.0
129
+ opentelemetry-exporter-otlp-proto-common==1.22.0
130
+ opentelemetry-exporter-otlp-proto-grpc==1.22.0
131
+ opentelemetry-exporter-otlp-proto-http==1.22.0
132
+ opentelemetry-instrumentation==0.43b0
133
+ opentelemetry-proto==1.22.0
134
+ opentelemetry-sdk==1.22.0
135
+ opentelemetry-semantic-conventions==0.43b0
136
+ overrides==7.7.0
137
+ packaging==23.2
138
+ pandas==2.1.4
139
+ pandocfilters==1.5.1
140
+ parso==0.8.3
141
+ pexpect==4.9.0
142
+ pillow==10.2.0
143
+ platformdirs==4.2.0
144
+ prometheus-client==0.19.0
145
+ prompt-toolkit==3.0.43
146
+ protobuf==4.23.4
147
+ psutil==5.9.8
148
+ ptyprocess==0.7.0
149
+ pure-eval==0.2.2
150
+ pyarrow==15.0.2
151
+ pyasn1==0.5.1
152
+ pyasn1-modules==0.3.0
153
+ pycparser==2.21
154
+ pydantic==2.6.1
155
+ pydantic_core==2.16.2
156
+ Pygments==2.17.2
157
+ PyJWT==2.8.0
158
+ pyparsing==3.1.1
159
+ python-dateutil==2.8.2
160
+ python-dotenv==1.0.1
161
+ python-engineio==4.9.0
162
+ python-graphql-client==0.4.3
163
+ python-json-logger==2.0.7
164
+ python-multipart==0.0.9
165
+ python-socketio==5.11.1
166
+ pytorch-lightning==2.1.4
167
+ pytz==2024.1
168
+ PyYAML==6.0.1
169
+ pyzmq==25.1.2
170
+ referencing==0.33.0
171
+ regex==2023.12.25
172
+ requests==2.31.0
173
+ requests-futures==1.0.0
174
+ requests-oauthlib==1.3.1
175
+ rfc3339-validator==0.1.4
176
+ rfc3986-validator==0.1.1
177
+ rich==13.7.0
178
+ rpds-py==0.17.1
179
+ rsa==4.9
180
+ s3transfer==0.10.0
181
+ safetensors==0.4.2
182
+ scikit-learn==1.3.2
183
+ scipy==1.11.4
184
+ Send2Trash==1.8.2
185
+ simple-term-menu==1.6.4
186
+ simple-websocket==1.0.0
187
+ six==1.16.0
188
+ sniffio==1.3.0
189
+ soupsieve==2.5
190
+ stack-data==0.6.3
191
+ starlette==0.37.2
192
+ sympy==1.12
193
+ syncer==2.0.3
194
+ tensorboard==2.15.1
195
+ tensorboard-data-server==0.7.2
196
+ termcolor==2.4.0
197
+ terminado==0.18.0
198
+ threadpoolctl==3.2.0
199
+ tinycss2==1.2.1
200
+ tokenizers==0.15.2
201
+ tomli==2.0.1
202
+ torch==2.2.2
203
+ #torchaudio==2.1.2+cu121
204
+ #torchmetrics==1.2.0
205
+ #torchvision==0.16.2+cu121
206
+ tornado==6.4
207
+ tqdm==4.66.1
208
+ traitlets==5.14.1
209
+ transformers==4.37.2
210
+ triton==2.2.0
211
+ types-python-dateutil==2.8.19.20240106
212
+ typing-inspect==0.9.0
213
+ typing_extensions==4.9.0
214
+ tzdata==2023.4
215
+ uptrace==1.22.0
216
+ uri-template==1.3.0
217
+ urllib3==2.0.7
218
+ uvicorn==0.25.0
219
+ watchfiles==0.20.0
220
+ wcwidth==0.2.13
221
+ webcolors==1.13
222
+ webencodings==0.5.1
223
+ websocket-client==1.7.0
224
+ websockets==12.0
225
+ Werkzeug==3.0.1
226
+ widgetsnbextension==4.0.9
227
+ wrapt==1.16.0
228
+ wsproto==1.2.0
229
+ xformers==0.0.25.post1
230
+ yarl==1.9.4
231
+ zipp==3.17.0
utils.py ADDED
@@ -0,0 +1,79 @@
1
+ from openai import AsyncOpenAI
2
+ from openai.types.beta.threads.run import Run
3
+
4
+ from openai.types.beta import Thread
5
+ from openai.types.beta.threads import (
6
+ ImageFileContentBlock,
7
+ TextContentBlock,
8
+ Message,
9
+ )
10
+
11
+ import chainlit as cl
12
+ from typing import Optional
13
+ from typing import Dict
14
+ from chainlit.context import context
15
+
16
+ class DictToObject:
17
+ def __init__(self, dictionary):
18
+ for key, value in dictionary.items():
19
+ setattr(self, key, value)
20
+
21
+ async def stream_message(question_response, cl, initial_content=""):
22
+ '''handler to stream a message back to user. native streaming not supported in assistants + chainlit v1'''
23
+ msg = cl.Message(content=initial_content)
24
+ await msg.send()
25
+
26
+ next_output = ""
27
+
28
+ for part in question_response:
29
+ if token := part.choices[0].delta.content or "":
30
+ next_output += token
31
+ await msg.stream_token(token)
32
+
33
+ await msg.update()
34
+ return next_output
35
+
36
+ async def ask_to_continue():
37
+ '''a button for the user to click to continue '''
38
+ return await cl.AskActionMessage(
39
+ content="Click to continue ",
40
+ actions=[
41
+ cl.Action(name="keep_going", value="next", label="➡️ Let's keep going"),
42
+ cl.Action(name="ask_question", value="question", label="︖ Ask a question")
43
+ ],
44
+ timeout=600
45
+ ).send()
46
+
47
+ async def process_thread_message(message_references: Dict[str, cl.Message], thread_message):
48
+ for idx, content_message in enumerate(thread_message.content):
49
+ id = thread_message.id + str(idx)
50
+ if isinstance(content_message, TextContentBlock):
51
+ if id in message_references:
52
+ msg = message_references[id]
53
+ msg.content = content_message.text.value
54
+ await msg.update()
55
+ else:
56
+ message_references[id] = cl.Message(
57
+ author=thread_message.role, content=content_message.text.value
58
+ )
59
+ await message_references[id].send()
60
+ elif isinstance(content_message, ImageFileContentBlock):
61
+ image_id = content_message.image_file.file_id
62
+ client = AsyncOpenAI()  # utils defines no module-level client, so create one here to fetch the image
+ response = await client.files.with_raw_response.retrieve_content(image_id)
63
+ elements = [
64
+ cl.Image(
65
+ name=image_id,
66
+ content=response.content,
67
+ display="inline",
68
+ size="large",
69
+ ),
70
+ ]
71
+ if id not in message_references:
72
+ message_references[id] = cl.Message(
73
+ author=thread_message.role,
74
+ content="",
75
+ elements=elements,
76
+ )
77
+ await message_references[id].send()
78
+ else:
79
+ print("unknown message type", type(content_message))