Ubaidbhat committed
Commit af4c93d · verified · 1 Parent(s): 872d57e

Update app.py

Files changed (1):
  1. app.py +173 -155
app.py CHANGED
@@ -1,21 +1,35 @@
- from pathlib import Path
- from openai import OpenAI
- import soundfile as sf
- from pydub import AudioSegment
  import base64
  import logging
  import numpy as np
  import os
+ import langchain
+ import base64
+ import gradio as gr
+ import shutil
+ import json
+ import re
+ from pathlib import Path
+ from openai import OpenAI
+ import soundfile as sf
+ from pydub import AudioSegment
+ from langchain_core.pydantic_v1 import BaseModel, Field
+ from langchain.chains import TransformChain
+ from langchain_core.messages import HumanMessage
+ from langchain_openai import ChatOpenAI
+ from langchain import globals
+ from langchain_core.runnables import chain
+ from langchain_core.output_parsers import JsonOutputParser
+ from langchain.memory import ConversationSummaryBufferMemory, ConversationBufferMemory
  
- os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
+ os.environ["OPENAI_API_KEY"] = "sk-proj-..."  # key redacted; never commit a real API key
  client = OpenAI()
  
  # Set up logging
  logging.basicConfig(level=logging.INFO)
- def transform_text_to_speech(text: str):
+ def transform_text_to_speech(text: str, user):
      # Generate speech from transcription
-     speech_file_path_mp3 = Path.cwd() / "speech.mp3"
-     speech_file_path_wav = Path.cwd() / "speech.wav"
+     speech_file_path_mp3 = Path.cwd() / f"{user}-speech.mp3"
+     speech_file_path_wav = Path.cwd() / f"{user}-speech.wav"
      response = client.audio.speech.create(
          model="tts-1",
          voice="onyx",
@@ -44,8 +58,8 @@ def transform_text_to_speech(text: str):
      return audio_html
  
  
- def transform_speech_to_text(audio):
-     file_path = "saved_audio.wav"
+ def transform_speech_to_text(audio, user):
+     file_path = f"{user}-saved_audio.wav"
      sample_rate, audio_data = audio
      sf.write(file_path, audio_data, sample_rate)
      # Transcribe audio
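
The transcription request itself sits in the unchanged lines elided after this hunk. For orientation, a minimal sketch of such a call; the model name here is an assumption, not taken from the file:

# Hypothetical sketch of the elided transcription step inside transform_speech_to_text.
def transcribe(file_path: str) -> str:
    with open(file_path, "rb") as audio_file:
        transcription = client.audio.transcriptions.create(
            model="whisper-1",  # assumed model
            file=audio_file,
        )
    return transcription.text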
@@ -56,23 +70,6 @@ def transform_speech_to_text(audio):
      )
      return transcription.text
  
- # Define a function to save the image
- def save_image(image):
-     save_path = "image.png"
-     image.save(save_path)
-     return save_path
- 
- 
- import langchain
- import base64
- from langchain_core.pydantic_v1 import BaseModel, Field
- from langchain.chains import TransformChain
- from langchain_core.messages import HumanMessage
- from langchain_openai import ChatOpenAI
- from langchain import globals
- from langchain_core.runnables import chain
- from langchain_core.output_parsers import JsonOutputParser
- from langchain.memory import ConversationSummaryBufferMemory, ConversationBufferMemory
  
  
  def load_image(inputs: dict) -> dict:
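
The chain pieces this diff keeps referencing — description_parser, question_parser and load_image_chain — are defined in the unchanged lines 76-116 and never shown here. A hypothetical reconstruction of how such parsers are commonly wired; the class and field names below are assumptions, not the file's code:

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field

class ImageDescription(BaseModel):
    # Two-line description plus an opening question, per CONVERSATION_STARTER_PROMPT.
    description: str = Field(description="description of the photo and an opening question")

class FollowUpQuestion(BaseModel):
    # Single follow-up question, per the expanding/ending prompts.
    question: str = Field(description="one question for the storyteller")

description_parser = JsonOutputParser(pydantic_object=ImageDescription)
question_parser = JsonOutputParser(pydantic_object=FollowUpQuestion)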
@@ -120,145 +117,171 @@ def image_model(inputs: dict) -> str | list[str] | dict:
      )
      return msg.content
  
- vision_prompt = """
- Given the image uploaded by a old person.
- You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided.
- Provide the following information,
- - A description of the image in 2 lines and a question that gives context to the photograph.
- """
- 
  CONVERSATION_STARTER_PROMPT = """
- An older person has provided a photo.
- Here is the conversation history between the older person and Studs Terkel around the photograph:
- {history}
- You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided.
- Your task is to ask a question that gives context to the photograph. For example, where was this photograph taken? Who is in it? What is the significance of this photograph to you? Use the conversation history provided above and ask only one question at a time.
- Studs Terkel:
- """
+ Given the image uploaded by an old person.
+ You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided.
+ Provide the following information,
+ - A description of the image in 2 lines and a question that gives context to the photograph.
+ """
+ 
  
  CONVERSATION_EXPANDING_PROMPT = """
- Given the image uploaded by a old person.
- Here is the conversation history around the Image between the older person and Stud's Terkel:
- {history}
- You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided around the Image uploaded.
- Your task is to react the old person input (use your knowledge as well) and ask a question that encourages the person to expand on their answer about the photograph. Ask for more details or their feelings about the situation depicted in the photograph. Use the conversation history provided above and ask only one question at a time.
- Use your knowledge to respond to user input.
- Studs Terkel:
- """
+ Given the image uploaded by an old person.
+ Here is the conversation history around the image between the older person and Studs Terkel:
+ {history}
+ You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided around the image uploaded.
+ Your task is to react to the old person's input and ask a question that encourages the person to expand on their answer about the photograph. Ask for more details or their feelings about the situation depicted in the photograph. Use the conversation history provided above and ask only one question at a time.
+ Use your knowledge to respond with the information.
+ Studs Terkel:
+ """
  
  
  CONVERSATION_ENDING_PROMPT = """
- Given the image uploaded by a old person.
- Here is the conversation history around the Image between the older person and Stud's Terkel:
- {history}
- You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided around the Image uploaded.
- Your task is to react the old person input (use your knowledge as well) and ask if they would like to tell more about the story depicted in the photograph, discuss anything that the photograph reminds them of, or if they are ready to move on to another photograph or stop reminiscing. Use the conversation history provided above and ask only one question at a time.
- Use your knowledge to respond to user input.
- Studs Terkel:
- """
- 
- def get_image_informations(image_path: str, conversation_prompt: str, memory, new_photo) -> dict:
- 
-     if new_photo:
+ Given the image uploaded by an old person.
+ Here is the conversation history around the image between the older person and Studs Terkel:
+ {history}
+ You are Studs Terkel, and your role is to be a curious friend who is genuinely interested in the story behind the photograph that the older person has provided around the image uploaded.
+ Your task is to react to the old person's input and ask if they would like to tell more about the story depicted in the photograph, discuss anything that the photograph reminds them of, or if they are ready to move on to another photograph or stop reminiscing. Use the conversation history provided above and ask only one question at a time.
+ Studs Terkel:
+ """
+ 
+ def get_prompt(image_path: str, iter: int, memory: str) -> dict:
+     if iter == 1:
          parser = description_parser
-         prompt = conversation_prompt
+         prompt = CONVERSATION_STARTER_PROMPT
+     elif 2 <= iter <= 5:
+         parser = question_parser
+         prompt = CONVERSATION_EXPANDING_PROMPT.format(history=memory)
      else:
          parser = question_parser
-         prompt = conversation_prompt.format(history=memory.buffer)
+         prompt = CONVERSATION_ENDING_PROMPT.format(history=memory)
+ 
      vision_chain = load_image_chain | image_model | question_parser
      return vision_chain.invoke({'image_path': f'{image_path}', 'prompt': prompt, 'parser': parser})
  
  
- import gradio as gr
- import logging
- import contextlib
- import io
- import textwrap
- import openai
- from langchain_openai import ChatOpenAI
- import soundfile as sf
- from PIL import Image
- 
- imagePath = ""
- question = ""
- 
- chat = ChatOpenAI()
- i = 0
- conversation_prompt = vision_prompt
- new_photo_uploaded = True
- memory = ConversationBufferMemory(ai_prefix="old Person", human_prefix="stud's terkel")
- 
- def pred(image, input_text, audio):
-     global memory
-     global imagePath
-     global question
-     global i
-     global conversation_prompt
-     global new_photo_uploaded
- 
-     if new_photo_uploaded:
-         memory.save_context({"input": "could you please add the photograph?"}, {"output": "here is the new photograph uploaded"})
-         conversation_prompt = vision_prompt
-         i = 1
-         input_text = ""
-         imagePath = save_image(image)
-         with contextlib.redirect_stdout(io.StringIO()):
-             res = get_image_informations(imagePath, conversation_prompt, memory, new_photo_uploaded)
-         question = res["description"]
-         new_photo_uploaded = False
-         return image, " ", None, "New Photo Uploaded", question, transform_text_to_speech(question)
- 
-     if input_text.strip() != "":
-         i += 1
-         if i >= 2:
-             conversation_prompt = CONVERSATION_EXPANDING_PROMPT
-         if i > 5:
-             conversation_prompt = CONVERSATION_ENDING_PROMPT
-         memory.save_context({"input": question}, {"output": input_text})
-         res = get_image_informations(imagePath, conversation_prompt, memory, new_photo_uploaded)
-         question = res["question"]
-         text = input_text
-         return image, " ", None, text, question, transform_text_to_speech(question)
- 
-     if audio is None:
-         message = "Please wait at least 5 seconds after finishing your recording before submitting it to ensure it is fully captured. Thank you!"
-         return image, " ", None, message, message, transform_text_to_speech(message)
- 
-     i += 1
-     if i >= 2:
-         conversation_prompt = CONVERSATION_EXPANDING_PROMPT
-     if i > 5:
-         conversation_prompt = CONVERSATION_ENDING_PROMPT
- 
-     text = transform_speech_to_text(audio)
-     memory.save_context({"input": question}, {"output": text})
-     res = get_image_informations(imagePath, conversation_prompt, memory, new_photo_uploaded)
-     question = res["question"]
-     return image, " ", None, text, question, transform_text_to_speech(question)
+ 
+ def retrieve_memory(input_filepath):
+     with open(input_filepath, 'r') as f:
+         conversation = f.read()
+     lines = conversation.strip().split('\n')
+     last_reply = None
+ 
+     # Loop through the lines from the end
+     for line in reversed(lines):
+         if re.match(r'(Studs Terkel|Old Person):', line):
+             last_reply = line
+             break
+ 
+     # Determine who made the last reply, split it on the colon, and return a dict
+     if last_reply:
+         speaker, message = last_reply.split(":", 1)
+         return {"speaker": speaker.strip(), "reply": message.strip()}
+     else:
+         return {"speaker": "", "reply": ""}
+ 
+ def load_counts(count_file_path):
+     if os.path.exists(count_file_path):
+         with open(count_file_path, 'r') as f:
+             return json.load(f)
+     return {"count": 0}
+ 
+ def save_counts(count_file_path, counts):
+     with open(count_file_path, 'w') as f:
+         json.dump(counts, f)
+ 
+ def pred(user_name, image_path, audio, user_input):
+     if image_path:
+         image_name = image_path.split("/")[-1]
+         new_image_name = f"{user_name}-{image_name}"
+         new_image_path = f"./{new_image_name}"
+         input_filename = f"{user_name}-{image_name}-conversation-memory.txt"
+         input_filepath = f"./{input_filename}"
+         count_file_path = f"{user_name}-{image_name}-tracking.json"
+ 
+         if not os.path.exists(new_image_path):
+             shutil.copy(image_path, new_image_path)
+             counts = load_counts(count_file_path)
+             counts["count"] += 1
+             save_counts(count_file_path, counts)
+             output = get_prompt(new_image_path, counts["count"], None)
+             res = output["description"]
+             with open(input_filepath, 'w') as f:
+                 f.write("Studs Terkel: " + res)
+             return None, "", "New Photo Uploaded", res, transform_text_to_speech(res, user_name)
+ 
+         else:
+             if audio is not None:
+                 user_input = transform_speech_to_text(audio, user_name)
+ 
+             if user_input.strip() != "":
+                 counts = load_counts(count_file_path)
+                 counts["count"] += 1
+                 save_counts(count_file_path, counts)
+                 with open(input_filepath, 'a') as f:
+                     f.write("\n" + "Old Person: " + user_input)
+                 with open(input_filepath, 'r') as f:
+                     content = f.read()
+                 output = get_prompt(new_image_path, counts["count"], content)
+                 res = output["question"]
+                 with open(input_filepath, 'a') as f:
+                     f.write("\n" + "Studs Terkel: " + res)
+                 return None, "", user_input, res, transform_text_to_speech(res, user_name)
+ 
+             # Decide the path from the contents of the conversation memory.
+             if os.path.exists(input_filepath):
+                 res = retrieve_memory(input_filepath)
+                 if res["speaker"] == "Studs Terkel":
+                     message = "Please supply text input, or wait at least 5 seconds after finishing your recording before submitting it to ensure it is fully captured. Thank you!"
+                     return None, "", "", res["reply"], transform_text_to_speech(message, user_name)
+                 else:
+                     with open(input_filepath, 'a') as f:
+                         f.write("\n" + "Old Person: " + "I want to talk more about this photo")
+                     with open(input_filepath, 'r') as f:
+                         content = f.read()
+                     counts = load_counts(count_file_path)
+                     counts["count"] += 1
+                     save_counts(count_file_path, counts)
+                     output = get_prompt(new_image_path, counts["count"], content)
+                     res = output["question"]
+                     with open(input_filepath, 'a') as f:
+                         f.write("\n" + "Studs Terkel: " + res)
+                     return None, "", "", res, transform_text_to_speech(res, user_name)
+ 
+     message = "Please upload an image"
+     return None, "", message, message, transform_text_to_speech(message, user_name)
  
  # Backend function to clear inputs
- def clear_inputs():
-     global new_photo_uploaded
-     new_photo_uploaded = True
-     return None, "", None, "", "", transform_text_to_speech("Please upload a new photo")
- # Backend function to reset memory.
- def new_session():
-     global new_photo_uploaded
-     global memory
-     new_photo_uploaded = True
-     memory = ConversationBufferMemory(ai_prefix="old Person", human_prefix="stud's Terkel")
-     return None, "", None, "", "", transform_text_to_speech("Ready for a new Session")
+ def clear_inputs(user_name, image_path):
+     image_name = image_path.split("/")[-1]
+     input_filename = f"{user_name}-{image_name}-conversation-memory.txt"
+     input_filepath = f"./{input_filename}"
+     if os.path.exists(input_filepath):
+         with open(input_filepath, 'a') as f:
+             f.write("\n" + "Old Person: " + "new photo uploaded")
+ 
+     return None, None, "", "", "Please upload a new photo", transform_text_to_speech("Please upload a new photo", user_name)
  
  # Gradio Interface
  with gr.Blocks() as demo:
      with gr.Row():
          with gr.Column():
              # Input fields
-             image_input = gr.Image(type="pil", label="Upload Image")
-             text_input = gr.Textbox(label="Input here...")
+             username = gr.Textbox(label="Enter your name")
+             image_input = gr.Image(type="filepath", label="Upload an Image")
              audio_input = gr.Audio(sources="microphone", type="numpy", label="Record Audio")
+             text_input = gr.Textbox(label="Input here...")
  
          with gr.Column():
              # Output fields
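
For orientation, this is how the new retrieve_memory behaves on the transcript format pred writes ("Speaker: text", one turn per line); the transcript below is illustrative, not from the commit:

# Write a tiny transcript in the format pred() produces, then read it back.
with open("demo-conversation-memory.txt", "w") as f:
    f.write("Studs Terkel: Where was this photo taken?\n"
            "Old Person: At my mother's house in 1962.")

print(retrieve_memory("demo-conversation-memory.txt"))
# -> {'speaker': 'Old Person', 'reply': "At my mother's house in 1962."}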
@@ -269,19 +292,14 @@ with gr.Blocks() as demo:
      with gr.Row():
          # Buttons at the bottom
          submit_button = gr.Button("Submit")
-         clear_button = gr.Button("Clear Inputs to upload new photo", elem_id="clear-button")
-         flush_memory = gr.Button("Start Fresh")
+         clear_button = gr.Button("Upload a new Photo", elem_id="clear-button")
  
      # Linking the submit button with the pred function
-     submit_button.click(fn=pred, inputs=[image_input, text_input, audio_input],
-                         outputs=[image_input, text_input, audio_input, user_input_output, stud_output, audio_output])
- 
-     # Linking the clear button with the clear_inputs function
-     clear_button.click(fn=clear_inputs, inputs=None, outputs=[image_input, text_input, audio_input, user_input_output, stud_output, audio_output])
+     submit_button.click(fn=pred, inputs=[username, image_input, audio_input, text_input],
+                         outputs=[audio_input, text_input, user_input_output, stud_output, audio_output])
  
      # Linking the clear button with the clear_inputs function
-     flush_memory.click(fn=new_session, inputs=None, outputs=[image_input, text_input, audio_input, user_input_output, stud_output, audio_output])
+     clear_button.click(fn=clear_inputs, inputs=[username, image_input], outputs=[image_input, audio_input, text_input, user_input_output, stud_output, audio_output])
  
- # # Launch the interface
- demo.queue()
- demo.launch(share=True)
+ # Launch the interface
+ demo.launch(share=True, debug=True)
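
Worth noting: the commit drops demo.queue() while keeping share=True, which a hosted Space typically ignores since the Space already serves the app. If several users hit the demo at once, re-enabling the queue before launch is the usual remedy; a minimal sketch:

# Sketch: restore request queuing so concurrent users are served in order.
demo.queue()
demo.launch(debug=True)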
 
 
 
 
 