eaglelandsonce committed on
Commit 9d7e9d0
1 Parent(s): 4fa5a46

Update app.py

Files changed (1)
app.py +49 -3
app.py CHANGED
@@ -16,23 +16,30 @@ import numpy as np
 import nltk
 nltk.download('punkt')
 
-# Constants for image and audio generation
+# Image Variables
 USER_ID_IMG = 'openai'
 APP_ID_IMG = 'dall-e'
 MODEL_ID_IMG = 'dall-e-3'
 MODEL_VERSION_ID_IMG = 'dc9dcb6ee67543cebc0b9a025861b868'
 
+# Audio variables
 USER_ID_AUDIO = 'eleven-labs'
 APP_ID_AUDIO = 'audio-generation'
 MODEL_ID_AUDIO = 'speech-synthesis'
 MODEL_VERSION_ID_AUDIO = 'f2cead3a965f4c419a61a4a9b501095c'
 
-# Renamed variables
+# Object Detection variables
 USER_ID_OBJECT = 'clarifai'
 APP_ID_OBJECT = 'main'
 MODEL_ID_OBJECT = 'general-image-detection'
 MODEL_VERSION_ID_OBJECT = '1580bb1932594c93b7e2e04456af7c6f'
 
+# Vision variables
+USER_ID_GPT4 = 'openai'
+APP_ID_GPT4 = 'chat-completion'
+MODEL_ID_GPT4 = 'openai-gpt-4-vision'
+MODEL_VERSION_ID_GPT4 = '266df29bc09843e0aee9b7bf723c03c2'
+
 # Retrieve PAT from environment variable
 PAT = os.getenv('CLARIFAI_PAT')
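Everything below authenticates through this PAT in the gRPC metadata, so an unset CLARIFAI_PAT only surfaces later as a failed model call. A minimal fail-fast guard, sketched under the assumption that the app can simply halt on a missing key (the message text and placement are illustrative, not part of this commit):

import os
import streamlit as st

PAT = os.getenv('CLARIFAI_PAT')
if not PAT:
    # Stop the app early instead of letting every PostModelOutputs call fail with an auth error.
    st.error("CLARIFAI_PAT environment variable is not set.")
    st.stop()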
 
@@ -141,8 +148,43 @@ def get_image_concepts(image_bytes):
 
     return post_model_outputs_response.outputs[0].data.regions
 
+# GPT4 Image Description Creation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+# Method to process the text output
+def process_text_output(text_output):
+    st.write("Processed Text Output:")
+    st.write(text_output)
+
+def analyze_image(uploaded_file):
+
+    channel = ClarifaiChannel.get_grpc_channel()
+    stub = service_pb2_grpc.V2Stub(channel)
+    metadata = (('authorization', 'Key ' + PAT),)
+    userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID_GPT4, app_id=APP_ID_GPT4)
+
+    try:
+        bytes_data = uploaded_file.getvalue()
+
+        response = stub.PostModelOutputs(
+            service_pb2.PostModelOutputsRequest(
+                user_app_id=userDataObject,
+                model_id=MODEL_ID_GPT4,
+                version_id=MODEL_VERSION_ID_GPT4,
+                inputs=[resources_pb2.Input(data=resources_pb2.Data(image=resources_pb2.Image(base64=bytes_data)))]
+            ),
+            metadata=metadata
+        )
+
+        if response.status.code != SUCCESS:
+            st.error("Error in API call: " + response.status.description)
+            return None
+
+        return response.outputs[0].data.text.raw
+
+    except Exception as e:
+        st.error(f"An error occurred: {e}")
+        return None
 
-
 
 # Function to split text into sentences and then chunk them
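Two things worth flagging in this hunk: the bare SUCCESS presumably relies on an earlier `from clarifai_grpc.grpc.api.status.status_code_pb2 import SUCCESS` in the unchanged part of app.py, and analyze_image calls uploaded_file.getvalue(), which exists on a Streamlit UploadedFile but not on the plain file paths that the tab3 hunk below passes in. A hedged sketch of a normalizing helper (the name to_image_bytes is hypothetical, not in the commit):

def to_image_bytes(source):
    # Accept either a Streamlit UploadedFile / BytesIO or a filesystem path,
    # returning raw bytes for resources_pb2.Image(base64=...).
    if hasattr(source, 'getvalue'):
        return source.getvalue()
    with open(source, 'rb') as f:
        return f.read()

With this in place, `bytes_data = to_image_bytes(uploaded_file)` would let the function serve both the upload flow and the saved image_paths used in tab3.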
 
@@ -420,6 +462,10 @@ with tab3:
 
 
     st.image(st.session_state['image_paths'][image_index])
+
+    image_text_output = analyze_image(st.session_state['image_paths'][image_index])
+
+    st.write(image_text_output)
 
     # Button for actions related to the selected image
     if st.button("Details", key=f"details_{image_index}"):
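Because Streamlit reruns the whole script on every interaction, this hunk re-sends the image to the GPT-4 Vision model each time the tab redraws, including when the Details button is clicked. One way to pay for each analysis only once, sketched with the standard st.session_state pattern (the analysis_ key format is illustrative, not part of the commit):

cache_key = f"analysis_{image_index}"
if cache_key not in st.session_state:
    # Call the vision model once per image; later reruns reuse the stored text.
    st.session_state[cache_key] = analyze_image(st.session_state['image_paths'][image_index])
st.write(st.session_state[cache_key])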
 