jfforero committed
Commit f77d9ca
1 Parent(s): 6a3b778

Update app.py

Files changed (1)
  1. app.py +44 -25
app.py CHANGED
@@ -57,37 +57,39 @@ def predict_emotion_from_audio(wav_filepath):
         print("Error predicting emotion:", e)
         return None

+api_key = os.getenv("DeepAI_api_key")
+
+# Function to generate an image using DeepAI Text to Image API
+
+
+
+
+def generate_image(api_key, text):
+    url = "https://api.deepai.org/api/text2img"
+    headers = {'api-key': api_key}
+    response = requests.post(
+        url,
+        data={'text': text},
+        headers=headers
+    )
+    response_data = response.json()
+    if 'output_url' in response_data:
+        image_url = response_data['output_url']
+        image_response = requests.get(image_url)
+        image = Image.open(BytesIO(image_response.content))
+        return image
+    else:
+        return None
+
+
 # Function to get predictions
 def get_predictions(audio_input):
     emotion_prediction = predict_emotion_from_audio(audio_input)
     transcribed_text = transcribe(audio_input)
     texto_imagen = emotion_prediction + transcribed_text
-    image = image_edition(texto_imagen)  # Call the image edition function
+    image = generate_image(api_key, texto_imagen)
     return emotion_prediction, transcribed_text, image

-# Define the image edition function
-def image_edition(text):
-    try:
-        # Call the DeepAI Image Editor API
-        url = "https://api.deepai.org/api/image-editor"
-        headers = {'api-key': api_key}
-        files = {
-            'image': open('TAI_Images/TerraIncognita3.jpg', 'rb'),  # Replace 'path_to_your_image.jpg' with the actual path to your image file
-            'text': text
-        }
-        response = requests.post(url, headers=headers, files=files)
-        response_data = response.json()
-        if 'output_url' in response_data:
-            image_url = response_data['output_url']
-            image_response = requests.get(image_url)
-            image = Image.open(BytesIO(image_response.content))
-            return image
-        else:
-            return None
-    except Exception as e:
-        print("Error generating image:", e)
-        return None
-
 # Create the Gradio interface
 interface = gr.Interface(
     fn=get_predictions,
@@ -101,4 +103,21 @@ interface = gr.Interface(
     description="Create an AVE using your voice."
 )

-interface.launch()
+
+interface.launch()". Replace "generate_image" by image_edition "// Example posting file picker input image (Browser only):
+document.getElementById('yourFileInputId').addEventListener('change', async function() {
+    const formData = new FormData();
+    formData.append('image', this.files[0]);
+    formData.append('text', this.files[1]);
+
+    const resp = await fetch('https://api.deepai.org/api/image-editor', {
+        method: 'POST',
+        headers: {
+            'api-key': 'dee3e3f2-d5cf-474c-8072-bd6bea47e865'
+        },
+        body: formData
+    });
+
+    const data = await resp.json();
+    console.log(data);
+});
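
For reference, the note and browser-only JavaScript pasted at the end of the new file suggest swapping generate_image for an image_edition helper that posts to the image-editor endpoint. A rough Python sketch of that idea, reusing the endpoint, headers, and response handling from the image_edition function removed in this commit, could look as follows; the env variable name and default image path come from the code above, while the function signature and the data/files split are assumptions made for illustration:

import os
from io import BytesIO

import requests
from PIL import Image

api_key = os.getenv("DeepAI_api_key")  # same env variable the new code reads

def image_edition(api_key, text, image_path="TAI_Images/TerraIncognita3.jpg"):
    # Post a base image plus a text prompt to DeepAI's image-editor endpoint,
    # mirroring the function removed in this commit.
    try:
        with open(image_path, "rb") as image_file:
            response = requests.post(
                "https://api.deepai.org/api/image-editor",
                headers={"api-key": api_key},
                files={"image": image_file},
                data={"text": text},
            )
        response_data = response.json()
        if "output_url" in response_data:
            # Download the edited image and return it as a PIL image,
            # the same kind of result get_predictions() expects.
            image_response = requests.get(response_data["output_url"])
            return Image.open(BytesIO(image_response.content))
        return None
    except Exception as e:
        print("Error generating image:", e)
        return None

With such a helper in place, get_predictions would call image = image_edition(api_key, texto_imagen) instead of generate_image(api_key, texto_imagen).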