radames committed
Commit ebc901f
Parent: 628db1b

replace requests with aiohttp

Files changed (3)
  1. README.md +1 -1
  2. app.py +9 -10
  3. requirements.txt +3 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: ✍️🎥📄
 colorFrom: red
 colorTo: gray
 sdk: gradio
-sdk_version: 3.0.6
+sdk_version: 3.0.9
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -5,6 +5,8 @@ import ffmpeg
 import os
 from pathlib import Path
 import time
+import aiohttp
+import asyncio
 
 
 # Set true if you're using huggingface inference API API https://huggingface.co/inference-api
@@ -15,7 +17,6 @@ MODEL = "facebook/wav2vec2-base-960h"
 # MODEL = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
 if API_BACKEND:
     from dotenv import load_dotenv
-    import requests
     import base64
     import asyncio
     load_dotenv(Path(".env"))
@@ -51,8 +52,8 @@ for file in samples_data:
     SAMPLES.append(sample)
 VIDEOS = list(map(lambda x: [x['video']], SAMPLES))
 
-total_inferences_since_reboot = 333
-total_cuts_since_reboot = 1254
+total_inferences_since_reboot = 415
+total_cuts_since_reboot = 1539
 
 
 async def speech_to_text(video_file_path):
@@ -81,7 +82,7 @@ async def speech_to_text(video_file_path):
     for tries in range(4):
         print(f'Transcribing from API attempt {tries}')
         try:
-            inference_reponse = query_api(audio_memory)
+            inference_reponse = await query_api(audio_memory)
             transcription = inference_reponse["text"].lower()
             timestamps = [[chunk["text"].lower(), chunk["timestamp"][0], chunk["timestamp"][1]]
                           for chunk in inference_reponse['chunks']]
@@ -185,7 +186,7 @@ def cut_timestamps_to_video(video_in, transcription, text_in, timestamps):
     return (tokens, output_video)
 
 
-def query_api(audio_bytes: bytes):
+async def query_api(audio_bytes: bytes):
     """
     Query for Huggingface Inference API for Automatic Speech Recognition task
     """
@@ -199,11 +200,9 @@ def query_api(audio_bytes: bytes):
         "options": {"use_gpu": False}
     }).encode("utf-8")
 
-    response = requests.request(
-        "POST", API_URL, headers=headers, data=payload)
-    json_reponse = json.loads(response.content.decode("utf-8"))
-    return json_reponse
-
+    async with aiohttp.ClientSession() as session:
+        async with session.post(API_URL, headers=headers, data=payload) as response:
+            return await response.json()
 
 # ---- Gradio Layout -----
 video_in = gr.Video(label="Video file")
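The net effect of these hunks: `query_api` becomes a coroutine and the retry loop in `speech_to_text` awaits it, so the HTTP round-trip no longer blocks the event loop that serves other Gradio callbacks. Below is a minimal, self-contained sketch of the new pattern; the `API_URL` value, the token handling, and the `inputs` field of the payload are assumptions reconstructed from context (the diff only shows the `"options"` key and the session/post/return lines).

```python
# Minimal sketch of the pattern this commit introduces: a non-blocking
# POST to the Hugging Face Inference API via aiohttp. API_URL, HF_TOKEN,
# and the "inputs" field are assumptions, not shown in the diff.
import asyncio
import base64
import json

import aiohttp

API_URL = "https://api-inference.huggingface.co/models/facebook/wav2vec2-base-960h"  # assumed
HF_TOKEN = "hf_xxx"  # hypothetical; the real app loads its token from .env


async def query_api(audio_bytes: bytes):
    payload = json.dumps({
        "inputs": base64.b64encode(audio_bytes).decode("utf-8"),  # assumed field name
        "options": {"use_gpu": False},
    }).encode("utf-8")
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    # Unlike the old blocking requests.request("POST", ...), this await
    # yields control while the request is in flight, so other coroutines
    # (e.g. concurrent Gradio callbacks) keep running.
    async with aiohttp.ClientSession() as session:
        async with session.post(API_URL, headers=headers, data=payload) as response:
            return await response.json()


async def main():
    with open("sample.wav", "rb") as f:
        print(await query_api(f.read()))


if __name__ == "__main__":
    asyncio.run(main())
```

Opening a fresh `ClientSession` per call keeps the sketch (and the committed code) simple; a long-lived session shared across requests would amortize connection setup if call volume grew.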
requirements.txt CHANGED
@@ -1,7 +1,8 @@
 torch
 transformers
-gradio==3.0.6
+gradio==3.0.9
 datasets
 librosa
 ffmpeg-python
-python-dotenv
+python-dotenv
+aiohttp