# Hugging Face Space: VAD-gated speech transcription (Silero VAD + OpenAI Whisper)
# and text translation (custom Lambda endpoint with an OpenAI fallback).
| import os | |
| from openai import OpenAI | |
| import subprocess | |
| import torch | |
# Load the Silero VAD model (and its helper utilities) from torch.hub at
# import time; `model`/`utils` are consumed by apply_Vad below.
# NOTE(review): force_reload=True re-downloads the repo on every start — it
# requires network access and adds startup latency; confirm this is intended.
model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
                              model='silero_vad',
                              force_reload=True,
                              onnx=False)
def apply_Vad(wav_file):
    """Run Silero VAD on *wav_file*, keeping only the detected speech.

    The input file is overwritten in place with the concatenated speech
    chunks (read at 16 kHz).

    Args:
        wav_file: Path to a WAV file.

    Returns:
        The same path when speech was found; ``None`` when the file contains
        no speech or any step of the VAD pipeline fails.
    """
    try:
        torch.set_num_threads(1)
        SAMPLING_RATE = 16000
        # Unpack the helper callables shipped with the hub-loaded model.
        (get_speech_timestamps,
         save_audio,
         read_audio,
         VADIterator,
         collect_chunks) = utils
        wav = read_audio(wav_file, sampling_rate=SAMPLING_RATE)
        speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=SAMPLING_RATE)
        # Fix: bail out BEFORE rewriting the file when no speech was found.
        # The original called collect_chunks/save_audio unconditionally and
        # relied on the broad except (or a stale overwrite) for the empty case.
        if not speech_timestamps:
            return None
        save_audio(wav_file,
                   collect_chunks(speech_timestamps, wav),
                   sampling_rate=SAMPLING_RATE)
        return wav_file
    except Exception as e:
        # Best-effort boundary: any VAD failure is reported and treated as
        # "no usable speech" so the caller can skip transcription.
        print(e)
        return None
def transcribe_speech_local(wav_file, language):
    """Transcribe a WAV file with OpenAI Whisper after VAD preprocessing.

    Args:
        wav_file: Path to the audio file; apply_Vad overwrites it with the
            speech-only audio first.
        language: Locale tag such as "en-US"; only the primary subtag
            (before the first "-") is sent to the API.

    Returns:
        The transcript text, or "" when VAD found no speech in the file.
    """
    # (Removed a redundant `import os` that preceded this def — os is already
    # imported at the top of the file.)
    client = OpenAI()
    wav_file = apply_Vad(wav_file)
    if wav_file is None:
        return ""
    # Fix: close the file handle deterministically; the original opened the
    # file and never closed it.
    with open(wav_file, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            response_format="text",
            language=language.split("-")[0],
        )
    print(transcript, 'transcript')
    return transcript
| # Example usage | |
| import requests | |
def translation_service(text, target_language):
    """Translate *text* into *target_language*.

    Tries a custom Lambda translation endpoint first; on any failure falls
    back to OpenAI gpt-3.5-turbo.

    Args:
        text: Source text (auto-detected language on the primary endpoint).
        target_language: Target language code/name passed to both backends.

    Returns:
        The translated text, "" for empty input, or the literal string
        "not able to translate this time" when both backends fail.
    """
    # Fix: short-circuit empty input before any network call; the original
    # only performed this check inside the OpenAI fallback path.
    if text is None or text == "":
        return ""
    params = {
        "source_lang": "auto",
        "target_lang": target_language,
        "partner": "auto",
        "text": text,
    }
    headers = {'Content-Type': 'application/json'}
    url = "https://ackc2zpw4agprph2s6qi4fzttu0kzzhh.lambda-url.ap-northeast-2.on.aws/translate"
    try:
        # Fix: add a timeout so a hung endpoint cannot block the caller
        # forever, and surface non-2xx responses so they route to the
        # fallback instead of producing a confusing KeyError.
        response = requests.post(url, headers=headers, json=params, timeout=30)
        response.raise_for_status()
        return response.json()['translated_text']
    except Exception as e:
        print('error in custom model', e)
    # Fallback: ask gpt-3.5-turbo for the translation.
    try:
        client = OpenAI()
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": f"You will be provided with a sentence in English, and your task is to translate it into {target_language}."},
                {"role": "user", "content": text},
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print('openAI error', e)
        return "not able to translate this time"