# FatwaAi / openai_intella.py
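# Gradio app that answers questions about the Fatwa UAE archive
# (https://fatwauae.gov.ae/en/services/fatwa-archive). Questions can be typed or
# recorded/uploaded as audio; audio is transcribed with the Intella-Voice API and
# the answer is generated with the OpenAI chat completions API.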
from openai import OpenAI
import gradio as gr
import logging
import requests
import os
import shutil
import time
# Set up basic logging to app.log
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', filename='app.log', filemode='w')
# Configure the OpenAI client; the API key is read from the OPENAI_API_KEY
# environment variable rather than being hardcoded in the source.
client = OpenAI()
def transcribe_with_intella():
    logging.debug("Transcribing with Intella")
    # Intella-Voice API endpoint for adding transcription requests
    add_request_url = "https://api.intella-voice.com/api/accounts/84752622-5158-40ef-bab6-d6928892636b/Requests"
    headers = {'x-ApiToken': '78On8sGTQUOaEuY3jxOx/A=='}
    # Prepare payload and file data; the path matches the file written by process_input
    payload = {'fileName': 'File Name', 'timestamp': '2'}
    with open('saved_audio.mp3', 'rb') as audio_file:
        files = [('file', ('saved_audio.mp3', audio_file, 'audio/mpeg'))]
        # Send POST request to submit the audio file for transcription
        add_response = requests.post(add_request_url, headers=headers, data=payload, files=files)
    logging.debug("Add-request response: %s", add_response.text)
    request_id = add_response.json().get('id')
    # Wait a fixed 20 seconds for the transcription to finish processing
    time.sleep(20)
    # Intella-Voice API endpoint to get the transcription
    get_transcription_url = f"https://api.intella-voice.com/api/accounts/84752622-5158-40ef-bab6-d6928892636b/requests/{request_id}"
    # Send GET request to retrieve the transcription
    get_response = requests.get(get_transcription_url, headers=headers)
    transcription = get_response.json().get('transcriptContent')
    logging.debug("Transcription: %s", transcription)
    return transcription
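# Sketch of a more robust alternative to the fixed 20-second wait above: poll the
# same GET endpoint until 'transcriptContent' is populated. The retry count and
# delay are arbitrary assumptions, and this helper is not wired into the app.
def poll_for_transcription(request_id, headers, retries=10, delay=5):
    url = f"https://api.intella-voice.com/api/accounts/84752622-5158-40ef-bab6-d6928892636b/requests/{request_id}"
    for _ in range(retries):
        # Re-fetch the request until the transcript field is present
        transcription = requests.get(url, headers=headers).json().get('transcriptContent')
        if transcription:
            return transcription
        time.sleep(delay)
    return None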
def answer_question(question):
    logging.debug(f"Answering question: {question}")
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": f"Answer the following question based on the content of https://fatwauae.gov.ae/en/services/fatwa-archive :\n\n{question}"}
        ]
    )
    return response.choices[0].message.content
def process_input(question, audio):
    logging.debug(f"Received question: {question}")
    logging.debug(f"Received audio: {audio}")
    if not question and not audio:
        logging.error("No question or audio provided.")
        return "", "No question provided."
    if question and question.strip():
        answer = answer_question(question)
        return "", answer
    if audio:
        # Copy the recorded/uploaded audio to the path read by transcribe_with_intella
        audio_path = "saved_audio.mp3"
        shutil.copyfile(audio, audio_path)
        logging.debug("Audio saved to %s", audio_path)
        transcribed_text = transcribe_with_intella()
        answer = answer_question(transcribed_text)
        return transcribed_text, answer
    return "", "No question provided."
# Custom CSS to style the interface
css = """
body { font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; }
.gradio_app { max-width: 800px; margin: auto; background: #f4f4f4; padding: 20px; border-radius: 10px; box-shadow: 0 0 10px rgba(0, 0, 0, 0.1); }
.gradio_header { display: flex; align-items: center; justify-content: center; margin-bottom: 20px; }
.gradio_input_text textarea { height: 100px; }
.gradio_output_text { white-space: pre-line; }
.gradio_button { background-color: #106BA3; color: white; }
"""
iface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your question or use the audio button to record or upload it..."),
        gr.Audio(type="filepath", label="Or record/upload your question")
    ],
    outputs=[gr.Textbox(label="Transcribed Question"), gr.Textbox(label="Answer")],
    css=css,
    title="Question Answering Interface",
    description="This interface answers your questions based on the content of the Fatwa UAE website in English. Use the text box or record/upload your audio question.",
    allow_flagging="never"
)
logging.debug("Launching Gradio interface...")
iface.launch(share=True, debug = True)
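# Example of launching the app locally (assumes OPENAI_API_KEY is exported in the shell):
#   export OPENAI_API_KEY="sk-..."
#   python openai_intella.py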