import gradio as gr
import os
import json
import requests
from google.cloud import speech_v1p1beta1 as speech
from google.oauth2 import service_account
import base64
import io
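
# Google Cloud Speech-to-Text client setup. The key-file path below is a placeholder and
# must point to a valid service-account JSON key for the client to authenticate.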
path_to_key_file = "/path/to/your/key-file.json"
credentials = service_account.Credentials.from_service_account_file(path_to_key_file)
client = speech.SpeechClient(credentials=credentials)
#Streaming endpoint
API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"
#Huggingface provided GPT4 OpenAI API Key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

def transcribe_audio(audio):
    # Assumes the audio arrives as a base64-encoded data URL ("data:audio/...;base64,<payload>")
    audio_bytes = base64.b64decode(audio.split(",")[1])
    audio_file = io.BytesIO(audio_bytes)
    recognition_audio = speech.RecognitionAudio(content=audio_file.read())
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code="en-US",
    )
    response = client.recognize(config=config, audio=recognition_audio)
    # Return the top alternative of the first recognition result, if any
    for result in response.results:
        return result.alternatives[0].transcript

# Inference function
def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):
    # `inputs` arrives as the raw value of the Audio component; transcribe it to text first
    # (transcribe_audio above assumes a base64 data-URL string)
    inputs = transcribe_audio(inputs)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }
    print(f"system message is ^^ {system_msg}")
    if system_msg.strip() == '':
        initial_message = [{"role": "user", "content": f"{inputs}"},]
        multi_turn_message = []
    else:
        initial_message = [{"role": "system", "content": system_msg},
                           {"role": "user", "content": f"{inputs}"},]
        multi_turn_message = [{"role": "system", "content": system_msg},]
    if chat_counter == 0:
        payload = {
            "model": "gpt-4",
            "messages": initial_message,
            "temperature": 1.0,
            "top_p": 1.0,
            "n": 1,
            "stream": True,
            "presence_penalty": 0,
            "frequency_penalty": 0,
        }
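        # Note: this first-turn payload pins temperature and top_p at 1.0; the slider values
        # only take effect from the second turn onward (see the else-branch payload below).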
print(f"chat_counter - {chat_counter}")
else: #if chat_counter != 0 :
messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},]
for data in chatbot:
user = {}
user["role"] = "user"
user["content"] = data[0]
assistant = {}
assistant["role"] = "assistant"
assistant["content"] = data[1]
messages.append(user)
messages.append(assistant)
temp = {}
temp["role"] = "user"
temp["content"] = inputs
messages.append(temp)
#messages
payload = {
"model": "gpt-4",
"messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}],
"temperature" : temperature, #1.0,
"top_p": top_p, #1.0,
"n" : 1,
"stream": True,
"presence_penalty":0,
"frequency_penalty":0,}
chat_counter+=1
history.append(inputs)
print(f"Logging : payload is - {payload}")
# make a POST request to the API endpoint using the requests.post method, passing in stream=True
response = requests.post(API_URL, headers=headers, json=payload, stream=True)
print(f"Logging : response code - {response}")
token_counter = 0
partial_words = ""
counter=0
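    # Each non-empty streamed line is expected to follow OpenAI's SSE format, e.g.
    #   data: {"choices": [{"delta": {"content": "..."}}], ...}
    # so chunk[6:] below strips the leading "data: " prefix before JSON-decoding.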
    for chunk in response.iter_lines():
        # Skipping first chunk
        if counter == 0:
            counter += 1
            continue
        # check whether each line is non-empty
        if chunk.decode():
            # decode each line as response data is in bytes
            chunk = chunk.decode()
            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]  # convert to a list of (user, assistant) tuples
                token_counter += 1
                yield chat, history, chat_counter, response  # resembles {chatbot: chat, state: history}

# Resetting to blank
def reset_textbox():
    return gr.update(value='')

# To set a component as visible=False
def set_visible_false():
    return gr.update(visible=False)

# To set a component as visible=True
def set_visible_true():
    return gr.update(visible=True)
title = """<h1 align="center">🔥English Teaching Assistant for Primary 6 Students +🚀Gradio-Streaming</h1>"""
#Using info to add additional information about System message in GPT4
system_msg_info = """The system message is used to set the context and behavior of the AI assistant at the beginning of a conversation. It provides instructions to the assistant to help guide its responses. For example, if you want the assistant to act as an expert in history, you can use the system message 'You are an expert in history.'"""
#Modifying existing Gradio Theme
theme = gr.themes.Soft(primary_hue="indigo", secondary_hue="blue", neutral_hue="blue",
text_size=gr.themes.sizes.text_lg)
with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""",
theme=theme) as demo:
gr.HTML(title)
gr.HTML("""<h3 align="center">🔥This application provides AI assistance for various English teaching roles in a Singapore Primary School 🎉🥳🎉🙌</h1>""")
    with gr.Column(elem_id="col_container"):
        # GPT4 API Key is provided by Huggingface
        with gr.Accordion(label="System message:", open=False):
            system_msg = gr.Textbox(label="Instruct the AI Assistant to set its behaviour", info=system_msg_info, value="")
            accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False)
        chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot")
        inputs = gr.Audio(label="Record an input", source="microphone")
        state = gr.State([])
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server")

        # top_p, temperature
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)")
            temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature")
            chat_counter = gr.Number(value=0, visible=False, precision=0)

    # Event handling
    # gr.Audio does not expose a .submit event; with Gradio 3.x, stop_recording fires once the
    # user finishes recording. The raw audio value is passed to predict, which transcribes it.
    inputs.stop_recording(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code])
    b1.click(predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code])
    inputs.stop_recording(set_visible_false, [], [system_msg])
    b1.click(set_visible_false, [], [system_msg])
    inputs.stop_recording(set_visible_true, [], [accordion_msg])
    b1.click(set_visible_true, [], [accordion_msg])
    b1.click(reset_textbox, [], [inputs])
    inputs.stop_recording(reset_textbox, [], [inputs])
with gr.Accordion(label="Examples for System message:", open=False):
gr.Examples(
examples=[["You are an English Oral Coach for Primary 6 students. Help students improve their English speaking skills by providing helpful tips, correcting grammar, and giving advice on pronunciation."],
["You are a Creative Writing Coach for Primary 6 students. Help students enhance their creative writing skills by providing tips on story ideas, improving vocabulary, and suggesting ways to make their stories more engaging."],
["You are a Reading Comprehension Expert for Primary 6 students. Assist students in understanding reading passages and answering comprehension questions by offering strategies for finding the main idea, making inferences, and identifying key details."],
["You are a Grammar Guru for Primary 6 students. Provide guidance to students on proper grammar usage, punctuation, and sentence structure to improve their written and spoken English."],
["You are an English Vocabulary Builder for Primary 6 students. Help students expand their vocabulary by teaching them new words, explaining word meanings, and giving examples of how to use the words in context."],
["You are a Listening Skills Mentor for Primary 6 students. Help students improve their listening skills by providing tips on active listening, understanding different accents, and comprehending spoken English in various contexts."],
["You are a Pronunciation Expert for Primary 6 students. Assist students in refining their English pronunciation by focusing on intonation, stress, and the correct pronunciation of challenging sounds and words."],
],
inputs=system_msg,)
demo.queue(max_size=99, concurrency_count=20).launch(debug=True)