import gradio as gr
import openai
import os
import HongWenData # Importing the HongWenData module
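# HongWenData is assumed (from its uses below) to expose:
#   strategy_text - dict mapping a strategy name (e.g. "TREES") to a (strategy, explanation) tuple
#   description   - the picture description string referenced for the first question
#   questions     - the list of oral-exam questions offered in the Radio component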
import base64
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
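# Minimal guard (a sketch, assuming the key is supplied via the environment, e.g. a Space secret):
# fail early with a clear message rather than on the first API call.
if not OPENAI_API_KEY:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")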
def image_to_base64(img_path):
    # Read the image file and return its contents as a base64-encoded string
    with open(img_path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode('utf-8')

img_base64 = image_to_base64("HongWenSBC.JPG")
img_html = f'<img src="data:image/jpeg;base64,{img_base64}" alt="SBC6" width="300" style="display: block; margin: auto;"/>'
def predict(question_choice, audio):
    # Transcribe the audio using Whisper
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    message = transcript["text"]  # The transcribed text from the student's spoken answer

    # Generate the system message based on the chosen question
    strategy, explanation = HongWenData.strategy_text["TREES"]
    # Reference to the picture description from HongWenData.py
    picture_description = HongWenData.description

    # Construct the conversation with the system prompt and the user's transcribed message
    conversation = [
        {
            "role": "system",
            "content": f"""
            You are an expert English Language Teacher in a Singapore primary school, directly guiding a Primary 6 student in Singapore.
            The student is answering the question: '{question_choice}'.
            For the first question, ensure your feedback refers to the picture description provided:
            {picture_description}
            Point out areas where they did well and where they can improve, following the {strategy}.
            Encourage the use of sophisticated vocabulary and expressions.
            For the second and third questions, the picture is not relevant, so the student should not refer to it in their response.
            {explanation}
            The feedback should be in the second person, addressing the student directly.
            """
        },
        {"role": "user", "content": message}
    ]

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=conversation,
        temperature=0.6,
        max_tokens=1000,  # Limiting the response to 1,000 tokens
        stream=True
    )

    # Stream the reply back to the output textbox as chunks arrive
    partial_message = ""
    for chunk in response:
        if len(chunk['choices'][0]['delta']) != 0:
            partial_message = partial_message + chunk['choices'][0]['delta']['content']
            yield partial_message
# Gradio Interface
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Radio(HongWenData.questions, label="Choose a question", value=HongWenData.questions[0]),  # Radio buttons for the question choice (Gradio 3.x uses `value=`, not `default=`)
        gr.inputs.Audio(source="microphone", type="filepath")  # Audio input recorded from the microphone
    ],
    outputs=gr.inputs.Textbox(),  # Using inputs.Textbox as an output to make it editable
    description=img_html,
    css="custom.css"  # Link to the custom CSS file
)
iface.queue(max_size=99, concurrency_count=40).launch(debug=True)