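# Streamlit app: "Teacher session evaluation".
# Pipeline: upload a session recording -> extract the audio track with moviepy ->
# translate the speech to English with Whisper -> ask GPT-4 for a JSON report on
# sentiment, tone, contact-info sharing, and academic vs. non-academic content.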
import json
import os
import tempfile

import moviepy.editor as mp
import streamlit as st
from openai import OpenAI

# The client reads OPENAI_API_KEY from the environment; set it there (for example
# via the Space's secrets) rather than hardcoding the key in the source.
client = OpenAI()
def openai1(text1):
    """Ask GPT-4 to evaluate the transcript and return its JSON report as a string."""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {
                "role": "system",
                "content": "Analyze the teacher-student conversation. Identify the sentiment, whether it contains sexual content, whether it contains abuse, whether it is related to academics (give the share of non-academic conversation as a percentage), the tone, any phone number, the teacher's name if mentioned, student feedback if mentioned, and whether the conversation is academic or non-academic. Also classify teacher behavior as positive or negative.\n<<REMEMBER>>\nGive the output in JSON. Score each item 0 or 1: 1 if negative, 0 if positive; 1 if the tone is aggressive; 1 if a phone number or email is shared. Report the academic measure as a percentage. Include the total score along with the academic percentage."
            },
            {
                "role": "user",
                "content": text1
            }
        ],
        temperature=1,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return response.choices[0].message.content
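
# Sketch (assumption, not part of the original flow): the system prompt asks for a
# JSON report, so the model's reply could be parsed before display. GPT-4 is not
# guaranteed to emit valid JSON, hence the fallback to the raw string.
def parse_report(raw_report):
    try:
        return json.loads(raw_report)
    except json.JSONDecodeError:
        return {"raw": raw_report}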
st.write("# Teacher session evaluation! 👋")
f = st.file_uploader("Upload file")
if f is not None:
    # Persist the upload to a temporary file so moviepy can open it by path.
    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(f.read())
    tfile.flush()

    # Extract the audio track from the uploaded video.
    # (Optionally cap long sessions, e.g. clip.subclip(0, 60), before writing.)
    clip = mp.VideoFileClip(tfile.name)
    clip.audio.write_audiofile("theaudio.mp3")

    # Translate/transcribe the audio to English with Whisper.
    with open("theaudio.mp3", "rb") as audio_file:
        transcript_english = client.audio.translations.create(
            model="whisper-1",
            file=audio_file,
            temperature=0
        ).text
    print(transcript_english)

    # Run the GPT-4 evaluation and show the report.
    st.text_area("Report", value=openai1(transcript_english))
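
    # Housekeeping sketch (assumption, not in the original code): remove the
    # temporary video and the extracted audio so repeated uploads don't pile up.
    os.remove(tfile.name)
    os.remove("theaudio.mp3")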