'''
Transcribes microphone audio with Whisper, then calls an OpenAI completion
model to suggest the next few words of the transcript. Served via Gradio.
'''
import os

import gradio as gr
import openai
import whisper
EXAMPLE_PROMPT = """This is a tool for helping someone with memory issues remember the next word.
The predictions follow a few rules:
1) The predictions are suggestions of ways to continue the transcript as if someone forgot what the next word was.
2) The predictions do not repeat themselves.
3) The predictions focus on suggesting nouns, adjectives, and verbs.
4) The predictions are related to the context in the transcript.
EXAMPLES:
Transcript: Tomorrow night we're going out to
Prediction: The Movies, A Restaurant, A Baseball Game, The Theater, A Party for a friend
Transcript: I would like to order a cheeseburger with a side of
Prediction: French fries, Milkshake, Apple slices, Side salad, Extra ketchup
Transcript: My friend Savanah is
Prediction: An electrical engineer, A marine biologist, A classical musician
Transcript: I need to buy a birthday
Prediction: Present, Gift, Cake, Card
Transcript: """
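# EXAMPLE_PROMPT is a few-shot template: the rules above plus four worked
# examples, ending in an open "Transcript: " so the user's speech can be
# appended before the model is asked for a "Prediction: " line.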
# Load the Whisper speech-to-text model ("tiny" trades accuracy for speed)
asr_model = whisper.load_model("tiny")
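# The OpenAI key is read from the environment (e.g. a Space secret named "Openai_APIkey")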
openai.api_key = os.environ["Openai_APIkey"]
# Transcribe an audio file to text with Whisper
def transcribe(audio_file):
    print("Transcribing")
    transcription = asr_model.transcribe(audio_file)["text"]
    return transcription
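# Example (sketch): calling transcribe() directly on a local recording,
# assuming a file such as "sample.m4a" sits next to this script:
#   print(transcribe("sample.m4a"))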
def debug_inference(audio, prompt, model, temperature, state=""):
    # Transcribe with Whisper, then ask the completion model for suggestions
    print("The audio is:", audio)
    transcript = transcribe(audio)
    text = prompt + transcript + "\nPrediction: "
    response = openai.Completion.create(
        model=model,
        prompt=text,
        temperature=temperature,
        max_tokens=8,
        n=5)
    # Collect the five completions, stripping stray newlines
    infers = []
    for choice in response['choices']:
        print("Completion:", choice['text'])
        infers.append(choice['text'].replace("\n", ""))
    return transcript, state, infers, text
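# For reference (abridged sketch, not the full payload): the legacy Completion
# endpoint returns a dict-like object such as
#   {"choices": [{"text": " The Movies, A Restaurant", "index": 0, ...}, ...]}
# which is why the loop above reads choice['text'].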
# Build the Gradio UI: microphone audio in, transcript and predictions out
gr.Interface(
    fn=debug_inference,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath"),
        gr.inputs.Textbox(lines=15, placeholder="Enter a prompt here"),
        # Note: "gpt-3.5-turbo" is a chat model; the legacy Completion endpoint rejects it
        gr.inputs.Dropdown(["text-ada-001", "text-davinci-002", "text-davinci-003", "gpt-3.5-turbo"], label="Model"),
        gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.8, step=0.1, label="Temperature"),
        "state",
    ],
    outputs=["textbox", "state", "textbox", "textbox"],
    # examples=[["example_in-the-mood-to-eat.m4a", EXAMPLE_PROMPT, "text-ada-001", 0.8, ""], ["", "", "", 0.9, ""]],
    live=False).launch()