'''
This script transcribes microphone audio with Whisper and calls an OpenAI
completion model to predict the next few words in a conversation.
'''
import os
import openai
import gradio as gr

# Install Whisper at runtime (common Hugging Face Spaces workaround), then import it
os.system("pip install git+https://github.com/openai/whisper.git")
import whisper
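# Note: the code below assumes the legacy (pre-1.0) openai Python SDK, which
# exposes openai.Completion.create, and the Gradio 3.x Blocks API; newer
# releases of either library changed these interfaces.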

EXAMPLE_PROMPT = """This is a tool for helping someone with memory issues remember the next word. 
The predictions follow a few rules:
1) The predictions are suggestions of ways to continue the transcript as if someone forgot what the next word was.
2) The predictions do not repeat themselves.
3) The predictions focus on suggesting nouns, adjectives, and verbs.
4) The predictions are related to the context in the transcript.
    
EXAMPLES:
Transcript: Tomorrow night we're going out to 
Prediction: The Movies, A Restaurant, A Baseball Game, The Theater, A Party for a friend   
Transcript: I would like to order a cheeseburger with a side of
Prediction: French fries, Milkshake, Apple slices, Side salad, Extra ketchup 
Transcript: My friend Savanah is
Prediction: An electrical engineer, A marine biologist, A classical musician 
Transcript: I need to buy a birthday
Prediction: Present, Gift, Cake, Card
Transcript: """

# Load the Whisper ASR model ("tiny" is the smallest and fastest variant)
asr_model = whisper.load_model("tiny")

# The OpenAI API key is read from the environment (set as a Space secret)
openai.api_key = os.environ["Openai_APIkey"]

# Transcribe function
def transcribe(audio_file):
    print("Transcribing")
    transcription = asr_model.transcribe(audio_file)["text"]
    return transcription

def inference(audio, prompt, model, temperature, latest):
    # Transcribe the recorded audio with Whisper
    transcript = transcribe(audio)

    # Append the new utterance to the running conversation state
    latest.append(transcript)

    # Build the completion prompt: few-shot instructions plus the live transcript
    text = prompt + transcript + "\nPrediction: "

    # Request five short completions from the selected model
    response = openai.Completion.create(
        model=model,
        prompt=text,
        temperature=temperature,
        max_tokens=8,
        n=5)

    # Strip newlines from each completion and collect the suggestions
    infers = [choice["text"].replace("\n", "") for choice in response["choices"]]

    return transcript, ", ".join(infers), " ".join(latest)


# Gradio UI: record audio from the microphone, choose a model, and show predictions
with gr.Blocks() as face:
    convoState = gr.State([""])
    with gr.Row():
        with gr.Column():
            audio = gr.Audio(source="microphone", type="filepath")
            promptText = gr.Textbox(lines=15, placeholder="Enter a prompt here")
            # Note: these are Completion-endpoint models; chat-only models such as gpt-3.5-turbo would need openai.ChatCompletion instead
            dropChoice = gr.Dropdown(choices=["text-ada-001", "text-davinci-002", "text-davinci-003", "gpt-3.5-turbo"], label="Model")
            sliderChoice = gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.1, label="Temperature")
            transcribe_btn = gr.Button(value="Transcribe")
        with gr.Column():
            script = gr.Textbox(label="Transcribed text")
            options = gr.Textbox(label="Predictions")
            latestConvo = gr.Textbox(label="Running conversation")
    transcribe_btn.click(fn=inference, inputs=[audio, promptText, dropChoice, sliderChoice, convoState], outputs=[script, options, latestConvo])
    examples = gr.Examples(examples=["Sedan, Truck, SUV", "Dalmatian, Shepherd, Lab, Mutt"], inputs=[options])

face.launch()