Collado27 committed on
Commit
2827b8a
1 Parent(s): 6c917c8

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +22 -6
  2. requirements.txt +1 -0
app.py CHANGED
@@ -9,6 +9,11 @@ from langchain_openai import OpenAIEmbeddings
9
  from langchain_openai import ChatOpenAI
10
  from langchain.schema import StrOutputParser
11
  from langchain.prompts import PromptTemplate
 
 
 
 
 
12
 
13
 
14
 
@@ -18,7 +23,7 @@ secret_string= os.getenv('OPENAI_API_KEY')
18
  #create simple langchain QA from question of user
19
 
20
  engine = "ft:gpt-3.5-turbo-1106:sinensia:movies:9PDh7plA"
21
-
22
  llm = ChatOpenAI(model=engine, temperature=0, openai_api_key=secret_string)
23
 
24
  prompt = PromptTemplate.from_template(
@@ -36,13 +41,24 @@ answer:
36
 
37
  chain = prompt | llm | StrOutputParser()
38
 
39
- #make a sinmple gradio interface to interact with the model
40
- import gradio as gr
 
41
 
42
- def ask_question(question):
 
 
 
 
 
 
 
 
43
  return chain.invoke({"input": question})
44
 
45
- iface = gr.Interface(fn=ask_question, inputs="text", outputs="text")
 
46
 
47
- iface.launch()
48
 
 
 
9
  from langchain_openai import ChatOpenAI
10
  from langchain.schema import StrOutputParser
11
  from langchain.prompts import PromptTemplate
12
+ from openai import OpenAI
13
+ from transformers import pipeline
14
+ import numpy as np
15
+
16
+
17
 
18
 
19
 
 
23
  #create simple langchain QA from question of user
24
 
25
  engine = "ft:gpt-3.5-turbo-1106:sinensia:movies:9PDh7plA"
26
+ client = OpenAI(api_key=secret_string)
27
  llm = ChatOpenAI(model=engine, temperature=0, openai_api_key=secret_string)
28
 
29
  prompt = PromptTemplate.from_template(
 
41
 
42
  chain = prompt | llm | StrOutputParser()
43
 
44
+ transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
45
+
46
+ #make a simple gradio interface that can input text or voice and output text
47
 
48
def voice_to_text(audio):
    """Transcribe a Gradio audio input and answer the transcribed question.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        ``(sampling_rate, samples)`` pair as produced by ``gr.Audio``.
        Samples may be int PCM and may be stereo ``(n, 2)``.

    Returns
    -------
    str
        The chain's answer to the transcribed question.
    """
    sr, y = audio
    # Whisper expects a mono signal; collapse stereo to one channel first.
    if y.ndim > 1:
        y = y.mean(axis=1)
    y = y.astype(np.float32)
    # Normalize to [-1, 1]. Guard against silent input (all zeros): the
    # original code divided by zero here, turning the signal into NaNs.
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak
    question = transcriber({"sampling_rate": sr, "raw": y})["text"]
    return answer_question(question)
55
+
56
def answer_question(question):
    """Run *question* through the LangChain QA pipeline and return the answer text."""
    payload = {"input": question}
    return chain.invoke(payload)
58
 
59
# Interface with two inputs (text box and microphone) and one text output.
import gradio as gr


def _dispatch(text, audio):
    """Route to text or voice handling, preferring the typed question.

    The original lambda called ``voice_to_text(audio)`` whenever the text
    box was empty, crashing with a TypeError when no audio was recorded
    either (``audio is None``). Handle that case explicitly instead.
    """
    if text:
        return answer_question(text)
    if audio is not None:
        return voice_to_text(audio)
    return "Please type a question or record one."


iface = gr.Interface(
    fn=_dispatch,
    inputs=[
        gr.Textbox(lines=2, placeholder="Ask a question about movies"),
        gr.Audio(),
    ],
    outputs="text",
)

iface.launch()
requirements.txt CHANGED
@@ -31,4 +31,5 @@ PyPika==0.48.9
31
  pyproject_hooks==1.1.0
32
  python-dotenv==1.0.1
33
  python-multipart==0.0.9
 
34
 
 
31
  pyproject_hooks==1.1.0
32
  python-dotenv==1.0.1
33
  python-multipart==0.0.9
34
+ tf-keras
35