lrenzoha committed on
Commit
c4b3a24
1 Parent(s): 5d2f2ff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -69
app.py CHANGED
@@ -1,70 +1,69 @@
1
- import streamlit as st
2
- from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex
 
 
 
3
  import os
4
- import shutil
5
-
6
-
7
- def save_file(files):
8
- directory_name = 'tmp_docs'
9
-
10
- # Remove existing files in the directory
11
- if os.path.exists(directory_name):
12
- for filename in os.listdir(directory_name):
13
- file_path = os.path.join(directory_name, filename)
14
- try:
15
- if os.path.isfile(file_path):
16
- os.remove(file_path)
17
- except Exception as e:
18
- print(f"Error: {e}")
19
-
20
- # Save the new file with original filename
21
- if files is not None:
22
- for file in files:
23
- file_name = file.name
24
- file_path = os.path.join(directory_name, file_name)
25
- with open(file_path, 'wb') as f:
26
- shutil.copyfileobj(file, f)
27
-
28
-
29
- def ingest(docs_dir):
30
- documents = SimpleDirectoryReader(docs_dir).load_data()
31
- index = GPTVectorStoreIndex.from_documents(documents)
32
- return index
33
-
34
-
35
- def get_answer(index, message):
36
- response = query(index, message)
37
- return [('Chatbot', ''.join(response.response))]
38
-
39
-
40
- def query(index, query_text):
41
- query_engine = index.as_query_engine()
42
- response = query_engine.query(query_text)
43
- return response
44
-
45
-
46
- os.environ['OPENAI_API_KEY'] = st.text_input(
47
- "Enter your OpenAI API key", type="password")
48
-
49
- if os.environ['OPENAI_API_KEY']:
50
- # Initialize chatbot history
51
- chatbot = []
52
-
53
- # Display file upload component
54
- files = st.file_uploader('Upload Files', accept_multiple_files=True)
55
- if files is not None:
56
- save_file(files)
57
-
58
- index = ingest('tmp_docs')
59
-
60
- # Display message input component
61
- message = st.text_input('Enter message')
62
-
63
- # If message is entered, ingest documents and get chatbot response
64
- if message:
65
- chatbot.append(('You', message))
66
- chatbot += get_answer(index, message)
67
-
68
- # Display chat history
69
- st.text_area('Chatbot:', value='\n'.join(
70
- [f'{x[0]}: {x[1]}' for x in chatbot]), height=250)
 
1
import openai
import gradio as gr
import time
import warnings
import os

from gtts import gTTS

# Silence noisy third-party warnings in the demo UI.
warnings.filterwarnings("ignore")

# SECURITY: never hard-code an API key in source. The original embedded a
# live secret here ("sk-..."); that key must be treated as leaked and
# revoked. Read the key from the environment instead.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
12
+
13
def chatgpt_api(input_text):
    """Send *input_text* to the OpenAI chat API and return the reply text.

    Parameters
    ----------
    input_text : str
        The user's message. Falsy input short-circuits to "" — the original
        still issued a paid ChatCompletion call containing only the system
        prompt, which produced a meaningless reply.

    Returns
    -------
    str
        The assistant's reply (empty string for empty input).
    """
    if not input_text:
        return ""

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": input_text},
    ]
    chat_completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=messages
    )
    return chat_completion.choices[0].message.content
27
+
28
+ #ffmpeg -f lavfi -i anullsrc=r=44100:cl=mono -t 10 -q:a 9 -acodec libmp3lame Temp.mp3'
29
+
30
def transcribe(audio, text):
    """Turn a voice recording or typed text into a spoken chatbot reply.

    Parameters
    ----------
    audio : str | None
        Filesystem path to the recorded clip (Gradio microphone input with
        type="filepath"), or None if nothing was recorded.
    text : str
        Fallback typed message, used when no audio is present.

    Returns
    -------
    list
        [user_message, reply_text, "Temp.mp3"] — the transcribed/typed
        input, the chatbot's reply, and the path of the synthesized speech.
    """
    language = "en"

    # Prefer voice input: transcribe it with Whisper; otherwise fall back
    # to the typed text.
    if audio is not None:
        with open(audio, "rb") as transcript:
            prompt = openai.Audio.transcribe("whisper-1", transcript)
        s = prompt["text"]
    else:
        s = text

    # NOTE: the original also made an openai.Completion.create(
    # engine="text-davinci-002", ...) call here whose response was never
    # used — a paid API round-trip with no effect. It has been removed.

    out_result = chatgpt_api(s)

    # Synthesize the reply as speech.
    # NOTE(review): "Temp.mp3" is one fixed path shared by every concurrent
    # session — consider a per-request temporary file.
    audioobj = gTTS(text=out_result, lang=language, slow=False)
    audioobj.save("Temp.mp3")

    return [s, out_result, "Temp.mp3"]
58
+
59
+
60
# Build the Gradio UI: voice or text in; transcription, reply text, and
# spoken reply out. Uses the modern component classes consistently — the
# original mixed gr.Blocks/gr.Markdown with the deprecated gr.inputs.*
# namespace, which is removed in current Gradio releases.
with gr.Blocks() as demo:
    gr.Markdown("Dilip AI")
    input1 = gr.Audio(source="microphone", type="filepath",
                      label="Use your voice to chat")
    input2 = gr.Textbox(lines=7, label="Chat with AI")
    output_1 = gr.Textbox(label="User Input")
    output_2 = gr.Textbox(label="Text Output")
    output_3 = gr.Audio("Temp.mp3", label="Speech Output")
    btn = gr.Button("Run")
    btn.click(fn=transcribe,
              inputs=[input1, input2],
              outputs=[output_1, output_2, output_3])

# NOTE(review): the diff's flattened indentation does not show whether
# launch() was inside the `with` block; launching after it is the
# conventional and equivalent placement.
demo.launch()