File size: 2,884 Bytes
92a8388
30124af
 
 
92a8388
 
 
 
 
 
4ba6253
30124af
92a8388
30124af
 
 
92a8388
 
30124af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92a8388
 
 
 
 
30124af
 
 
 
 
 
 
 
266dced
30124af
 
 
 
 
 
 
 
 
 
d5b0ac4
 
 
 
30124af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import os

import numpy as np
import openai
import streamlit as st
# import sounddevice as sd
# import wavio
from audio_recorder_streamlit import audio_recorder
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
 
# --- Model and session-memory setup -----------------------------------------
# SECURITY FIX: the OpenAI API key was previously hardcoded in source (a leaked
# credential). Read it from the environment instead; set OPENAI_API_KEY before
# running the app (or wire this to st.secrets in deployment).
_openai_api_key = os.environ.get("OPENAI_API_KEY", "")

# Deterministic chat model (temperature=0.0) used by the ConversationChain below.
llm = ChatOpenAI(openai_api_key=_openai_api_key, temperature=0.0)
openai.api_key = _openai_api_key

# Per-browser-session sliding window of the last 5 conversation turns.
# Stored in session_state so it survives Streamlit's script re-execution.
if "memory" not in st.session_state:
    st.session_state["memory"] = ConversationBufferWindowMemory(k=5)
# def record(duration=5, fs=48000):
#     sd.default.samplerate = fs
#     sd.default.channels = 1
#     myrecording = sd.rec(int(duration * fs))
#     sd.wait(duration)
#     return myrecording

# def read_audio(file):
#     with open(file, "rb") as audio_file:
#         audio_bytes = audio_file.read()
#     return audio_bytes

# def save_record(path_myrecording, myrecording, fs):
#     wavio.write(path_myrecording, myrecording, fs, sampwidth=2)
#     return None

st.title("Chat with AI")

# Chat transcript: an (n, 2) string array of (user_message, assistant_reply)
# rows, persisted across reruns via session_state.
if "input" not in st.session_state:
    st.session_state["input"] = np.empty((0, 2), str)

# Sidebar: microphone recorder widget plus a placeholder button.
with st.sidebar:
    sidebar_cols = st.columns(7)
    with sidebar_cols[0]:
        recorded = audio_recorder(text="", icon_size="2x")
        if recorded:
            st.audio(recorded, format="audio/wav")
    with sidebar_cols[1]:
        st.button("🚧", type="primary")

# --- Chat UI ----------------------------------------------------------------
with st.container():
    # FIX: render prior turns unconditionally. Previously this loop ran only
    # when a new message was submitted, so the transcript vanished on any
    # rerun (e.g. a sidebar interaction) that had no fresh input.
    for past_user, past_assistant in st.session_state["input"]:
        with st.chat_message("user"):
            st.write(past_user)
        with st.chat_message("assistant"):
            st.write(past_assistant)

    user_input = st.chat_input("Say something")
    if user_input:
        with st.chat_message("user"):
            st.write(user_input)

        # The chain is rebuilt each rerun; the windowed memory lives in
        # session_state, so conversational context still carries over.
        conversation = ConversationChain(
            llm=llm,
            memory=st.session_state["memory"],
            verbose=False,
        )
        answer = conversation.predict(input=user_input)  # typo fix: was "anwser"

        # Append the new (user, assistant) turn to the persisted transcript.
        st.session_state["input"] = np.append(
            st.session_state["input"], np.array([[user_input, answer]]), axis=0
        )
        with st.chat_message("assistant"):
            st.write(answer)


# st.button("🚧",type="primary")
# if st.button("Click to add"):
#     record_state = st.text("recording....")
#     duration = 5  # seconds
#     fs = 48000
#     myrecording = record(duration, fs)
#     record_state.text(f"Saving sample as record.mp3")

#     path_myrecording = f"./record.mp3"
    
#     save_record(path_myrecording, myrecording, fs)
#     audio_file= open("record.mp3", "rb")
#     transcript = openai.Audio.transcribe("whisper-1", audio_file)
#     # transcript = openai.Audio.translate("whisper-1", audio_file)
#     record_state.text(transcript.text)

#     st.audio(read_audio(path_myrecording))