import streamlit as st
from gradio_client import Client
from st_audiorec import st_audiorec


# Constants
TITLE = "Llama2 70B Chatbot"
DESCRIPTION = """
This Space demonstrates model [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) by Meta, a Llama 2 model with 70B parameters fine-tuned for chat instructions. 
| Model | Llama2 | Llama2-hf | Llama2-chat | Llama2-chat-hf |
|---|---|---|---|---|
| 70B | [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) |

---
"""

# Initialize client
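# Cache the remote client so predict() below reuses one connection per session;
# get_client is a helper sketch that assumes the gradio_client connection can
# safely be reused across Streamlit reruns (st.cache_resource holds a single
# shared instance for the app's lifetime).
@st.cache_resource
def get_client():
    return Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")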


with st.sidebar:
    # system_prompt_side = st.text_input("Optional system prompt:")
    temperature_side = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
    max_new_tokens_side = st.slider("Max new tokens", min_value=0, max_value=4096, value=4096, step=64)
    # top_p_side = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    # repetition_penalty_side = st.slider("Repetition penalty", min_value=1.0, max_value=2.0, value=1.2, step=0.05)

# Prediction function
def predict(message, system_prompt='', temperature=0.7, max_new_tokens=4096, top_p=0.5, repetition_penalty=1.2):
    with st.status("Starting client"):
        client = get_client()
        st.write("Client ready")
    with st.status("Requesting Llama-2"):
        st.write("Requesting API")
        response = client.predict(
            message,             # str in 'Message' Textbox component
            system_prompt,       # str in 'Optional system prompt' Textbox component
            temperature,         # float between 0.0 and 1.0
            max_new_tokens,      # int between 0 and 4096
            top_p,               # float between 0.0 and 1.0
            repetition_penalty,  # float between 1.0 and 2.0
            api_name="/chat_1"
        )
        st.write("Done")
    return response

# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)


if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("πŸ§‘β€πŸ’»" if message["role"] == 'human' else 'πŸ¦™')):
        st.markdown(message["content"])
        
# React to user input
if prompt := st.chat_input("Ask Llama-2-70b anything..."):
    # Display user message in chat message container
    st.chat_message("human", avatar="🧑‍💻").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    response = predict(message=prompt, temperature=temperature_side, max_new_tokens=max_new_tokens_side)
    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='πŸ¦™'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
    
wav_audio_data = st_audiorec()

if wav_audio_data is not None:
    # Save the recorded audio bytes to disk
    with open("audio.wav", "wb") as f:
        f.write(wav_audio_data)
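    # Optionally play the recording back so the user can verify what was
    # captured; st.audio accepts the raw WAV bytes directly, and the saved
    # audio.wav file is not otherwise consumed by this app.
    st.audio(wav_audio_data, format="audio/wav")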