Madhuri123 committed on
Commit
fa07468
·
1 Parent(s): a9aa760

Initial commit

Browse files
Files changed (2) hide show
  1. requirements.txt +2 -0
  2. streamlit_s2.py +44 -0
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit
2
+ huggingface_hub
streamlit_s2.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
import requests  # NOTE(review): imported but never used in this file — confirm before removing
from huggingface_hub import InferenceClient

# Page title shown at the top of the Streamlit app.
st.title("Streamlit with Hugging Face API")
6
+
7
def get_models():
    """Return the fixed list of Hugging Face model ids offered in the UI."""
    supported = (
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "meta-llama/Llama-3.1-8B-Instruct",
    )
    return list(supported)
9
+
10
# --- Model selection -------------------------------------------------------
# Fix: the original called get_models() twice and bound the first result to
# an unused `models` variable; the duplicate call/variable is removed.
model_names = get_models()
placeholder_option = "Select a model"
# Prepend a placeholder so no real model is selected by default.
model_names_with_placeholder = [placeholder_option] + model_names
selected_model = st.selectbox("Select the model:", model_names_with_placeholder)
prompt1 = st.chat_input("Message")

if selected_model != placeholder_option:
    st.write(f"You selected: {selected_model}")
    # Inference client bound to the chosen model (uses HF serverless API).
    client = InferenceClient(model=selected_model)

    # Initialize chat history in session state if it doesn't exist
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt1:
        # Echo the user's message and record it in the history.
        with st.chat_message("user"):
            st.markdown(prompt1)
        st.session_state.messages.append({"role": "user", "content": prompt1})
        try:
            response = client.text_generation(prompt1)
        except Exception as e:
            # Surface the failure in the chat instead of crashing the app.
            response = f"An error occurred: {e}"
        with st.chat_message("assistant"):
            st.markdown(response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

else:
    st.write("Please select a model.")