retiredcarboxyl committed on
Commit 8206eb7 • 1 Parent(s): 35159bf

Update app.py

Files changed (1)
  1. app.py +70 -13
app.py CHANGED
@@ -1,20 +1,77 @@
  import streamlit as st
- from transformers import pipeline
- from PIL import Image

- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

- st.title("Hot Dog? Or Not?")

- file_name = st.file_uploader("Upload a hot dog candidate image")

- if file_name is not None:
-     col1, col2 = st.columns(2)

-     image = Image.open(file_name)
-     col1.image(image, use_column_width=True)
-     predictions = pipeline(image)

-     col2.header("Probabilities")
-     for p in predictions:
-         col2.subheader(f"{ p['label'] }: { round(p['score'] * 100, 1)}%")
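For reference, the removed classifier still works outside Streamlit. A minimal standalone sketch, assuming transformers and Pillow are installed; the test file name "hotdog.jpg" is hypothetical:

    from transformers import pipeline
    from PIL import Image

    # Same checkpoint the old app loaded; the pipeline returns a list of
    # {"label": ..., "score": ...} dicts, highest score first.
    classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
    predictions = classifier(Image.open("hotdog.jpg"))  # hypothetical test image
    for p in predictions:
        print(f"{p['label']}: {round(p['score'] * 100, 1)}%")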
  import streamlit as st
+ import replicate
+ import os

+ # App title
+ st.set_page_config(page_title="🦙💬 Llama 2 Chatbot")

+ # Replicate Credentials
+ with st.sidebar:
+     st.title('🦙💬 Llama 2 Chatbot')
+     if 'REPLICATE_API_TOKEN' in st.secrets:
+         st.success('API key already provided!', icon='✅')
+         replicate_api = st.secrets['REPLICATE_API_TOKEN']
+     else:
+         replicate_api = st.text_input('Enter Replicate API token:', type='password')
+         if not (replicate_api.startswith('r8_') and len(replicate_api)==40):
+             st.warning('Please enter your credentials!', icon='⚠️')
+         else:
+             st.success('Proceed to entering your prompt message!', icon='👉')
+     os.environ['REPLICATE_API_TOKEN'] = replicate_api

+     st.subheader('Models and parameters')
+     selected_model = st.sidebar.selectbox('Choose a Llama2 model', ['Llama2-7B', 'Llama2-13B'], key='selected_model')
+     if selected_model == 'Llama2-7B':
+         llm = 'a16z-infra/llama7b-v2-chat:4f0a4744c7295c024a1de15e1a63c880d3da035fa1f49bfd344fe076074c8eea'
+     elif selected_model == 'Llama2-13B':
+         llm = 'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5'
+     temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=5.0, value=0.1, step=0.01)
+     top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
+     max_length = st.sidebar.slider('max_length', min_value=32, max_value=128, value=120, step=8)
+     st.markdown('📖 Learn how to build this app in this [blog](https://blog.streamlit.io/how-to-build-a-llama-2-chatbot/)!')

+ # Store LLM generated responses
+ if "messages" not in st.session_state.keys():
+     st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

+ # Display or clear chat messages
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.write(message["content"])

+ def clear_chat_history():
+     st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
+ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
+
+ # Function for generating LLaMA2 response. Refactored from https://github.com/a16z-infra/llama2-chatbot
+ def generate_llama2_response(prompt_input):
+     string_dialogue = "You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'."
+     for dict_message in st.session_state.messages:
+         if dict_message["role"] == "user":
+             string_dialogue += "User: " + dict_message["content"] + "\n\n"
+         else:
+             string_dialogue += "Assistant: " + dict_message["content"] + "\n\n"
+     # Use the model chosen in the sidebar; hardcoding the 13B reference
+     # here would make the model selector a no-op.
+     output = replicate.run(llm,
+                            input={"prompt": f"{string_dialogue} {prompt_input} Assistant: ",
+                                   "temperature": temperature, "top_p": top_p, "max_length": max_length, "repetition_penalty": 1})
+     return output
+
+ # User-provided prompt
+ if prompt := st.chat_input(disabled=not replicate_api):
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("user"):
+         st.write(prompt)
+
+ # Generate a new response if last message is not from assistant
+ if st.session_state.messages[-1]["role"] != "assistant":
+     with st.chat_message("assistant"):
+         with st.spinner("Thinking..."):
+             response = generate_llama2_response(prompt)
+             placeholder = st.empty()
+             full_response = ''
+             for item in response:
+                 full_response += item
+                 placeholder.markdown(full_response)
+             placeholder.markdown(full_response)
+     message = {"role": "assistant", "content": full_response}
+     st.session_state.messages.append(message)
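Not part of the commit, but for orientation: replicate.run returns an iterator of text chunks for these chat models, which is why the loop above accumulates each item into full_response and re-renders the placeholder. A minimal standalone sketch of the same call, assuming the replicate package is installed, REPLICATE_API_TOKEN is exported in the environment, and with an illustrative prompt:

    import replicate

    # Same model reference the app uses for its 13B option.
    output = replicate.run(
        'a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5',
        input={
            "prompt": "You are a helpful assistant. User: Say hello. Assistant: ",
            "temperature": 0.1,
            "top_p": 0.9,
            "max_length": 120,
            "repetition_penalty": 1,
        },
    )
    print("".join(output))  # join the streamed chunks into one response string

If the token lives in Streamlit secrets instead, the app reads st.secrets['REPLICATE_API_TOKEN'], which corresponds to a REPLICATE_API_TOKEN entry in .streamlit/secrets.toml.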