Duplicate from Willder/chatgpt-streamlit
Co-authored-by: Changhai <Willder@users.noreply.huggingface.co>
- .gitattributes +34 -0
- .streamlit/config.toml +6 -0
- README.md +13 -0
- app.py +233 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.streamlit/config.toml
ADDED
@@ -0,0 +1,6 @@
+[theme]
+base = 'light'
+primaryColor="green"
+secondaryBackgroundColor="#F0F2F6"
+#secondaryBackgroundColor="#889197"
+#secondaryBackgroundColor="#323335"
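Note on the theme file: only the uncommented secondaryBackgroundColor takes effect; the two commented lines are leftover experiments with darker values. A minimal sketch of how these values surface at runtime (st.get_option is a standard Streamlit call; the snippet is illustrative and not part of this commit):

    import streamlit as st

    # Streamlit reads .streamlit/config.toml at startup and exposes the
    # values as options addressable by dotted keys.
    primary = st.get_option("theme.primaryColor")                  # "green"
    secondary = st.get_option("theme.secondaryBackgroundColor")    # "#F0F2F6"
    st.caption(f"theme: primary={primary}, secondary={secondary}")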
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Chatgpt Streamlit
+emoji: 🚀
+colorFrom: blue
+colorTo: purple
+sdk: streamlit
+sdk_version: 1.17.0
+app_file: app.py
+pinned: false
+duplicated_from: Willder/chatgpt-streamlit
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,233 @@
+import base64
+import os
+
+import openai
+import streamlit as st
+
+# from fpdf import FPDF
+
+st.set_page_config(page_title="ChatGPT", page_icon="🚀")
+
+MAIN = st.empty()
+
+
+def create_download_link(val, filename):
+    b64 = base64.b64encode(val)  # val looks like b'...'
+    return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="{filename}.pdf">Download file</a>'
+
+
+@st.cache
+def init_openai_settings():
+    openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+def init_session():
+    if not st.session_state.get("params"):
+        st.session_state["params"] = dict()
+    if not st.session_state.get("chats"):
+        st.session_state["chats"] = {}
+
+
+def new_chat(chat_name):
+    if not st.session_state["chats"].get(chat_name):
+        st.session_state["chats"][chat_name] = {
+            "answer": [],
+            "question": [],
+            "messages": [
+                {"role": "system", "content": st.session_state["params"]["prompt"]}
+            ],
+        }
+    return chat_name
+
+
+def switch_chat(chat_name):
+    if st.session_state.get("current_chat") != chat_name:
+        st.session_state["current_chat"] = chat_name
+        init_chat(chat_name)
+        st.stop()
+
+
+def switch_chat2(chat_name):
+    if st.session_state.get("current_chat") != chat_name:
+        st.session_state["current_chat"] = chat_name
+        init_sidebar()
+        init_chat(chat_name)
+        st.stop()
+
+
+def init_sidebar():
+    st.sidebar.title("ChatGPT")
+    chat_name_container = st.sidebar.container()
+    chat_config_expander = st.sidebar.expander('Chat configuration')
+    # export_pdf = st.sidebar.empty()
+
+    # chat config
+    st.session_state["params"] = dict()
+    # st.session_state['params']["api_key"] = chat_config_expander.text_input("API_KEY", placeholder="Please input openai key")
+    st.session_state["params"]["model"] = chat_config_expander.selectbox(
+        "Please select a model",
+        ["gpt-3.5-turbo"],  # , "text-davinci-003"
+        help="ID of the model to use",
+    )
+    st.session_state["params"]["temperature"] = chat_config_expander.slider(
+        "Temperature",
+        min_value=0.0,
+        max_value=2.0,
+        value=1.2,
+        step=0.1,
+        format="%0.2f",
+        help="""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.""",
+    )
+    st.session_state["params"]["max_tokens"] = chat_config_expander.slider(
+        "MAX_TOKENS",
+        value=2000,
+        step=1,
+        min_value=100,
+        max_value=4000,
+        help="The maximum number of tokens to generate in the completion",
+    )
+
+    st.session_state["params"]["prompt"] = chat_config_expander.text_area(
+        "Prompts",
+        "You are a helpful assistant that answers questions as well as you can.",
+        help="The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.",
+    )
+    chat_config_expander.caption('Looking for help? https://platform.openai.com/docs/api-reference/chat')
+
+    new_chat_button = chat_name_container.button(
+        label="➕ New Chat"
+    )  # , use_container_width=True
+    if new_chat_button:
+        new_chat_name = f"Chat{len(st.session_state['chats'])}"
+        st.session_state["current_chat"] = new_chat_name
+        new_chat(new_chat_name)
+
+    with st.sidebar.container():
+        for chat_name in st.session_state.get("chats", {}).keys():
+            if chat_name == st.session_state.get('current_chat'):
+                chat_name_container.button(
+                    label='💬 ' + chat_name,
+                    on_click=switch_chat2,
+                    key=chat_name,
+                    args=(chat_name,),
+                    type='primary',
+                    # use_container_width=True,
+                )
+            else:
+                chat_name_container.button(
+                    label='💬 ' + chat_name,
+                    on_click=switch_chat2,
+                    key=chat_name,
+                    args=(chat_name,),
+                    # use_container_width=True,
+                )
+
+    if new_chat_button:
+        switch_chat(new_chat_name)
+
+    # Download pdf
+    # if st.session_state.get('current_chat'):
+    #     chat = st.session_state["chats"][st.session_state['current_chat']]
+    #     pdf = FPDF('p', 'mm', 'A4')
+    #     pdf.add_page()
+    #     pdf.set_font(family='Times', size=16)
+    #     # pdf.cell(40, 50, txt='abcd.pdf')
+    #
+    #     if chat["answer"]:
+    #         for i in range(len(chat["answer"]) - 1, -1, -1):
+    #             # message(chat["answer"][i], key=str(i))
+    #             # message(chat['question'][i], is_user=True, key=str(i) + '_user')
+    #             pdf.cell(40, txt=f"""YOU: {chat["question"][i]}""")
+    #             pdf.cell(40, txt=f"""AI: {chat["answer"][i]}""")
+    #
+    #     export_pdf.download_button('📤 PDF', data=pdf.output(dest='S').encode('latin-1'), file_name='abcd.pdf')
+
+
+def init_chat(chat_name):
+    chat = st.session_state["chats"][chat_name]
+
+    # with MAIN.container():
+    answer_zoom = st.container()
+    ask_form = st.empty()
+
+    if len(chat['messages']) == 1 and st.session_state["params"]["prompt"]:
+        chat["messages"][0]['content'] = st.session_state["params"]["prompt"]
+
+    if chat['messages']:
+        # answer_zoom.markdown(f"""🤖 **Prompt:** {chat["messages"][0]['content']}""")
+        # answer_zoom.info(f"""Prompt: {chat["messages"][0]['content']}""", icon="ℹ️")
+        answer_zoom.caption(f"""ℹ️ Prompt: {chat["messages"][0]['content']}""")
+    if chat["question"]:
+        for i in range(len(chat["question"])):
+            answer_zoom.markdown(f"""😃 **YOU:** {chat["question"][i]}""")
+            if i < len(chat["answer"]):
+                answer_zoom.markdown(f"""🤖 **AI:** {chat["answer"][i]}""")
+
+    with ask_form.form(chat_name):
+        col1, col2 = st.columns([10, 1])
+        input_text = col1.text_area("😃 You: ", "Hello, how are you?", key="input", max_chars=2000,
+                                    label_visibility='collapsed')
+
+        submitted = col2.form_submit_button("📫")
+
+        if submitted and input_text:
+            chat["messages"].append({"role": "user", "content": input_text})
+            answer_zoom.markdown(f"""😃 **YOU:** {input_text}""")
+
+            with st.spinner("Waiting for the response..."):
+                answer = ask(chat["messages"])
+            answer_zoom.markdown(f"""🤖 **AI:** {answer}""")
+            chat["messages"].append({"role": "assistant", "content": answer})
+            if answer:
+                chat["question"].append(input_text)
+                chat["answer"].append(answer)
+
+
+
+
+def init_css():
+    """try to fix the input field in place"""
+    st.markdown(
+        """
+<style>
+
+div[data-testid="stVerticalBlock"] > div[style*="flex-direction: column;"] > [data-testid="stVerticalBlock"] > [data-testid="stForm"] {
+    border: 20px groove red;
+    position: fixed;
+    width: 100%;
+
+    flex-direction: column;
+    flex-grow: 5;
+    overflow: auto;
+}
+</style>
+        """,
+        unsafe_allow_html=True,
+    )
+
+
+def ask(messages):
+    if st.session_state["params"]["model"] == 'gpt-3.5-turbo':
+        response = openai.ChatCompletion.create(
+            model=st.session_state["params"]["model"],
+            temperature=st.session_state["params"]["temperature"],
+            messages=messages,
+            max_tokens=st.session_state["params"]["max_tokens"],
+        )
+        answer = response["choices"][0]["message"]["content"]
+    else:
+        raise NotImplementedError('Not implemented yet!')
+    return answer
+
+
+if __name__ == "__main__":
+    print("loading")
+    init_openai_settings()
+    # init_css()
+    init_session()
+    init_sidebar()
+    if st.session_state.get("current_chat"):
+        print("current_chat: ", st.session_state.get("current_chat"))
+        init_chat(st.session_state["current_chat"])
+    if len(st.session_state["chats"]) == 0:
+        switch_chat(new_chat(f"Chat{len(st.session_state['chats'])}"))
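The whole app reduces to the single API call wrapped by ask(): session state accumulates the messages list per chat, and each submit appends a user turn before calling the legacy (pre-1.0) openai SDK. A minimal sketch of that round trip outside Streamlit, assuming OPENAI_API_KEY is set in the environment just as the app expects:

    import os
    import openai

    openai.api_key = os.getenv("OPENAI_API_KEY")

    # The same message structure that new_chat()/init_chat() maintain per chat.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
    ]

    # Mirrors ask(); model, temperature, and max_tokens are the sidebar defaults.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        temperature=1.2,
        max_tokens=2000,
        messages=messages,
    )
    print(response["choices"][0]["message"]["content"])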
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+openai
+streamlit
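Both requirements are unpinned, which is fragile: openai.ChatCompletion was removed in openai 1.0, so a rebuild that pulls the current SDK will break ask(). A hedged suggestion (exact pins are illustrative; the Streamlit pin matches sdk_version in README.md):

    openai<1.0
    streamlit==1.17.0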