Alex committed on
Commit 21594bd
1 Parent(s): 4ee9c88

Update app.py

Files changed (1)
  1. app.py +60 -33
app.py CHANGED
@@ -2,43 +2,70 @@ import streamlit as st
 from streamlit_chat import message
 from streamlit_extras.colored_header import colored_header
 from streamlit_extras.add_vertical_space import add_vertical_space
-from hugchat import hugchat
 from transformers import AutoTokenizer, AutoModelForCausalLM
-import torch

-device = "cuda" if torch.cuda.is_available() else "cpu"
+st.set_page_config(page_title="Einfach.HugChat")

-tokenizer = AutoTokenizer.from_pretrained("OpenAssistant/oasst-sft-6-llama-30b-xor")
-model = AutoModelForCausalLM.from_pretrained("OpenAssistant/oasst-sft-6-llama-30b-xor")
-
-
-st.set_page_config(page_title="EinfachChat")
+# List of models
+models = ["vicuna-13b", "koala-13b", "oasst-pythia-12b", "RWKV-4-Raven-14B",
+          "alpaca-13b", "chatglm-6b", "llama-13b", "dolly-v2-12b", "stablelm-tuned-alpha-7b",
+          "fastchat-t5-3b", "mpt-7b-chat"]

 # Sidebar contents
 with st.sidebar:
     st.title('EinfachChat')
-    max_length = st.slider('Max Length', min_value=10, max_value=100, value=30)
-    do_sample = st.checkbox('Do Sample', value=True)
-    temperature = st.slider('Temperature', min_value=0.1, max_value=1.0, value=0.4)
-    no_repeat_ngram_size = st.slider('No Repeat N-Gram Size', min_value=1, max_value=10, value=1)
-    top_k = st.slider('Top K', min_value=1, max_value=100, value=50)
-    top_p = st.slider('Top P', min_value=0.1, max_value=1.0, value=0.2)
-
-    # Rest of your original Streamlit code ...
-
-def generate_text(prompt, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p):
-    formatted_prompt = "\n" + prompt
-    if not ',' in prompt:
-        formatted_prompt += ','
-    prompt = tokenizer(formatted_prompt, return_tensors='pt')
-    prompt = {key: value.to(device) for key, value in prompt.items()}
-    out = model.generate(**prompt, max_length=max_length, do_sample=do_sample, temperature=temperature,
-                         no_repeat_ngram_size=no_repeat_ngram_size, top_k=top_k, top_p=top_p)
-    output = tokenizer.decode(out[0])
-    clean_output = output.replace('\n', '\n')
-    return clean_output
-
-# Inside the conditional display part, replace
-# response = generate_response(user_input)
-# with
-response = generate_text(user_input, max_length, do_sample, temperature, no_repeat_ngram_size, top_k, top_p)
+    st.markdown('''
+    ## About
+    This app is a LLM-powered chatbot built using:
+    - [Streamlit](https://streamlit.io/)
+    - [OpenAssistant/oasst-sft-6-llama-30b-xor](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor) LLM model
+
+    💡 Note: No API key required!
+    ''')
+    model_name = st.selectbox('Choose a model', models)
+    add_vertical_space(5)
+    st.write('Made with ❤️ by EinfachAlex')
+
+# Generate empty lists for generated and past.
+## generated stores AI generated responses
+if 'generated' not in st.session_state:
+    st.session_state['generated'] = ["Hallo, wie kann ich dir helfen ?"]
+## past stores User's questions
+if 'past' not in st.session_state:
+    st.session_state['past'] = ['Hi!']
+
+# Layout of input/response containers
+input_container = st.container()
+colored_header(label='', description='', color_name='blue-30')
+response_container = st.container()
+
+# User input
+## Function for taking user provided prompt as input
+def get_text():
+    input_text = st.text_input("You: ", "", key="input")
+    return input_text
+## Applying the user input box
+with input_container:
+    user_input = get_text()
+
+# Response output
+## Function for taking user prompt as input followed by producing AI generated responses
+def generate_response(prompt, model_name):
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name)
+    inputs = tokenizer(prompt, return_tensors='pt')
+    outputs = model.generate(**inputs)
+    response = tokenizer.decode(outputs[0])
+    return response
+
+## Conditional display of AI generated responses as a function of user provided prompts
+with response_container:
+    if user_input:
+        response = generate_response(user_input, model_name)
+        st.session_state.past.append(user_input)
+        st.session_state.generated.append(response)
+
+    if st.session_state['generated']:
+        for i in range(len(st.session_state['generated'])):
+            message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
+            message(st.session_state["generated"][i], key=str(i))
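For reference, the new generate_response() is a thin wrapper around the standard transformers text-generation flow. Below is a minimal standalone sketch of that flow, not part of the commit: the repo id and prompt are placeholders for illustration, and the short names in models (e.g. "vicuna-13b") are unlikely to resolve as-is and would typically need to be mapped to full Hugging Face repo ids first.

# Standalone sketch of the generate_response() flow (illustrative only).
# "some-org/some-causal-lm" is a placeholder repo id, not an entry from models.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "some-org/some-causal-lm"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hallo, wie kann ich dir helfen?", return_tensors="pt")
# generate() is called with defaults here, exactly as in generate_response();
# real use would normally set max_new_tokens, temperature, etc.
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Note that generate_response() reloads the tokenizer and model on every prompt; caching them (for example with st.cache_resource) would avoid a full reload per message, but that is outside what this commit changes.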