pax-dare-lab committed on
Commit
fb119fe
β€’
1 Parent(s): c6d01f1

Use GPT2 Model

Browse files
Files changed (5) hide show
  1. .streamlit/secrets.toml +6 -0
  2. app.py +58 -0
  3. app_hugchat.py +55 -0
  4. login_test.py +5 -0
  5. requirements.txt +1 -1
.streamlit/secrets.toml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [theme]
2
+ primaryColor="#F63366"
3
+ backgroundColor="#FFFFFF"
4
+ secondaryBackgroundColor="#F0F2F6"
5
+ textColor="#262730"
6
+ font="sans serif"
app.py CHANGED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit chat app backed by the rinna/japanese-gpt2-small causal LM."""

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "rinna/japanese-gpt2-small"

# App title (must be the first Streamlit page command)
st.set_page_config(page_title="ChatBot")


# Cache the model and tokenizer so Streamlit does not reload them from disk
# on every rerun — this script re-executes top to bottom on each interaction.
@st.cache_resource
def load_model(name):
    """Load and cache the tokenizer/model pair for *name*."""
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForCausalLM.from_pretrained(name)
    return tokenizer, model


tokenizer, model = load_model(model_name)

# Seed the conversation with a greeting on first load.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


def generate_response(prompt, max_length=50):
    """Generate a model continuation of *prompt*.

    Args:
        prompt: User text the model is conditioned on.
        max_length: Total token budget (prompt + generated tokens).

    Returns:
        The decoded output string (includes the prompt prefix).
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")

    # Use the tokenizer's own pad/eos id rather than the hard-coded 50256,
    # which is GPT-2's eos id and is wrong for the rinna vocabulary.
    pad_id = tokenizer.pad_token_id
    if pad_id is None:
        pad_id = tokenizer.eos_token_id

    # Inference only — no gradients needed.
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            pad_token_id=pad_id,
        )

    return tokenizer.decode(output[0], skip_special_tokens=True)


# User-provided prompt
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt)
            st.write(response)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)
app_hugchat.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit chat app that proxies user prompts to HuggingChat via hugchat."""

import streamlit as st
from hugchat import hugchat
from hugchat.login import Login

# App title
st.set_page_config(page_title="πŸ€—πŸ’¬ HugChat")

# Hugging Face credentials: prefer Streamlit secrets, fall back to manual entry.
with st.sidebar:
    st.title('πŸ€—πŸ’¬ HugChat')
    if ('EMAIL' in st.secrets) and ('PASS' in st.secrets):
        st.success('HuggingFace Login credentials already provided!', icon='βœ…')
        hf_email = st.secrets['EMAIL']
        hf_pass = st.secrets['PASS']
    else:
        hf_email = st.text_input('Enter E-mail:', type='password')
        hf_pass = st.text_input('Enter password:', type='password')
        if not (hf_email and hf_pass):
            st.warning('Please enter your credentials!', icon='⚠️')
        else:
            st.success('Proceed to entering your prompt message!', icon='πŸ‘‰')
    st.markdown('πŸ“– Learn how to build this app in this [blog](https://blog.streamlit.io/how-to-build-an-llm-powered-chatbot-with-streamlit/)!')

# Store LLM generated responses
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


@st.cache_resource
def _get_chatbot(email, passwd):
    """Log in once per credential pair and reuse the ChatBot across reruns.

    The previous version re-ran a full HuggingFace login on every chat
    message; caching the authenticated client avoids that round-trip.
    """
    sign = Login(email, passwd)
    cookies = sign.login()
    return hugchat.ChatBot(cookies=cookies.get_dict())


def generate_response(prompt_input, email, passwd):
    """Send *prompt_input* to HuggingChat and return the reply text."""
    chatbot = _get_chatbot(email, passwd)
    return chatbot.chat(prompt_input)


# User-provided prompt; input stays disabled until credentials are present.
if prompt := st.chat_input(disabled=not (hf_email and hf_pass)):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_response(prompt, hf_email, hf_pass)
            st.write(response)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)
login_test.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
"""Smoke-test the hugchat HuggingFace login flow.

SECURITY: the previous version hard-coded a real e-mail address and
password in this file, which was then committed to the repository.
That password must be rotated and the secret purged from git history.
Credentials are now read from environment variables instead of being
embedded in source.
"""

import os

from hugchat import hugchat
from hugchat.login import Login

# Fail fast with a KeyError if the credentials are not configured.
email = os.environ["HF_EMAIL"]
password = os.environ["HF_PASS"]

sign = Login(email, password)
cookies = sign.login()
requirements.txt CHANGED
@@ -4,4 +4,4 @@ transformers
4
  langchain
5
  Cython
6
  torch
7
- hugchat
 
4
  langchain
5
  Cython
6
  torch
7
+ hugchat==0.4.4