WinterGYC committed on
Commit
5c3a31c
•
1 Parent(s): e2de03d
Files changed (3)
  1. Dockerfile +34 -0
  2. app.py +71 -0
  3. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,34 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM nvidia/cuda:12.2.0-devel-ubuntu20.04
+
+ # Set up the environment
+ RUN apt-get update && apt-get install --no-install-recommends --no-install-suggests -y curl
+ RUN apt-get install -y unzip
+ RUN apt-get install -y python3
+ RUN apt-get install -y python3-pip
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip3 install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ # Set up a new user named "user" with user ID 1000
+ RUN useradd -m -u 1000 user
+
+ # Switch to the "user" user
+ USER user
+
+ # Set home to the user's home directory
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
+ COPY --chown=user . $HOME/app
+
+ CMD ["streamlit", "run", "app.py", "--server.port", "7860", "--server.address", "0.0.0.0"]
app.py ADDED
@@ -0,0 +1,71 @@
+ import json
+ import torch
+ import streamlit as st
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers.generation.utils import GenerationConfig
+
+
+ st.set_page_config(page_title="Baichuan-13B-Chat")
+ st.title("Baichuan-13B-Chat")
+
+
+ @st.cache_resource
+ def init_model():
+     # Load the model and tokenizer once per process; st.cache_resource
+     # keeps them alive across Streamlit reruns.
+     model = AutoModelForCausalLM.from_pretrained(
+         "baichuan-inc/Baichuan-13B-Chat",
+         torch_dtype=torch.float16,
+         device_map="auto",
+         trust_remote_code=True
+     )
+     model.generation_config = GenerationConfig.from_pretrained(
+         "baichuan-inc/Baichuan-13B-Chat"
+     )
+     tokenizer = AutoTokenizer.from_pretrained(
+         "baichuan-inc/Baichuan-13B-Chat",
+         use_fast=False,
+         trust_remote_code=True
+     )
+     return model, tokenizer
+
+
+ def clear_chat_history():
+     del st.session_state.messages
+
+
+ def init_chat_history():
+     with st.chat_message("assistant", avatar='🤖'):
+         st.markdown("Greetings! I am the Baichuan large language model, delighted to assist you. 🥰")
+
+     if "messages" in st.session_state:
+         # Replay the stored conversation after a rerun.
+         for message in st.session_state.messages:
+             avatar = '🧑‍💻' if message["role"] == "user" else '🤖'
+             with st.chat_message(message["role"], avatar=avatar):
+                 st.markdown(message["content"])
+     else:
+         st.session_state.messages = []
+
+     return st.session_state.messages
+
+
+ def main():
+     model, tokenizer = init_model()
+     messages = init_chat_history()
+
+     if prompt := st.chat_input("Shift + Enter for a new line, Enter to send"):
+         with st.chat_message("user", avatar='🧑‍💻'):
+             st.markdown(prompt)
+         messages.append({"role": "user", "content": prompt})
+         print(f"[user] {prompt}", flush=True)
+         with st.chat_message("assistant", avatar='🤖'):
+             placeholder = st.empty()
+             for response in model.chat(tokenizer, messages, stream=True):
+                 # Each yielded `response` is the full text so far, so the
+                 # placeholder is overwritten rather than appended to.
+                 placeholder.markdown(response)
+                 if torch.backends.mps.is_available():
+                     torch.mps.empty_cache()
+         messages.append({"role": "assistant", "content": response})
+         print(json.dumps(messages, ensure_ascii=False), flush=True)
+
+     st.button("Reset Chat", on_click=clear_chat_history)
+
+
+ if __name__ == "__main__":
+     main()
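The render loop above relies on a particular streaming contract: with stream=True, Baichuan's remote-code chat method yields the cumulative response text rather than per-token deltas, which is why overwriting a single st.empty() placeholder produces a typing effect. A minimal sketch of that contract with a stand-in generator (fake_chat is hypothetical, not the real API):

# Sketch of the cumulative-streaming contract the app assumes.
# `fake_chat` stands in for Baichuan's remote-code `model.chat`.
def fake_chat(messages, stream=True):
    full = "Hello! How can I help you today?"
    for i in range(1, len(full) + 1):
        yield full[:i]  # the whole response so far, not just the new token

last = ""
for response in fake_chat([{"role": "user", "content": "Hi"}]):
    last = response  # in the app: placeholder.markdown(response)
print(last)  # -> "Hello! How can I help you today?"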
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ accelerate
+ colorama
+ cpm_kernels
+ sentencepiece
+ streamlit
+ transformers_stream_generator
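Note that torch and transformers, both imported by app.py, are not pinned here; this setup appears to rely on them arriving as transitive dependencies (accelerate pulls in torch, and transformers_stream_generator pulls in transformers). A quick sanity check, as a sketch:

# Sketch: verify app.py's imports are satisfied after
# `pip3 install -r requirements.txt`.
import importlib.util

for mod in ("torch", "transformers", "streamlit", "sentencepiece"):
    print(mod, "ok" if importlib.util.find_spec(mod) else "MISSING")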