NicholasJohn committed on
Commit
c4bbff7
1 Parent(s): b282a8e

init commit

Files changed (3)
  1. Dockerfile +9 -0
  2. app.py +86 -0
  3. requirements.txt +3 -0
Dockerfile ADDED
@@ -0,0 +1,9 @@
+ FROM python:3.11
+
+ WORKDIR /usr/src/app
+ COPY . .
+ RUN pip install -r requirements.txt
+ EXPOSE 7860
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
+
+ CMD ["python", "app.py"]
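Because `COPY . .` bakes the whole build context into the image, the GGUF weights must already be on disk before the build. A minimal build-and-run sketch, assuming the hypothetical tag name `openbio-chat` (not part of the commit):

huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF --local-dir ./llama3-gguf
docker build -t openbio-chat .
docker run -p 7860:7860 openbio-chat

Setting GRADIO_SERVER_NAME="0.0.0.0" makes Gradio bind to all interfaces instead of 127.0.0.1, so the app is reachable through the published port 7860.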
app.py ADDED
@@ -0,0 +1,86 @@
+ import gradio as gr
+ from llama_cpp import Llama
+
+ # Download the GGUF weights before starting the app, e.g.:
+ # huggingface-cli download microsoft/Phi-3-mini-4k-instruct-gguf Phi-3-mini-4k-instruct-q4.gguf --local-dir .
+ # huggingface-cli download LoneStriker/OpenBioLLM-Llama3-8B-GGUF --local-dir ./llama3-gguf
+ llm = Llama(
+     # model_path="./Phi-3-mini-4k-instruct-q4.gguf",
+     model_path="./llama3-gguf/OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
+     n_ctx=2048,
+     n_gpu_layers=50,  # only takes effect if llama-cpp-python is built with GPU support; the default wheel runs on CPU
+ )
+
+
+ def generate_text(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     # Build the prompt in the Llama-2 [INST]/<<SYS>> format. Note that
+     # OpenBioLLM-Llama3 is a Llama-3 fine-tune whose native chat template differs.
+     input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
+     for user_turn, bot_turn in history:
+         input_prompt += str(user_turn) + " [/INST] " + str(bot_turn) + " </s><s> [INST] "
+     input_prompt += str(message) + " [/INST] "
+
+     output = llm(
+         input_prompt,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=40,
+         repeat_penalty=1.1,
+         max_tokens=max_tokens,
+         stop=[
+             "<|prompter|>",
+             "<|endoftext|>",
+             "<|endoftext|> \n",
+             "ASSISTANT:",
+             "USER:",
+             "SYSTEM:",
+         ],
+         stream=True,
+     )
+     # Accumulate streamed chunks and yield the running text so the
+     # chat window updates token by token.
+     response = ""
+     for chunk in output:
+         response += chunk["choices"][0]["text"]
+         yield response
+
+
+ demo = gr.ChatInterface(
+     generate_text,
+     title="llama-cpp-python on CPU",
+     description="Running an LLM with https://github.com/abetlen/llama-cpp-python",
+     examples=[
+         ['How to set up a human base on Mars? Give a short answer.'],
+         ['Explain the theory of relativity to me like I’m 8 years old.'],
+         ['What is 9,000 * 9,000?'],
+         ['Write a pun-filled happy birthday message to my friend Alex.'],
+         ['Justify why a penguin might make a good king of the jungle.'],
+     ],
+     cache_examples=False,
+     retry_btn=None,
+     undo_btn="Delete Previous",
+     clear_btn="Clear",
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
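The commit fetches weights via the huggingface-cli comments, and the hand-rolled [INST] prompt follows the Llama-2 convention even though OpenBioLLM-Llama3-8B is a Llama-3 fine-tune. A hedged alternative sketch, not part of the commit: download the GGUF programmatically with hf_hub_download (already a dependency here) and let llama-cpp-python apply the chat template bundled in the GGUF metadata, which most Llama-3 conversions ship, via create_chat_completion:

import gradio as gr  # noqa: F401  (same stack as app.py)
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch the quantized weights into ./llama3-gguf, matching the layout the
# CLI commands in app.py produce.
model_path = hf_hub_download(
    repo_id="LoneStriker/OpenBioLLM-Llama3-8B-GGUF",
    filename="OpenBioLLM-Llama3-8B-Q5_K_M.gguf",
    local_dir="./llama3-gguf",
)
llm = Llama(model_path=model_path, n_ctx=2048)

# create_chat_completion() applies the template stored in the GGUF metadata
# (falling back to a default if absent), so the correct turn markers are
# emitted without hand-building an [INST] prompt.
stream = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a friendly Chatbot."},
        {"role": "user", "content": "What is 9,000 * 9,000?"},
    ],
    max_tokens=512,
    temperature=0.7,
    stream=True,
)
for chunk in stream:
    # Streaming chat chunks carry OpenAI-style deltas; the first chunk has
    # only a "role" key, so default missing "content" to "".
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)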
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ llama-cpp-python==0.2.69
+ huggingface-hub==0.23.0
+ gradio==4.28.3
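The exact pins matter for reproducing this Space: the retry_btn, undo_btn, and clear_btn arguments that app.py passes to gr.ChatInterface were deprecated in later Gradio 4.x releases and removed in Gradio 5, so upgrading gradio would mean dropping them. To recreate the environment outside Docker:

pip install -r requirements.txt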