ToonTownTommy committed
Commit
540fcff
1 Parent(s): abc6b1b

Create app.py

Files changed (1)
app.py +50 -0
app.py ADDED
import os
import gradio as gr
import ctransformers

# Stop on newlines or a new "User" turn; 2048-token context window.
configObj = ctransformers.Config(stop=["\n", 'User'], context_length=2048)
config = ctransformers.AutoConfig(config=configObj, model_type='llama')
config.config.stop = ["\n"]  # overrides the stop list above with newline only

# path_to_llm = os.path.abspath("llama-2-7b-chat.ggmlv3.q4_1.bin")

# Load the 4-bit (Q4_K_M) GGUF build of Llama-2-7B-Chat from the Hub.
llm = ctransformers.AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7b-Chat-GGUF",
    model_file="llama-2-7b-chat.Q4_K_M.gguf",
    config=config,
)

def complete(prompt, stop=("User", "Assistant")):
    """Stream tokens to stdout and return the generated text,
    cutting off as soon as any stop word appears in the output."""
    tokens = llm.tokenize(prompt)
    output = ''
    for token in llm.generate(tokens):
        result = llm.detokenize(token)
        output += result
        for word in stop:
            if word in output:
                print('\n')
                return output
        print(result, end='', flush=True)
    print('\n')
    return output

title = "llama2-7b-chat-ggml"
description = "This Space is an attempt to run the 4-bit GGUF quantized version of 'llama2-7b-chat' on a CPU"

example_1 = "Write a 7 line poem on AI"
example_2 = "Tell me a joke"

examples = [example_1, example_2]

def generate_response(user_input):
    # Wrap the query in a minimal chat template before completing it.
    prompt = f'User: {user_input}\nAssistant: '
    response = complete(prompt)
    return response

UI = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(label="User Query", placeholder="Ask your queries here..."),
    outputs=gr.Textbox(label="Assistant Response"),
    title=title,
    description=description,
    examples=examples,
)

UI.launch()