Update app.py
app.py CHANGED
@@ -6,9 +6,9 @@ configObj = ctransformers.Config(stop=["\n", 'User'], context_length=2048)
 config = ctransformers.AutoConfig(config=configObj, model_type='llama')
 config.config.stop = ["\n"]
 
-path_to_llm = os.path.abspath("llama-2-7b-chat.ggmlv3.q4_1.bin")
+# path_to_llm = os.path.abspath("llama-2-7b-chat.ggmlv3.q4_1.bin")
 
-llm = ctransformers.AutoModelForCausalLM.from_pretrained(
+llm = ctransformers.AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GGUF", model_file="llama-2-7b-chat.q4_K_M.gguf", config=config)
 
 def complete(prompt, stop=["User", "Assistant"]):
     tokens = llm.tokenize(prompt)
@@ -26,7 +26,7 @@ def complete(prompt, stop=["User", "Assistant"]):
     return output
 
 title = "llama2-7b-chat-ggml"
-description = "This space is an attempt to run the
+description = "This space is an attempt to run the GGUF 4 bit quantized version of 'llama2-7b-chat' on a CPU"
 
 example_1 = "Write a 7 line poem on AI"
 example_2 = "Tell me a joke"
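The commit migrates the Space from a local GGML checkpoint to a GGUF quantization resolved from the Hub, in line with llama.cpp's (and hence ctransformers') move from the GGML to the GGUF file format. Below is a minimal sketch of how the updated loader could be exercised end to end; it reuses the repo id, model file, and Config values from the diff, while the prompt string and max_new_tokens value are illustrative assumptions, not taken from app.py:

import ctransformers

# Stop sequences and context window, as set at the top of app.py.
configObj = ctransformers.Config(stop=["\n", "User"], context_length=2048)
config = ctransformers.AutoConfig(config=configObj, model_type="llama")
config.config.stop = ["\n"]

# Fetches the 4-bit GGUF weights from the Hub on first run and loads them on CPU.
llm = ctransformers.AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7b-Chat-GGUF",
    model_file="llama-2-7b-chat.q4_K_M.gguf",
    config=config,
)

# The model object is callable: prompt text in, completion text out.
# max_new_tokens here is an illustrative value.
print(llm("User: Tell me a joke\nAssistant:", max_new_tokens=64))

Because from_pretrained now resolves a Hub repo id rather than a local file path, the os.path.abspath line becomes dead code, which is presumably why the commit comments it out instead of shipping the .ggmlv3 binary inside the Space.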