Q4234 committed on
Commit
8f108e1
1 Parent(s): f74c6e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -1
app.py CHANGED
@@ -12,8 +12,16 @@ class Z(object):
12
  def greet(self, txt0):
13
  prompt0 = txt0
14
 
 
15
  prompt00 = f'''USER: {prompt0}
16
  ASSISTANT:'''
 
 
 
 
 
 
 
17
 
18
  response0 = llm(prompt00, max_new_tokens=128, temperature=0.5) # 0.3
19
 
@@ -23,8 +31,9 @@ from ctransformers import AutoModelForCausalLM
23
 
24
  # wizzard vicuna
25
  # see https://github.com/melodysdreamj/WizardVicunaLM
26
- llm = AutoModelForCausalLM.from_pretrained('TheBloke/Wizard-Vicuna-13B-Uncensored-GGML', model_file='Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin', model_type='llama')
27
 
 
28
 
29
 
30
 
 
12
  def greet(self, txt0):
13
  prompt0 = txt0
14
 
15
+ # for Wizard-Vicuna-13B
16
  prompt00 = f'''USER: {prompt0}
17
  ASSISTANT:'''
18
+
19
+ prompt00 = f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
20
+
21
+ ### Instruction:
22
+ {prompt0}
23
+
24
+ ### Response:'''
25
 
26
  response0 = llm(prompt00, max_new_tokens=128, temperature=0.5) # 0.3
27
 
 
31
 
32
  # wizzard vicuna
33
  # see https://github.com/melodysdreamj/WizardVicunaLM
34
+ #llm = AutoModelForCausalLM.from_pretrained('TheBloke/Wizard-Vicuna-13B-Uncensored-GGML', model_file='Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin', model_type='llama')
35
 
36
+ llm = AutoModelForCausalLM.from_pretrained('mverrilli/dolly-v2-12b-ggml', model_file='ggml-model-q5_0.bin', model_type='dolly')
37
 
38
 
39