alexkueck commited on
Commit
3cea298
1 Parent(s): 5d22ed0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -24,22 +24,22 @@ from transformers import LlamaForCausalLM, LlamaTokenizer
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
25
  #base_model = "EleutherAI/gpt-neo-1.3B" #load_8bit = False (in load_tokenizer_and_model)
26
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
27
- #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
28
  #following runs only on GPU upgrade
29
- base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
30
  #base_model = "lmsys/vicuna-13b-v1.3"
31
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
32
 
33
  ####################################
34
  #Model und Tokenizer laden
35
- #tokenizer,model,device = load_tokenizer_and_model(base_model,False)
36
 
37
  ################################
38
  #Alternativ: Model und Tokenizer für GPT2
39
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
40
 
41
  #Alternativ bloke gpt3 und 4 - only with GPU upgrade
42
- tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
43
 
44
  #Alternativ Model und Tokenizer laden für Baize
45
  #tokenizer,model,device = load_tokenizer_and_model_Baize(base_model,False)
 
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
25
  #base_model = "EleutherAI/gpt-neo-1.3B" #load_8bit = False (in load_tokenizer_and_model)
26
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
27
+ base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
28
  #following runs only on GPU upgrade
29
+ #base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
30
  #base_model = "lmsys/vicuna-13b-v1.3"
31
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
32
 
33
  ####################################
34
  #Model und Tokenizer laden
35
+ tokenizer,model,device = load_tokenizer_and_model(base_model,False)
36
 
37
  ################################
38
  #Alternativ: Model und Tokenizer für GPT2
39
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
40
 
41
  #Alternativ bloke gpt3 und 4 - only with GPU upgrade
42
+ #tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
43
 
44
  #Alternativ Model und Tokenizer laden für Baize
45
  #tokenizer,model,device = load_tokenizer_and_model_Baize(base_model,False)