alexkueck committed on
Commit
5fb9f60
1 Parent(s): d7269a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -5
app.py CHANGED
@@ -18,25 +18,26 @@ from huggingface_hub import login
18
 
19
  #Alternativ mit beliebigen Modellen:
20
  #base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
21
- #base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
22
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
23
  #base_model = "EleutherAI/gpt-neo-1.3B" #load_8bit = False (in load_tokenizer_and_model)
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
25
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
26
- base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
 
27
  #base_model = "lmsys/vicuna-13b-v1.3"
28
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
29
 
30
  ####################################
31
  #Model und Tokenizer laden
32
- #tokenizer,model,device = load_tokenizer_and_model(base_model,False)
33
 
34
  ################################
35
  #Alternativ: Model und Tokenizer für GPT2
36
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
37
 
38
- #Alternativ bloke gpt3 und4
39
- tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
40
 
41
 
42
 
 
18
 
19
  #Alternativ mit beliebigen Modellen:
20
  #base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
21
+ base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
22
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
23
  #base_model = "EleutherAI/gpt-neo-1.3B" #load_8bit = False (in load_tokenizer_and_model)
24
  #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
25
  #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
26
+ #following runs only on GPU upgrade
27
+ #base_model = "TheBloke/airoboros-65B-gpt4-1.3-GPTQ" #model_basename = "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order"
28
  #base_model = "lmsys/vicuna-13b-v1.3"
29
  #base_model = "gpt2-xl" # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
30
 
31
  ####################################
32
  #Model und Tokenizer laden
33
+ tokenizer,model,device = load_tokenizer_and_model(base_model,False)
34
 
35
  ################################
36
  #Alternativ: Model und Tokenizer für GPT2
37
  #tokenizer,model,device = load_tokenizer_and_model_gpt2(base_model,False)
38
 
39
+ #Alternativ bloke gpt3 und4 - only with GPU upgrade
40
+ #tokenizer,model,device = load_tokenizer_and_model_bloke_gpt(base_model, "airoboros-65b-gpt4-1.3-GPTQ-4bit--1g.act.order")
41
 
42
 
43