alexkueck committed on
Commit
3736c10
1 Parent(s): d8ca67b

Update app.py

Files changed (1)
  1. app.py +12 -2
app.py CHANGED
@@ -6,6 +6,7 @@ import gradio as gr
 import torch
 from utils import *
 from presets import *
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
 
 #antwort=""
 ######################################################################
@@ -17,13 +18,22 @@ from presets import *
 
 #Alternatively, with arbitrary models:
 #base_model = "project-baize/baize-v2-7b" #load_8bit = False (in load_tokenizer_and_model)
-base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
+#base_model = "alexkueck/li-tis-tuned-2" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "EleutherAI/gpt-neo-1.3B" #load_8bit = False (in load_tokenizer_and_model)
 #base_model = "TheBloke/airoboros-13B-HF" #load_8bit = True
 #base_model = "TheBloke/vicuna-13B-1.1-HF" #load_8bit = ?
 #base_model = "lmsys/vicuna-13b-v1.3"
-tokenizer,model,device = load_tokenizer_and_model(base_model,False)
+base_model = "gpt2-xl"
+
+####################################
+#Load model and tokenizer
+#tokenizer,model,device = load_tokenizer_and_model(base_model,False)
+
+################################
+#Alternatively: load model and tokenizer directly: tokenizer = AutoTokenizer.from_pretrained(base_model, use_fast=True, use_auth_token=True)
+tokenizer = GPT2Tokenizer.from_pretrained(base_model, use_auth_token=True)
+model = GPT2LMHeadModel.from_pretrained(base_model) # options: ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
 
 
 ########################################################################
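
With this change, app.py loads GPT-2 directly through transformers instead of the project's load_tokenizer_and_model helper. A minimal smoke test of the resulting tokenizer/model pair might look like the sketch below; the prompt, device handling, and sampling settings are illustrative assumptions, not part of the commit:

import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

base_model = "gpt2-xl"
tokenizer = GPT2Tokenizer.from_pretrained(base_model)
model = GPT2LMHeadModel.from_pretrained(base_model)

# The dropped helper also returned a device; pick one explicitly here (assumption).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()

prompt = "Hello, how are you?"  # illustrative prompt, not from the commit
inputs = tokenizer(prompt, return_tensors="pt").to(device)
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=50,
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token; reuse EOS
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Since all GPT-2 sizes share one vocabulary, deriving both the tokenizer and the model from the single base_model variable keeps the pair consistent when switching between the listed size options.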