Havi999 committed
Commit 8b328c5 • 1 Parent(s): 5931d56

Upload folder using huggingface_hub

Files changed (4)
  1. web_demo.py +2 -2
  2. web_demo2.py +2 -2
  3. web_demo_old.py +2 -2
  4. web_demo_vision.py +2 -2
web_demo.py CHANGED
@@ -4,8 +4,8 @@ import mdtex2html
 
 # tokenizer = AutoTokenizer.from_pretrained("../chatglm", trust_remote_code=True)
 # model = AutoModel.from_pretrained("../chatglm", trust_remote_code=True).float()
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)
+model = AutoModel.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True).float()
 
 model = model.eval()
 
web_demo2.py CHANGED
@@ -13,8 +13,8 @@ st.set_page_config(
 def get_model():
     # tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
     # model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-    model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+    tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)
+    model = AutoModel.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True).float()
     model = model.eval()
     return tokenizer, model
 
web_demo_old.py CHANGED
@@ -3,8 +3,8 @@ import gradio as gr
 
 # tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
 # model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
-tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)
+model = AutoModel.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True).float()
 
 model = model.eval()
 
web_demo_vision.py CHANGED
@@ -4,8 +4,8 @@ import mdtex2html
 
 # tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
 # model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).half().cuda()
-tokenizer = AutoTokenizer.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True)
-model = AutoModel.from_pretrained("THUDM/visualglm-6b", trust_remote_code=True).float()
+tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)
+model = AutoModel.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True).float()
 
 model = model.eval()
 
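
After this commit, all four demos share the same loading pattern shown in the hunks above. Below is a minimal standalone sketch of that pattern; the repo id and the .float() CPU cast are copied from the diff, while the assumption that AutoModel resolves the checkpoint's custom class via trust_remote_code comes from the demos themselves (if it does not, AutoModelForCausalLM would be the usual fallback for Baichuan-13B-Chat).

from transformers import AutoModel, AutoTokenizer

# Checkpoint the demos now point at; trust_remote_code loads the model's
# custom code from the Hub, and .float() keeps weights in float32 on CPU.
tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True)
model = AutoModel.from_pretrained("baichuan-inc/Baichuan-13B-Chat", trust_remote_code=True).float()
model = model.eval()  # inference mode, as in each demo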