Nechba committed
Commit 2591fbb • 1 Parent(s): 5eb9634

Update app.py

Files changed (1):
  1. app.py (+8, -12)
app.py CHANGED
@@ -6,8 +6,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 import os
 from threading import Thread
 
-HF_TOKEN = os.environ.get("HF_TOKEN", None)
-MODEL_ID = "Nechba/Coin-Generative-Recognition"
+MODEL_ID = "THUDM/glm-4v-9b"
 
 TITLE = f'<br><center>🚀 Coin Generative Recognition</a></center>'
 
@@ -38,18 +37,15 @@ img {
     max-height: 300px; /* Limit the height of images */
 }
 """
-import os
-# Directory where the model and tokenizer will be saved
 
 # Load model directly
-from transformers import AutoModel
-model = AutoModel.from_pretrained("Nechba/Coin-Generative-Recognition", trust_remote_code=True).to(0)
-# model = AutoModelForCausalLM.from_pretrained(
-#     MODEL_ID,
-#     torch_dtype=torch.bfloat16,
-#     low_cpu_mem_usage=True,
-#     trust_remote_code=True
-# ).to(0)
+
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    trust_remote_code=True
+).to(0)
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
 model.eval()
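
For context on how these pieces typically fit together: below is a minimal streaming-generation sketch, not the app's exact code. It assumes the model and tokenizer loaded above and reuses the TextIteratorStreamer and Thread imports already present in app.py; the plain-text prompt and the stream_reply helper name are illustrative assumptions (the real app presumably routes images and chat history through the model's own chat template). On the loading change itself: torch_dtype=torch.bfloat16 roughly halves the memory footprint versus float32, low_cpu_mem_usage=True keeps peak RAM down while the weights are materialized, and .to(0) places the model on the first GPU.

# Minimal streaming-generation sketch (illustrative; assumptions noted above).
from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(prompt: str, max_new_tokens: int = 256):
    # Tokenize a plain text prompt; the real app presumably builds inputs
    # from the model's chat/image template instead.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # The streamer yields decoded text chunks as generate() emits tokens.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # Run generation in a background thread so the stream can be consumed here.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens),
    )
    thread.start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # a Gradio handler can yield the growing reply for live display
    thread.join()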