Inoichan committed on
Commit
0b11d3f
1 Parent(s): d28f62a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -4
README.md CHANGED
@@ -30,17 +30,19 @@ from PIL import Image
30
 
31
  import torch
32
  from transformers import AutoProcessor
33
- from heron.models.git_llm.git_llama import GitLlamaForCausalLM
34
 
35
  device_id = 0
36
 
37
  # prepare a pretrained model
38
- model = GitLlamaForCausalLM.from_pretrained('turing-motors/heron-chat-git-ja-stablelm-base-7b-v0')
 
 
39
  model.eval()
40
  model.to(f"cuda:{device_id}")
41
 
42
  # prepare a processor
43
- processor = AutoProcessor.from_pretrained('turing-motors/heron-chat-git-ja-stablelm-base-7b-v0', additional_special_tokens=["▁▁"])
44
 
45
  # prepare inputs
46
  url = "https://www.barnorama.com/wp-content/uploads/2016/12/03-Confusing-Pictures.jpg"
@@ -68,7 +70,7 @@ with torch.no_grad():
68
  out = model.generate(**inputs, max_length=256, do_sample=False, temperature=0., eos_token_id=eos_token_id_list)
69
 
70
  # print result
71
- print(processor.tokenizer.batch_decode(out))
72
  ```
73
 
74
 
 
30
 
31
  import torch
32
  from transformers import AutoProcessor
33
+ from heron.models.git_llm.git_japanese_stablelm_alpha import GitJapaneseStableLMAlphaForCausalLM
34
 
35
  device_id = 0
36
 
37
  # prepare a pretrained model
38
+ model = GitJapaneseStableLMAlphaForCausalLM.from_pretrained(
39
+ 'turing-motors/heron-chat-git-ja-stablelm-base-7b-v0', torch_dtype=torch.float16
40
+ )
41
  model.eval()
42
  model.to(f"cuda:{device_id}")
43
 
44
  # prepare a processor
45
+ processor = AutoProcessor.from_pretrained('turing-motors/heron-chat-git-ja-stablelm-base-7b-v0')
46
 
47
  # prepare inputs
48
  url = "https://www.barnorama.com/wp-content/uploads/2016/12/03-Confusing-Pictures.jpg"
 
70
  out = model.generate(**inputs, max_length=256, do_sample=False, temperature=0., eos_token_id=eos_token_id_list)
71
 
72
  # print result
73
+ print(processor.tokenizer.batch_decode(out)[0])
74
  ```
75
 
76