umisetokikaze committed
Commit 6c81f4d
1 Parent(s): 187bddc

Update README.md

Files changed (1)
README.md +13 -7
README.md CHANGED
@@ -56,17 +56,23 @@ We would like to take this opportunity to thank
 
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-model = AutoModelForCausalLM.from_pretrained("Local-Novel-LLM-project/Vecteus-v1", trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained("Local-Novel-LLM-project/Vecteus-v1")
+model_id = "Local-Novel-LLM-project/Vecteus-v1"
+new_tokens = 1024
 
-prompt = "Once upon a time,"
-input_ids = tokenizer.encode(prompt, return_tensors="pt")
+model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_id)
 
-output = model.generate(input_ids, max_length=100, do_sample=True)
-generated_text = tokenizer.decode(output)
+system_prompt = "あなたはプロの小説家です。\n小説を書いてください\n-------- "
 
-print(generated_text)
+prompt = input("Enter a prompt: ")
+system_prompt += prompt + "\n-------- "
+model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
+
+
+generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
+print(tokenizer.batch_decode(generated_ids)[0])
 ````
 
 ## Merge recipe
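
For reference, here is a self-contained sketch of the updated snippet. It assumes a CUDA GPU with the flash-attn package installed (drop `attn_implementation` to fall back to the default attention). Note that the committed code assembles `system_prompt` but then tokenizes only `prompt`; this sketch assumes the combined `system_prompt` was the intended input. The Japanese system prompt translates roughly to "You are a professional novelist.\nPlease write a novel\n-------- ".

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "Local-Novel-LLM-project/Vecteus-v1"
new_tokens = 1024

# Load in fp16 with FlashAttention 2; requires a CUDA GPU and flash-attn.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Japanese system prompt ("You are a professional novelist. Please write a
# novel"); the "--------" markers delimit the user's prompt.
system_prompt = "あなたはプロの小説家です。\n小説を書いてください\n-------- "

prompt = input("Enter a prompt: ")
system_prompt += prompt + "\n-------- "

# The commit tokenizes `prompt` here; feeding the assembled `system_prompt`
# is assumed to be the intent, since it is otherwise unused.
model_inputs = tokenizer([system_prompt], return_tensors="pt").to("cuda")

generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
print(tokenizer.batch_decode(generated_ids)[0])
```

With `do_sample=True`, each run yields a different continuation; `new_tokens` caps the length of the generated text.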