umisetokikaze committed on
Commit
3f69f5a
1 Parent(s): 504ffe6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -7
README.md CHANGED
@@ -60,17 +60,23 @@ We would like to take this opportunity to thank
60
 
61
  ```python
62
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
63
 
64
- model = AutoModelForCausalLM.from_pretrained("Local-Novel-LLM-project/Ninja-v1")
65
- tokenizer = AutoTokenizer.from_pretrained("Local-Novel-LLM-project/Ninja-v1")
66
 
67
- prompt = "Once upon a time,"
68
- input_ids = tokenizer.encode(prompt, return_tensors="pt")
69
 
70
- output = model.generate(input_ids, max_length=100, do_sample=True)
71
- generated_text = tokenizer.decode(output)
72
 
73
- print(generated_text)
 
 
 
 
 
 
74
  ```
75
 
76
  ## Merge recipe
 
60
 
61
  ```python
62
  from transformers import AutoModelForCausalLM, AutoTokenizer
63
+ import torch
64
 
65
+ model_id = "Local-Novel-LLM-project/Ninja-v1"
66
+ new_tokens = 1024
67
 
68
+ model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, torch_dtype=torch.float16, attn_implementation="flash_attention_2", device_map="auto")
69
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
70
 
71
+ system_prompt = "あなたはプロの小説家です。\n小説を書いてください\n-------- "
 
72
 
73
+ prompt = input("Enter a prompt: ")
74
+ system_prompt += prompt + "\n-------- "
75
+ model_inputs = tokenizer([system_prompt], return_tensors="pt").to("cuda")
76
+
77
+
78
+ generated_ids = model.generate(**model_inputs, max_new_tokens=new_tokens, do_sample=True)
79
+ print(tokenizer.batch_decode(generated_ids)[0])
80
  ```
81
 
82
  ## Merge recipe