Kohsaku committed on
Commit ee9a753
1 Parent(s): 1855d93

edit code comments

Files changed (1)
  1. README.md +0 -5
README.md CHANGED
@@ -30,8 +30,6 @@ This gemma2 model was trained 2x faster with [Unsloth](https://github.com/unslot
 
 model_name = "Kohsaku/gemma-2-9b-finetune-2"
 
-#@title README 検証用
-
 max_seq_length = 1024
 
 dtype = None
@@ -49,9 +47,6 @@ FastLanguageModel.for_inference(model)
 text = "自然言語処理とは何か"
 tokenized_input = tokenizer.encode(text, add_special_tokens=True , return_tensors="pt").to(model.device)
 
-# attention_maskを作成
-# attention_mask = torch.ones(tokenized_input.shape, device=model.device)
-
 with torch.no_grad():
     output = model.generate(
         tokenized_input,
 
30
 
31
  model_name = "Kohsaku/gemma-2-9b-finetune-2"
32
 
 
 
33
  max_seq_length = 1024
34
 
35
  dtype = None
 
47
  text = "自然言語処理とは何か"
48
  tokenized_input = tokenizer.encode(text, add_special_tokens=True , return_tensors="pt").to(model.device)
49
 
 
 
 
50
  with torch.no_grad():
51
  output = model.generate(
52
  tokenized_input,
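For reference, the hunks above come from the README's inference example. The removed lines are a Colab cell title (`#@title README 検証用`, roughly "for README verification") and a commented-out attention_mask construction, so the surrounding code is unchanged. A minimal sketch of how the remaining fragments fit together, assuming the standard Unsloth `FastLanguageModel` loading API; `load_in_4bit` and the generation parameters are illustrative assumptions, not taken from the diff:

```python
import torch
from unsloth import FastLanguageModel

model_name = "Kohsaku/gemma-2-9b-finetune-2"
max_seq_length = 1024
dtype = None  # let Unsloth pick float16/bfloat16 automatically

# Load the fine-tuned checkpoint; load_in_4bit is an assumption, not shown in the diff.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_name,
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch Unsloth into its faster inference path

text = "自然言語処理とは何か"  # "What is natural language processing?"
tokenized_input = tokenizer.encode(
    text, add_special_tokens=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    output = model.generate(
        tokenized_input,
        max_new_tokens=256,  # illustrative value, not taken from the README
        use_cache=True,
    )
print(tokenizer.decode(output[0], skip_special_tokens=True))
```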