utrobinmv committed on
Commit
bb21507
1 Parent(s): 6178ccf

update model

Browse files
Files changed (2) hide show
  1. README.md +8 -2
  2. model.safetensors +2 -2
README.md CHANGED
@@ -42,8 +42,11 @@ Example translate Russian to Chinese
42
  ```python
43
  from transformers import T5ForConditionalGeneration, T5Tokenizer
44
 
 
 
45
  model_name = 'utrobinmv/t5_translate_en_ru_zh_small_1024'
46
  model = T5ForConditionalGeneration.from_pretrained(model_name)
 
47
  tokenizer = T5Tokenizer.from_pretrained(model_name)
48
 
49
  prefix = 'translate to zh: '
@@ -52,7 +55,7 @@ src_text = prefix + "Цель разработки — предоставить
52
  # translate Russian to Chinese
53
  input_ids = tokenizer(src_text, return_tensors="pt")
54
 
55
- generated_tokens = model.generate(**input_ids)
56
 
57
  result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
58
  print(result)
@@ -66,8 +69,11 @@ and Example translate Chinese to Russian
66
  ```python
67
  from transformers import T5ForConditionalGeneration, T5Tokenizer
68
 
 
 
69
  model_name = 'utrobinmv/t5_translate_en_ru_zh_small_1024'
70
  model = T5ForConditionalGeneration.from_pretrained(model_name)
 
71
  tokenizer = T5Tokenizer.from_pretrained(model_name)
72
 
73
  prefix = 'translate to ru: '
@@ -76,7 +82,7 @@ src_text = prefix + "开发的目的就是向用户提供个性化的同步翻
76
  # translate Chinese to Russian
77
  input_ids = tokenizer(src_text, return_tensors="pt")
78
 
79
- generated_tokens = model.generate(**input_ids)
80
 
81
  result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
82
  print(result)
 
42
  ```python
43
  from transformers import T5ForConditionalGeneration, T5Tokenizer
44
 
45
+ device = 'cuda'  # or 'cpu' to translate on CPU
46
+
47
  model_name = 'utrobinmv/t5_translate_en_ru_zh_small_1024'
48
  model = T5ForConditionalGeneration.from_pretrained(model_name)
49
+ model.to(device)
50
  tokenizer = T5Tokenizer.from_pretrained(model_name)
51
 
52
  prefix = 'translate to zh: '
 
55
  # translate Russian to Chinese
56
  input_ids = tokenizer(src_text, return_tensors="pt")
57
 
58
+ generated_tokens = model.generate(**input_ids.to(device))
59
 
60
  result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
61
  print(result)
 
69
  ```python
70
  from transformers import T5ForConditionalGeneration, T5Tokenizer
71
 
72
+ device = 'cuda'  # or 'cpu' to translate on CPU
73
+
74
  model_name = 'utrobinmv/t5_translate_en_ru_zh_small_1024'
75
  model = T5ForConditionalGeneration.from_pretrained(model_name)
76
+ model.to(device)
77
  tokenizer = T5Tokenizer.from_pretrained(model_name)
78
 
79
  prefix = 'translate to ru: '
 
82
  # translate Chinese to Russian
83
  input_ids = tokenizer(src_text, return_tensors="pt")
84
 
85
+ generated_tokens = model.generate(**input_ids.to(device))
86
 
87
  result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
88
  print(result)
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dca087ae1ac368b5d8d1197e84e32f1e7f8a4454db2039dd73fc38105b815d41
3
- size 442920360
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:554ac6b7a16ef631fbd8615ffd02ef799390f75d779e92ba2a69b5a05b6aea79
3
+ size 221471408