9unu committed on
Commit
c73dcae
•
1 Parent(s): 0f972ab

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -21,7 +21,9 @@ cache_dir = "./hugging_face"
 gentle_model_path='9unu/gentle_speech_translation'
 gentle_model = T5ForConditionalGeneration.from_pretrained(gentle_model_path, cache_dir=cache_dir)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+```
 
+```python
 # Create the transformers pipeline
 gentle_pipeline = pipeline(model = gentle_model, tokenizer = tokenizer, device = device, max_length=60)
 
@@ -31,7 +33,6 @@ num_return_sequences = 1
 max_length = 60
 out = gentle_pipeline(text, num_return_sequences = num_return_sequences, max_length=max_length)
 print([x['generated_text'] for x in out])
-
 ```
 
 ## License
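
For reference, below is a minimal, self-contained sketch of how the README snippet touched by this commit might be run end to end. Only the model path 9unu/gentle_speech_translation and the generation settings appear in the diff itself; the tokenizer source, the `text2text-generation` task name, and the example input sentence are assumptions added here for illustration.

```python
# Minimal sketch, assuming the tokenizer ships with the same checkpoint as the model.
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration, pipeline

cache_dir = "./hugging_face"
gentle_model_path = '9unu/gentle_speech_translation'

# Assumption: load the tokenizer from the same repo as the model.
tokenizer = AutoTokenizer.from_pretrained(gentle_model_path, cache_dir=cache_dir)
gentle_model = T5ForConditionalGeneration.from_pretrained(gentle_model_path, cache_dir=cache_dir)

# pipeline() accepts -1 for CPU or a GPU index for CUDA.
device = 0 if torch.cuda.is_available() else -1

# Create the transformers pipeline; T5 is a seq2seq model, so text2text-generation is used here.
gentle_pipeline = pipeline(
    "text2text-generation",
    model=gentle_model,
    tokenizer=tokenizer,
    device=device,
    max_length=60,
)

# Assumption: any input sentence to be rephrased in a gentle style.
text = "example input sentence"
num_return_sequences = 1
max_length = 60
out = gentle_pipeline(text, num_return_sequences=num_return_sequences, max_length=max_length)
print([x['generated_text'] for x in out])
```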