Val123val committed
Commit: a9ff7ae
Parent: 95e89a5

Update README.md

Files changed (1):
  1. README.md (+5 -2)
README.md CHANGED
@@ -85,13 +85,15 @@ Speculative Decoding was proposed in Fast Inference from Transformers via Specul
85
  ```bash
86
  import torch
87
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
 
88
 
89
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
90
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
91
 
92
-
93
  dataset = load_dataset("bond005/sberdevices_golos_10h_crowd", split="validation", token=True)
94
 
 
95
  model_id = "Val123val/ru_whisper_small"
96
 
97
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
@@ -105,6 +107,7 @@ model.to(device)
105
 
106
  processor = AutoProcessor.from_pretrained(model_id)
107
 
 
108
  assistant_model_id = "openai/whisper-tiny"
109
 
110
  assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained(
@@ -117,8 +120,8 @@ assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained(
117
 
118
  assistant_model.to(device);
119
 
120
- from transformers import pipeline
121
 
 
122
  pipe = pipeline(
123
  "automatic-speech-recognition",
124
  model=model,
 
85
  ```bash
86
  import torch
87
  from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
88
+ from transformers import pipeline
89
 
90
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
91
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
92
 
93
+ # load dataset
94
  dataset = load_dataset("bond005/sberdevices_golos_10h_crowd", split="validation", token=True)
95
 
96
+ # load model
97
  model_id = "Val123val/ru_whisper_small"
98
 
99
  model = AutoModelForSpeechSeq2Seq.from_pretrained(
 
107
 
108
  processor = AutoProcessor.from_pretrained(model_id)
109
 
110
+ # load assistant model
111
  assistant_model_id = "openai/whisper-tiny"
112
 
113
  assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained(
 
120
 
121
  assistant_model.to(device);
122
 
 
123
 
124
+ # make pipe
125
  pipe = pipeline(
126
  "automatic-speech-recognition",
127
  model=model,
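
For reference, here is a minimal runnable sketch of the snippet as it reads after this commit. Everything outside the diff context is an assumption: the `from_pretrained` and `pipeline` keyword arguments follow the standard Transformers speculative-decoding recipe rather than the README lines hidden from this diff, and `from datasets import load_dataset` is added because the shown code calls `load_dataset` without importing it.

```python
import torch
from datasets import load_dataset  # assumed import: load_dataset is called but never imported in the shown lines
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# load dataset (token=True authenticates with your Hugging Face token)
dataset = load_dataset("bond005/sberdevices_golos_10h_crowd", split="validation", token=True)

# load model (kwargs beyond model_id are assumptions following the usual Whisper loading recipe)
model_id = "Val123val/ru_whisper_small"
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
model.to(device)

processor = AutoProcessor.from_pretrained(model_id)

# load assistant (draft) model used for speculative decoding
assistant_model_id = "openai/whisper-tiny"
assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained(
    assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
assistant_model.to(device)

# make pipe: passing the assistant model via generate_kwargs enables speculative decoding
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    generate_kwargs={"assistant_model": assistant_model},
    torch_dtype=torch_dtype,
    device=device,
)

# transcribe one validation sample
sample = dataset[0]["audio"]
print(pipe(sample)["text"])
```

The draft model must share the main model's tokenizer and vocabulary for speculative decoding to work; `openai/whisper-tiny` satisfies this for a Whisper-small fine-tune such as `Val123val/ru_whisper_small`.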