asahi417 committed on
Commit
e969db4
1 Parent(s): ca97597

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -352,7 +352,7 @@ def inference(batch):
352
  input_features = processor(audio, sampling_rate=batch["audio"][0]["sampling_rate"], return_tensors="pt").input_features
353
  input_features = input_features.to(device, dtype=torch_dtype)
354
  # 2. Auto-regressively generate the predicted token ids
355
- pred_ids = model.generate(input_features, max_new_tokens=128)
356
  # 3. Decode the token ids to the final transcription
357
  batch["transcription"] = processor.batch_decode(pred_ids, skip_special_tokens=True)
358
  batch["reference"] = batch[text_column]
 
352
  input_features = processor(audio, sampling_rate=batch["audio"][0]["sampling_rate"], return_tensors="pt").input_features
353
  input_features = input_features.to(device, dtype=torch_dtype)
354
  # 2. Auto-regressively generate the predicted token ids
355
+ pred_ids = model.generate(input_features, language="ja", max_new_tokens=128)
356
  # 3. Decode the token ids to the final transcription
357
  batch["transcription"] = processor.batch_decode(pred_ids, skip_special_tokens=True)
358
  batch["reference"] = batch[text_column]