tanmaylaud committed
Commit: aafdd3b (1 parent: 863a64d)

Update README.md

Files changed (1): README.md (+5 -5)
README.md CHANGED
@@ -43,8 +43,8 @@ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
 # test_data = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
 
-processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
-model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
+processor = Wav2Vec2Processor.from_pretrained("tanmaylaud/wav2vec2-large-xlsr-hindi-marathi")
+model = Wav2Vec2ForCTC.from_pretrained("tanmaylaud/wav2vec2-large-xlsr-hindi-marathi")
 
 # Preprocessing the datasets.
 # We need to read the audio files as arrays
@@ -77,11 +77,11 @@ import re
 # test_data = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For sample see the Colab link in Training Section.
 
 wer = load_metric("wer")
-processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
-model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-mr-3")
+processor = Wav2Vec2Processor.from_pretrained("tanmaylaud/wav2vec2-large-xlsr-hindi-marathi")
+model = Wav2Vec2ForCTC.from_pretrained("tanmaylaud/wav2vec2-large-xlsr-hindi-marathi")
 model.to("cuda")
 
-chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�\\–\\…]'
+chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\”\\\\�\\\\–\\\\…]'
 
 
 # Preprocessing the datasets.
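For context, a minimal sketch of how the two snippets this commit touches fit together: it loads the renamed checkpoint, transcribes a single audio file, strips the punctuation set the README ignores, and scores the result with the WER metric. The file name "sample.wav", the placeholder reference transcript, and the torchaudio-based resampling are assumptions for illustration; the README's own evaluation code instead loads a full test dataset (the TODO line in the diff) and runs on GPU.

# Minimal end-to-end sketch, assuming a local 16 kHz-compatible "sample.wav"
# and a placeholder reference transcript; not the README's exact evaluation script.
import re
import torch
import torchaudio
from datasets import load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "tanmaylaud/wav2vec2-large-xlsr-hindi-marathi"
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# Read the audio and resample it to the 16 kHz rate the model expects.
speech, orig_sr = torchaudio.load("sample.wav")
speech = torchaudio.functional.resample(speech, orig_sr, 16_000).squeeze(0)

inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
prediction = processor.batch_decode(torch.argmax(logits, dim=-1))[0]

# Strip the punctuation the README ignores (single-backslash form, which is the
# valid Python regex) before computing WER against a placeholder reference.
chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�\\–\\…]'
reference_text = "ground truth transcript placeholder"  # replace with real label
reference_text = re.sub(chars_to_ignore_regex, '', reference_text).lower()

wer = load_metric("wer")
print(wer.compute(predictions=[prediction], references=[reference_text]))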