Vishnu-add commited on
Commit
334d55e
1 Parent(s): d16d417

Upload 36 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Samples/emp2.wav filter=lfs diff=lfs merge=lfs -text
37
+ Samples/Montreal[[:space:]]Vacation[[:space:]]Travel[[:space:]]Guide[[:space:]][[:space:]]Expedia.mp3 filter=lfs diff=lfs merge=lfs -text
38
+ Samples/Que[[:space:]]es[[:space:]]TED[[:space:]]y[[:space:]]TEDx.mp3 filter=lfs diff=lfs merge=lfs -text
39
+ Samples/test_mixture.wav filter=lfs diff=lfs merge=lfs -text
40
+ Samples/Gujarati_2.wav filter=lfs diff=lfs merge=lfs -text
41
+ Samples/Telugu_2.wav filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1,12 @@
1
  ---
2
  title: Meta Mms ASR
3
- emoji: 👀
4
- colorFrom: blue
5
- colorTo: green
6
  sdk: gradio
7
- sdk_version: 4.1.1
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: Meta Mms ASR
3
+ emoji: 📚
4
+ colorFrom: yellow
5
+ colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 4.0.2
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Samples/3.mp3 ADDED
Binary file (36.8 kB). View file
 
Samples/Bengali_1.wav ADDED
Binary file (129 kB). View file
 
Samples/Bengali_2.wav ADDED
Binary file (129 kB). View file
 
Samples/Gujarati_1.wav ADDED
Binary file (434 kB). View file
 
Samples/Gujarati_2.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d087ac4bc15ddccecbbded4c8fdd2c9501bd8e08c6550f9c03ec46bd2df64da
3
+ size 1597484
Samples/Hindi_1.mp3 ADDED
Binary file (36.5 kB). View file
 
Samples/Hindi_2.mp3 ADDED
Binary file (24.6 kB). View file
 
Samples/Hindi_3.mp3 ADDED
Binary file (39.8 kB). View file
 
Samples/Hindi_4.mp3 ADDED
Binary file (49.5 kB). View file
 
Samples/Hindi_5.mp3 ADDED
Binary file (52.5 kB). View file
 
Samples/Malayalam_1.wav ADDED
Binary file (762 kB). View file
 
Samples/Malayalam_2.wav ADDED
Binary file (729 kB). View file
 
Samples/Malayalam_3.wav ADDED
Binary file (555 kB). View file
 
Samples/Marathi_1.mp3 ADDED
Binary file (30.3 kB). View file
 
Samples/Marathi_2.mp3 ADDED
Binary file (55.1 kB). View file
 
Samples/Marathi_3.mp3 ADDED
Binary file (60.5 kB). View file
 
Samples/Montreal Vacation Travel Guide Expedia.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc5148bfd0aca841584c6120c9b02c4c3d9aada2e9b015827a7869803b6471cb
3
+ size 8058808
Samples/Nepal_1.mp3 ADDED
Binary file (12.8 kB). View file
 
Samples/Nepal_2.mp3 ADDED
Binary file (31.4 kB). View file
 
Samples/Nepal_3.mp3 ADDED
Binary file (30.3 kB). View file
 
Samples/Que es TED y TEDx.mp3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:120a8f591bec8715dea618a14bf0538ec03444aaf875994bac0d8a1506f2ffdc
3
+ size 1852114
Samples/Tamil_1.mp3 ADDED
Binary file (57.9 kB). View file
 
Samples/Tamil_2.mp3 ADDED
Binary file (49.3 kB). View file
 
Samples/Telugu_1.wav ADDED
Binary file (500 kB). View file
 
Samples/Telugu_2.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ccee4c1d338d4d6fc96dfca5f48ecac3d9b9517e44f0302037fe46304e9e76b0
3
+ size 1122348
Samples/Telugu_3.wav ADDED
Binary file (950 kB). View file
 
Samples/climate ex short.wav ADDED
Binary file (308 kB). View file
 
Samples/emp2.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:133ac69ac8a7d99f1fe92b7dbd810d7f4e44cd4f282179aa8c5d83183e64ad61
3
+ size 1122066
Samples/ted_short.wav ADDED
Binary file (765 kB). View file
 
Samples/test_mixture.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7126269f058724336d28bf6e82fa14b7030de321dbafa48c9fa20884d35dab9d
3
+ size 1546250
Samples/test_mixture1.wav ADDED
Binary file (603 kB). View file
 
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr

from asr import transcribe, detect_language, transcribe_lang

# Example clips shared by the auto-detect transcription tab and the LID tab.
_EXAMPLES = [
    "./Samples/Hindi_1.mp3", "./Samples/Hindi_2.mp3",
    "./Samples/Tamil_1.mp3", "./Samples/Tamil_2.mp3",
    "./Samples/Marathi_1.mp3", "./Samples/Marathi_2.mp3",
    "./Samples/Nepal_1.mp3", "./Samples/Nepal_2.mp3",
    "./Samples/Telugu_1.wav", "./Samples/Telugu_2.wav",
    "./Samples/Malayalam_1.wav", "./Samples/Malayalam_2.wav",
    "./Samples/Gujarati_1.wav", "./Samples/Gujarati_2.wav",
    "./Samples/Bengali_1.wav", "./Samples/Bengali_2.wav",
]

# Tab 1: transcription, letting the MMS ASR model handle language implicitly.
demo = gr.Interface(
    transcribe,
    inputs="microphone",
    outputs=["text", "text"],
    examples=_EXAMPLES,
)

# Tab 2: spoken-language identification only.
demo2 = gr.Interface(
    detect_language,
    inputs="microphone",
    outputs=["text", "text"],
    examples=_EXAMPLES,
)

# Tab 3: transcription with an explicit language choice.
# BUG FIX: the Radio default was value="hindi", which is not one of the
# choice values ("hin", "ben", ...), so the component started with an
# invalid selection. Use the valid ISO code "hin" instead.
demo3 = gr.Interface(
    transcribe_lang,
    inputs=[
        "microphone",
        gr.Radio(
            [("Hindi", "hin"), ("Bengali", "ben"), ("Odia", "ory"),
             ("Gujarati", "guj"), ("Telugu", "tel"), ("Tamil", "tam"),
             ("Marathi", "mar"), ("English", "eng")],
            value="hin",
        ),
    ],
    outputs=["text", "text"],
    examples=[
        ["./Samples/Hindi_1.mp3", "hin"], ["./Samples/Hindi_2.mp3", "hin"],
        ["./Samples/Hindi_3.mp3", "hin"], ["./Samples/Hindi_4.mp3", "hin"],
        ["./Samples/Hindi_5.mp3", "hin"],
        ["./Samples/Tamil_1.mp3", "tam"], ["./Samples/Tamil_2.mp3", "tam"],
        ["./Samples/Marathi_1.mp3", "mar"], ["./Samples/Marathi_2.mp3", "mar"],
        ["./Samples/Telugu_1.wav", "tel"], ["./Samples/Telugu_2.wav", "tel"],
        ["./Samples/Malayalam_1.wav", "mal"], ["./Samples/Malayalam_2.wav", "mal"],
        ["./Samples/Gujarati_1.wav", "guj"], ["./Samples/Gujarati_2.wav", "guj"],
        ["./Samples/Bengali_1.wav", "ben"], ["./Samples/Bengali_2.wav", "ben"],
        ["./Samples/climate ex short.wav", "eng"], ["./Samples/emp2.wav", "eng"],
    ],
)

tabbed_interface = gr.TabbedInterface(
    [demo, demo2, demo3],
    ["Transcribe by auto detecting language", "Detect language",
     "Transcribe by providing language"],
)

with gr.Blocks() as asr:
    tabbed_interface.render()
asr.launch()
asr.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import (
    AutoFeatureExtractor,
    AutoProcessor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForSequenceClassification,
)
import torch
import time
import gradio as gr
import librosa
import numpy as np


# Multilingual ASR model (CTC head): facebook's MMS checkpoint covering
# 1000+ languages via per-language adapters.
model_id = "facebook/mms-1b-all"
processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# Spoken-language-identification model: sequence classifier over 126 languages.
model_id_lid = "facebook/mms-lid-126"
processor_lid = AutoFeatureExtractor.from_pretrained(model_id_lid)
model_lid = Wav2Vec2ForSequenceClassification.from_pretrained(model_id_lid)
17
+
18
def resample_to_16k(audio, orig_sr):
    """Resample a 1-D audio signal from ``orig_sr`` to the 16 kHz rate the MMS models expect."""
    return librosa.resample(y=audio, orig_sr=orig_sr, target_sr=16000)
21
+
22
+
23
def transcribe(audio):
    """Transcribe speech, letting the MMS ASR model handle the language implicitly.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        ``(sample_rate, samples)`` pair as produced by gradio's microphone /
        filepath audio input.

    Returns
    -------
    tuple[str, float]
        The decoded transcription and the model inference time in seconds.
    """
    sr, y = audio
    y = y.astype(np.float32)
    # Peak-normalize to [-1, 1]. Guard against silent or empty input, which
    # would otherwise divide by zero and feed NaNs to the model.
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak > 0:
        y /= peak
    y_resampled = resample_to_16k(y, sr)
    inputs = processor(y_resampled, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        tr_start_time = time.time()
        outputs = model(**inputs).logits
        tr_end_time = time.time()
    # Greedy CTC decoding: most likely token per frame, then collapse.
    ids = torch.argmax(outputs, dim=-1)[0]
    transcription = processor.decode(ids)
    return transcription, (tr_end_time - tr_start_time)
41
+
42
+
43
def detect_language(audio):
    """Identify the spoken language of an audio clip with the MMS LID classifier.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        ``(sample_rate, samples)`` pair as produced by gradio's audio input.

    Returns
    -------
    tuple[str, float]
        The detected language label (ISO-639-3 code from the model's
        ``id2label`` map) and the inference time in seconds.
    """
    sr, y = audio
    y = y.astype(np.float32)
    # Peak-normalize; guard against silent/empty input (divide-by-zero -> NaN).
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak > 0:
        y /= peak
    y_resampled = resample_to_16k(y, sr)
    # BUG FIX: the original built the features with the ASR `processor` and
    # never used `processor_lid`, even though it was loaded for exactly this.
    # The LID model must be fed features from its own feature extractor.
    inputs = processor_lid(y_resampled, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        start_time = time.time()
        outputs_lid = model_lid(**inputs).logits
        end_time = time.time()
    lang_id = torch.argmax(outputs_lid, dim=-1)[0].item()
    detected_lang = model_lid.config.id2label[lang_id]
    return detected_lang, (end_time - start_time)
64
+
65
+
66
def transcribe_lang(audio, lang):
    """Transcribe speech in a caller-specified language.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        ``(sample_rate, samples)`` pair as produced by gradio's audio input.
    lang : str
        ISO-639-3 code of an MMS language adapter, e.g. ``"hin"``, ``"tam"``.

    Returns
    -------
    tuple[str, float]
        The decoded transcription and the model inference time in seconds.
    """
    sr, y = audio
    y = y.astype(np.float32)
    # Peak-normalize; guard against silent or empty input, which would
    # otherwise divide by zero and fill the signal with NaNs.
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak > 0:
        y /= peak
    y_resampled = resample_to_16k(y, sr)
    # Swap in the language-specific tokenizer vocabulary and adapter weights.
    # NOTE(review): this mutates the shared `processor`/`model` globals on
    # every call — not safe under concurrent requests; confirm single-worker.
    processor.tokenizer.set_target_lang(lang)
    model.load_adapter(lang)
    inputs = processor(y_resampled, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        tr_start_time = time.time()
        outputs = model(**inputs).logits
        tr_end_time = time.time()
    ids = torch.argmax(outputs, dim=-1)[0]
    transcription = processor.decode(ids)
    return transcription, (tr_end_time - tr_start_time)
85
+
86
+
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch
2
+ accelerate
3
+ torchaudio
4
+ datasets
5
+ transformers
6
+ librosa