k1ngtai committed
Commit 3c0f7d1
1 Parent(s): 723cd11

Create lid.py

Files changed (1)
  1. lid.py +73 -0
lid.py ADDED
@@ -0,0 +1,73 @@
+ from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor
+ import torch
+ import librosa
+
+ model_id = "facebook/mms-lid-1024"
+
+ processor = AutoFeatureExtractor.from_pretrained(model_id)
+ model = Wav2Vec2ForSequenceClassification.from_pretrained(model_id)
+
+
+ LID_SAMPLING_RATE = 16_000  # MMS models expect 16 kHz mono audio
+ LID_TOPK = 10
+ LID_THRESHOLD = 0.33
+
+ # Map ISO codes to human-readable language names.
+ LID_LANGUAGES = {}
+ with open("data/lid/all_langs.tsv") as f:
+     for line in f:
+         iso, name = line.split(" ", 1)
+         LID_LANGUAGES[iso] = name.strip()
+
+
+ def identify(audio_source=None, microphone=None, file_upload=None):
+     if audio_source is None and microphone is None and file_upload is None:
+         # HACK: Gradio may invoke the function with all inputs empty.
+         return {}
+
+     if isinstance(microphone, dict):
+         # HACK: the microphone input arrives as a dict when running on examples
+         microphone = microphone["name"]
+     audio_fp = (
+         file_upload if "upload" in str(audio_source or "").lower() else microphone
+     )
+     if audio_fp is None:
+         return "ERROR: You have to either use the microphone or upload an audio file"
+
+     # Decode the audio and resample it to the rate the model expects.
+     audio_samples = librosa.load(audio_fp, sr=LID_SAMPLING_RATE, mono=True)[0]
+
+     inputs = processor(
+         audio_samples, sampling_rate=LID_SAMPLING_RATE, return_tensors="pt"
+     )
+
+     # Prefer CUDA, then Apple Silicon (MPS), then fall back to CPU.
+     if torch.cuda.is_available():
+         device = torch.device("cuda")
+     elif (
+         hasattr(torch.backends, "mps")
+         and torch.backends.mps.is_available()
+         and torch.backends.mps.is_built()
+     ):
+         device = torch.device("mps")
+     else:
+         device = torch.device("cpu")
+
+     model.to(device)
+     inputs = inputs.to(device)
+
+     with torch.no_grad():
+         logits = model(**inputs).logits
+
+     # Keep the top-k languages and convert log-probabilities back to probabilities.
+     logit_lsm = torch.log_softmax(logits.squeeze(), dim=-1)
+     scores, indices = torch.topk(logit_lsm, LID_TOPK, dim=-1)
+     scores, indices = torch.exp(scores).to("cpu").tolist(), indices.to("cpu").tolist()
+     iso2score = {model.config.id2label[int(i)]: s for s, i in zip(scores, indices)}
+     if max(iso2score.values()) < LID_THRESHOLD:
+         return "Low confidence in the language identification predictions. Output is not shown!"
+     return {LID_LANGUAGES[iso]: score for iso, score in iso2score.items()}
+
+
+ LID_EXAMPLES = [
+     [None, "./assets/english.mp3", None],
+     [None, "./assets/tamil.mp3", None],
+     [None, "./assets/burmese.mp3", None],
+ ]
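
For a quick sanity check outside the Gradio app, identify can also be called directly. A minimal sketch, assuming a local audio file (the path below is a placeholder): per the selection logic above, any audio_source string containing "upload" routes the call to the file_upload argument.

from lid import identify

# Placeholder path; substitute any audio file librosa can decode.
# An audio_source containing "upload" selects file_upload over microphone.
predictions = identify(audio_source="upload", file_upload="clip.mp3")
print(predictions)  # e.g. {"English": 0.97, ...}

Note that the returned keys are the human-readable names from LID_LANGUAGES, and the function returns a plain string instead of a dict when the top score falls below LID_THRESHOLD.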