Zeimoto committed
Commit fa6f424
1 Parent(s): 071265e

add model specific files

Files changed (3)
  1. app.py +1 -1
  2. nameder.py +23 -0
  3. speech2text.py +49 -0
app.py CHANGED
@@ -1,7 +1,7 @@
 import streamlit as st
 from st_audiorec import st_audiorec
 
-from ner import init_model_ner, get_entity_labels
+from nameder import init_model_ner, get_entity_labels
 from speech2text import init_model_trans, transcribe
 from resources import audit_elapsedtime, set_start
 
nameder.py ADDED
@@ -0,0 +1,23 @@
+from gliner import GLiNER
+from resources import set_start, audit_elapsedtime, entity_labels
+
+# Named-Entity Recognition model
+
+def init_model_ner():
+    print("Initiating NER model...")
+    start = set_start()
+    model = GLiNER.from_pretrained("urchade/gliner_multi")
+    audit_elapsedtime(function="Initiating NER model", start=start)
+    return model
+
+def get_entity_labels(model: GLiNER, text: str):  # -> Lead_labels
+    print("Initiating entity recognition...")
+    start = set_start()
+
+    labels = entity_labels
+    entities = model.predict_entities(text, labels)
+    audit_elapsedtime(function="Retrieving entity labels from text", start=start)
+
+    for entity in entities:
+        print(entity["text"], "=>", entity["label"])
+    return entities
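
For context, a minimal caller sketch for the new module (not part of the commit; the sample text and printed labels are illustrative, and the real label set comes from resources.entity_labels, which this commit does not show):

    from nameder import init_model_ner, get_entity_labels

    model = init_model_ner()  # downloads urchade/gliner_multi on first use
    entities = get_entity_labels(model, "Maria Silva joined Acme Corp in Lisbon.")
    # GLiNER returns one dict per span, e.g. {"text": "Maria Silva", "label": "person", ...};
    # get_entity_labels also prints each match as "Maria Silva => person".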
speech2text.py ADDED
@@ -0,0 +1,50 @@
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
+import torch
+import streamlit as st  # needed for st.write() in transcribe()
+from resources import set_start, audit_elapsedtime
+
+# Speech-to-text transcription model
+
+def init_model_trans():
+    print("Initiating transcription model...")
+    start = set_start()
+
+    device = "cuda:0" if torch.cuda.is_available() else "cpu"
+    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+    model_id = "openai/whisper-large-v3"
+
+    model = AutoModelForSpeechSeq2Seq.from_pretrained(
+        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
+    )
+    model.to(device)
+
+    processor = AutoProcessor.from_pretrained(model_id)
+
+    pipe = pipeline(
+        "automatic-speech-recognition",
+        model=model,
+        tokenizer=processor.tokenizer,
+        feature_extractor=processor.feature_extractor,
+        max_new_tokens=128,
+        chunk_length_s=30,
+        batch_size=16,
+        return_timestamps=True,
+        torch_dtype=torch_dtype,
+        device=device,
+    )
+    print("Init model successful")
+    audit_elapsedtime(function="Initiating transcription model", start=start)
+    return pipe
+
+def transcribe(audio_sample: bytes, pipe) -> str:
+    print("Initiating transcription...")
+    start = set_start()
+    # dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
+    # sample = dataset[0]["audio"]
+    result = pipe(audio_sample)
+    audit_elapsedtime(function="Transcription", start=start)
+    print(result)
+
+    st.write('transcription: ', result["text"])
+    return result["text"]
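
For completeness, a hedged usage sketch of the transcription path (not part of the commit; the WAV file name is a stand-in, since in app.py the bytes come from st_audiorec; transcribe also echoes the text via st.write when run inside a Streamlit app):

    from speech2text import init_model_trans, transcribe

    pipe = init_model_trans()  # loads openai/whisper-large-v3; float16 on GPU, float32 on CPU
    with open("sample.wav", "rb") as f:  # illustrative file name
        audio_bytes = f.read()
    print(transcribe(audio_bytes, pipe))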