zeimoto committed
Commit 8786bb1
1 Parent(s): b9bcd3f
__pycache__/nameder.cpython-312.pyc ADDED
Binary file (4.18 kB)

__pycache__/resources.cpython-312.pyc ADDED
Binary file (1.9 kB)

__pycache__/speech2text.cpython-312.pyc ADDED
Binary file (2.1 kB)

__pycache__/translation.cpython-312.pyc ADDED
Binary file (1.28 kB)

app.py CHANGED
@@ -3,55 +3,76 @@ import json
 from nameder import init_model_ner, get_entity_results
 from speech2text import init_model_trans, transcribe
 from translation import translate
-from resources import NER_Response, NER_Request, entity_labels_sample, set_start, audit_elapsedtime
-
+from resources import NER_Request, entity_labels_sample, set_start, audit_elapsedtime
+import ast
+import numpy as np
 
 def translation_to_english(text: str):
     resultado = translate(text)
     return resultado
 
-def transcription(audio: bytes):
+def transcription(audio):
 
     s2t = init_model_trans()
-    return transcribe(audio, s2t)
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+    return transcribe({"sampling_rate": sr, "raw": y}, s2t)
 
-def named_entity_recognition(text: str):
-    tokenizer, ner = init_model_ner()
-    # print('NER:',ner)
+def named_entity_recognition(req: NER_Request):
+    ner = init_model_ner()
     result = get_entity_results(entities_list=entity_labels_sample,
                                 model=ner,
-                                tokenizer=tokenizer,
-                                text=text)
+                                text=req.text)
     print('result:',result,type(result))
-    return result
+    return json.dumps(result)
 
-def get_lead(audio: bytes):
+def get_lead(audio: bytes, labels: str, input_text: str):
+    print("audio",audio,type(audio))
+    print("input text:",input_text)
+    print("labels:2",labels)
     start = set_start()
-    transcribe = transcription(audio)
-    translate = translation_to_english(transcribe)
+    labels_list = ast.literal_eval(labels)
+    if audio == None:
+        text = input_text
+    else:
+        transcribe = transcription(audio)
+        text = transcribe #translate = translation_to_english(transcribe)
+    lead_input.value = text
     ner = named_entity_recognition(NER_Request(
-        entities=entity_labels_sample,
-        text=translate
+        entities=labels_list,
+        text=text
     ))
     audit_elapsedtime("VoiceLead", start)
     return ner
 
-audio_input = gr.Microphone(
+audio_input = gr.Audio(
     label="Record your audio"
 )
+labels_input = gr.Textbox(
+    label="Labels",
+    info="Choose your labels",
+    value=entity_labels_sample
+)
+lead_input = gr.Textbox(
+    label="Lead",
+    info="[Optional] Input your lead",
+    lines=9,
+    value="I have a lead that Salesforce needs 3 developers for 600 euros a day, for 6 months"
+)
 text_output = gr.Textbox(
     label="Labels",
     info="",
     lines=9,
     value=""
 )
-demo = gr.Interface(
-    fn=named_entity_recognition,
-    description= "Get the ",
-    inputs=[audio_input],
+ui = gr.Interface(
+    fn=get_lead,
+    description= "Voice your lead",
+    inputs=[audio_input, labels_input, lead_input],
     outputs=[text_output],
     title="VoiceLead"
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    ui.launch(share=True)
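
Note on the reworked audio path: with Gradio's default numpy audio type, gr.Audio passes the callback a (sample_rate, ndarray) tuple, which the new transcription() helper normalizes before handing it to the ASR pipeline. Below is a minimal sketch of that conversion; the prepare_audio name and the zero-peak guard are illustrative additions, not part of the commit.

    import numpy as np

    def prepare_audio(audio):
        # gr.Audio with the default type="numpy" yields a (sample_rate, numpy array) tuple
        sr, y = audio
        y = y.astype(np.float32)
        peak = np.max(np.abs(y))
        if peak > 0:  # avoid dividing by zero on an all-silent clip
            y /= peak
        # transformers ASR pipelines accept a dict with "sampling_rate" and "raw" keys
        return {"sampling_rate": sr, "raw": y}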
flagged/log.csv ADDED
@@ -0,0 +1,2 @@
+Record your audio,Labels,Lead,Labels,flag,username,timestamp
+,"['team', 'developer', 'technology', 'tool', 'amount', 'duration', 'capacity', 'company', 'currency']","I have a lead that Salesforce needs 3 developers for 600 euros a day, for 6 months","{""team"": """", ""developer"": ""3 developers"", ""technology"": """", ""tool"": """", ""amount"": """", ""duration"": ""6 months"", ""capacity"": """", ""company"": ""Salesforce"", ""currency"": ""600 euros""}",,,2024-05-08 12:26:24.114688
nameder.py CHANGED
@@ -2,12 +2,37 @@ from typing import List
 from resources import set_start, audit_elapsedtime, entities_list_to_dict
 from transformers import BertTokenizer, BertForTokenClassification
 import torch
+from gliner import GLiNER
+
 
 #Named-Entity Recognition model
 
 def init_model_ner():
     print("Initiating NER model...")
     start = set_start()
+    model = GLiNER.from_pretrained("urchade/gliner_multi")
+    audit_elapsedtime(function="Initiating NER model", start=start)
+    return model
+
+def get_entity_results(model: GLiNER, text: str, entities_list: List[str]): #-> Lead_labels:
+    print("Initiating entity recognition...")
+    start = set_start()
+
+    labels = entities_list
+
+    entities_result = model.predict_entities(text, labels)
+
+    entities_dict = entities_list_to_dict(entities_list)
+    for entity in entities_result:
+        print(entity["label"], "=>", entity["text"])
+        entities_dict[entity["label"]] = entity["text"]
+
+    audit_elapsedtime(function="Retreiving entity labels from text", start=start)
+    return entities_dict
+
+def init_model_ner_v2():
+    print("Initiating NER model...")
+    start = set_start()
 
     # Load pre-trained tokenizer and model
     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
@@ -16,33 +41,38 @@ def init_model_ner():
     audit_elapsedtime(function="Initiating NER model", start=start)
     return tokenizer, model
 
-def get_entity_results(tokenizer, model, text: str, entities_list: List[str]): #-> Lead_labels:
+def get_entity_results_v2(tokenizer, model, text: str, entities_list: List[str]): #-> Lead_labels:
     print("Initiating entity recognition...")
     start = set_start()
     tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(text)))
-    labels = entities_list
-
+    labels = entities_list#["Apple Inc.", "American", "Cupertino", "California"]#entities_list
+    print("tokens line 24:",tokens)
     # Convert tokens to IDs
     input_ids = tokenizer.encode(text, return_tensors="pt")
-
+    print("input_ids line 27:",input_ids)
     # Perform NER prediction
     with torch.no_grad():
         outputs = model(input_ids)
+    print("outputs line 31:",outputs)
 
     # Get the predicted labels
     predicted_labels = torch.argmax(outputs.logits, dim=2)[0]
+    print("predicted_labels line 35:",predicted_labels)
 
     # Map predicted labels to actual entities
     entities = []
     current_entity = ""
     for i, label_id in enumerate(predicted_labels):
         label = model.config.id2label[label_id.item()]
+        print(f"i[{i}], label[{label}], label_id[{label_id}]")
         token = tokens[i]
         if label.startswith('B-'): # Beginning of a new entity
+            print(token)
             if current_entity:
                 entities.append(current_entity.strip())
             current_entity = token
         elif label.startswith('I-'): # Inside of an entity
+            print(token)
             current_entity += " " + token
         else: # Outside of any entity
             if current_entity:
@@ -51,6 +81,7 @@ def get_entity_results(tokenizer, model, text: str, entities_list: List[str]): #
 
     # Filter out only the entities you are interested in
     filtered_entities = [entity for entity in entities if entity in labels]
+    print("filtered_entities line 56:",filtered_entities)
     # entities_result = model.predict_entities(text, labels)
 
     # entities_dict = entities_list_to_dict(entities_list)
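
For context, the rewritten get_entity_results() leans on GLiNER's zero-shot API: predict_entities(text, labels) returns a list of dicts with "label" and "text" keys, which are folded into the dict produced by entities_list_to_dict(). A standalone sketch of the same flow follows, with sample text and labels borrowed from this commit's flagged log; it is an illustration, not the repo's exact code.

    from gliner import GLiNER

    model = GLiNER.from_pretrained("urchade/gliner_multi")
    labels = ["company", "developer", "currency", "duration"]
    text = "I have a lead that Salesforce needs 3 developers for 600 euros a day, for 6 months"

    result = {label: "" for label in labels}          # empty defaults, as entities_list_to_dict() now does
    for entity in model.predict_entities(text, labels):
        result[entity["label"]] = entity["text"]      # keep the last span found per label

    print(result)  # e.g. {'company': 'Salesforce', 'developer': '3 developers', ...}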
requirements.txt CHANGED
@@ -1,21 +1,33 @@
+accelerate==0.30.0
 aiofiles==23.2.1
 altair==5.3.0
 annotated-types==0.6.0
 anyio==4.3.0
 attrs==23.2.0
+beautifulsoup4==4.12.3
+boto3==1.34.100
+botocore==1.34.100
+bpemb==0.3.5
 certifi==2024.2.2
 charset-normalizer==3.3.2
 click==8.1.7
+conllu==4.5.3
 contourpy==1.2.1
 cycler==0.12.1
+Deprecated==1.2.14
 dnspython==2.6.1
 email_validator==2.1.1
 fastapi==0.111.0
 fastapi-cli==0.0.2
 ffmpy==0.3.2
 filelock==3.14.0
+flair==0.13.1
 fonttools==4.51.0
 fsspec==2024.3.1
+ftfy==6.2.0
+gdown==5.1.0
+gensim==4.3.2
+gliner==0.1.12
 gradio==4.29.0
 gradio_client==0.16.1
 h11==0.14.0
@@ -25,14 +37,21 @@ httpx==0.27.0
 huggingface-hub==0.23.0
 idna==3.7
 importlib_resources==6.4.0
+Janome==0.5.0
 Jinja2==3.1.4
+jmespath==1.0.1
+joblib==1.4.2
 jsonschema==4.22.0
 jsonschema-specifications==2023.12.1
 kiwisolver==1.4.5
+langdetect==1.0.9
+lxml==5.2.1
 markdown-it-py==3.0.0
 MarkupSafe==2.1.5
 matplotlib==3.8.4
 mdurl==0.1.2
+more-itertools==10.2.0
+mpld3==0.5.10
 mpmath==1.3.0
 networkx==3.3
 numpy==1.26.4
@@ -40,14 +59,19 @@ orjson==3.10.3
 packaging==24.0
 pandas==2.2.2
 pillow==10.3.0
+pptree==3.1
+protobuf==5.26.1
+psutil==5.9.8
 pydantic==2.7.1
 pydantic_core==2.18.2
 pydub==0.25.1
 Pygments==2.18.0
 pyparsing==3.1.2
+PySocks==1.7.1
 python-dateutil==2.9.0.post0
 python-dotenv==1.0.1
 python-multipart==0.0.9
+pytorch_revgrad==0.2.0
 pytz==2024.1
 PyYAML==6.0.1
 referencing==0.35.1
@@ -56,25 +80,42 @@ requests==2.31.0
 rich==13.7.1
 rpds-py==0.18.1
 ruff==0.4.3
+s3transfer==0.10.1
 safetensors==0.4.3
+scikit-learn==1.4.2
+scipy==1.12.0
+segtok==1.5.11
 semantic-version==2.10.0
+semver==3.0.2
+sentencepiece==0.2.0
+seqeval==1.2.2
 shellingham==1.5.4
 six==1.16.0
+smart-open==7.0.4
 sniffio==1.3.1
+soupsieve==2.5
+sqlitedict==2.1.0
 starlette==0.37.2
 sympy==1.12
+tabulate==0.9.0
+threadpoolctl==3.5.0
 tokenizers==0.19.1
 tomlkit==0.12.0
 toolz==0.12.1
 torch==2.3.0
+torchaudio==2.3.0
 tqdm==4.66.4
+transformer-smaller-training-vocab==0.4.0
 transformers==4.40.2
 typer==0.12.3
 typing_extensions==4.11.0
 tzdata==2024.1
 ujson==5.9.0
-urllib3==2.2.1
+urllib3==1.26.18
 uvicorn==0.29.0
 uvloop==0.19.0
 watchfiles==0.21.0
+wcwidth==0.2.13
 websockets==11.0.3
+Wikipedia-API==0.6.0
+wrapt==1.16.0
resources.py CHANGED
@@ -27,7 +27,7 @@ entity_labels_sample = [
 ]
 
 def entities_list_to_dict(entitiesList: List[str]):
-    return {key: 'string' for key in entitiesList}
+    return {key: '' for key in entitiesList}
 
 def set_start () -> time:
     return time.time()
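
The change above seeds every label with an empty string instead of the placeholder 'string', so labels GLiNER does not find come back as "" in the JSON that get_lead() now returns, matching the row recorded in flagged/log.csv. A small illustrative sketch of the resulting shape (the filled-in value is hypothetical):

    import json

    entities_dict = {key: '' for key in ["team", "company", "duration"]}
    entities_dict["company"] = "Salesforce"  # filled in only when a matching entity is predicted
    print(json.dumps(entities_dict))
    # {"team": "", "company": "Salesforce", "duration": ""}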
speech2text.py CHANGED
@@ -36,7 +36,7 @@ def init_model_trans ():
     audit_elapsedtime(function="Init transc model", start=start)
     return pipe
 
-def transcribe (audio_sample: bytes, pipe) -> str:
+def transcribe (audio_sample, pipe) -> str:
     print("Initiating transcription...")
     start = set_start()
     result = pipe(audio_sample)
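
Dropping the bytes annotation matters because app.py now passes a dict rather than raw bytes, and a Hugging Face automatic-speech-recognition pipeline accepts either form. A minimal sketch of the call shape follows; the Whisper checkpoint named here is an assumption, since init_model_trans() in this repo may load a different model.

    import numpy as np
    from transformers import pipeline

    # Hypothetical setup; the repo's init_model_trans() may load a different checkpoint.
    pipe = pipeline("automatic-speech-recognition", model="openai/whisper-small")

    sr = 16000
    y = np.zeros(sr, dtype=np.float32)  # one second of silence as a stand-in for recorded audio
    result = pipe({"sampling_rate": sr, "raw": y})
    print(result["text"])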