NickyNicky committed on
Commit
94606c9
1 Parent(s): 7644d94

Upload 3 files

Files changed (3)
  1. Problema_tarjetaCredito.ogg +0 -0
  2. app.py +87 -0
  3. requirements.txt +7 -0
Problema_tarjetaCredito.ogg ADDED
Binary file (14.5 kB).
 
app.py ADDED
@@ -0,0 +1,87 @@
+ import time
+ 
+ import torch
+ from peft import PeftModel, PeftConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSeq2SeqLM
+ 
+ import gradio as gr
+ import speech_recognition as sr
+ from math import log2, pow
+ import os
+ 
+ # from scipy.fftpack import fft
+ import gc
+ 
+ # LoRA adapter for the hackathon model; the base model is resolved from its PEFT config.
+ peft_model_id = 'hackathon-somos-nlp-2023/T5unami-small-v1'
+ 
+ config = PeftConfig.from_pretrained(peft_model_id)
+ model2 = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path,
+                                                return_dict=True,
+                                                # load_in_8bit=True,
+                                                # load_in_8bit_fp32_cpu_offload=True,
+                                                device_map='auto')
+ tokenizer2 = AutoTokenizer.from_pretrained(peft_model_id)
+ 
+ model2 = PeftModel.from_pretrained(model2, peft_model_id)
+ 
+ Problema_tarjetaCredito = os.path.abspath("Problema_tarjetaCredito.ogg")
+ list_audios = [[Problema_tarjetaCredito]]
+ 
+ 
+ def gen_conversation(text, max_new_tokens=100):
+     # Wrap the user text in the instruction template the adapter was trained on.
+     text = "<SN>instruction: " + text + "\n "
+     batch = tokenizer2(text, return_tensors='pt')
+ 
+     output_tokens = model2.generate(**batch,
+                                     max_new_tokens=max_new_tokens,
+                                     eos_token_id=tokenizer2.eos_token_id,
+                                     pad_token_id=tokenizer2.pad_token_id,
+                                     bos_token_id=tokenizer2.bos_token_id,
+                                     early_stopping=True,
+                                     no_repeat_ngram_size=2,
+                                     repetition_penalty=1.2,
+                                     temperature=.9,
+                                     num_beams=3)
+     gc.collect()
+     # Keep only the last line of the decoded text and strip the "output:" prefix.
+     return tokenizer2.decode(output_tokens[0], skip_special_tokens=True).split("\n")[-1].replace("output:", "")
+ 
+ 
+ conversacion = ""
+ 
+ 
+ def speech_to_text(audio_file, texto_adicional):
+     global conversacion
+     if audio_file is not None:
+         # Audio input: transcribe the recording with Google Speech Recognition, then run inference.
+         r = sr.Recognizer()
+         audio_data = sr.AudioFile(audio_file)
+         with audio_data as source:
+             audio = r.record(source)
+ 
+         texto_generado = r.recognize_google(audio, language="es-ES")
+         texto_generado = f"[|Audio a texto|]:{texto_generado}\n" + "<br>[AGENTE]:" + gen_conversation(texto_generado, max_new_tokens=500)
+         texto_generado = "<div style='color: #66b3ff;'>" + texto_generado + "</div><br>"
+     else:
+         # Text-only input: send the textbox contents straight to the model.
+         texto_generado = f"[|Solo texto|]:{texto_adicional}\n" + "<br>[AGENTE]:" + gen_conversation(texto_adicional, max_new_tokens=500)
+         texto_generado = "<div style='color: #66b3ff;'> " + texto_generado + "</div><br>"
+     conversacion += texto_generado
+     return conversacion
+ 
+ 
+ iface = gr.Interface(
+     fn=speech_to_text,
+     inputs=[gr.inputs.Audio(label="Voz", type="filepath"), gr.inputs.Textbox(label="Texto adicional")],
+     outputs=gr.outputs.HTML(label=["chatbot", "state"]),
+     title="Chat bot para empresas.",
+     description="Este modelo convierte la entrada de voz o texto y hace inferencia",
+     examples=list_audios,
+     theme="default",
+     layout="vertical",
+     allow_flagging=False,
+     flagging_dir=None,
+     server_name=None,
+     server_port=None,
+     live=False,
+     capture_session=False
+ )
+ 
+ iface.launch()
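For reference, a minimal sketch of the prompt and answer-extraction convention that gen_conversation in app.py relies on; the decoded string below is an invented example for illustration and this snippet is not part of the commit:

# The helper wraps the user text in the "<SN>instruction:" template and, after
# generation, keeps only the last line of the decoded output, stripping "output:".
prompt = "<SN>instruction: " + "Tengo un problema con mi tarjeta de credito" + "\n "
decoded = ("<SN>instruction: Tengo un problema con mi tarjeta de credito\n"
           " output: Lamento escucharlo, ¿me puede dar mas detalles?")  # invented model output
respuesta = decoded.split("\n")[-1].replace("output:", "").strip()
print(respuesta)  # -> "Lamento escucharlo, ¿me puede dar mas detalles?"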
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers
+ torch
+ SpeechRecognition
+ git+https://github.com/huggingface/peft.git
+ gradio
+ bitsandbytes
+ loralib
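With these dependencies in place, the demo can be started locally. A sketch of one assumed workflow (not described in the commit itself), kept in Python to match app.py:

# Assumed local setup: install the packages listed in requirements.txt,
# then run the script, which launches the Gradio interface via iface.launch().
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])
subprocess.check_call([sys.executable, "app.py"])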