Aspik101 commited on
Commit
5f30ec5
·
1 Parent(s): 0bd717c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -13
app.py CHANGED
@@ -3,6 +3,9 @@ import random
3
  import time
4
  from ctransformers import AutoModelForCausalLM
5
  from datetime import datetime
 
 
 
6
 
7
  params = {
8
  "max_new_tokens":512,
@@ -13,15 +16,38 @@ params = {
13
  "batch_size": 8}
14
 
15
 
 
16
  llm = AutoModelForCausalLM.from_pretrained("Aspik101/trurl-2-7b-pl-instruct_GGML", model_type="llama")
 
 
 
17
 
18
  with gr.Blocks() as demo:
19
  chatbot = gr.Chatbot()
20
- msg = gr.Textbox()
 
21
  clear = gr.Button("Clear")
22
-
23
- def user(user_message, history):
24
- return "", history + [[user_message, None]]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  def parse_history(hist):
27
  history_ = ""
@@ -33,9 +59,7 @@ with gr.Blocks() as demo:
33
 
34
  def bot(history):
35
  print(f"When: {datetime.today().strftime('%Y-%m-%d %H:%M:%S')}")
36
- print("history: ",history)
37
- prompt = f"Jesteś AI assystentem. Odpowiadaj po polsku. {parse_history(history)}. <assistant>:"
38
- print("prompt: ",prompt)
39
  stream = llm(prompt, **params)
40
  history[-1][1] = ""
41
  answer_save = ""
@@ -45,12 +69,8 @@ with gr.Blocks() as demo:
45
  time.sleep(0.005)
46
  yield history
47
 
48
- print("answer_save: ",answer_save)
49
- msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
50
- bot, chatbot, chatbot
51
- )
52
  clear.click(lambda: None, None, chatbot, queue=False)
53
 
54
-
55
  demo.queue()
56
- demo.launch()
 
3
  import time
4
  from ctransformers import AutoModelForCausalLM
5
  from datetime import datetime
6
+ import whisper
7
+ from transformers import VitsModel, AutoTokenizer
8
+ import torch
9
 
10
  params = {
11
  "max_new_tokens":512,
 
16
  "batch_size": 8}
17
 
18
 
19
# Speech-to-text: Whisper "small" checkpoint (used by translate()).
whisper_model = whisper.load_model("small")
# Chat LLM: GGML llama-type checkpoint (repo name suggests a Polish-instruct
# Llama-2 7B — TODO confirm); runs on CPU via ctransformers.
llm = AutoModelForCausalLM.from_pretrained("Aspik101/trurl-2-7b-pl-instruct_GGML", model_type="llama")
# Text-to-speech: MMS VITS model + its tokenizer for Polish.
tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-pol")
23
+
24
 
25
# UI layout: voice in -> chat with the LLM -> synthesized voice out.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    # Microphone capture; type="filepath" hands callbacks a path to the recording.
    audio_input = gr.Audio(source="microphone", type="filepath", show_label=False)
    submit_audio = gr.Button("Submit Audio")
    clear = gr.Button("Clear")
    # Playback widget for the synthesized reply (read_text writes temp_file.wav).
    audio_output = gr.Audio('temp_file.wav', label="Generated Audio (wav)", type='filepath', autoplay=False)
31
+
32
def translate(audio):
    """Transcribe a recorded audio file to Polish text with Whisper.

    audio: path to the recorded audio file (from the gr.Audio input).
    Returns the transcription string.
    """
    print("__Wysyłam nagranie do whisper!")
    result = whisper_model.transcribe(audio, language="pl")
    return result["text"]
36
+
37
def read_text(text):
    """Synthesize speech for the latest bot reply and write it to a WAV file.

    text: full chat history (list of [user_message, bot_message] pairs);
        only the last bot message, text[-1][-1], is spoken.

    Returns the path of the written WAV file, consumed by the gr.Audio output.
    """
    print("Tutaj jest tekst to przeczytania!", text[-1][-1])
    inputs = tokenizer(text[-1][-1], return_tensors="pt")
    with torch.no_grad():
        output = tts_model(**inputs).waveform.squeeze().numpy()
    # BUG FIX: the original called sf.write(...), but `soundfile` (sf) was
    # never imported anywhere in the file, so this line raised NameError at
    # runtime. scipy.io.wavfile writes the float32 waveform directly.
    # NOTE(review): scipy writes a float WAV; if a player rejects it,
    # convert to int16 before writing.
    from scipy.io import wavfile
    wavfile.write('temp_file.wav', tts_model.config.sampling_rate, output)
    return 'temp_file.wav'
44
+
45
def user(audio_data, history):
    """Transcribe the recorded audio and append it to the chat history.

    audio_data: path to the recorded audio file, or falsy when nothing
        was recorded.
    history: list of [user_message, bot_message] pairs.

    Returns the updated history. BUG FIX: the original had no else branch,
    so a missing recording made the function implicitly return None, which
    gradio then assigned to the Chatbot — wiping the conversation. Now the
    history is returned unchanged in that case.
    """
    if not audio_data:
        return history
    user_message = translate(audio_data)
    print("USER!:")
    print("", history + [[user_message, None]])
    return history + [[user_message, None]]
51
 
52
  def parse_history(hist):
53
  history_ = ""
 
59
 
60
  def bot(history):
61
  print(f"When: {datetime.today().strftime('%Y-%m-%d %H:%M:%S')}")
62
+ prompt = f"Jesteś AI assystentem. Odpowiadaj krótko i po polsku. {parse_history(history)}. <assistant>:"
 
 
63
  stream = llm(prompt, **params)
64
  history[-1][1] = ""
65
  answer_save = ""
 
69
  time.sleep(0.005)
70
  yield history
71
 
72
# Pipeline: transcribe the mic recording into the chat (user), stream the
# LLM reply into the chatbot (bot), then synthesize the final answer to
# audio (read_text).
submit_audio.click(user, [audio_input, chatbot], [chatbot], queue=False).then(bot, chatbot, chatbot).then(read_text, chatbot, audio_output)

# Reset the conversation display.
clear.click(lambda: None, None, chatbot, queue=False)

# Queuing is required for the streaming generator (bot yields partial
# histories); share=True exposes a public gradio link.
demo.queue()
demo.launch(share=True)