jaya-sandeep-22 committed on
Commit
cb6f452
·
verified ·
1 Parent(s): 725c569

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -0
app.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Translation_APP.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1EVFldoVPoPgAsak48hRkL_D_jhCo76r_
8
+ """
9
+
10
import tempfile

import gradio as gr
import torch
from gtts import gTTS
from transformers import AutoModelForCausalLM, AutoTokenizer
14
+
15
+ device = "cuda" if torch.cuda.is_available() else "cpu"
16
+
17
+ language_model_name = "Qwen/Qwen2-1.5B-Instruct"
18
+ language_model = AutoModelForCausalLM.from_pretrained(
19
+ language_model_name,
20
+ torch_dtype="auto",
21
+ device_map="auto"
22
+ )
23
+ tokenizer = AutoTokenizer.from_pretrained(language_model_name)
24
+
25
+ def process_input(input_text, action):
26
+ if action == "Translate to English":
27
+ prompt = f"Please translate the following text into English:{input_text}"
28
+ lang = "en"
29
+ elif action == "Translate to Chinese":
30
+ prompt = f"Please translate the following text into Chinese:{input_text}"
31
+ lang = "zh-cn"
32
+ elif action == "Translate to Japanese":
33
+ prompt = f"Please translate the following text into Japanese:{input_text}"
34
+ lang = "ja"
35
+ else:
36
+ prompt = input_text
37
+ lang = "en"
38
+
39
+ messages = [
40
+ {"role": "system", "content": "You are a helpful AI assistant."},
41
+ {"role": "user", "content": prompt}
42
+ ]
43
+ text = tokenizer.apply_chat_template(
44
+ messages,
45
+ tokenize=False,
46
+ add_generation_prompt=True
47
+ )
48
+ model_inputs = tokenizer([text], return_tensors="pt").to(device)
49
+
50
+ generated_ids = language_model.generate(
51
+ model_inputs.input_ids,
52
+ max_new_tokens=512
53
+ )
54
+ generated_ids = [
55
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
56
+ ]
57
+
58
+ output_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
59
+ return output_text, lang
60
+
61
+ def text_to_speech(text, lang):
62
+ tts = gTTS(text=text, lang=lang)
63
+ filename = "output_audio.mp3"
64
+ tts.save(filename)
65
+ return filename
66
+
67
+ def handle_interaction(input_text, action):
68
+ output_text, lang = process_input(input_text, action)
69
+ audio_filename = text_to_speech(output_text, lang)
70
+ return output_text, audio_filename
71
+
72
+ action_options = ["Translate to English", "Translate to Chinese", "Translate to Japanese", "Chat"]
73
+
74
+ iface = gr.Interface(
75
+ fn=handle_interaction,
76
+ inputs=[
77
+ gr.Textbox(label="input text"),
78
+ gr.Dropdown(action_options, label="select action")
79
+ ],
80
+ outputs=[
81
+ gr.Textbox(label="output text"),
82
+ gr.Audio(label="output audio")
83
+ ],
84
+ title="Translation and Chat App using AI",
85
+ description="Translate input text or chat based on the selected action.",
86
+ theme= "gradio/soft"
87
+ )
88
+
89
+ if __name__ == "__main__":
90
+ iface.launch(share=True)