Luciferalive committed on
Commit
db11521
1 Parent(s): 156d62a

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+
# Load the model and tokenizer
# NOTE(review): this runs at import time and downloads the full checkpoint
# (a ~2B-parameter Gemma fine-tune) on first start — expect a slow cold boot.
# device_map="auto" lets accelerate place layers on the available device(s);
# load_in_4bit=False keeps full-precision weights (it is also the default,
# so this is documentation-by-explicitness rather than a behavior change).
model_name = "Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa-2.0"
model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=False, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
def generate_prompt(instruction, user_input):
    """
    Build the Alpaca-style prompt fed to the model.

    The layout (### Instruction / ### Input / ### Response sections) matches
    the fine-tuning format, which encourages the model to answer in the same
    language as the input.

    Parameters:
        instruction: Task description placed under "### Instruction:".
        user_input: User text placed under "### Input:".

    Returns:
        The assembled prompt, starting with a newline and ending with a
        newline after the "### Response:" header.
    """
    sections = [
        "",                  # leading newline, as in the fine-tuning template
        "### Instruction:",
        instruction,
        "",
        "### Input:",
        user_input,
        "",
        "### Response:",
        "",                  # trailing newline after the response header
    ]
    return "\n".join(sections)
def get_model_response(user_input, instruction="Identify and summarize the core intent in the same language:"):
    """
    Run the model on *user_input* and return its generated response text.

    Parameters:
        user_input: Text whose core intent should be summarized.
        instruction: Task instruction embedded in the prompt; defaults to
            intent summarization in the input's own language.

    Returns:
        The model completion after the "### Response:" marker, stripped of
        surrounding whitespace.
    """
    input_text = generate_prompt(instruction, user_input)
    # Move input tensors to the model's device so generation also works when
    # device_map="auto" has placed the model on a GPU.
    inputs = tokenizer([input_text], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=300, use_cache=True)
    # skip_special_tokens=True: without it, markers such as <bos>/<eos> are
    # decoded into the text and a trailing "<eos>" leaks into every response.
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    # The decoded text echoes the whole prompt; keep only the completion.
    return response.split("### Response:")[-1].strip()
# Gradio interface.
# Fixed for current Gradio releases: the gr.inputs / gr.outputs component
# namespaces and the `default=` keyword were deprecated in Gradio 3.x and
# removed in 4.x, so the original construction raises AttributeError/TypeError
# on an up-to-date install. Components now live at the top level
# (gr.Textbox) and the preset text is passed as `value=`.
iface = gr.Interface(
    fn=get_model_response,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="Instruction", value="Identify and summarize the core intent in the same language:"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="Intent Summarization",
    description="Summarize the core intent of the input text in the same language.",
)

# Start the web UI (blocks until the server is stopped).
iface.launch()