File size: 3,439 Bytes
105aaaa
 
dae306d
 
105aaaa
 
b7996d7
7a4a475
 
4cabd31
105aaaa
 
 
a67f774
dae306d
 
a67f774
 
 
 
4e34f1d
 
 
 
 
dae306d
 
4e34f1d
dae306d
105aaaa
 
 
 
 
 
 
 
 
 
 
 
a67f774
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105aaaa
 
 
dae306d
 
 
 
 
105aaaa
dae306d
105aaaa
 
 
 
 
 
 
715003a
 
 
105aaaa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
from transformers import pipeline
import gradio as gr
# from diffusers import DiffusionPipeline
# from diffusers import FluxPipeline
import torch
import time
import requests
import io
import os
from PIL import Image

# Load models
# Local translation pipeline (Dravidian languages -> English); downloaded at import
# time, so first startup requires network access to the Hugging Face hub.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-dra-en")
# summarizer = pipeline("summarization", model="Falconsai/text_summarization")
# image_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16).to("cpu")

# for summarizer api
# Summarization is delegated to Groq's OpenAI-compatible chat-completions endpoint;
# GROQ_API_TOKEN must be set in the environment or the header carries "Bearer None".
SUMMARIZER_API_URL = "https://api.groq.com/openai/v1/chat/completions"
summarizer_headers = {"Authorization": f"Bearer {os.getenv('GROQ_API_TOKEN')}",
                     "Content-Type": "application/json"}


# for image api
# Image generation uses the hosted HF inference endpoint; HF_API_TOKEN must be set.
IMAGE_API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
img_headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}

def query(payload):
    """POST *payload* to the Hugging Face text-to-image inference API.

    Parameters:
        payload: JSON-serializable dict, e.g. ``{"inputs": "<prompt>"}``.

    Returns:
        Raw response body bytes. On success these are image bytes; on failure
        the API returns JSON error bytes, which the caller must cope with.
    """
    # Fixes: original used tab indentation (file is 4-space elsewhere) and had
    # no timeout, so a stalled API call would hang the Gradio handler forever.
    response = requests.post(IMAGE_API_URL, headers=img_headers, json=payload, timeout=120)
    return response.content


# Translate Tamil to English
def translate_tamil_to_english(text):
    """Translate Tamil (Dravidian) text to English.

    Runs the module-level Helsinki-NLP translation pipeline and returns the
    first candidate translation. The short pause preserves the original
    pacing of the UI handler.
    """
    time.sleep(2)
    outputs = translator(text)
    first_candidate = outputs[0]
    return first_candidate['translation_text']


# Summarize English Paragraph
def summarize_english_text(paragraph):
    """Summarize an English paragraph via the Groq chat-completions API.

    Parameters:
        paragraph: English text to summarize (sent as the user message).

    Returns:
        The model-generated summary string on success, otherwise a
        human-readable ``"Error: <status>, <body>"`` string — the Gradio
        textbox displays either one, so no exception is raised.
    """
    time.sleep(2)  # NOTE(review): artificial pacing delay kept from original — confirm still wanted
    # Request payload (OpenAI-compatible chat schema).
    payload = {
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": "Create a summary of below paragraph in 30 words max"},
            {"role": "user", "content": paragraph}
        ],
        "max_tokens": 100  # caps output length in *tokens*, not words
    }

    # Send POST request to Groq API; timeout keeps the UI handler from
    # hanging forever on a stalled connection (original had none).
    response = requests.post(SUMMARIZER_API_URL, json=payload, headers=summarizer_headers, timeout=60)

    # Check if the request was successful
    if response.status_code == 200:
        # Parse the JSON response and extract the generated text
        # (choices[0].message.content per the OpenAI chat schema).
        result = response.json()
        return result['choices'][0]['message']['content']
    else:
        # Surface the failure as text instead of raising, so the UI shows it.
        return f"Error: {response.status_code}, {response.text}"


# Generate image from English text
def english_text_to_image(prompt):
    """Generate a PIL image for *prompt* via the HF inference API.

    Sends the prompt through the module-level ``query`` helper and decodes
    the returned bytes into a ``PIL.Image``.
    """
    raw_bytes = query({"inputs": prompt})
    buffer = io.BytesIO(raw_bytes)
    return Image.open(buffer)
  

# ---- Gradio UI: three independent sections, one per model/endpoint ----
with gr.Blocks() as app:
    gr.Markdown("# Multifunctional Gradio App")

    # Tamil -> English translation section.
    with gr.Row():
        tamil_input = gr.Textbox(lines=2, placeholder="Enter Tamil text here...")
        english_output = gr.Textbox(label="English Translation")
        translate_button = gr.Button("Translate")
    # Fix: the original wrapped this .click() binding in an extra `with gr.Row():`,
    # which added a stray empty row to the layout — event bindings are not
    # layout elements and need no container.
    translate_button.click(translate_tamil_to_english, inputs=tamil_input, outputs=english_output)

    # English paragraph summarization section.
    with gr.Row():
        english_paragraph_input = gr.Textbox(lines=5, placeholder="Enter English paragraph here...")
        summary_output = gr.Textbox(label="Summary")
        summarize_button = gr.Button("Summarize")
    summarize_button.click(summarize_english_text, inputs=english_paragraph_input, outputs=summary_output)

    # Text-to-image generation section.
    with gr.Row():
        english_text_input = gr.Textbox(lines=2, placeholder="Enter English text for image generation...")
        image_output = gr.Image(label="Generated Image")
        generate_button = gr.Button("Generate Image", size="sm")  # Make the button smaller
    generate_button.click(english_text_to_image, inputs=english_text_input, outputs=image_output)

app.launch()