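# app.py for a Hugging Face Space: a small Gradio demo that (1) translates Tamil
# text to English with Helsinki-NLP/opus-mt-dra-en, (2) summarizes English text
# through the Groq chat-completions API, and (3) generates an image through the
# Hugging Face Inference API. Running it requires the GROQ_API_TOKEN and
# HF_API_TOKEN environment variables (e.g., set as Space secrets).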
from transformers import pipeline
import gradio as gr
# from diffusers import DiffusionPipeline
# from diffusers import FluxPipeline
import torch
import time
import requests
import io
import os
from PIL import Image

# Load models
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-dra-en")
# summarizer = pipeline("summarization", model="Falconsai/text_summarization")
# image_pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16).to("cpu")

# Summarizer API (Groq chat-completions endpoint)
SUMMARIZER_API_URL = "https://api.groq.com/openai/v1/chat/completions"
summarizer_headers = {
    "Authorization": f"Bearer {os.getenv('GROQ_API_TOKEN')}",
    "Content-Type": "application/json",
}

# Image-generation API (Hugging Face Inference API)
IMAGE_API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
img_headers = {"Authorization": f"Bearer {os.getenv('HF_API_TOKEN')}"}

# Send a text-to-image request to the Hugging Face Inference API and return the raw image bytes
def query(payload):
    response = requests.post(IMAGE_API_URL, headers=img_headers, json=payload)
    return response.content

# Translate Tamil to English
def translate_tamil_to_english(text):
    time.sleep(2)
    result = translator(text)
    return result[0]['translation_text']

# Summarize an English paragraph via the Groq API
def summarize_english_text(paragraph):
    time.sleep(2)
    # Request payload
    payload = {
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": "Create a summary of the paragraph below in 30 words max"},
            {"role": "user", "content": paragraph}
        ],
        "max_tokens": 100  # upper bound on generated tokens (not words)
    }
    # Send POST request to the Groq API
    response = requests.post(SUMMARIZER_API_URL, json=payload, headers=summarizer_headers)
    # Check whether the request was successful
    if response.status_code == 200:
        # Parse the JSON response and extract the generated summary
        result = response.json()
        generated_text = result['choices'][0]['message']['content']
        return generated_text
    else:
        return f"Error: {response.status_code}, {response.text}"

# Generate an image from English text
def english_text_to_image(prompt):
    image_bytes = query({
        "inputs": prompt,
    })
    image = Image.open(io.BytesIO(image_bytes))
    return image

# Build the Gradio UI
with gr.Blocks() as app:
    gr.Markdown("# Multifunctional Gradio App")

    # Tamil-to-English translation
    with gr.Row():
        tamil_input = gr.Textbox(lines=2, placeholder="Enter Tamil text here...")
        english_output = gr.Textbox(label="English Translation")
        translate_button = gr.Button("Translate")
    translate_button.click(translate_tamil_to_english, inputs=tamil_input, outputs=english_output)

    # English summarization
    with gr.Row():
        english_paragraph_input = gr.Textbox(lines=5, placeholder="Enter English paragraph here...")
        summary_output = gr.Textbox(label="Summary")
        summarize_button = gr.Button("Summarize")
    summarize_button.click(summarize_english_text, inputs=english_paragraph_input, outputs=summary_output)

    # Text-to-image generation
    with gr.Row():
        english_text_input = gr.Textbox(lines=2, placeholder="Enter English text for image generation...")
        image_output = gr.Image(label="Generated Image")
        generate_button = gr.Button("Generate Image", size="sm")  # keep the button small
    generate_button.click(english_text_to_image, inputs=english_text_input, outputs=image_output)

app.launch()
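
# A minimal requirements.txt sketch for this Space, inferred from the imports above
# (the exact package set and any version pins are an assumption; adjust as needed):
#   gradio
#   transformers
#   torch
#   requests
#   Pillow
#   sentencepiece  # assumed: commonly required by Helsinki-NLP Marian translation models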