File size: 2,292 Bytes
10e69c3
8cf04c0
10e69c3
 
 
0cdfeaf
8cf04c0
10e69c3
 
 
66d3993
10e69c3
 
 
8cf04c0
a97ab17
10e69c3
 
0d42d6e
 
 
 
 
 
 
 
 
 
 
 
4a305fc
3bb8124
0d42d6e
 
 
 
4a305fc
0d42d6e
 
4a305fc
 
 
 
 
 
 
0d42d6e
 
10e69c3
0d42d6e
 
 
 
 
8cf04c0
 
0d42d6e
10e69c3
4a305fc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
import torch
from transformers import MarianMTModel, MarianTokenizer
import google.generativeai as genai
from huggingface_hub import InferenceApi, login
import gradio as gr

# --- Credentials and model setup (module-level, runs once at import) ---
#
# Fix: the original did `os.environ["API_KEY"] = os.getenv("GOOGLE_API_KEY")`,
# which crashes with a cryptic `TypeError: str expected, not NoneType` when
# the variable is unset, and passed a possibly-None token to `login()`.
# Both now fail fast with an actionable message.

# Google Gemini API key — must be provided via the GOOGLE_API_KEY env var.
google_api_key = os.getenv("GOOGLE_API_KEY")
if not google_api_key:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set")
os.environ["API_KEY"] = google_api_key  # kept for backward compatibility
genai.configure(api_key=google_api_key)

# Hugging Face access token — must be provided via the HF_TOK env var.
hf_token = os.getenv("HF_TOK")
if not hf_token:
    raise RuntimeError("HF_TOK environment variable is not set")
login(hf_token)  # Authenticate with the Hugging Face Hub

# Remote text-to-image model (FLUX.1-schnell) via the HF Inference API.
# NOTE(review): `InferenceApi` is deprecated in recent huggingface_hub
# releases in favor of `InferenceClient` — confirm the installed version.
diffusion_model = InferenceApi(repo_id="black-forest-labs/FLUX.1-schnell")
# Gemini model used for creative-writing generation.
model = genai.GenerativeModel("gemini-1.5-flash")

# Load the pre-trained model for Tamil to English translation
translator_model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(translator_model_name)
translator = MarianMTModel.from_pretrained(translator_model_name)

def translate_text(tamil_text):
    """Translate Tamil (or other source-language) text to English.

    Uses the module-level MarianMT tokenizer/model pair
    (Helsinki-NLP/opus-mt-mul-en) and returns the decoded string.
    """
    encoded = tokenizer(tamil_text, return_tensors="pt", padding=True)
    output_ids = translator.generate(**encoded)
    # Decode only the first (and only) sequence in the batch.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

def generate_creative_writing(english_text):
    """Generate a poem about *english_text* with the Gemini model.

    Fix: the original built the prompt as `"poem about" + english_text`,
    which omitted the separating space (e.g. "poem aboutcats"); use an
    f-string with an explicit space instead.
    """
    return model.generate_content(f"poem about {english_text}").text

def generate_image(prompt):
    """Request an image for *prompt* from the remote FLUX inference API.

    Returns whatever the Inference API yields for this model
    (assumed to be image data Gradio can render — TODO confirm).
    """
    generation_params = {"guidance_scale": 7.5, "num_inference_steps": 50}
    result = diffusion_model(inputs=prompt, params=generation_params)
    return result

def process_input(tamil_text):
    """Run the full pipeline: translate, write a poem, generate an image.

    Returns a 3-tuple ``(translated_text, creative_response, image)``.
    On any failure, the first slot carries the error string, the second a
    fixed error notice, and the image slot is ``None`` — so the Gradio UI
    always receives three outputs.
    """
    try:
        english = translate_text(tamil_text)
        poem = generate_creative_writing(english)
        image = generate_image(english)
    except Exception as err:
        # Broad catch is deliberate: surface any pipeline error in the UI
        # instead of crashing the app.
        return str(err), "Error occurred during processing", None
    return english, poem, image

# Create a Gradio interface: one Tamil text input mapped to three outputs —
# the English translation, the generated poem, and the generated image.
iface = gr.Interface(
    fn=process_input,
    inputs="text",
    outputs=["text", "text", "image"],
    title="Creative Writing and Image Generation",
    description="Enter Tamil text to get translated text, a creative response, and an image."
)

# Launch the Gradio app
iface.launch()