pravin0077 committed on
Commit 83184e1 · verified · 1 Parent(s): 48c1712

Update app.py

Files changed (1)
  1. app.py +67 -61
app.py CHANGED
@@ -6,89 +6,95 @@ import io
  from PIL import Image
  import gradio as gr

- # Set Hugging Face API key
  hf_token = os.getenv("HUGGINGFACE_API_KEY")
- if hf_token is None:
-     raise ValueError("Hugging Face API key not found in environment variables.")

- # Login to Hugging Face
- login(token=hf_token)

- # Define models for specific languages
  language_models = {
-     "fra": "Helsinki-NLP/opus-mt-fr-en",
-     "spa": "Helsinki-NLP/opus-mt-es-en",
-     "tam": "Helsinki-NLP/opus-mt-tam-en",
-     "deu": "Helsinki-NLP/opus-mt-de-en",
-     "jpn": "Helsinki-NLP/opus-mt-ja-en",
-     "rus": "Helsinki-NLP/opus-mt-ru-en",
-     "kor": "Helsinki-NLP/opus-mt-ko-en",
-     "hin": "Helsinki-NLP/opus-mt-hi-en",
-     "ita": "Helsinki-NLP/opus-mt-it-en",
-     "por": "Helsinki-NLP/opus-mt-pt-en"
-     # Add more language models as needed
  }

- # Function to get translator pipeline for specific language
- def get_translator(language_code):
-     model_name = language_models.get(language_code)
-     if model_name:
-         tokenizer = MarianTokenizer.from_pretrained(model_name)
-         model = MarianMTModel.from_pretrained(model_name)
-         return pipeline("translation", model=model, tokenizer=tokenizer)
-     else:
-         raise ValueError(f"No translation model found for language code '{language_code}'.")

- # FLUX model API settings
  flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
  flux_headers = {"Authorization": f"Bearer {hf_token}"}

- # Function for translation, creative text generation, and image creation
- def translate_generate_image_and_text(input_text, src_lang_code):
      try:
-         # Step 1: Get translator and translate text
-         translator = get_translator(src_lang_code)
-         translation = translator(input_text, max_length=40)
          translated_text = translation[0]['translation_text']
-
-         # Step 2: Generate creative text with Mistral model
-         mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
-         mistral_headers = {"Authorization": f"Bearer {hf_token}"}
-         mistral_response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
-
-         if mistral_response.status_code == 200:
-             creative_text = mistral_response.json()[0]['generated_text']
-         else:
-             creative_text = "Error generating creative text"
-
-         # Step 3: Generate an image with FLUX model
-         flux_response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": creative_text})
-         if flux_response.status_code == 200:
-             image_bytes = flux_response.content
              image = Image.open(io.BytesIO(image_bytes))
          else:
-             image = None

-         return translated_text, creative_text, image

      except Exception as e:
-         return f"An error occurred: {str(e)}", None, None

- # Gradio interface setup
  interface = gr.Interface(
      fn=translate_generate_image_and_text,
      inputs=[
-         gr.Textbox(label="Enter text for translation"),
-         gr.Textbox(label="Source Language Code", placeholder="e.g., 'fra' for French, 'spa' for Spanish")
-     ],
-     outputs=[
-         gr.Textbox(label="Translated Text"),
-         gr.Textbox(label="Creative Text"),
-         gr.Image(label="Generated Image")
      ],
-     title="Multilingual Translation, Creative Content, and Image Generator",
-     description="Select a language and translate text to English, generate creative content, and produce an image."
  )

- # Launch the Gradio app
  interface.launch()

  from PIL import Image
  import gradio as gr

+ # Retrieve the token from the environment variable
  hf_token = os.getenv("HUGGINGFACE_API_KEY")
+ if not hf_token:
+     raise ValueError("Hugging Face token not found in environment variables.")

+ login(token=hf_token, add_to_git_credential=True)

+ # Define available languages with their respective Helsinki model names
  language_models = {
+     "Abkhaz": "Helsinki-NLP/opus-mt-abk-en",
+     "Arabic": "Helsinki-NLP/opus-mt-ar-en",
+     "Azerbaijani": "Helsinki-NLP/opus-mt-az-en",
+     "Bengali": "Helsinki-NLP/opus-mt-bn-en",
+     "Chinese": "Helsinki-NLP/opus-mt-zh-en",
+     "Danish": "Helsinki-NLP/opus-mt-da-en",
+     "Finnish": "Helsinki-NLP/opus-mt-fi-en",
+     "French": "Helsinki-NLP/opus-mt-fr-en",
+     "Hindi": "Helsinki-NLP/opus-mt-hi-en",
+     "Tamil": "Helsinki-NLP/opus-mt-mul-en" # Using multilingual model for Tamil
  }

+ # Function to load a translation model dynamically
+ def load_translation_pipeline(language):
+     model_name = language_models[language]
+     tokenizer = MarianTokenizer.from_pretrained(model_name)
+     model = MarianMTModel.from_pretrained(model_name)
+     return pipeline("translation", model=model, tokenizer=tokenizer)

+ # API credentials and endpoint for FLUX (Image generation)
  flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
  flux_headers = {"Authorization": f"Bearer {hf_token}"}

+ # Function for translation
+ def translate_text(tamil_text, language):
+     translator = load_translation_pipeline(language)
      try:
+         translation = translator(tamil_text, max_length=40)
          translated_text = translation[0]['translation_text']
+         return translated_text
+     except Exception as e:
+         return f"An error occurred: {str(e)}"
+
+ # Function to send payload and generate an image
+ def generate_image(prompt):
+     try:
+         response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": prompt})
+         if response.status_code == 200:
+             image_bytes = response.content
              image = Image.open(io.BytesIO(image_bytes))
+             return image
          else:
+             return None
+     except Exception as e:
+         print(f"An error occurred: {e}")
+         return None

+ # Function for Mistral API call to generate creative text
+ mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
+ mistral_headers = {"Authorization": f"Bearer {hf_token}"}

+ def generate_creative_text(translated_text):
+     try:
+         response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
+         if response.status_code == 200:
+             creative_text = response.json()[0]['generated_text']
+             return creative_text
+         else:
+             return "Error generating creative text"
      except Exception as e:
+         return None

+ # Function to handle the full workflow
+ def translate_generate_image_and_text(tamil_text, language):
+     translated_text = translate_text(tamil_text, language)
+     image = generate_image(translated_text)
+     creative_text = generate_creative_text(translated_text)
+     return translated_text, creative_text, image
+
+ # Create Gradio interface with language selection
  interface = gr.Interface(
      fn=translate_generate_image_and_text,
      inputs=[
+         gr.Textbox(label="Input Text in Source Language"),
+         gr.Dropdown(choices=list(language_models.keys()), label="Source Language")
      ],
+     outputs=["text", "text", "image"],
+     title="Multilingual Translation, Image Generation & Creative Text",
+     description="Enter text to translate to English, generate an image, and create creative content based on the translation."
  )

+ # Launch Gradio app
  interface.launch()
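
For quick manual testing outside the Gradio UI, the snippet below is a minimal standalone sketch of the same translate-then-generate flow, assuming HUGGINGFACE_API_KEY is exported and using the French checkpoint from the table above; the sample sentence and output filename are illustrative and not part of this commit.

import io
import os

import requests
from PIL import Image
from transformers import MarianMTModel, MarianTokenizer, pipeline

token = os.getenv("HUGGINGFACE_API_KEY")

# Translate French -> English with the same Helsinki-NLP checkpoint listed above.
model_name = "Helsinki-NLP/opus-mt-fr-en"
translator = pipeline(
    "translation",
    model=MarianMTModel.from_pretrained(model_name),
    tokenizer=MarianTokenizer.from_pretrained(model_name),
)
prompt = translator("Un chat endormi sur un canapé.", max_length=40)[0]["translation_text"]

# Send the translated prompt to the FLUX.1-dev Inference API endpoint used in app.py.
response = requests.post(
    "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev",
    headers={"Authorization": f"Bearer {token}"},
    json={"inputs": prompt},
)
if response.status_code == 200:
    Image.open(io.BytesIO(response.content)).save("flux_output.png")  # illustrative filename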