akhaliq HF Staff committed on
Commit
4f0a998
·
verified ·
1 Parent(s): 09c9a1f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -244
app.py CHANGED
@@ -1,249 +1,17 @@
1
  import gradio as gr
2
- import os
3
- import tempfile
4
- import shutil
5
- from typing import Optional, Union
6
- from pathlib import Path
7
- from huggingface_hub import InferenceClient
8
 
9
- # -------------------------
10
- # Utilities
11
- # -------------------------
12
-
13
def cleanup_temp_files(max_age_seconds: int = 300) -> None:
    """Best-effort removal of stale generated videos from the system temp dir.

    Deletes every ``*.mp4`` in ``tempfile.gettempdir()`` whose mtime is older
    than ``max_age_seconds`` (default 300 s = 5 minutes, matching the original
    hard-coded cutoff). Per-file errors are swallowed so cleanup never blocks
    a generation; only an unexpected top-level failure is printed.

    Args:
        max_age_seconds: Minimum age, in seconds, before a temp video is reaped.
    """
    # Hoisted out of the loop: the original re-ran `import time` per file.
    import time

    try:
        cutoff = time.time() - max_age_seconds
        for file_path in Path(tempfile.gettempdir()).glob("*.mp4"):
            try:
                if file_path.stat().st_mtime < cutoff:
                    file_path.unlink(missing_ok=True)
            except Exception:
                # File may be gone already or owned by another user; this is
                # best-effort housekeeping, so skip and continue.
                pass
    except Exception as e:
        print(f"Cleanup error: {e}")
25
-
26
def _client_from_token(token: Optional[str]) -> InferenceClient:
    """Build a fal-ai ``InferenceClient`` from the signed-in user's OAuth token.

    Raises:
        gr.Error: When no token is available (user is not signed in).
    """
    if token:
        # IMPORTANT: do not set bill_to when using user OAuth tokens —
        # the generation must be billed to the user's own credits.
        return InferenceClient(provider="fal-ai", api_key=token)
    raise gr.Error("Please sign in first. This app requires your Hugging Face login.")
34
 
35
- def _save_bytes_as_temp_mp4(data: bytes) -> str:
36
- temp_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
37
- try:
38
- temp_file.write(data)
39
- temp_file.flush()
40
- return temp_file.name
41
- finally:
42
- temp_file.close()
43
-
44
- # -------------------------
45
- # Inference wrappers (no env fallback; always require LoginButton)
46
- # -------------------------
47
-
48
def generate_video(
    prompt: str,
    token: gr.OAuthToken | None,
    duration: int = 8,  # kept for future use
    size: str = "1280x720",  # kept for future use
    *_  # tolerate extra event payloads
) -> Optional[str]:
    """Generate a video from ``prompt`` via the fal-ai provider.

    Billed to the signed-in user's credits. Returns the path of a temp
    ``.mp4`` file, or ``None`` when the prompt is empty.

    Raises:
        gr.Error: When the user is not signed in or the provider rejects
            the request.
    """
    # Hard requirement: a real OAuth token. No env-secret fallback exists.
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference provider credits.")
    if not prompt or not prompt.strip():
        return None

    cleanup_temp_files()
    try:
        api = _client_from_token(token.token)
        # Ensure model id matches what users can access. Change if you intend provider repo.
        model_id = "akhaliq/sora-2"
        try:
            payload = api.text_to_video(prompt, model=model_id)
        except Exception as err:
            # Give HTTP 403 (provider permission/credit problems) a clearer message.
            import requests
            status = getattr(getattr(err, "response", None), "status_code", None) if isinstance(err, requests.HTTPError) else None
            if status == 403:
                raise gr.Error(
                    "Access denied by provider (403). Make sure your HF account has credits/permission "
                    f"for provider 'fal-ai' and model '{model_id}'."
                )
            raise
        return _save_bytes_as_temp_mp4(payload)
    except gr.Error:
        # Already user-facing; re-raise untouched.
        raise
    except Exception:
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again later.")
81
-
82
def generate_video_from_image(
    image: Union[str, bytes, None],
    prompt: str,
    token: gr.OAuthToken | None,
    *_
) -> Optional[str]:
    """Animate a still ``image`` guided by ``prompt`` via the fal-ai provider.

    Accepts a filepath or raw image bytes. Billed to the signed-in user's
    credits. Returns the path of a temp ``.mp4``, or ``None`` when either
    input is missing or the image type is unsupported.

    Raises:
        gr.Error: When the user is not signed in or the provider rejects
            the request.
    """
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference provider credits.")
    if not image or not prompt or not prompt.strip():
        return None

    cleanup_temp_files()
    try:
        # Normalise the image input to raw bytes.
        if isinstance(image, str):
            with open(image, "rb") as fh:
                image_bytes = fh.read()
        elif isinstance(image, (bytes, bytearray)):
            image_bytes = image
        else:
            return None

        api = _client_from_token(token.token)
        model_id = "akhaliq/sora-2-image-to-video"
        try:
            result = api.image_to_video(
                image_bytes,
                prompt=prompt,
                model=model_id,
            )
        except Exception as err:
            # Translate provider permission failures (HTTP 403) for the user.
            import requests
            if isinstance(err, requests.HTTPError) and getattr(err.response, "status_code", None) == 403:
                raise gr.Error(
                    "Access denied by provider (403). Make sure your HF account has credits/permission "
                    f"for provider 'fal-ai' and model '{model_id}'."
                )
            raise
        return _save_bytes_as_temp_mp4(result)
    except gr.Error:
        raise
    except Exception:
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again later.")
125
-
126
- # -------------------------
127
- # UI
128
- # -------------------------
129
-
130
def create_ui():
    """Build and return the Gradio Blocks app: login, text→video, image→video."""
    # Styling: light/dark logo switching plus the billing-notice banner.
    css = '''
    .logo-dark{display: none}
    .dark .logo-dark{display: block !important}
    .dark .logo-light{display: none}
    #sub_title{margin-top: -20px !important}
    .notice {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 14px 16px;
        border-radius: 12px;
        margin: 18px auto 6px;
        max-width: 860px;
        text-align: center;
        font-size: 0.98rem;
    }
    '''

    with gr.Blocks(title="Sora-2 (uses your provider credits)", theme=gr.themes.Soft(), css=css) as ui:
        # Header and billing disclaimer.
        gr.HTML("""
        <div style="text-align:center; max-width:900px; margin:0 auto;">
            <h1 style="font-size:2.2em; margin-bottom:6px;">🎬 Sora-2</h1>
            <p style="color:#777; margin:0 0 8px;">Generate videos via the Hugging Face Inference API (provider: fal-ai)</p>
            <div class="notice">
                <b>Heads up:</b> This is a paid app that uses <b>your</b> inference provider credits when you run generations.
                Free users get <b>$0.10 in included credits</b>. <b>PRO users</b> get <b>$2 in included credits</b>
                and can continue using beyond that (with billing).
                <a href='http://huggingface.co/subscribe/pro?source=sora_2' target='_blank' style='color:#fff; text-decoration:underline; font-weight:bold;'>Subscribe to PRO</a>
                for more credits. Please sign in with your Hugging Face account to continue.
            </div>
            <p style="font-size: 0.9em; color: #999; margin-top: 10px;">
                Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color:#fff; text-decoration:underline;">anycoder</a>
            </p>
        </div>
        """)

        login_button = gr.LoginButton("Sign in with Hugging Face")

        # --- Text -> Video ---
        with gr.Row():
            with gr.Column(scale=1):
                t2v_prompt = gr.Textbox(
                    label="Enter your prompt",
                    placeholder="Describe the video you want to create…",
                    lines=4,
                    elem_id="prompt-text-input"
                )
                t2v_button = gr.Button("🎥 Generate Video", variant="primary")
            with gr.Column(scale=1):
                t2v_output = gr.Video(
                    label="Generated Video",
                    height=400,
                    interactive=False,
                    show_download_button=True,
                    elem_id="text-to-video"
                )

        # Order of inputs: prompt, token (the LoginButton supplies the OAuth token).
        t2v_button.click(
            fn=generate_video,
            inputs=[t2v_prompt, login_button],
            outputs=[t2v_output],
        )

        # --- Image -> Video ---
        gr.HTML("""
        <div style="text-align:center; margin: 34px 0 10px;">
            <h3 style="margin-bottom:6px;">🖼️ ➜ 🎬 Image → Video (beta)</h3>
            <p style="color:#666; margin:0;">Turn a single image into a short video with a guiding prompt.</p>
        </div>
        """)
        with gr.Row():
            with gr.Column(scale=1):
                i2v_image = gr.Image(label="Upload an image", type="filepath")
                i2v_prompt = gr.Textbox(
                    label="Describe how the scene should evolve",
                    placeholder="e.g., The cat starts to dance and spins playfully",
                    lines=3,
                    elem_id="img-prompt-text-input"
                )
                i2v_button = gr.Button("🎥 Generate from Image", variant="primary")
            with gr.Column(scale=1):
                i2v_output = gr.Video(
                    label="Generated Video (from Image)",
                    height=400,
                    interactive=False,
                    show_download_button=True,
                    elem_id="image-to-video"
                )

        # Order of inputs: image, prompt, token.
        i2v_button.click(
            fn=generate_video_from_image,
            inputs=[i2v_image, i2v_prompt, login_button],
            outputs=[i2v_output],
        )

        # Examples stay UI-only (no cached inference) so clicking one never
        # triggers an accidental billed generation.
        gr.Examples(
            examples=[["A majestic golden eagle soaring through a vibrant sunset sky"]],
            inputs=t2v_prompt
        )

    return ui
234
-
235
- # -------------------------
236
- # Entrypoint
237
- # -------------------------
238
-
239
if __name__ == "__main__":
    # Best-effort startup housekeeping: reap stale temp videos and drop any
    # previously cached Gradio examples. Failures are logged, never fatal.
    try:
        cleanup_temp_files()
        if os.path.exists("gradio_cached_examples"):
            shutil.rmtree("gradio_cached_examples", ignore_errors=True)
    except Exception as e:
        print(f"Initial cleanup error: {e}")

    web_app = create_ui()
    # Open concurrency on the queue, but keep the programmatic API surface closed.
    web_app.queue(status_update_rate="auto", api_open=False, default_concurrency_limit=None)
    web_app.launch(show_api=False, enable_monitoring=False, quiet=True, ssr_mode=True)
 
import gradio as gr

# User-facing notice shown while the full Sora-2 app is offline.
_MAINTENANCE_NOTICE = """
    # 🚧 Under Maintenance

    Sora 2 app is currently under maintenance. Please check back later.

    We apologize for any inconvenience.
    """

# Create the Gradio interface — a single static Markdown placeholder.
with gr.Blocks(theme=gr.themes.Soft(), title="Sora 2") as demo:
    gr.Markdown(_MAINTENANCE_NOTICE)

# Launch the app when run as a script (Spaces also picks up `demo` directly).
if __name__ == "__main__":
    demo.launch()