Spaces:
Runtime error

The Space fails with a runtime error; the full `app.py` is below.
```python
import os
from huggingface_hub import login, hf_hub_download
import onnxruntime as ort
import numpy as np
import gradio as gr

# Read the Hugging Face token from an environment variable (a Space secret)
# instead of hard-coding it.
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise ValueError("Please set the HF_TOKEN environment variable with your Hugging Face token.")
login(token=HF_TOKEN)

# Download the ONNX T5 text encoder at startup. Any authentication, access, or
# network failure here happens at import time and crashes the Space before the UI loads.
repo_id = "black-forest-labs/FLUX.1-schnell-onnx"
model_filename = "t5-fp8.opt/model.onnx"
model_path = hf_hub_download(repo_id=repo_id, filename=model_filename, token=HF_TOKEN)
session = ort.InferenceSession(model_path)


def generate_image_from_text(prompt: str):
    # Placeholder: replace with real tokenization of `prompt` and the rest of the
    # FLUX pipeline. The dummy token IDs below are stand-ins only.
    input_tokens = np.array([[1, 2, 3, 4]], dtype=np.int64)  # shape (batch, seq_len)
    # Use the model's actual input name rather than guessing "input".
    input_name = session.get_inputs()[0].name
    outputs = session.run(None, {input_name: input_tokens})
    # Note: this file is a text encoder, so outputs[0] holds prompt embeddings,
    # not pixels; the post-processing below will not yield a meaningful image.
    image_array = outputs[0]
    image_array = np.clip(image_array, 0, 1)
    image = (image_array * 255).astype(np.uint8)
    image = np.transpose(image, (1, 2, 0))
    return image


with gr.Blocks() as demo:
    gr.Markdown("## FLUX.1 ONNX Text-to-Image Generator")
    text_input = gr.Textbox(label="Enter your text prompt", lines=2)
    output_image = gr.Image(label="Generated Image")
    btn = gr.Button("Generate")
    btn.click(fn=generate_image_from_text, inputs=text_input, outputs=output_image)

if __name__ == "__main__":
    demo.launch()
```
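
For the tokenization piece, here is a minimal sketch of how the prompt could be encoded and fed to the ONNX T5 encoder with onnxruntime. The tokenizer repo (`google/t5-v1_1-xxl`) and the assumption that the graph's first input takes `int64` token IDs shaped `(batch, seq_len)` are guesses on my part; print the session's inputs and outputs first and adjust to whatever the downloaded model actually expects.

```python
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

# Assumption: a T5-style tokenizer matches this encoder; swap in whatever the repo ships.
tokenizer = AutoTokenizer.from_pretrained("google/t5-v1_1-xxl")
session = ort.InferenceSession(model_path)  # model_path from hf_hub_download above

# Inspect the graph's real input/output names and shapes before feeding anything.
for inp in session.get_inputs():
    print("input:", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)

def encode_prompt(prompt: str) -> np.ndarray:
    # Tokenize to int64 IDs with a batch dimension: shape (1, seq_len).
    tokens = tokenizer(prompt, return_tensors="np")
    feeds = {session.get_inputs()[0].name: tokens["input_ids"].astype(np.int64)}
    # A text encoder returns hidden states (prompt embeddings), not an image.
    return session.run(None, feeds)[0]

embeddings = encode_prompt("a photo of an astronaut riding a horse")
print(embeddings.shape)
```

Even with correct token IDs, this single ONNX file only produces prompt embeddings; generating an actual picture would still require the remaining FLUX stages (the CLIP encoder, the flow transformer, and the VAE decoder), so as written the Gradio callback cannot return a real image from the T5 encoder alone.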