PiDiNet_ONNX / App.py
emilymogire's picture
Create App.py
8065778 verified
raw
history blame
13.9 kB
results.txt story_app.py ~ $ nano story_app.py
import requests
import os

# 1. SETUP - Replace 'YOUR_TOKEN_HERE' with your real Hugging Face token
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}


def generate_video_from_story():
    """Prompt the user for a story, render one image from it via the
    Hugging Face inference API, and loop that image into a 5-second MP4.

    Side effects: reads stdin, performs a network POST, writes
    ``scene.jpg`` and ``story_video.mp4`` in the current directory,
    and shells out to ffmpeg. Returns None.
    """
    # 2. THE STORY INPUT
    print("\n--- StoryViz AI ---")
    story = input("Enter your story: ")
    print("🎨 AI is drawing your story... please wait.")

    # 3. GENERATE THE IMAGE — the API returns raw image bytes on success
    response = requests.post(API_URL, headers=headers, json={"inputs": story})
    if response.status_code == 200:
        with open("scene.jpg", "wb") as f:
            f.write(response.content)
        # NOTE(review): these status strings look mojibake'd (likely ✅/🚀
        # double-encoded); preserved byte-for-byte — confirm intended glyphs.
        print("βœ… Image generated!")

        # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
        # -loop 1 repeats the single frame; -t 5 caps output at 5 seconds;
        # -y overwrites any previous story_video.mp4 without prompting.
        print("🎬 Converting image to a 5-second video...")
        os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
        print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
    else:
        # Non-200 most commonly means a bad/missing bearer token.
        print(f"❌ Error: {response.status_code}. Check your Token!")


if __name__ == "__main__":
    generate_video_from_story()
tus_code == 200:
with open("scene.jpg", "wb") as f:
f.write(response.content)
print("βœ… Image generated!")
# 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
print("🎬 Converting image to a 5-second video...")
# This command takes the image and loops it for 5 seconds
os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
print("To view it, move it to your phone storage using:")
print("termux-setup-storage && cp story_video.mp4 /sdcard/")
else:
print("❌ Error: AI is busy or Token is wrong.")
generate_video_from_story()
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $ import os
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $ import time
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $
~ $ # 1. SETUP
~ $ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
API_URL: command not found
~ $ headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}
No command headers found, did you mean:
Command feathers in package chicken
Command head in package coreutils
Command heimer in package heimer from the x11-repo repository
Command hexer in package hexer
Command readelf in package llvm
Command folders in package mailutils
Command gears in package mesa-demos from the x11-repo repository
Command folders in package nmh
Command sneakers in package no-more-secrets
~ $
~ $ def generate_video_from_story():
bash: syntax error near unexpected token `('
~ $ # 2. THE STORY INPUT
~ $ story = input("Enter your story: ")
bash: syntax error near unexpected token `('
~ $ print("🎨 AI is drawing your story...")
bash: syntax error near unexpected token `"🎨 AI is drawing your story..."'
~ $
~ $ # 3. GENERATE THE IMAGE
~ $ response = requests.post(API_URL, headers=headers, json={"inputs": story})
bash: syntax error near unexpected token `('
~ $
~ $ if response.status_code == 200:
> with open("scene.jpg", "wb") as f:
bash: syntax error near unexpected token `('
~ $ f.write(response.content)
bash: syntax error near unexpected token `response.content'
~ $ print("βœ… Image generated!")
bash: syntax error near unexpected token `"βœ… Image generated!"'
~ $
~ $ # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
~ $ print("🎬 Converting image to a 5-second video...")
bash: syntax error near unexpected token `"🎬 Converting image to a 5-second video..."'
~ $ # This command takes the image and loops it for 5 seconds
~ $ os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
bash: syntax error near unexpected token `"ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y"'
~ $
~ $ print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
bash: syntax error near unexpected token `"\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4"'
~ $ print("To view it, move it to your phone storage using:")
bash: syntax error near unexpected token `"To view it, move it to your phone storage using:"'
~ $ print("termux-setup-storage && cp story_video.mp4 /sdcard/")
bash: syntax error near unexpected token `"termux-setup-storage && cp story_video.mp4 /sdcard/"'
~ $ else:
No command else: found, did you mean:
Command plser in package libgnustep-base
~ $ print("❌ Error: AI is busy or Token is wrong.")
bash: syntax error near unexpected token `"❌ Error: AI is busy or Token is wrong."'
~ $
import requests
import os
import time

# 1. SETUP
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}


def generate_video_from_story():
    """Prompt for a story, generate an image for it via the Hugging Face
    inference API, and convert that image into a 5-second video with ffmpeg.

    Side effects: reads stdin, performs a network POST, writes
    ``scene.jpg`` and ``story_video.mp4``, and shells out to ffmpeg.
    Returns None.
    """
    # 2. THE STORY INPUT
    story = input("Enter your story: ")
    print("🎨 AI is drawing your story...")

    # 3. GENERATE THE IMAGE — success yields raw image bytes in the body
    response = requests.post(API_URL, headers=headers, json={"inputs": story})
    if response.status_code == 200:
        with open("scene.jpg", "wb") as f:
            f.write(response.content)
        # NOTE(review): status strings appear mojibake'd (likely ✅/🚀);
        # preserved byte-for-byte — confirm intended glyphs.
        print("βœ… Image generated!")

        # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
        print("🎬 Converting image to a 5-second video...")
        # This command takes the image and loops it for 5 seconds
        os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
        print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
        print("To view it, move it to your phone storage using:")
        print("termux-setup-storage && cp story_video.mp4 /sdcard/")
    else:
        print("❌ Error: AI is busy or Token is wrong.")


generate_video_from_story()
import os

from openai import OpenAI

# Route chat-completion requests through the Hugging Face inference router.
# Requires the HF_TOKEN environment variable; raises KeyError if unset.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="moonshotai/Kimi-K2-Instruct-0905",
    messages=[
        {
            "role": "user",
            "content": "Generate a list of 10 interesting facts about space."
        }
    ],
)

# Prints the full message object (role + content), not just the text.
print(completion.choices[0].message)
import os

from huggingface_hub import InferenceClient

# Text-to-image via the Hugging Face Inference API ("wavespeed" provider).
# Requires the HF_TOKEN environment variable; raises KeyError if unset.
client = InferenceClient(
    provider="wavespeed",
    api_key=os.environ["HF_TOKEN"],
)

# output is a PIL.Image object
image = client.text_to_image(
    "A robot playing chess with a human",
    model="black-forest-labs/FLUX.1-dev",
)
~ $ cat story_app.py
import requests
import os

# 1. SETUP - Replace 'YOUR_TOKEN_HERE' with your real Hugging Face token
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}


def generate_video_from_story():
    """Prompt the user for a story, render one image from it via the
    Hugging Face inference API, and loop that image into a 5-second MP4.

    Side effects: reads stdin, performs a network POST, writes
    ``scene.jpg`` and ``story_video.mp4`` in the current directory,
    and shells out to ffmpeg. Returns None.
    """
    # 2. THE STORY INPUT
    print("\n--- StoryViz AI ---")
    story = input("Enter your story: ")
    print("🎨 AI is drawing your story... please wait.")

    # 3. GENERATE THE IMAGE — the API returns raw image bytes on success
    response = requests.post(API_URL, headers=headers, json={"inputs": story})
    if response.status_code == 200:
        with open("scene.jpg", "wb") as f:
            f.write(response.content)
        # NOTE(review): these status strings look mojibake'd (likely ✅/🚀);
        # preserved byte-for-byte — confirm intended glyphs.
        print("βœ… Image generated!")

        # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
        # -loop 1 repeats the single frame; -t 5 caps output at 5 seconds;
        # -y overwrites any previous story_video.mp4 without prompting.
        print("🎬 Converting image to a 5-second video...")
        os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
        print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
    else:
        # Non-200 most commonly means a bad/missing bearer token.
        print(f"❌ Error: {response.status_code}. Check your Token!")


if __name__ == "__main__":
    generate_video_from_story()
tus_code == 200:
with open("scene.jpg", "wb") as f:
f.write(response.content)
print("βœ… Image generated!")
# 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
print("🎬 Converting image to a 5-second video...")
# This command takes the image and loops it for 5 seconds
os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
print("To view it, move it to your phone storage using:")
print("termux-setup-storage && cp story_video.mp4 /sdcard/")
else:
print("❌ Error: AI is busy or Token is wrong.")
generate_video_from_story()
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $ import os
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $ import time
The program import is not installed. Install it by executing:
pkg install imagemagick
~ $
~ $ # 1. SETUP
~ $ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
API_URL: command not found
~ $ headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}
No command headers found, did you mean:
Command feathers in package chicken
Command head in package coreutils
Command heimer in package heimer from the x11-repo repository
Command hexer in package hexer
Command readelf in package llvm
Command folders in package mailutils
Command gears in package mesa-demos from the x11-repo repository
Command folders in package nmh
Command sneakers in package no-more-secrets
~ $
~ $ def generate_video_from_story():
bash: syntax error near unexpected token `('
~ $ # 2. THE STORY INPUT
~ $ story = input("Enter your story: ")
bash: syntax error near unexpected token `('
~ $ print("🎨 AI is drawing your story...")
bash: syntax error near unexpected token `"🎨 AI is drawing your story..."'
~ $
~ $ # 3. GENERATE THE IMAGE
~ $ response = requests.post(API_URL, headers=headers, json={"inputs": story})
bash: syntax error near unexpected token `('
~ $
~ $ if response.status_code == 200:
> with open("scene.jpg", "wb") as f:
bash: syntax error near unexpected token `('
~ $ f.write(response.content)
bash: syntax error near unexpected token `response.content'
~ $ print("βœ… Image generated!")
bash: syntax error near unexpected token `"βœ… Image generated!"'
~ $
~ $ # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
~ $ print("🎬 Converting image to a 5-second video...")
bash: syntax error near unexpected token `"🎬 Converting image to a 5-second video..."'
~ $ # This command takes the image and loops it for 5 seconds
~ $ os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
bash: syntax error near unexpected token `"ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y"'
~ $
~ $ print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
bash: syntax error near unexpected token `"\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4"'
~ $ print("To view it, move it to your phone storage using:")
bash: syntax error near unexpected token `"To view it, move it to your phone storage using:"'
~ $ print("termux-setup-storage && cp story_video.mp4 /sdcard/")
bash: syntax error near unexpected token `"termux-setup-storage && cp story_video.mp4 /sdcard/"'
~ $ else:
No command else: found, did you mean:
Command plser in package libgnustep-base
~ $ print("❌ Error: AI is busy or Token is wrong.")
bash: syntax error near unexpected token `"❌ Error: AI is busy or Token is wrong."'
~ $
import requests
import os
import time

# 1. SETUP
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
headers = {"Authorization": "Bearer YOUR_TOKEN_HERE"}


def generate_video_from_story():
    """Prompt for a story, generate an image for it via the Hugging Face
    inference API, and convert that image into a 5-second video with ffmpeg.

    Side effects: reads stdin, performs a network POST, writes
    ``scene.jpg`` and ``story_video.mp4``, and shells out to ffmpeg.
    Returns None.
    """
    # 2. THE STORY INPUT
    story = input("Enter your story: ")
    print("🎨 AI is drawing your story...")

    # 3. GENERATE THE IMAGE — success yields raw image bytes in the body
    response = requests.post(API_URL, headers=headers, json={"inputs": story})
    if response.status_code == 200:
        with open("scene.jpg", "wb") as f:
            f.write(response.content)
        # NOTE(review): status strings appear mojibake'd (likely ✅/🚀);
        # preserved byte-for-byte — confirm intended glyphs.
        print("βœ… Image generated!")

        # 4. TURN IMAGE INTO VIDEO (Using ffmpeg)
        print("🎬 Converting image to a 5-second video...")
        # This command takes the image and loops it for 5 seconds
        os.system("ffmpeg -loop 1 -i scene.jpg -c:v libx264 -t 5 -pix_fmt yuv420p story_video.mp4 -y")
        print("\nπŸš€ SUCCESS! Your video is saved as: story_video.mp4")
        print("To view it, move it to your phone storage using:")
        print("termux-setup-storage && cp story_video.mp4 /sdcard/")
    else:
        print("❌ Error: AI is busy or Token is wrong.")


generate_video_from_story()
import os

from openai import OpenAI

# Route chat-completion requests through the Hugging Face inference router.
# Requires the HF_TOKEN environment variable; raises KeyError if unset.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="moonshotai/Kimi-K2-Instruct-0905",
    messages=[
        {
            "role": "user",
            "content": "Generate a list of 10 interesting facts about space."
        }
    ],
)

# Prints the full message object (role + content), not just the text.
print(completion.choices[0].message)
import os

from huggingface_hub import InferenceClient

# Text-to-image via the Hugging Face Inference API ("wavespeed" provider).
# Requires the HF_TOKEN environment variable; raises KeyError if unset.
client = InferenceClient(
    provider="wavespeed",
    api_key=os.environ["HF_TOKEN"],
)

# output is a PIL.Image object
image = client.text_to_image(
    "A robot playing chess with a human",
    model="black-forest-labs/FLUX.1-dev",
)