import os

import gradio as gr
from stability_sdk.api import Context
from stability_sdk.animation import AnimationArgs, Animator

STABILITY_HOST = "grpc.stability.ai:443"


def anim(
    f_promt,
    s_promt,
    stability_key,
    # NOTE(review): the Gradio button below wires only the three values above
    # as inputs, so every remaining parameter needs a default — without them
    # each click raised `TypeError: anim() missing ... positional arguments`.
    # Defaults are chosen to mirror stability_sdk's AnimationArgs defaults;
    # TODO confirm against the installed SDK version.
    cadence_interp="mix",
    width=512,
    height=512,
    sampler="K_dpmpp_2m",
    model="stable-diffusion-v1-5",
    custom_model="",
    seed=-1,
    cfg_scale=7.0,
    clip_guidance="FastBlue",
    init_image="",
    init_sizing="stretch",
    mask_path="",
    mask_invert=False,
    preset="None",
    animation_mode="3D warp",
    max_frames=72,
    border="replicate",
    noise_add_curve="0:(0.02)",
    noise_scale_curve="0:(0.99)",
    strength_curve="0:(0.65)",
    steps_curve="0:(30)",
    steps_strength_adj=False,
    interpolate_prompts=False,
    locked_seed=False,
    angle="0:(0)",
    zoom="0:(1)",
    translation_x="0:(0)",
    translation_y="0:(0)",
    translation_z="0:(0)",
    rotation_x="0:(0)",
    rotation_y="0:(0)",
    rotation_z="0:(0)",
    diffusion_cadence_curve="0:(1)",
    cadence_spans=False,
    color_coherence="LAB",
    brightness_curve="0:(1.0)",
    contrast_curve="0:(1.0)",
    hue_curve="0:(0.0)",
    saturation_curve="0:(1.0)",
    lightness_curve="0:(0.0)",
    color_match_animate=True,
    depth_model_weight=0.3,
    near_plane=200,
    far_plane=10000,
    fov_curve="0:(25)",
    depth_blur_curve="0:(0.0)",
    depth_warp_curve="0:(1.0)",
    save_depth_maps=False,
    camera_type="perspective",
    render_mode="mesh",
    mask_power=0.3,
    use_inpainting_model=False,
    inpaint_border=False,
    mask_min_value="0:(0.25)",
    mask_binarization_thr=0.5,
    save_inpaint_masks=False,
    video_init_path="",
    extract_nth_frame=1,
    video_mix_in_curve="0:(0.02)",
    video_flow_warp=True,
    fps=12,
    reverse=False,
):
    """Render a two-prompt animation via the Stability Animation API.

    Connects to the Stability gRPC endpoint using ``stability_key``,
    copies every keyword parameter onto an ``AnimationArgs`` object,
    renders all frames, writes them as PNGs under ``/tmp/frames/output``,
    and returns the list of saved file paths (consumed by the ``gr.File``
    output component).

    Raises:
        Whatever the SDK raises on authentication/connection failure —
        ``context.get_user_info()`` is called up front as a connection test.
    """
    context = Context(STABILITY_HOST, stability_key)
    # Fail fast on a bad key or unreachable host before any rendering work.
    context.get_user_info()
    print("Connection successful!")

    # Copy every setting onto AnimationArgs exactly once (the original
    # assigned seed, max_frames, strength_curve, diffusion_cadence_curve,
    # interpolate_prompts and locked_seed twice).
    args = AnimationArgs()
    args.width = width
    args.height = height
    args.sampler = sampler
    args.model = model
    args.custom_model = custom_model
    args.seed = seed
    args.cfg_scale = cfg_scale
    args.clip_guidance = clip_guidance
    args.init_image = init_image
    args.init_sizing = init_sizing
    args.mask_path = mask_path
    args.mask_invert = mask_invert
    args.preset = preset
    args.animation_mode = animation_mode
    args.max_frames = max_frames
    args.border = border
    args.noise_add_curve = noise_add_curve
    args.noise_scale_curve = noise_scale_curve
    args.strength_curve = strength_curve
    args.steps_curve = steps_curve
    args.steps_strength_adj = steps_strength_adj
    args.interpolate_prompts = interpolate_prompts
    args.locked_seed = locked_seed
    args.angle = angle
    args.zoom = zoom
    args.translation_x = translation_x
    args.translation_y = translation_y
    args.translation_z = translation_z
    args.rotation_x = rotation_x
    args.rotation_y = rotation_y
    args.rotation_z = rotation_z
    args.diffusion_cadence_curve = diffusion_cadence_curve
    args.cadence_interp = cadence_interp
    args.cadence_spans = cadence_spans
    args.color_coherence = color_coherence
    args.brightness_curve = brightness_curve
    args.contrast_curve = contrast_curve
    args.hue_curve = hue_curve
    args.saturation_curve = saturation_curve
    args.lightness_curve = lightness_curve
    args.color_match_animate = color_match_animate
    args.depth_model_weight = depth_model_weight
    args.near_plane = near_plane
    args.far_plane = far_plane
    args.fov_curve = fov_curve
    args.depth_blur_curve = depth_blur_curve
    args.depth_warp_curve = depth_warp_curve
    args.save_depth_maps = save_depth_maps
    args.camera_type = camera_type
    args.render_mode = render_mode
    args.mask_power = mask_power
    args.use_inpainting_model = use_inpainting_model
    args.inpaint_border = inpaint_border
    args.mask_min_value = mask_min_value
    args.mask_binarization_thr = mask_binarization_thr
    args.save_inpaint_masks = save_inpaint_masks
    args.video_init_path = video_init_path
    args.extract_nth_frame = extract_nth_frame
    args.video_mix_in_curve = video_mix_in_curve
    args.video_flow_warp = video_flow_warp
    args.fps = fps
    args.reverse = reverse

    # First prompt anchored at frame 0, second at frame 2.
    animation_prompts = {
        0: f_promt,
        2: s_promt,
    }
    negative_prompt = ""

    # Animator orchestrates the frame-by-frame rendering.
    animator = Animator(
        api_context=context,
        animation_prompts=animation_prompts,
        negative_prompt=negative_prompt,
        args=args,
    )

    image_path = "/tmp/frames/"
    output_dir = os.path.join(image_path, "output")
    # exist_ok avoids the exists()/makedirs race of the original check.
    os.makedirs(output_dir, exist_ok=True)

    # Render each frame and collect the saved file paths for the UI.
    images = []
    for idx, frame in enumerate(animator.render()):
        file_path = os.path.join(output_dir, f"frame_{idx:05d}.png")
        frame.save(file_path)
        print("Created frame at:" + file_path)
        images.append(file_path)
    return images


with gr.Blocks() as demo:
    gr.Markdown("Stability Animation")
    f_promt = gr.Textbox(label="First Prompt", value="a photo of a cute cat")
    s_promt = gr.Textbox(label="Second Prompt", value="a photo of a cute dog")
    stability_key = gr.Textbox(label="Stability Key", value="")
    outimg = gr.File(label="Generated Files")
    btn = gr.Button('Anim')
    # Only the three textboxes are wired as inputs; every other anim()
    # parameter has a default, so this call is now well-formed.
    btn.click(
        fn=anim,
        inputs=[f_promt, s_promt, stability_key],
        outputs=[outimg],
        api_name="AnimAPI",
    )

demo.launch()