| | |
| | """ |
| | Trouter-Imagine-1 Comprehensive Examples |
| | Apache 2.0 License |
| | |
| | This file contains extensive examples demonstrating various use cases |
| | and advanced techniques for the Trouter-Imagine-1 model. |
| | |
| | Topics Covered: |
| | - Basic text-to-image generation |
| | - Advanced parameter tuning |
| | - Batch processing workflows |
| | - Style transfer techniques |
| | - Prompt engineering strategies |
| | - Memory optimization |
| | - Multi-resolution generation |
| | - Quality comparison testing |
| | - Scheduler comparison |
| | - Automated prompt generation |
| | - Image series creation |
| | - Professional workflows |
| | """ |
| |
|
| | import torch |
| | from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler |
| | from PIL import Image, ImageDraw, ImageFont |
| | import random |
| | import json |
| | from pathlib import Path |
| | from typing import List, Dict, Tuple |
| | import time |
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_basic_generation():
    """
    Simplest example of generating an image from text.

    Perfect for beginners getting started with the model.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 1: Basic Text-to-Image Generation")
    print(banner)

    # Load the pipeline in half precision and move it onto the GPU.
    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    prompt = "a beautiful sunset over mountains, vibrant colors, professional photography"

    # Run the pipeline with default settings and keep the first image.
    print(f"Generating: {prompt}")
    result = pipe(prompt)
    result.images[0].save("example1_basic.png")
    print("✓ Image saved to example1_basic.png")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_negative_prompts():
    """
    Demonstrates how negative prompts improve image quality.

    A negative prompt specifies what NOT to include in the generation;
    the two saved images can be compared side by side.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 2: Using Negative Prompts")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    prompt = "portrait of a young woman, elegant dress, studio lighting"

    # Baseline: no negative prompt at all.
    print("Generating WITHOUT negative prompt...")
    baseline = pipe(prompt, num_inference_steps=30).images[0]
    baseline.save("example2_without_negative.png")

    # Same prompt, now steering away from common failure modes.
    negative_prompt = "blurry, low quality, distorted, bad anatomy, ugly, deformed"
    print("Generating WITH negative prompt...")
    improved = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=30,
    ).images[0]
    improved.save("example2_with_negative.png")

    print("✓ Compare example2_without_negative.png vs example2_with_negative.png")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_parameter_exploration():
    """
    Shows how different parameters affect the output.

    Sweeps guidance scale first, then the number of inference steps
    (with timing), saving one image per setting.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 3: Parameter Exploration")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    prompt = "a cozy cabin in snowy mountains, winter scene, warm lights"

    # Sweep 1: guidance scale (prompt adherence vs creative freedom).
    print("Testing different guidance scales...")
    for scale in (5.0, 7.5, 10.0, 15.0):
        print(f"  Generating with guidance_scale={scale}")
        img = pipe(
            prompt,
            guidance_scale=scale,
            num_inference_steps=30,
        ).images[0]
        img.save(f"example3_guidance_{scale}.png")

    # Sweep 2: inference step count (quality vs speed), timed per run.
    print("\nTesting different step counts...")
    for steps in (15, 25, 35, 50):
        print(f"  Generating with {steps} steps")
        started = time.time()
        img = pipe(
            prompt,
            num_inference_steps=steps,
            guidance_scale=7.5,
        ).images[0]
        elapsed = time.time() - started
        img.save(f"example3_steps_{steps}.png")
        print(f"    Completed in {elapsed:.2f}s")

    print("✓ Parameter exploration complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_multi_resolution():
    """
    Generate the same prompt at different resolutions.

    Demonstrates quality vs speed tradeoffs: larger outputs take longer
    and use more VRAM. Saves one timed image per resolution preset.
    """
    print("\n" + "="*70)
    print("EXAMPLE 4: Multi-Resolution Generation")
    print("="*70)

    model_id = "OpenTrouter/Trouter-Imagine-1"
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16
    ).to("cuda")

    prompt = "futuristic cyberpunk city at night, neon lights, detailed"

    # (width, height, label) presets: square sizes plus landscape/portrait.
    resolutions = [
        (512, 512, "standard"),
        (768, 768, "high"),
        (1024, 1024, "ultra"),
        (768, 512, "landscape"),
        (512, 768, "portrait")
    ]

    for width, height, desc in resolutions:
        print(f"Generating {width}x{height} ({desc})...")
        start_time = time.time()

        image = pipe(
            prompt,
            width=width,
            height=height,
            num_inference_steps=30,
            guidance_scale=7.5
        ).images[0]

        elapsed = time.time() - start_time
        filename = f"example4_{desc}_{width}x{height}.png"
        image.save(filename)
        # BUG FIX: the progress line printed a "(unknown)" placeholder
        # instead of the saved filename.
        print(f"  ✓ Saved {filename} ({elapsed:.2f}s)")

    print("✓ Multi-resolution generation complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_seed_variations():
    """
    Generate variations of the same prompt using different seeds.

    Useful for exploring different interpretations: the seed fixes the
    initial latent noise, so every seed yields a repeatable variation.
    """
    print("\n" + "="*70)
    print("EXAMPLE 5: Seed Variations")
    print("="*70)

    model_id = "OpenTrouter/Trouter-Imagine-1"
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16
    ).to("cuda")

    prompt = "a magical forest with glowing mushrooms, fairy lights, enchanted atmosphere"

    seeds = [42, 123, 456, 789, 1337, 9999]

    print(f"Generating {len(seeds)} variations...")
    for i, seed in enumerate(seeds):
        # Seed a CUDA generator so the run is reproducible.
        generator = torch.Generator("cuda").manual_seed(seed)

        image = pipe(
            prompt,
            generator=generator,
            num_inference_steps=30,
            guidance_scale=7.5
        ).images[0]

        image.save(f"example5_seed_{seed}.png")
        # BUG FIX: the total was hard-coded to 6; derive it from the list
        # so editing `seeds` keeps the progress counter correct.
        print(f"  ✓ Variation {i+1}/{len(seeds)} (seed: {seed})")

    print("✓ Seed variations complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_style_comparison():
    """
    Generate the same subject in different artistic styles.

    Appends eight style descriptors to one base subject to show the
    model's versatility across aesthetics; one image per style.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 6: Style Comparison")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    base_subject = "a majestic lion"

    # Style name -> descriptor suffix appended to the base subject.
    styles = {
        "photorealistic": "photorealistic, 4k photography, national geographic",
        "oil_painting": "oil painting, classical art style, detailed brushstrokes",
        "watercolor": "watercolor painting, soft colors, artistic",
        "digital_art": "digital art, concept art, highly detailed illustration",
        "anime": "anime style, cel shaded, vibrant colors, manga art",
        "cyberpunk": "cyberpunk style, neon colors, futuristic, tech-enhanced",
        "fantasy": "fantasy art style, magical, ethereal, mystical atmosphere",
        "minimalist": "minimalist art, simple shapes, clean design, modern"
    }

    for style_name, descriptor in styles.items():
        print(f"Generating {style_name} style...")
        styled_prompt = f"{base_subject}, {descriptor}"

        result = pipe(
            styled_prompt,
            num_inference_steps=35,
            guidance_scale=8.0,
        )
        result.images[0].save(f"example6_style_{style_name}.png")
        print(f"  ✓ {style_name}")

    print("✓ Style comparison complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_scheduler_comparison():
    """
    Compare different schedulers (samplers) and their outputs.

    Runs the same prompt and seed through four schedulers so the images
    differ only by sampling algorithm; each run is timed.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 7: Scheduler Comparison")
    print(banner)

    from diffusers import (
        DPMSolverMultistepScheduler,
        EulerAncestralDiscreteScheduler,
        DDIMScheduler,
        PNDMScheduler
    )

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    prompt = "ancient temple in jungle, overgrown with vines, mystical atmosphere"

    # Scheduler label -> class; each is instantiated from the pipeline's
    # existing scheduler config so the swap is drop-in.
    schedulers = {
        "DPM": DPMSolverMultistepScheduler,
        "Euler": EulerAncestralDiscreteScheduler,
        "DDIM": DDIMScheduler,
        "PNDM": PNDMScheduler
    }

    # Fixed seed so every scheduler starts from identical latent noise.
    seed = 42

    for label, scheduler_cls in schedulers.items():
        print(f"Testing {label} scheduler...")

        pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
        generator = torch.Generator("cuda").manual_seed(seed)

        started = time.time()
        result = pipe(
            prompt,
            generator=generator,
            num_inference_steps=30,
            guidance_scale=7.5,
        )
        elapsed = time.time() - started

        result.images[0].save(f"example7_scheduler_{label}.png")
        print(f"  ✓ {label} completed in {elapsed:.2f}s")

    print("✓ Scheduler comparison complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_memory_optimization():
    """
    Demonstrates memory optimization techniques for limited VRAM.

    Enables attention/VAE slicing (and xformers when installed), then
    generates a 1024x1024 image that would otherwise strain consumer GPUs.
    """
    print("\n" + "="*70)
    print("EXAMPLE 8: Memory-Optimized Generation")
    print("="*70)

    model_id = "OpenTrouter/Trouter-Imagine-1"
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16
    ).to("cuda")

    # Trade a little speed for a large reduction in peak VRAM.
    print("Enabling memory optimizations...")
    pipe.enable_attention_slicing()
    pipe.enable_vae_slicing()

    # xformers is optional; fall back gracefully when not installed.
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    try:
        pipe.enable_xformers_memory_efficient_attention()
        print("  ✓ xformers enabled")
    except Exception:
        print("  ℹ xformers not available")

    prompt = "detailed cityscape at sunset, skyscrapers, urban photography"

    # High resolution is feasible on modest GPUs with slicing enabled.
    print("Generating 1024x1024 image with optimizations...")
    image = pipe(
        prompt,
        width=1024,
        height=1024,
        num_inference_steps=30,
        guidance_scale=7.5
    ).images[0]

    image.save("example8_optimized_1024.png")
    print("✓ High-resolution generation with optimizations complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_automated_prompts():
    """
    Automatically generate and test multiple prompt combinations.

    Crosses a list of subjects with settings, picking a random style and
    quality tag for each pair, and saves every result to disk.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 9: Automated Prompt Generation")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    # Prompt building blocks to be combined.
    subjects = ["a dragon", "a spaceship", "a castle"]
    settings = ["in space", "on a mountain", "underwater"]
    styles = ["cyberpunk style", "fantasy art", "photorealistic"]
    qualities = ["highly detailed", "4k", "masterpiece"]

    print("Generating combinations...")
    out_dir = Path("example9_automated")
    out_dir.mkdir(exist_ok=True)

    for subj_idx, subject in enumerate(subjects):
        for set_idx, setting in enumerate(settings):
            # Randomize the style/quality tags for each subject-setting pair.
            style = random.choice(styles)
            quality = random.choice(qualities)

            combo_prompt = f"{subject} {setting}, {style}, {quality}"
            print(f"  Generating: {combo_prompt[:60]}...")

            result = pipe(
                combo_prompt,
                num_inference_steps=25,
                guidance_scale=7.5,
            )
            result.images[0].save(out_dir / f"combo_{subj_idx}_{set_idx}.png")

    print(f"✓ Generated {len(subjects) * len(settings)} combinations")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_image_series():
    """
    Generate a series of related images telling a story.

    Demonstrates consistency in sequential generation: four prompts
    describe consecutive scenes, and each image is stamped with its
    scene number before saving.
    """
    print("\n" + "="*70)
    print("EXAMPLE 10: Image Series (Storytelling)")
    print("="*70)

    model_id = "OpenTrouter/Trouter-Imagine-1"
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16
    ).to("cuda")

    # Four consecutive scenes sharing subject and style keywords.
    story_prompts = [
        "a young wizard finding a mysterious glowing orb in a cave, fantasy art, dramatic lighting",
        "the wizard holding the glowing orb as magic energy swirls around him, fantasy art, detailed",
        "the wizard casting a powerful spell with the orb, energy beams, magical effects, fantasy art",
        "the wizard standing victorious as the orb floats above his hand, epic scene, fantasy art"
    ]

    output_dir = Path("example10_story_series")
    output_dir.mkdir(exist_ok=True)

    print(f"Generating {len(story_prompts)}-part story sequence...")

    for i, prompt in enumerate(story_prompts, 1):
        print(f"  Scene {i}/{len(story_prompts)}: {prompt[:50]}...")

        image = pipe(
            prompt,
            num_inference_steps=35,
            guidance_scale=8.0
        ).images[0]

        # Overlay a scene label on the image.
        draw = ImageDraw.Draw(image)
        try:
            # Prefer a real TrueType font when available on this system.
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 40)
        except OSError:
            # BUG FIX: was a bare `except:` that hid unrelated errors;
            # ImageFont.truetype raises OSError when the font file is missing.
            font = ImageFont.load_default()

        draw.text((20, 20), f"Scene {i}", fill="white", font=font)

        filename = output_dir / f"scene_{i:02d}.png"
        image.save(filename)

    print("✓ Story series generation complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_quality_speed_benchmark():
    """
    Benchmark different quality settings and their generation times.

    Runs four named presets (draft/balanced/quality/maximum) through a
    DPM-Solver pipeline, timing each, then prints a summary table.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 11: Quality vs Speed Benchmark")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    # Fast multistep solver keeps the low step counts usable.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

    prompt = "detailed portrait of a knight in armor, medieval, dramatic lighting"

    # Preset name -> settings (square resolution, step count, guidance).
    presets = {
        "draft": {"steps": 15, "resolution": 512, "guidance": 6.0},
        "balanced": {"steps": 25, "resolution": 512, "guidance": 7.5},
        "quality": {"steps": 40, "resolution": 768, "guidance": 8.0},
        "maximum": {"steps": 50, "resolution": 1024, "guidance": 9.0}
    }

    timings = {}

    for preset_name, cfg in presets.items():
        print(f"\nTesting {preset_name} preset:")
        print(f"  Resolution: {cfg['resolution']}x{cfg['resolution']}")
        print(f"  Steps: {cfg['steps']}")
        print(f"  Guidance: {cfg['guidance']}")

        started = time.time()
        result = pipe(
            prompt,
            width=cfg['resolution'],
            height=cfg['resolution'],
            num_inference_steps=cfg['steps'],
            guidance_scale=cfg['guidance'],
        )
        elapsed = time.time() - started
        timings[preset_name] = elapsed

        result.images[0].save(f"example11_preset_{preset_name}.png")
        print(f"  ✓ Generated in {elapsed:.2f}s")

    # Summary table of per-preset wall-clock times.
    print(f"\n{banner}")
    print("BENCHMARK RESULTS:")
    print(banner)
    for preset_name, seconds in timings.items():
        print(f"{preset_name:>12}: {seconds:>6.2f}s")

    print("✓ Quality vs speed benchmark complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_product_photography():
    """
    Generate professional product photography shots.

    Commercial use case: each product description is combined with a
    shared quality suffix and negative prompt, rendered at 768x768.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 12: Professional Product Photography")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    products = [
        "luxury watch with leather strap on marble surface",
        "modern smartphone with sleek design on white background",
        "artisanal coffee cup with latte art on wooden table",
        "designer sunglasses with reflection of sunset",
        "premium headphones with soft studio lighting"
    ]

    # Shared suffix/negative prompt applied to every product shot.
    base_prompt_additions = "professional product photography, commercial, high-end, 4k, studio lighting, detailed"
    negative_prompt = "low quality, blurry, amateur, cluttered, distorted, watermark"

    out_dir = Path("example12_product_photos")
    out_dir.mkdir(exist_ok=True)

    print("Generating professional product photos...")

    for idx, product in enumerate(products, 1):
        print(f"  Product {idx}/{len(products)}: {product}")

        result = pipe(
            prompt=f"{product}, {base_prompt_additions}",
            negative_prompt=negative_prompt,
            width=768,
            height=768,
            num_inference_steps=40,
            guidance_scale=8.5,
        )
        result.images[0].save(out_dir / f"product_{idx:02d}.png")

    print("✓ Professional product photography complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_image_grid():
    """
    Create comparison grids showing different parameters.

    Generates one 512x512 image per guidance scale, stamps each with its
    setting, and pastes all four into a single 2x2 grid image — useful
    for presentations and documentation.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 13: Image Grid Comparison")
    print(banner)

    pipe = StableDiffusionPipeline.from_pretrained(
        "OpenTrouter/Trouter-Imagine-1",
        torch_dtype=torch.float16,
    ).to("cuda")

    prompt = "a red sports car on mountain road, sunset"

    print("Generating images for grid...")
    tiles = []

    for scale in (5.0, 7.5, 10.0, 12.5):
        print(f"  Guidance scale: {scale}")
        tile = pipe(
            prompt,
            guidance_scale=scale,
            num_inference_steps=30,
            width=512,
            height=512,
        ).images[0]

        # Stamp the parameter value onto the tile itself.
        ImageDraw.Draw(tile).text((10, 10), f"Guidance: {scale}", fill="white")
        tiles.append(tile)

    # Assemble the tiles into a 2x2 grid of 512px cells.
    grid = Image.new('RGB', (1024, 1024))
    for idx, tile in enumerate(tiles):
        grid.paste(tile, ((idx % 2) * 512, (idx // 2) * 512))

    grid.save("example13_comparison_grid.png")
    print("✓ Comparison grid created")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_json_batch_processing():
    """
    Process multiple generations from a JSON configuration file.

    Writes a sample config to disk for reproducibility, then replays it:
    per-generation settings override the defaults, and each image is
    saved under the configured output directory/filename.
    """
    print("\n" + "="*70)
    print("EXAMPLE 14: JSON Batch Processing")
    print("="*70)

    # Sample configuration; per-generation keys override default_params.
    config = {
        "model_id": "OpenTrouter/Trouter-Imagine-1",
        "output_dir": "example14_json_batch",
        "default_params": {
            "num_inference_steps": 30,
            "guidance_scale": 7.5,
            "width": 512,
            "height": 512
        },
        "generations": [
            {
                "prompt": "sunset over ocean, peaceful scene",
                "negative_prompt": "stormy, dark, gloomy",
                "filename": "peaceful_sunset.png"
            },
            {
                "prompt": "cyberpunk alley with neon signs",
                "negative_prompt": "daytime, bright, clean",
                "guidance_scale": 8.5,
                "filename": "cyberpunk_alley.png"
            },
            {
                "prompt": "fantasy castle on floating island",
                "negative_prompt": "modern, realistic",
                "width": 768,
                "height": 768,
                "num_inference_steps": 40,
                "filename": "floating_castle.png"
            }
        ]
    }

    # Persist the config so the batch can be re-run from the file.
    config_path = "example14_config.json"
    with open(config_path, 'w') as f:
        json.dump(config, f, indent=2)
    print(f"Config saved to {config_path}")

    model_id = config["model_id"]
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16
    ).to("cuda")

    output_dir = Path(config["output_dir"])
    output_dir.mkdir(exist_ok=True)

    default_params = config["default_params"]

    print(f"\nProcessing {len(config['generations'])} generations...")

    for i, gen_config in enumerate(config["generations"], 1):
        # Merge defaults with this generation's overrides.
        params = {**default_params, **gen_config}

        # Pull out the keys that are not pipeline kwargs.
        prompt = params.pop("prompt")
        filename = params.pop("filename")
        negative_prompt = params.pop("negative_prompt", "")

        # BUG FIX: the progress line previously printed a "(unknown)"
        # placeholder instead of identifying the generation.
        print(f"  {i}/{len(config['generations'])}: {filename}")

        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            **params
        ).images[0]

        image.save(output_dir / filename)

    print("✓ JSON batch processing complete")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def example_reproducible_research():
    """
    Demonstrates best practices for reproducible research.

    Builds an experiment record (id, hypothesis, fixed and variable
    parameters), generates one image per test value under a fixed seed,
    and stores the metadata plus timing results as JSON next to the
    images so the sweep can be replayed.
    """
    banner = "=" * 70
    print(f"\n{banner}")
    print("EXAMPLE 15: Reproducible Research Workflow")
    print(banner)

    import hashlib
    from datetime import datetime

    model_id = "OpenTrouter/Trouter-Imagine-1"
    pipe = StableDiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
    ).to("cuda")

    out_dir = Path("example15_research")
    out_dir.mkdir(exist_ok=True)

    # Experiment metadata: everything needed to re-run this exact sweep.
    experiment = {
        "experiment_id": hashlib.md5(str(datetime.now()).encode()).hexdigest()[:8],
        "timestamp": datetime.now().isoformat(),
        "model": model_id,
        "hypothesis": "Testing effect of guidance scale on image fidelity",
        "prompt": "a scientist in a laboratory, professional photography",
        "negative_prompt": "blurry, low quality, distorted",
        "fixed_seed": 12345,
        "variable_parameter": "guidance_scale",
        "test_values": [5.0, 7.5, 10.0, 12.5, 15.0],
        "fixed_parameters": {
            "width": 512,
            "height": 512,
            "num_inference_steps": 35,
        },
    }

    # Persist the configuration before running anything.
    config_file = out_dir / f"experiment_{experiment['experiment_id']}.json"
    with open(config_file, 'w') as f:
        json.dump(experiment, indent=2, fp=f)

    print(f"Experiment ID: {experiment['experiment_id']}")
    print(f"Testing: {experiment['hypothesis']}")

    results = []

    for value in experiment['test_values']:
        print(f"\n  Testing {experiment['variable_parameter']} = {value}")

        # Re-seed per run so every tested value starts from identical noise.
        generator = torch.Generator("cuda").manual_seed(experiment['fixed_seed'])

        started = time.time()
        image = pipe(
            prompt=experiment['prompt'],
            negative_prompt=experiment['negative_prompt'],
            guidance_scale=value,
            generator=generator,
            **experiment['fixed_parameters'],
        ).images[0]
        generation_time = time.time() - started

        filename = f"{experiment['experiment_id']}_guidance_{value}.png"
        image.save(out_dir / filename)

        results.append({
            "parameter_value": value,
            "filename": filename,
            "generation_time": generation_time,
            "seed_used": experiment['fixed_seed'],
        })

        print(f"    Generated in {generation_time:.2f}s")

    # Attach the measurements and rewrite the experiment record.
    experiment['results'] = results
    with open(config_file, 'w') as f:
        json.dump(experiment, indent=2, fp=f)

    print(f"\n✓ Experiment complete. Results saved to {config_file}")
| |
|
| |
|
| | |
| | |
| | |
| |
|
def run_all_examples():
    """Run all examples (warning: this will take a long time!)"""
    # (label, callable) pairs in presentation order.
    examples = [
        ("Basic Generation", example_basic_generation),
        ("Negative Prompts", example_negative_prompts),
        ("Parameter Exploration", example_parameter_exploration),
        ("Multi-Resolution", example_multi_resolution),
        ("Seed Variations", example_seed_variations),
        ("Style Comparison", example_style_comparison),
        ("Scheduler Comparison", example_scheduler_comparison),
        ("Memory Optimization", example_memory_optimization),
        ("Automated Prompts", example_automated_prompts),
        ("Image Series", example_image_series),
        ("Quality/Speed Benchmark", example_quality_speed_benchmark),
        ("Product Photography", example_product_photography),
        ("Image Grid", example_image_grid),
        ("JSON Batch Processing", example_json_batch_processing),
        ("Reproducible Research", example_reproducible_research)
    ]

    banner = "=" * 70
    print(f"\n{banner}")
    print("TROUTER-IMAGINE-1 COMPREHENSIVE EXAMPLES")
    print(banner)
    print(f"\nTotal examples: {len(examples)}")
    print("Warning: Running all examples will take considerable time and GPU resources")
    print(banner)

    # Broad catch is deliberate: one failing example must not stop the rest.
    for idx, (label, example_fn) in enumerate(examples, 1):
        try:
            print(f"\n[{idx}/{len(examples)}] Running: {label}")
            example_fn()
        except Exception as exc:
            print(f"ERROR in {label}: {exc}")

    print(f"\n{banner}")
    print("ALL EXAMPLES COMPLETED")
    print(banner)
| |
|
| |
|
if __name__ == "__main__":
    # CLI entry point: dispatch on the first argument, or show usage.
    import sys

    if len(sys.argv) > 1:
        selection = sys.argv[1]

        # Argument string -> example function.
        dispatch = {
            "1": example_basic_generation,
            "2": example_negative_prompts,
            "3": example_parameter_exploration,
            "4": example_multi_resolution,
            "5": example_seed_variations,
            "6": example_style_comparison,
            "7": example_scheduler_comparison,
            "8": example_memory_optimization,
            "9": example_automated_prompts,
            "10": example_image_series,
            "11": example_quality_speed_benchmark,
            "12": example_product_photography,
            "13": example_image_grid,
            "14": example_json_batch_processing,
            "15": example_reproducible_research,
            "all": run_all_examples
        }

        handler = dispatch.get(selection)
        if handler is not None:
            handler()
        else:
            print(f"Unknown example: {selection}")
            print("Available examples: 1-15, all")
    else:
        # No argument: print the usage listing line by line.
        for usage_line in (
            "\nUsage: python examples.py <example_number>",
            "\nAvailable examples:",
            "  1  - Basic Generation",
            "  2  - Negative Prompts",
            "  3  - Parameter Exploration",
            "  4  - Multi-Resolution",
            "  5  - Seed Variations",
            "  6  - Style Comparison",
            "  7  - Scheduler Comparison",
            "  8  - Memory Optimization",
            "  9  - Automated Prompts",
            "  10 - Image Series (Storytelling)",
            "  11 - Quality vs Speed Benchmark",
            "  12 - Professional Product Photography",
            "  13 - Image Grid Comparison",
            "  14 - JSON Batch Processing",
            "  15 - Reproducible Research Workflow",
            "  all - Run all examples (takes a long time!)",
            "\nExample: python examples.py 1",
        ):
            print(usage_line)