"""
Shape Polygons Dataset - Usage Examples
This script demonstrates various ways to load and use the Shape Polygons Dataset.
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
# =============================================================================
# Basic Usage: Loading Metadata and Images
# =============================================================================
def load_dataset(data_dir=".", split="train"):
"""Load metadata and return as pandas DataFrame."""
metadata_path = os.path.join(data_dir, split, "metadata.csv")
return pd.read_csv(metadata_path)
def load_image(data_dir, split, filename):
"""Load a single image from the dataset."""
img_path = os.path.join(data_dir, split, "images", filename)
return Image.open(img_path)
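# Quick-start sketch (illustrative addition, not one of the numbered examples):
# combine the two helpers above to inspect a single sample. It assumes only what
# load_dataset/load_image already assume, i.e. a "filename" column in
# metadata.csv and images stored under <split>/images/.
def quickstart_preview(data_dir=".", split="train"):
    """Print the first metadata row and the size/mode of its image."""
    df = load_dataset(data_dir, split)
    first = df.iloc[0]
    img = load_image(data_dir, split, first["filename"])
    print(first)
    print(f"Image size: {img.size}, mode: {img.mode}")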
# =============================================================================
# Example 1: Explore Dataset Statistics
# =============================================================================
def explore_statistics(data_dir="."):
"""Print dataset statistics."""
print("=" * 50)
print("Shape Polygons Dataset Statistics")
print("=" * 50)
for split in ["train", "test"]:
df = load_dataset(data_dir, split)
print(f"\n{split.upper()} Split:")
print(f" Total images: {len(df)}")
print(f"\n Vertices distribution:")
for v in range(3, 9):
count = len(df[df["vertices"] == v])
print(f" {v} vertices: {count} ({count/len(df)*100:.1f}%)")
print(f"\n Size statistics:")
print(f" Min: {df['size'].min():.4f}")
print(f" Max: {df['size'].max():.4f}")
print(f" Mean: {df['size'].mean():.4f}")
# =============================================================================
# Example 2: Visualize Sample Images
# =============================================================================
def visualize_samples(data_dir=".", n_samples=12, split="train"):
"""Visualize random samples from the dataset."""
df = load_dataset(data_dir, split)
samples = df.sample(n=min(n_samples, len(df)))
n_cols = 4
n_rows = (len(samples) + n_cols - 1) // n_cols
fig, axes = plt.subplots(n_rows, n_cols, figsize=(12, 3 * n_rows))
    # With n_cols fixed at 4, plt.subplots always returns an array of Axes, so flatten unconditionally.
    axes = axes.flatten()
for idx, (_, row) in enumerate(samples.iterrows()):
img = load_image(data_dir, split, row["filename"])
axes[idx].imshow(img)
axes[idx].set_title(f"{row['vertices']} vertices\nsize={row['size']:.2f}")
axes[idx].axis("off")
# Hide empty subplots
for idx in range(len(samples), len(axes)):
axes[idx].axis("off")
plt.tight_layout()
plt.savefig("samples_visualization.png", dpi=150, bbox_inches="tight")
print(f"Saved visualization to 'samples_visualization.png'")
plt.show()
# =============================================================================
# Example 3: Visualize by Shape Type
# =============================================================================
def visualize_by_shape_type(data_dir=".", split="train"):
"""Show one example of each shape type."""
df = load_dataset(data_dir, split)
shape_names = {
3: "Triangle",
4: "Quadrilateral",
5: "Pentagon",
6: "Hexagon",
7: "Heptagon",
8: "Octagon"
}
fig, axes = plt.subplots(2, 3, figsize=(12, 8))
axes = axes.flatten()
for idx, vertices in enumerate(range(3, 9)):
sample = df[df["vertices"] == vertices].iloc[0]
img = load_image(data_dir, split, sample["filename"])
axes[idx].imshow(img)
axes[idx].set_title(f"{shape_names[vertices]}\n({vertices} vertices)")
axes[idx].axis("off")
plt.suptitle("Shape Types in Dataset", fontsize=14, fontweight="bold")
plt.tight_layout()
plt.savefig("shape_types.png", dpi=150, bbox_inches="tight")
print(f"Saved visualization to 'shape_types.png'")
plt.show()
# =============================================================================
# Example 4: PyTorch Dataset Class
# =============================================================================
# Optional imports for PyTorch functionality
try:
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
PYTORCH_AVAILABLE = True
except ImportError:
PYTORCH_AVAILABLE = False
class ShapePolygonsDataset(Dataset if PYTORCH_AVAILABLE else object):
    """PyTorch map-style Dataset for Shape Polygons.
    Requires: torch, torchvision
    """
def __init__(self, root_dir, split="train", transform=None, task="classification"):
"""
Args:
root_dir: Root directory of the dataset
split: "train" or "test"
transform: Optional torchvision transforms
task: "classification" for vertex count, "regression" for size prediction, "multi" for all properties
"""
if not PYTORCH_AVAILABLE:
raise ImportError("PyTorch is required. Install with: pip install torch torchvision")
self.root_dir = root_dir
self.split = split
self.transform = transform
self.task = task
self.metadata = pd.read_csv(os.path.join(root_dir, split, "metadata.csv"))
if self.transform is None:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
def __len__(self):
return len(self.metadata)
def __getitem__(self, idx):
row = self.metadata.iloc[idx]
img_path = os.path.join(self.root_dir, self.split, "images", row["filename"])
image = Image.open(img_path).convert("RGB")
if self.transform:
image = self.transform(image)
if self.task == "classification":
# Class label: 0-5 for 3-8 vertices
label = torch.tensor(row["vertices"] - 3, dtype=torch.long)
elif self.task == "regression":
# Predict size
label = torch.tensor(row["size"], dtype=torch.float32)
elif self.task == "multi":
# Multi-task: return all properties
label = {
"vertices": torch.tensor(row["vertices"] - 3, dtype=torch.long),
"size": torch.tensor(row["size"], dtype=torch.float32),
"angle": torch.tensor(row["angle"], dtype=torch.float32),
"center": torch.tensor([row["center_x"], row["center_y"]], dtype=torch.float32),
"color": torch.tensor([row["color_r"], row["color_g"], row["color_b"]], dtype=torch.float32)
}
else:
raise ValueError(f"Unknown task: {self.task}")
return image, label
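# Sketch of the "multi" task output (illustrative): print the dictionary of
# targets returned for one sample. The keys mirror the columns already used in
# __getitem__ above (vertices, size, angle, center_x/center_y, color_r/g/b).
def demo_multi_task_sample(data_dir="."):
    """Show the multi-task label dictionary for the first training sample."""
    if not PYTORCH_AVAILABLE:
        print("PyTorch is not installed. Install with: pip install torch torchvision")
        return
    dataset = ShapePolygonsDataset(data_dir, split="train", task="multi")
    image, label = dataset[0]
    print(f"Image tensor shape: {tuple(image.shape)}")
    for key, value in label.items():
        print(f"  {key}: {value}")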
def demo_pytorch_dataloader(data_dir="."):
"""Demonstrate PyTorch DataLoader usage."""
if not PYTORCH_AVAILABLE:
print("PyTorch is not installed. Install with: pip install torch torchvision")
return
print("Creating PyTorch Dataset and DataLoader...")
dataset = ShapePolygonsDataset(data_dir, split="train", task="classification")
dataloader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0)
# Get one batch
images, labels = next(iter(dataloader))
print(f"Batch shape: {images.shape}")
print(f"Labels shape: {labels.shape}")
print(f"Label values (vertices - 3): {labels[:10].tolist()}")
print(f"Actual vertex counts: {[l + 3 for l in labels[:10].tolist()]}")
# =============================================================================
# Example 5: Color Analysis
# =============================================================================
def analyze_colors(data_dir=".", split="train"):
"""Analyze color distribution in the dataset."""
df = load_dataset(data_dir, split)
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
colors = ["red", "green", "blue"]
columns = ["color_r", "color_g", "color_b"]
for idx, (color, col) in enumerate(zip(colors, columns)):
axes[idx].hist(df[col], bins=50, color=color, alpha=0.7, edgecolor="black")
axes[idx].set_xlabel(f"{color.capitalize()} Value")
axes[idx].set_ylabel("Frequency")
axes[idx].set_title(f"{color.capitalize()} Channel Distribution")
plt.suptitle(f"Color Distribution in {split.capitalize()} Set", fontsize=14, fontweight="bold")
plt.tight_layout()
plt.savefig("color_distribution.png", dpi=150, bbox_inches="tight")
print(f"Saved visualization to 'color_distribution.png'")
plt.show()
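# Additional analysis sketch (illustrative): summarize polygon size per vertex
# count using only the "vertices" and "size" columns read by the examples above.
def analyze_size_by_vertices(data_dir=".", split="train"):
    """Print count, mean, and standard deviation of size for each vertex count."""
    df = load_dataset(data_dir, split)
    summary = df.groupby("vertices")["size"].agg(["count", "mean", "std"])
    print(f"Size by vertex count ({split} split):")
    print(summary.round(4))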
# =============================================================================
# Main
# =============================================================================
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Shape Polygons Dataset Examples")
parser.add_argument("--data-dir", type=str, default=".", help="Path to dataset root")
parser.add_argument(
"--example",
type=str,
choices=["stats", "samples", "shapes", "pytorch", "colors", "all"],
default="all",
help="Which example to run"
)
args = parser.parse_args()
examples = {
"stats": ("Dataset Statistics", lambda: explore_statistics(args.data_dir)),
"samples": ("Sample Visualization", lambda: visualize_samples(args.data_dir)),
"shapes": ("Shape Types", lambda: visualize_by_shape_type(args.data_dir)),
"pytorch": ("PyTorch DataLoader Demo", lambda: demo_pytorch_dataloader(args.data_dir)),
"colors": ("Color Analysis", lambda: analyze_colors(args.data_dir)),
}
if args.example == "all":
for name, (desc, func) in examples.items():
print(f"\n{'=' * 50}")
print(f"Example: {desc}")
print("=" * 50)
func()
else:
name = args.example
desc, func = examples[name]
print(f"Running Example: {desc}")
func()