import { Callout } from "nextra/components";
import VideoEmbed from "../../components/video-embed";

# Mask Generation

> Generate precise pixel-level masks for objects

<VideoEmbed
  src="https://geobase-docs.s3.amazonaws.com/geobase-ai-assets/mask-generation.mp4"
  title="Mask Generation Demo"
/>

<div style={{ textAlign: 'center', marginTop: '1rem' }}>
  <a 
    href="https://docs.geobase.app/geoai-live/tasks/mask-generation" 
    target="_blank" 
    rel="noopener noreferrer"
    style={{ 
      display: 'inline-flex', 
      alignItems: 'center', 
      gap: '0.5rem',
      padding: '0.5rem 1rem',
      backgroundColor: '#3b82f6',
      color: 'white',
      textDecoration: 'none',
      borderRadius: '0.375rem',
      fontWeight: '500',
      transition: 'background-color 0.2s'
    }}
    className="live-demo-button"
  >
    🚀 Try Live Demo
  </a>
</div>

<style jsx>{`
  .live-demo-button:hover {
    background-color: #2563eb !important;
  }
`}</style>

<div style={{ textAlign: 'center', marginTop: '0.5rem' }}>
  <a 
    href="https://huggingface.co/Xenova/slimsam-77-uniform" 
    target="_blank" 
    rel="noopener noreferrer"
    style={{ 
      fontSize: '0.875rem',
      color: '#6b7280',
      textDecoration: 'none'
    }}
  >
    🤗 View Model on Hugging Face
  </a>
</div>

## Quick Start

```typescript
import { geoai } from "geoai";

// Initialize pipeline
const pipeline = await geoai.pipeline(
  [{ task: "mask-generation" }],
  providerParams
);

// Run with point input
const result = await pipeline.inference({
  inputs: {
    polygon: myPolygon,
    input: {
      type: "points",
      coordinates: [longitude, latitude],
    },
  },
  postProcessingParams: { maxMasks: 3 },
});

console.log(`Generated ${result.masks.features.length} masks`);
```

<Callout type="info" emoji="🎯">
  Generate precise object boundaries using point clicks, bounding boxes, or
  chained from object detection
</Callout>

## Input Types

### Point Input

```typescript
input: {
  type: "points",
  coordinates: [longitude, latitude]  // Click on object center
}
```

### Box Input

```typescript
input: {
  type: "boxes",
  coordinates: [minLng, minLat, maxLng, maxLat]  // Bounding box around object
}
```

### Post-Processing

```typescript
postProcessingParams: {
  maxMasks: 1, // Maximum number of masks
}
```

## Chained Pipeline

```typescript
// Use with object detection pipeline
const pipeline = await geoai.pipeline(
  [
    { task: "object-detection" },
    {
      task: "mask-generation",
      modelId: "Xenova/slimsam-77-uniform",
      modelParams: { revision: "boxes" },
    },
  ],
  providerParams
);
```

<Callout type="default" emoji="💡">
  **Point prompts** work best when clicking object centers. **Box prompts**
  require the "boxes" model revision for box inputs.
</Callout>

## Parameters

| Parameter  | Type                       | Description                           |
| ---------- | -------------------------- | ------------------------------------- |
| `polygon`  | `GeoJSON.Feature<Polygon>` | Area of interest                      |
| `input`    | `SegmentationInput`        | Point, box, or detection results      |
| `maxMasks` | `number`                   | Maximum masks per prompt (default: 1) |

## Output

```typescript
{
  masks: GeoJSON.FeatureCollection,  // Generated mask polygons
  geoRawImage: GeoRawImage           // Source imagery metadata
}
```
