import gradio as gr
from datasets import load_dataset
from PIL import Image, ImageDraw
import numpy as np

# Load the dataset
dataset = load_dataset("dwb2023/brain-tumor-image-dataset-semantic-segmentation", split="test")
# print(f"Dataset loaded successfully. Number of images: {len(dataset)}")

# Bounding-box colors keyed by annotation category ID; unknown IDs fall back to yellow in draw_annotations()
CATEGORY_COLORS = {
    1: "blue",
    2: "green",
}

def draw_annotations(index):
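    """Draw the bounding box and segmentation outline for the dataset record at `index`.

    Returns (annotated PIL image, formatted info string); on any error, returns a
    gray placeholder image and the error message instead.
    """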
    try:
        # Fetch the image and annotations from the dataset
        record = dataset[index]
        
        # Convert image to PIL Image if it's a numpy array
        if isinstance(record['image'], np.ndarray):
            img = Image.fromarray(record['image'])
        else:
            img = record['image']
        
        img = img.convert("RGB")  # Ensure the image is in RGB mode
        
        draw = ImageDraw.Draw(img)

        # Draw bounding box with color based on category
        bbox = record["bbox"]
        category_id = record["category_id"]
        box_color = CATEGORY_COLORS.get(category_id, "yellow")  # Default to yellow if category not in mapping
        draw.rectangle([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], outline=box_color, width=3)

        # Draw segmentation mask
        segmentation = record["segmentation"]
        for seg in segmentation:
            draw.polygon(seg, outline="red")

        # Prepare additional information
        area = record["area"]
        file_name = record["file_name"]

        info = f"File Name: {file_name}\n"
        info += f"Image ID: {record['id']}\n"
        info += f"Category ID: {category_id}\n"
        info += f"Bounding Box Color: {box_color}\n"
        info += f"Bounding Box: [{bbox[0]:.2f}, {bbox[1]:.2f}, {bbox[2]:.2f}, {bbox[3]:.2f}]\n"
        info += f"Segmentation: {segmentation}\n"
        info += f"Area: {area:.2f}"

        return img, info
    except Exception as e:
        print(f"Error processing image at index {index}: {e}")
        return Image.new('RGB', (300, 300), color='gray'), f"Error loading image information: {str(e)}"

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Brain Tumor Image Dataset Viewer")
    gr.Markdown("## Refer to the [dwb2023/brain-tumor-image-dataset-semantic-segmentation](https://huggingface.co/datasets/dwb2023/brain-tumor-image-dataset-semantic-segmentation/viewer/default/test) dataset for more information")
    
    with gr.Row():
        with gr.Column(scale=1):
            image_output = gr.Image(label="Annotated Image")
        with gr.Column(scale=1):
            image_index = gr.Slider(minimum=0, maximum=len(dataset)-1, step=1, value=0, label="Image Index")
            info_output = gr.Textbox(label="Image Information", lines=10)
    
    # Update image and info when slider changes
    image_index.change(draw_annotations, inputs=image_index, outputs=[image_output, info_output])

    # Display initial image and info
    demo.load(draw_annotations, inputs=image_index, outputs=[image_output, info_output])

demo.launch(debug=True)