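# Convert the 8_calves COCO-format splits (train/val/test JSON plus images under
# the same directory) into HuggingFace `datasets` Arrow caches: a full dataset
# saved to "8_calves_arrow" and an optional small debug subset.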
import json
import os
from datasets import Dataset, DatasetDict, Image, Features, Value

def create_split_dataset(coco_dir, split):
    """Build a Dataset for one split from its COCO JSON, preserving the
    per-annotation fields (id, category_id, bbox, area, iscrowd)."""
    with open(os.path.join(coco_dir, f"{split}.json")) as f:
        coco_data = json.load(f)

    # Map image_id -> list of its annotations, keeping the full COCO fields
    ann_map = {img['id']: [] for img in coco_data['images']}
    for ann in coco_data['annotations']:
        ann_map[ann['image_id']].append({
            'id': ann['id'],
            'category_id': 0,  # collapsed to one category: single-class (calf) dataset
            'bbox': [float(x) for x in ann['bbox']],  # cast to float for the schema
            'area': float(ann['area']),
            'iscrowd': int(ann.get('iscrowd', 0))
        })

    # Build dataset entries; the Image() feature decodes lazily from the stored path
    dataset = []
    for img in coco_data['images']:
        dataset.append({
            'image_id': int(img['id']),
            'image': {'path': os.path.join(coco_dir, img['file_name'])},
            'annotations': ann_map[img['id']]  # list of annotation dicts
        })

    # Define the features schema. Note: a plain Python list of dicts is used for
    # 'annotations' rather than Sequence({...}); Sequence would flip the data
    # into a dict of lists, while this keeps each image's annotations as a
    # list of dicts.
    features = Features({
        'image_id': Value('int64'),
        'image': Image(),
        'annotations': [{
            'id': Value('int64'),
            'category_id': Value('int64'),
            'bbox': [Value('float32')],  # COCO [x, y, width, height]
            'area': Value('float32'),
            'iscrowd': Value('int64')
        }]
    })
    
    return Dataset.from_list(dataset, features=features)
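
# For reference, one record produced above has this shape (illustrative values only):
#   {'image_id': 7,
#    'image': <PIL.Image, decoded lazily from the stored path>,
#    'annotations': [{'id': 42, 'category_id': 0, 'bbox': [x, y, w, h],
#                     'area': 1234.0, 'iscrowd': 0}, ...]}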

# Configuration
coco_dir = "8_calves_coco"
debug_limits = {"train": 50, "val": 20, "test": 10}  # debug-subset size per split
seed = 42  # for reproducible shuffling

# Initialize containers
full_dataset = DatasetDict()
debug_dataset = DatasetDict()

# Process splits
for split in ["train", "val", "test"]:
    # Create the full split dataset
    full_split = create_split_dataset(coco_dir, split)
    full_dataset[split] = full_split

    # Create a debug version from a reproducible random sample;
    # min() guards against a split smaller than its debug limit
    n_debug = min(debug_limits[split], len(full_split))
    debug_dataset[split] = full_split.shuffle(seed=seed).select(range(n_debug))

# Optionally save the debug cache first (disabled by default)
# debug_dataset.save_to_disk("cache_debug")
# print(f"✅ Debug cache saved with {sum(len(d) for d in debug_dataset.values())} samples")

# Save full dataset
full_dataset.save_to_disk("8_calves_arrow")
print(f"✅ Full cache saved with {sum(len(d) for d in full_dataset.values())} samples")