import os
import json

import cv2
import numpy as np
from datasets import Array4D, Dataset, DatasetDict, Features, Sequence, Value

# Define constants
VIDEO_EXTENSIONS = ['.avi']
JSON_EXTENSIONS = ['.json']
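# The 17 labels below follow the standard COCO body-keypoint naming and order.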
KEYPOINTS = [
    "nose", "left_eye", "right_eye", "left_ear", "right_ear", "left_shoulder", "right_shoulder",
    "left_elbow", "right_elbow", "left_wrist", "right_wrist", "left_hip", "right_hip", 
    "left_knee", "right_knee", "left_ankle", "right_ankle"
]

def load_video(video_path):
    """Reads a video file and returns its frames stacked into a single NumPy array."""
    cap = cv2.VideoCapture(video_path)
    frames = []
    
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    
    cap.release()
    return np.array(frames)

def load_json(json_path):
    """Loads the JSON keypoint data for each frame."""
    with open(json_path, 'r') as f:
        data = json.load(f)
    return data

def process_frame_data(frame_data):
    """Converts one frame's detections into a structured list of persons."""
    detections = []
    
    # Each frame entry is expected to carry a 'detections' list
    if 'detections' in frame_data:
        for detection in frame_data['detections']:
            if detection:  # Skip empty detection entries
                person = {
                    "confidence": detection.get("confidence", 0),
                    "box": detection.get("box", {}),
                    "keypoints": {
                        keypoint['label']: keypoint['coordinates']
                        for keypoint in detection.get('keypoints', [])
                    }
                }
                detections.append(person)
            else:
                print(f"Warning: empty detection in frame {frame_data.get('frame_index', '?')}")
    else:
        # Frame entries without a 'detections' key are skipped with a warning
        print(f"Warning: 'detections' key missing in frame data: {frame_data}")
    
    return detections
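
# For reference, process_frame_data above assumes each frame entry in the JSON
# looks roughly like this (field names inferred from the accesses in the code):
#   {
#     "frame_index": 0,
#     "detections": [
#       {
#         "confidence": 0.93,
#         "box": {"x1": ..., "y1": ..., "x2": ..., "y2": ...},
#         "keypoints": [{"label": "nose", "coordinates": [x, y]}, ...]
#       }
#     ]
#   }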


def get_file_paths(base_path, split="train"):
    """Returns video and JSON file paths."""
    video_paths = []
    json_paths = []
    split_path = os.path.join(base_path, split)
    
    for label in ['Fight', 'NonFight']:
        label_path = os.path.join(split_path, label)
        for video_folder in os.listdir(label_path):
            video_folder_path = os.path.join(label_path, video_folder)
            if not os.path.isdir(video_folder_path):
                continue  # Skip stray files at this level
            video_file = next((f for f in os.listdir(video_folder_path)
                               if any(f.endswith(ext) for ext in VIDEO_EXTENSIONS)), None)
            json_file = next((f for f in os.listdir(video_folder_path)
                              if any(f.endswith(ext) for ext in JSON_EXTENSIONS)), None)
            
            if video_file and json_file:
                video_paths.append(os.path.join(video_folder_path, video_file))
                json_paths.append(os.path.join(video_folder_path, json_file))
    
    return video_paths, json_paths
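
# Directory layout walked by get_file_paths (names are illustrative):
#   <base_path>/train/Fight/<clip_id>/<clip>.avi  +  <clip>.json
#   <base_path>/train/NonFight/<clip_id>/...
#   <base_path>/val/...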

def load_data(base_path, split="train"):
    """Loads and processes the data for a given split (train or val)."""
    video_paths, json_paths = get_file_paths(base_path, split)
    dataset = []
    
    for video_path, json_path in zip(video_paths, json_paths):
        # Load video frames
        frames = load_video(video_path)
        
        # Load JSON keypoints
        keypoints_data = load_json(json_path)
        
        # Process the data
        frame_data = [process_frame_data(frame) for frame in keypoints_data]
        
        # Construct the data record
        dataset.append({
            'video': frames,
            'keypoints': frame_data,
            'video_path': video_path,
            'json_path': json_path
        })
    
    return dataset
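
# Each record produced by load_data looks roughly like:
#   {
#     'video': np.ndarray of shape (num_frames, height, width, 3), BGR uint8,
#     'keypoints': [[person, person, ...], ...],  # one inner list per frame
#     'video_path': '.../clip.avi',
#     'json_path': '.../clip.json'
#   }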

def main():
    # Path to the dataset directory
    dataset_dir = '.'  # Replace with your actual dataset path
    
    # Load training and validation data
    train_data = load_data(dataset_dir, split="train")
    val_data = load_data(dataset_dir, split="val")
    
    # Convert to Hugging Face Dataset. The schema below is a sketch: ArrayXD
    # features only allow the first dimension to be dynamic, so the frame
    # height/width used here (240x320) are placeholders that must match your
    # clips, and 'keypoints' is a nested list (per frame -> per detected person).
    # If the schema is hard to pin down, omit `features` and let `datasets`
    # infer it from the data.
    person_features = {
        'confidence': Value('float32'),
        'box': {
            'x1': Value('float32'),
            'y1': Value('float32'),
            'x2': Value('float32'),
            'y2': Value('float32')
        },
        'keypoints': {key: Sequence(Value('float32'), length=2) for key in KEYPOINTS}
    }
    features = Features({
        'video': Array4D(dtype='uint8', shape=(None, 240, 320, 3)),  # frames x H x W x BGR
        'keypoints': [[person_features]],  # frames -> persons
        'video_path': Value('string'),
        'json_path': Value('string')
    })
    
    # Create DatasetDict (from_list expects a list of row dicts, which load_data returns)
    dataset_dict = DatasetDict({
        'train': Dataset.from_list(train_data, features=features),
        'val': Dataset.from_list(val_data, features=features)
    })
    
    # Save or push dataset to Hugging Face
    dataset_dict.save_to_disk("keypoints_keyger")
    # Or to upload: dataset_dict.push_to_hub("your_dataset_name")
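    # To reload the saved dataset later (a minimal sketch):
    #   from datasets import load_from_disk
    #   reloaded = load_from_disk("keypoints_keyger")
    #   print(reloaded["train"][0]["video_path"])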

if __name__ == "__main__":
    main()