release dataset
- LASO-C.tar.gz +3 -0
- PIAD-C.tar.gz +3 -0
- README.md +67 -0
- dataset.py +72 -0
- supp_benchmark_1.jpg +3 -0
- supp_benchmark_2.jpg +3 -0
LASO-C.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3de34d3940f38b58e723b717581c9d1a6a24ccc3e3a88bfb5b8960b126323c5e
size 2528938849
PIAD-C.tar.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a39c5adf3c24fdef21aec0043b407497acb45ab1f680e2d1f12ec43cba56ca5
size 2605080518
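Both archives are tracked with Git LFS in the `dylanorange/geal` dataset repository linked from the README below. As a minimal sketch (assuming the `huggingface_hub` client, and assuming each tarball unpacks into a `LASO-C/` or `PIAD-C/` folder as `dataset.py` expects), they could be fetched and extracted like this:

```python
# Sketch: fetch and unpack the corruption benchmarks from the Hub.
# Assumes `huggingface_hub` is installed; the folder layout after
# extraction (LASO-C/, PIAD-C/) is an assumption based on dataset.py.
import tarfile
from huggingface_hub import hf_hub_download

for archive in ["LASO-C.tar.gz", "PIAD-C.tar.gz"]:
    local_path = hf_hub_download(
        repo_id="dylanorange/geal",
        repo_type="dataset",
        filename=archive,
    )
    with tarfile.open(local_path, "r:gz") as tar:
        tar.extractall(path=".")  # place LASO-C/ and PIAD-C/ next to dataset.py
```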
README.md
ADDED
@@ -0,0 +1,67 @@
<p align="center">

<h3 align="center"><strong>GEAL: Generalizable 3D Affordance Learning with Cross-Modal Consistency</strong></h3>

<p align="center">
<a href="https://dylanorange.github.io" target='_blank'>Dongyue Lu</a>
<a href="https://ldkong.com" target='_blank'>Lingdong Kong</a>
<a href="https://tianxinhuang.github.io/" target='_blank'>Tianxin Huang</a>
<a href="https://www.comp.nus.edu.sg/~leegh/">Gim Hee Lee</a>
<br>
National University of Singapore
</p>

</p>

<p align="center">
<a href="https://dylanorange.github.io/projects/geal/static/files/geal.pdf" target='_blank'>
<img src="https://img.shields.io/badge/Paper-%F0%9F%93%83-lightblue">
</a>

<a href="https://dylanorange.github.io/projects/geal" target='_blank'>
<img src="https://img.shields.io/badge/Project-%F0%9F%94%97-blue">
</a>

<a href="https://huggingface.co/datasets/dylanorange/geal" target="_blank">
<img src="https://img.shields.io/badge/Dataset-%20Hugging%20Face-yellow">
</a>

</p>


## About 🛠️

**GEAL** is a novel framework designed to enhance the generalization and robustness of 3D affordance learning by leveraging pre-trained 2D models.

To facilitate robust 3D affordance learning across diverse real-world scenarios, we establish two 3D affordance robustness benchmarks, **PIAD-C** and **LASO-C**, built on the test sets of the commonly used PIAD and LASO datasets. We apply seven types of corruption:

- **Add Global**
- **Add Local**
- **Drop Global**
- **Drop Local**
- **Rotate**
- **Scale**
- **Jitter**

Each corruption is applied at five severity levels, resulting in a total of **4890 object-affordance pairings**, comprising **17 affordance categories** and **23 object categories** with **2047 distinct object shapes**.

<div style="text-align: center;">
  <img src="supp_benchmark_1.jpg" alt="PIAD-C and LASO-C benchmark examples" style="max-width: 100%; height: auto; width: 1000px;">
  <img src="supp_benchmark_2.jpg" alt="PIAD-C and LASO-C benchmark examples" style="max-width: 100%; height: auto; width: 1000px;">
</div>

## Updates 📰

- **[2024.12]** - We have released our **PIAD-C** and **LASO-C** datasets! 🎉📂


## Dataset and Code Release 🚀

We are excited to announce the release of our dataset and dataloader:

- **Dataset**: Available in the `PIAD-C.tar.gz` and `LASO-C.tar.gz` archives 📜
- **Dataloader**: Available in the `dataset.py` file 📜

Stay tuned! The evaluation code will be released soon. 🔧✨
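The corrupted point clouds described in the README above ship pre-computed in the released `.pkl` files, so nothing needs to be regenerated. Purely to illustrate what a corruption such as **Jitter** at increasing severity means, here is a minimal sketch; the per-level noise scales are assumptions chosen for illustration, not the exact recipe used to build PIAD-C and LASO-C:

```python
# Illustrative sketch only: the released .pkl files already contain the
# corrupted point clouds. The noise scales below are assumed values.
import numpy as np

def jitter_points(points: np.ndarray, level: int, seed: int = 0) -> np.ndarray:
    """Add Gaussian noise to an (N, 3) point cloud; `level` runs 0..4."""
    sigmas = [0.01, 0.02, 0.03, 0.04, 0.05]  # assumed per-level std-devs
    rng = np.random.default_rng(seed)
    noise = rng.normal(scale=sigmas[level], size=points.shape)
    return (points + noise).astype(points.dtype)

# Example: corrupt a random 2048-point cloud at the mildest and harshest levels.
pc = np.random.rand(2048, 3).astype(np.float32)
mild = jitter_points(pc, level=0)
harsh = jitter_points(pc, level=4)
```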
dataset.py
ADDED
@@ -0,0 +1,72 @@
import os
import pickle

import numpy as np
import pandas as pd
from torch.utils.data import Dataset

CLASSES = ["Bag", "Bed", "Bowl", "Clock", "Dishwasher", "Display", "Door", "Earphone", "Faucet",
           "Hat", "StorageFurniture", "Keyboard", "Knife", "Laptop", "Microwave", "Mug",
           "Refrigerator", "Chair", "Scissors", "Table", "TrashCan", "Vase", "Bottle"]

AFFORD_CL = ['lay', 'sit', 'support', 'grasp', 'lift', 'contain', 'open', 'wrap_grasp', 'pour',
             'move', 'display', 'push', 'pull', 'listen', 'wear', 'press', 'cut', 'stab']


def pc_normalize(pc):
    # Center the point cloud and scale it into the unit sphere.
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
    pc = pc / m
    return pc, centroid, m


class Corrupt(Dataset):
    """Dataloader for the PIAD-C / LASO-C corruption benchmarks."""

    def __init__(self,
                 corrupt_type='scale',
                 level=0):

        # Replace with the path to the extracted LASO-C / PIAD-C dataset.
        data_root = 'LASO-C'

        # Each corruption type and severity level is stored as <type>_<level>.pkl.
        file_name = f'{corrupt_type}_{level}.pkl'

        self.corrupt_type = corrupt_type
        self.level = level

        self.cls2idx = {cls.lower(): np.array(i).astype(np.int64) for i, cls in enumerate(CLASSES)}
        self.aff2idx = {cls: np.array(i).astype(np.int64) for i, cls in enumerate(AFFORD_CL)}

        with open(os.path.join(data_root, 'point', file_name), 'rb') as f:
            self.anno = pickle.load(f)

        self.question_df = pd.read_csv(os.path.join(data_root, 'text', 'Affordance-Question.csv'))

    def find_rephrase(self, df, object_name, affordance):
        # Look up the question template for this object-affordance pair.
        qid = 'Question0'
        result = df.loc[(df['Object'] == object_name) & (df['Affordance'] == affordance), [qid]]
        if not result.empty:
            return result.iloc[0][qid]
        else:
            raise NotImplementedError

    def __getitem__(self, index):
        data = self.anno[index]
        cls = data['class']
        affordance = data['affordance']
        gt_mask = data['mask']
        point_set = data['point']
        point_set, _, _ = pc_normalize(point_set)

        question = self.find_rephrase(self.question_df, cls, affordance)

        affordance = self.aff2idx[affordance]

        # (N, 3) -> (3, N) for point-cloud backbones.
        point_input = point_set.transpose()

        return point_input, self.cls2idx[cls], gt_mask, question, affordance

    def __len__(self):
        return len(self.anno)
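As a usage sketch for the `Corrupt` class above: the corruption type and severity level below are example values (they must match the `.pkl` file names inside the extracted archive), and `data_root` in `dataset.py` is assumed to already point at the extracted folder.

```python
# Usage sketch, assuming data_root in dataset.py points at the extracted
# LASO-C/ (or PIAD-C/) folder and that 'jitter_2.pkl' exists there.
from torch.utils.data import DataLoader

from dataset import Corrupt

test_set = Corrupt(corrupt_type='jitter', level=2)
loader = DataLoader(test_set, batch_size=16, shuffle=False, num_workers=4)

for point_input, cls_idx, gt_mask, question, affordance in loader:
    # point_input: (B, 3, N) normalized point clouds
    # cls_idx / affordance: integer object-class and affordance labels
    # question: batch of natural-language affordance questions
    print(point_input.shape, cls_idx.shape, affordance.shape)
    break
```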
supp_benchmark_1.jpg
ADDED
(binary image file, tracked with Git LFS)
supp_benchmark_2.jpg
ADDED
(binary image file, tracked with Git LFS)