corrosivelogic committed
Commit 177513b
1 Parent(s): 54d66c4

Add submission scripts

Files changed (5)
  1. inference.py +0 -30
  2. model.py +0 -26
  3. preprocess_data.py +0 -24
  4. script.py +52 -24
  5. train.py +0 -28
inference.py DELETED
@@ -1,30 +0,0 @@
-
- import pandas as pd
- import torch
- from preprocess_data import load_data, preprocess_data
- from model import WireframeModel
-
- def inference(model, test_data):
-     K, R, t, images, points3d, cameras, depthcm, mesh_vertices, mesh_faces, face_semantics, edge_semantics, wf_vertices, wf_edges = preprocess_data(test_data)
-     model.eval()
-     with torch.no_grad():
-         pred_vertices = model(torch.tensor(K, dtype=torch.float32)).numpy()
-     pred_edges = []  # Implement your edge detection logic here
-     for i in range(len(pred_vertices)):
-         pred_edges.append([[i, (i+1) % len(pred_vertices)]])
-     return [{"__key__": test_data['order_id'][i], "wf_vertices": pred_vertices[i], "wf_edges": pred_edges[i]} for i in range(len(pred_vertices))]
-
- # Load test data
- test_data = load_data('test_data.parquet')
-
- # Initialize and load the model
- model = WireframeModel()
- model.load_state_dict(torch.load('model.pth'))
-
- # Generate predictions
- predictions = inference(model, test_data)
-
- # Save predictions to parquet file
- pred_df = pd.DataFrame(predictions)
- pred_df.to_parquet('submission.parquet', index=False)
-
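Note: the deleted `inference` ran the model directly on the stacked 3x3 intrinsics `K`, while `WireframeModel` (deleted below) starts with `nn.Conv2d(3, 64, ...)` and therefore expects `(N, 3, H, W)` image batches. A minimal defensive sketch of the shape check that would have caught this mismatch (the helper name is hypothetical, not from this repo):

    import torch

    def check_image_batch(x: torch.Tensor) -> None:
        # Conv2d(3, 64, ...) needs a 4-D (N, C=3, H, W) batch; stacked 3x3
        # intrinsics arrive as (N, 3, 3) and would fail inside the model.
        if x.dim() != 4 or x.size(1) != 3:
            raise ValueError(f"expected (N, 3, H, W) image batch, got {tuple(x.shape)}")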
model.py DELETED
@@ -1,26 +0,0 @@
-
- import torch
- import torch.nn as nn
-
- class WireframeModel(nn.Module):
-     def __init__(self):
-         super(WireframeModel, self).__init__()
-         self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
-         self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
-         self.fc1 = nn.Linear(128 * 8 * 8, 1024)
-         self.fc2 = nn.Linear(1024, 512)
-         self.fc3 = nn.Linear(512, 256)
-         self.fc4 = nn.Linear(256, 3)
-         self.dropout = nn.Dropout(0.5)
-
-     def forward(self, x):
-         x = torch.relu(self.conv1(x))
-         x = torch.relu(self.conv2(x))
-         x = x.view(x.size(0), -1)
-         x = torch.relu(self.fc1(x))
-         x = self.dropout(x)
-         x = torch.relu(self.fc2(x))
-         x = torch.relu(self.fc3(x))
-         x = self.fc4(x)
-         return x
-
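For reference, the deleted model flattens to `128 * 8 * 8` features, which only matches when the conv stack emits 8x8 maps, i.e. for 8x8 inputs, since stride-1, padding-1 convolutions preserve spatial size. A hedged sketch of a shape-robust variant using adaptive pooling (class name and layer sizes are illustrative, not part of this commit):

    import torch
    import torch.nn as nn

    class WireframeModelPooled(nn.Module):
        """Hypothetical variant: adaptive pooling fixes the flatten size
        regardless of input resolution (a sketch, not the committed model)."""
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
            self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
            self.pool = nn.AdaptiveAvgPool2d((8, 8))  # guarantees 128 * 8 * 8 features
            self.head = nn.Sequential(
                nn.Linear(128 * 8 * 8, 1024), nn.ReLU(), nn.Dropout(0.5),
                nn.Linear(1024, 3),
            )

        def forward(self, x):
            x = torch.relu(self.conv1(x))
            x = torch.relu(self.conv2(x))
            x = self.pool(x)
            return self.head(x.flatten(1))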
preprocess_data.py DELETED
@@ -1,24 +0,0 @@
-
- import pandas as pd
- import numpy as np
-
- def load_data(file_path):
-     data = pd.read_parquet(file_path)
-     return data
-
- def preprocess_data(data):
-     K = np.stack(data['K'].values)
-     R = np.stack(data['R'].values)
-     t = np.stack(data['t'].values)
-     images = data['images']
-     points3d = data['points3d']
-     cameras = data['cameras']
-     depthcm = np.stack(data['depthcm'].values)
-     mesh_vertices = np.stack(data['mesh_vertices'].values)
-     mesh_faces = np.stack(data['mesh_faces'].values)
-     face_semantics = np.stack(data['face_semantics'].values)
-     edge_semantics = np.stack(data['edge_semantics'].values)
-     wf_vertices = np.stack(data['wf_vertices'].values)
-     wf_edges = np.stack(data['wf_edges'].values)
-     return K, R, t, images, points3d, cameras, depthcm, mesh_vertices, mesh_faces, face_semantics, edge_semantics, wf_vertices, wf_edges
-
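One caveat in the deleted preprocessing: `np.stack` requires every per-sample array to share a shape, but wireframe vertex and edge counts vary per sample (the new `script.py` stores them as plain lists for that reason), so stacking columns like `wf_vertices` would raise on real data. A sketch of keeping the ragged columns as lists, assuming the same column names:

    import numpy as np
    import pandas as pd

    def preprocess_ragged(data: pd.DataFrame):
        # Fixed-shape per-sample arrays (e.g. 3x3 intrinsics) stack cleanly...
        K = np.stack(data['K'].to_numpy())
        # ...but variable-length wireframes must stay as lists of arrays,
        # since np.stack raises on mismatched shapes.
        wf_vertices = [np.asarray(v) for v in data['wf_vertices']]
        wf_edges = [np.asarray(e) for e in data['wf_edges']]
        return K, wf_vertices, wf_edges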
script.py CHANGED
@@ -1,30 +1,58 @@
-
- import pandas as pd
- import torch
- from preprocess_data import load_data, preprocess_data
- from model import WireframeModel
-
- def inference(model, test_data):
-     K, R, t, images, points3d, cameras, depthcm, mesh_vertices, mesh_faces, face_semantics, edge_semantics, wf_vertices, wf_edges = preprocess_data(test_data)
-     model.eval()
-     with torch.no_grad():
-         pred_vertices = model(torch.tensor(K, dtype=torch.float32)).numpy()
-     pred_edges = []  # Implement your edge detection logic here
-     for i in range(len(pred_vertices)):
-         pred_edges.append([[i, (i+1) % len(pred_vertices)]])
-     return [{"__key__": test_data['order_id'][i], "wf_vertices": pred_vertices[i], "wf_edges": pred_edges[i]} for i in range(len(pred_vertices))]
-
- # Load test data
- test_data = load_data('test_data.parquet')
-
- # Initialize and load the model
- model = WireframeModel()
- model.load_state_dict(torch.load('model.pth'))
-
- # Generate predictions
- predictions = inference(model, test_data)
-
- # Save predictions to parquet file
- pred_df = pd.DataFrame(predictions)
- pred_df.to_parquet('submission.parquet', index=False)
-
+ ### This is an example of the script that will be run in the test environment.
+ ### Some parts of the code are compulsory and you should NOT CHANGE THEM.
+ ### They are between '''---compulsory---''' comments.
+ ### You can change the rest of the code to define and test your solution.
+ ### However, you should not change the signature of the provided function.
+ ### The script saves a "submission.parquet" file in the current directory.
+ ### You can use any additional files and subdirectories to organize your code.
+
+ '''---compulsory---'''
+ import hoho; hoho.setup()  # YOU MUST CALL hoho.setup() BEFORE ANYTHING ELSE
+ '''---compulsory---'''
+
+ from pathlib import Path
+ from tqdm import tqdm
+ import pandas as pd
+ import numpy as np
+
+
+ def empty_solution(sample):
+     '''Return a minimal valid solution, i.e. 2 vertices and 1 edge.'''
+     return np.zeros((2, 3)), [(0, 1)]
+
+
+ if __name__ == "__main__":
+     print("------------ Loading dataset ------------")
+     params = hoho.get_params()
+
+     # By default it is usually better to use `get_dataset()` like this:
+     #
+     #     dataset = hoho.get_dataset(split='all')
+     #
+     # but in this case (because we don't do anything with the sample
+     # anyway) we set `decode=None`. We can set the `split` argument
+     # to 'train' or 'val' ('all' defaults back to 'train') if we are
+     # testing ourselves locally:
+     #
+     #     dataset = hoho.get_dataset(split='val', decode=None)
+     #
+     # On the test server `split` *must* be set to 'all'
+     # to compute both the public and private leaderboards.
+     dataset = hoho.get_dataset(split='all', decode=None)
+
+     print('------------ Now you can do your solution ---------------')
+     solution = []
+     for i, sample in enumerate(tqdm(dataset)):
+         # replace this with your solution
+         pred_vertices, pred_edges = empty_solution(sample)
+
+         solution.append({
+             '__key__': sample['__key__'],
+             'wf_vertices': pred_vertices.tolist(),
+             'wf_edges': pred_edges
+         })
+     print('------------ Saving results ---------------')
+     sub = pd.DataFrame(solution, columns=["__key__", "wf_vertices", "wf_edges"])
+     sub.to_parquet(Path(params['output_path']) / "submission.parquet")
+     print("------------ Done ------------")
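After a local run, the saved file can be sanity-checked before submitting. A minimal sketch, assuming the script was run with the current directory as `params['output_path']`:

    import pandas as pd

    sub = pd.read_parquet("submission.parquet")  # path assumes a local run
    assert list(sub.columns) == ["__key__", "wf_vertices", "wf_edges"]
    for _, row in sub.iterrows():
        # each row should hold >= 2 vertices and >= 1 edge, as empty_solution() does
        assert len(row["wf_vertices"]) >= 2 and len(row["wf_edges"]) >= 1
    print(f"{len(sub)} rows look structurally valid")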
train.py DELETED
@@ -1,28 +0,0 @@
-
- import torch
- import torch.optim as optim
- from preprocess_data import load_data, preprocess_data
- from model import WireframeModel
-
- # Load and preprocess data
- data = load_data('data.parquet')
- K, R, t, images, points3d, cameras, depthcm, mesh_vertices, mesh_faces, face_semantics, edge_semantics, wf_vertices, wf_edges = preprocess_data(data)
-
- # Initialize model, criterion and optimizer
- model = WireframeModel()
- criterion = torch.nn.MSELoss()
- optimizer = optim.Adam(model.parameters(), lr=0.001)
-
- # Training loop
- num_epochs = 100
- for epoch in range(num_epochs):
-     optimizer.zero_grad()
-     outputs = model(torch.tensor(K, dtype=torch.float32))
-     loss = criterion(outputs, torch.tensor(wf_vertices, dtype=torch.float32))
-     loss.backward()
-     optimizer.step()
-     print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item()}')
-
- # Save the model
- torch.save(model.state_dict(), 'model.pth')
-
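The deleted loop took a single full-batch gradient step per epoch. A hedged sketch of the same loss and optimizer in a mini-batch loop with `DataLoader`; the tensors here are random placeholders, since the real inputs came from the deleted `preprocess_data`:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # X, y stand in for the preprocessed inputs and vertex targets.
    X = torch.randn(64, 3, 8, 8)   # hypothetical image-like inputs
    y = torch.randn(64, 3)         # hypothetical vertex targets
    loader = DataLoader(TensorDataset(X, y), batch_size=16, shuffle=True)

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 3))
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(5):
        for xb, yb in loader:  # one optimizer step per mini-batch
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
        print(f"epoch {epoch + 1}: loss {loss.item():.4f}")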