liesdillen committed on
Commit da7cd93
1 Parent(s): 0e41a86

Upload 4 files
S3_101_102_103_validation_epoch_10.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15b55be6c0e57c14ddfece3466f7ab50415f68105ef9105ff0c1ea69616baa50
+ size 1100794
data_loader_interface.py ADDED
@@ -0,0 +1,115 @@
+ import math
+ import random
+ import matplotlib.pyplot as plt
+ import scipy
+ import statistics as st
+ import os
+ import numpy as np
+
+ from sklearn.base import TransformerMixin
+ from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, LabelEncoder
+
+
+ class NDStandardScaler(TransformerMixin):
+     def __init__(self, **kwargs):
+         self._scaler = StandardScaler(copy=True, **kwargs)
+         self._orig_shape = None
+
+     def fit(self, X, **kwargs):
+         X = np.array(X)
+         # Save the original shape so the flattened X can later be reshaped
+         # back to it
+         if len(X.shape) > 1:
+             self._orig_shape = X.shape[1:]
+
+         X = self._flatten(X)  # reshape the data into the two-dimensional format StandardScaler expects
+         self._scaler.fit(X, **kwargs)
+         return self
+
+     def transform(self, X, **kwargs):
+         X = np.array(X)       # convert X to a NumPy array
+         X = self._flatten(X)  # bring the data into the same format used during fit
+         X = self._scaler.transform(X, **kwargs)
+         X = self._reshape(X)  # restore the original shape
+         return X
+
+     def _flatten(self, X):
+         # Reshape X to <= 2 dimensions, e.g. [10, 28, 28, 3] to [10, 28*28*3]
+         if len(X.shape) > 2:
+             n_dims = np.prod(self._orig_shape)  # number of elements per sample in the original shape
+             X = X.reshape(-1, n_dims)           # flatten each sample to a single row
+         return X
+
+     def _reshape(self, X):
+         # Reshape X back to its original shape
+         if len(X.shape) >= 2:
+             X = X.reshape(-1, *self._orig_shape)
+         return X
+
+
+ # Subtract the mean of each column from the data (per-column centring to reduce bias)
+ def mean_sub(data):
+     EPSILON = 1e-12  # currently unused
+     for i in range(data.shape[1]):
+         data[:, i] = data[:, i] - np.mean(data[:, i])  # / np.std(data[subject][action]['imu_signals'][:,i,c])
+     return data
+
+
+ def degree_to_radians(signals):
+     return np.array(list(map(lambda signal: np.array(list(map(math.radians, signal))), signals)))
+
+
+ def get_start_indices(n_timesteps, window_len, step_size):
+     n_timesteps_valid = n_timesteps - window_len + 1
+     if step_size <= 0:
+         step_size = 1
+
+     start_indices = np.arange(0, n_timesteps_valid, step_size, dtype=int)
+     return start_indices
+
+
+ def get_data(data):
+     window_len = 243
+     step_size = 1
+     X = []
+
+     print("data shape: ", data.shape)
+
+     start_indices = get_start_indices(data.shape[0], window_len=window_len, step_size=step_size)
+     for k in start_indices:
+         this_window_data = data[k:k + window_len, :]
+         # NOTE: mean_sub operates in place on this slice; with overlapping windows
+         # (step_size=1) this also alters the data seen by later windows.
+         this_window_data = mean_sub(this_window_data)
+
+         X.append(this_window_data)
+
+     print("data shape after: ", np.array(X).shape)
+
+     return X
+
+
+ def select_random_samples(X, sample_percent):
+     num_samples = len(X)
+     num_samples_to_select = int(num_samples * sample_percent)
+     selected_indices = random.sample(range(num_samples), num_samples_to_select)
+     selected_indices.sort()
+     selected_samplesX = [X[i] for i in selected_indices]
+     return selected_samplesX
+
+
+ def load_imu(data_array):
+     test_data = []
+
+     X = get_data(data_array)
+     print("X shape: ", np.array(X).shape)
+
+     samplesX = select_random_samples(X, 1)  # sample_percent=1 keeps every window
+     print("samplesX shape: ", np.array(samplesX).shape)
+
+     test_data.extend(samplesX)
+     test_data = np.array(test_data)
+
+     print('')
+     print(len(test_data))
+
+     return test_data
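For orientation, a small shape-only sketch of what load_imu produces; the two-channel dummy signal below is arbitrary and only for illustration:

import numpy as np
from data_loader_interface import load_imu

dummy_signal = np.random.randn(500, 2)   # 500 timesteps, 2 channels (illustrative only)
windows = load_imu(dummy_signal)         # 500 - 243 + 1 = 258 overlapping 243-frame windows
print(windows.shape)                     # (258, 243, 2)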
testing_interface.py ADDED
@@ -0,0 +1,83 @@
+ import datetime
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+
+ from matplotlib import pyplot as plt
+ from data_loader_interface import load_imu
+ from sklearn.metrics import accuracy_score, f1_score, confusion_matrix, classification_report, average_precision_score
+ from torch.utils.data import Dataset, DataLoader, TensorDataset, ConcatDataset
+ from einops import rearrange
+ from torch.optim.lr_scheduler import StepLR
+
+
+ def test(model, test_loader, output_file):
+     running_loss = 0.0  # no loss is computed here (the test data is unlabelled), so this stays 0
+     predlist = torch.zeros(0, dtype=torch.long, device='cpu')
+     lbllist = torch.zeros(0, dtype=torch.long, device='cpu')
+     number_of_nan_loss = 0
+
+     with torch.no_grad(), open(output_file, 'w') as f:
+         for batch_idx, data in enumerate(test_loader):
+             data = data.float().to('cpu')
+             outputs = model(data)
+             _, predicted = torch.max(outputs.data, 1)
+             predlist = torch.cat([predlist, predicted.view(-1).cpu()])
+             # lbllist = torch.cat([lbllist, labels.view(-1).cpu()])
+
+             for i, prediction in enumerate(predicted):
+                 f.write(f"{prediction.item()}\n")
+
+     return running_loss / len(test_loader), predlist
+
+
+ class MyDataset_labeled(Dataset):
+     def __init__(self, x):
+         self.x_data = torch.from_numpy(np.array(x)).to(torch.float)
+         self.len = x.shape[0]
+
+     def __getitem__(self, idx):
+         return self.x_data[idx]
+
+     def __len__(self):
+         return self.len
+
+
+ def model_defining(test_data_array, name_model, output_file):
+
+     test_losses = []
+     accuracies = []
+     f1_scores = []
+     cm_list = []
+     cr_list = []
+
+     model = torch.load(str(name_model) + ".pth", map_location=torch.device('cpu'))
+     model.eval()
+
+     test_data = load_imu(test_data_array)
+     test_data = rearrange(test_data, 'n t c -> n c t')  # (windows, frames, channels) -> (windows, channels, frames)
+
+     test_dataset = MyDataset_labeled(test_data)
+     test_dataloaders = DataLoader(test_dataset, batch_size=256, shuffle=False, drop_last=False)
+
+     print("Data loaded ...")
+
+     test_loss, predlist = test(model, test_dataloaders, output_file)
+     test_losses.append(test_loss)
+
+
+ def main():
+
+     name_model = "./models_chadwick/training_3S_model_validation/S3_101_102_103_validation_epoch_10"
+     train_subjects = []
+     test_subjects = ['104']
+     output_file = "S3_101_102_103_validation_epoch_10_tested_104_predicted.txt"
+     label_file = "S3_101_102_103_validation_epoch_10_tested_104_label.txt"
+     classification_file = "S3_101_102_103_validation_epoch_10_tested_104_classification.txt"
+
+     # NOTE: model_defining() takes (test_data_array, name_model, output_file); the subject
+     # lists and the label/classification paths above are not accepted by it, so the raw
+     # IMU array for subject 104 has to be loaded separately before the call.
+     test_data_array = ...  # placeholder: IMU array (timesteps x channels) for test_subjects
+     model_defining(test_data_array, name_model, output_file)
+
+
+ if __name__ == "__main__":
+     main()
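A rough usage sketch of this interface, assuming the .pth checkpoint from this commit sits in the working directory; the random array is a stand-in for a real recording, and its channel count must match what the saved model was trained on:

import numpy as np
from testing_interface import model_defining

imu_array = np.random.randn(1000, 2)  # placeholder for the subject's IMU recording
model_defining(imu_array,
               name_model="S3_101_102_103_validation_epoch_10",
               output_file="predictions.txt")  # writes one predicted class index per window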
tryout_v3_model.py ADDED
@@ -0,0 +1,156 @@
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ class TemporalModelBase(nn.Module):
+     """
+     Do not instantiate this class directly.
+     """
+
+     def __init__(self, num_joints_in, in_features, num_joints_out,
+                  filter_widths, causal, dropout, channels, sagittal=0, freezing=0, fusion=0):
+         super().__init__()
+
+         # Validate input
+         for fw in filter_widths:
+             assert fw % 2 != 0, 'Only odd filter widths are supported'
+
+         self.num_joints_in = num_joints_in
+         self.in_features = in_features
+         self.num_joints_out = 2  # hard-coded to two outputs; the num_joints_out argument is ignored here
+         self.filter_widths = filter_widths
+
+         # Initialize layers
+         self.drop = nn.Dropout(dropout)    # dropout for regularization
+         self.relu = nn.ReLU(inplace=True)  # non-linearity
+
+         self.pad = [filter_widths[0] // 2]
+         self.expand_bn = nn.BatchNorm1d(channels, momentum=0.1)      # normalize channels
+         self.expand_bn2 = nn.BatchNorm2d(in_features, momentum=0.1)  # normalize features
+
+         self.shrink = nn.Conv1d(channels, num_joints_out, 1)
+         self.flatten = nn.Flatten()
+
+     def set_bn_momentum(self, momentum):
+         """
+         Batch normalization normalizes the inputs of each layer, which helps stabilize and
+         speed up training. The momentum parameter controls how much the statistics of the
+         current batch contribute to the running mean and variance of the layer.
+
+         In some cases, especially during fine-tuning or transfer learning, it is useful to
+         adjust the momentum dynamically; for example, a lower momentum adapts faster to a
+         new data distribution. This method sets the momentum of all batch-norm layers.
+         """
+         self.expand_bn.momentum = momentum
+         for bn in self.layers_bn:
+             bn.momentum = momentum
+
+     def receptive_field(self):
+         """
+         Return the total receptive field of this model as a number of frames.
+         The receptive field is the region of the input that influences a single output of
+         the network; it is determined by the filter widths and dilations of the
+         convolutional layers.
+         """
+         frames = 0
+         for f in self.pad:
+             frames += f
+         return 1 + 2 * frames
+
+     def total_causal_shift(self):
+         """
+         Return the asymmetric offset for sequence padding.
+         The returned value is typically 0 if causal convolutions are disabled,
+         otherwise it is half the receptive field.
+         """
+         frames = self.causal_shift[0]
+         next_dilation = self.filter_widths[0]
+         for i in range(1, len(self.filter_widths)):
+             frames += self.causal_shift[i] * next_dilation
+             next_dilation *= self.filter_widths[i]
+         return frames
+
+     def forward(self, x):
+         x = self._forward_blocks(x)
+         return x
+
+
+ class TemporalModel(TemporalModelBase):
+     """
+     Reference 3D pose estimation model with temporal convolutions.
+     This implementation can be used for all use cases.
+     """
+
+     def __init__(self, num_joints_in, in_features, num_joints_out,
+                  filter_widths, causal=False, dropout=0.25, channels=1024, dense=False, sagittal=0, freezing=0, fusion=0):
+         """
+         Initialize this model.
+
+         Arguments:
+         num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
+         in_features -- number of input features for each joint (typically 2 for 2D input)
+         num_joints_out -- number of output joints (can be different from the input)
+         filter_widths -- list of convolution widths, which also determines the number of blocks and the receptive field
+         causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
+         dropout -- dropout probability
+         channels -- number of convolution channels
+         dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
+         """
+         super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels, sagittal=sagittal, freezing=freezing, fusion=fusion)
+
+         self.expand_conv = nn.Conv1d(num_joints_in * in_features, channels, filter_widths[0], bias=False)
+
+         layers_conv = []
+         layers_bn = []
+
+         self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
+         next_dilation = filter_widths[0]
+         for i in range(1, len(filter_widths)):
+             self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
+             print(self.pad)
+             print(next_dilation)
+             self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
+             print("shift", self.causal_shift)
+             layers_conv.append(nn.Conv1d(channels, channels,
+                                          filter_widths[i] if not dense else (2 * self.pad[-1] + 1),
+                                          dilation=next_dilation if not dense else 1,
+                                          bias=False))
+             layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
+             layers_conv.append(nn.Conv1d(channels, channels, 1, dilation=1, bias=False))
+             layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
+
+             next_dilation *= filter_widths[i]
+
+         self.layers_conv = nn.ModuleList(layers_conv)
+         self.layers_bn = nn.ModuleList(layers_bn)
+
+     def set_freezing(self, freezing, expand_bn, expand_bn2, shrink):
+         for param in self.shrink.parameters():
+             param.requires_grad = False
+         for param in self.expand_bn.parameters():
+             param.requires_grad = False
+         if freezing > 0:
+             # Freeze the initial layer (expand_conv) and potentially the first n layers_conv
+             for param in self.expand_conv.parameters():
+                 param.requires_grad = False
+             for i in range(freezing):
+                 print(i)
+                 print("i%2 = ", i % 2)
+                 if i % 2 == 0:
+                     for param in self.layers_conv[i].parameters():
+                         param.requires_grad = False  # freeze Conv1d layer
+                     for param in self.layers_bn[i].parameters():
+                         param.requires_grad = False  # freeze BatchNorm layer (optional)
+
+     def _forward_blocks(self, x):
+
+         # First block: Conv1d + BatchNorm1d + ReLU + Dropout
+         x = self.drop(self.relu(self.expand_bn(self.expand_conv(x))))
+
+         # Repeated residual blocks of Conv1d + BatchNorm1d + ReLU + Dropout
+         for i in range(len(self.pad) - 1):  # one residual block per additional filter width
+             pad = self.pad[i + 1]
+             # print(pad)
+             shift = self.causal_shift[i + 1]
+             res = x[:, :, pad + shift: x.shape[2] - pad + shift]
+             # print("i before the first x = ", i)
+             x = self.drop(self.relu(self.layers_bn[2 * i](self.layers_conv[2 * i](x))))
+             # print("i before the second x = ", i)
+             x = res + self.drop(self.relu(self.layers_bn[2 * i + 1](self.layers_conv[2 * i + 1](x))))
+
+         x = self.shrink(x)  # 1x1 convolution down to num_joints_out channels
+         x = self.flatten(x)
+         return x
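A hedged sanity-check sketch: with five filter widths of 3, the receptive field works out to 3^5 = 243 frames, which matches the window_len of 243 used in data_loader_interface.py. The channel configuration below (num_joints_in=6, in_features=1) is only an assumption for illustration, and set_bn_momentum is shown purely as an example call:

import torch
from tryout_v3_model import TemporalModel

model = TemporalModel(num_joints_in=6, in_features=1, num_joints_out=2,
                      filter_widths=[3, 3, 3, 3, 3])   # 6 input channels assumed
print(model.receptive_field())   # 243 = 1 + 2*(1 + 3 + 9 + 27 + 81)
model.set_bn_momentum(0.01)      # e.g. lower the batch-norm momentum when fine-tuning

x = torch.randn(8, 6, 243)       # (batch, channels, frames)
model.eval()
with torch.no_grad():
    print(model(x).shape)        # torch.Size([8, 2]) -- one score per output class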