navagg committed on
Commit 39b4c8c · 1 Parent(s): 5c88cf8

Upload 45 files

Files changed (46)
  1. .gitattributes +2 -0
  2. FaceAligner.py +58 -0
  3. HDI.py +119 -0
  4. SkinColorFilter.py +226 -0
  5. SkinDetect.py +120 -0
  6. __init__.py +1 -0
  7. base.py +377 -0
  8. bvp.py +344 -0
  9. chrom.py +33 -0
  10. cohface.py +47 -0
  11. dataset.py +66 -0
  12. default_test.cfg +103 -0
  13. detrending.py +13 -0
  14. ecg.py +64 -0
  15. elapse.py +19 -0
  16. errors.py +119 -0
  17. evm.py +142 -0
  18. evm2.py +133 -0
  19. filters.py +36 -0
  20. green.py +15 -0
  21. ica.py +43 -0
  22. jade.py +430 -0
  23. lgi.py +25 -0
  24. lgi_ppgi.py +61 -0
  25. mahnob.py +40 -0
  26. multi_dataset_analysis_SPLIT.py +283 -0
  27. nonparametric_tests.py +678 -0
  28. pbv.py +27 -0
  29. pca.py +39 -0
  30. pos.py +47 -0
  31. printutils.py +66 -0
  32. pure.py +74 -0
  33. pyramid.py +122 -0
  34. sample.cfg +69 -0
  35. sample.py +58 -0
  36. shape_predictor_5_face_landmarks.dat +3 -0
  37. shape_predictor_68_face_landmarks.dat +3 -0
  38. single_dataset_analysis.py +501 -0
  39. ssr.py +164 -0
  40. stats.py +344 -0
  41. stattests.py +614 -0
  42. testsuite.py +203 -0
  43. ubfc1.py +52 -0
  44. ubfc2.py +55 -0
  45. utils.py +275 -0
  46. video.py +703 -0
.gitattributes CHANGED
@@ -53,3 +53,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+shape_predictor_5_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
+shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
FaceAligner.py ADDED
@@ -0,0 +1,58 @@
+import numpy as np
+import cv2
+
+class FaceAligner:
+    def __init__(self, desiredLeftEye=(0.35, 0.35),
+                 desiredFaceWidth=256, desiredFaceHeight=None):
+        # store the desired output left eye position and the
+        # desired output face width + height
+        self.desiredLeftEye = desiredLeftEye
+        self.desiredFaceWidth = desiredFaceWidth
+        self.desiredFaceHeight = desiredFaceHeight
+
+        # if the desired face height is None, set it to the
+        # desired face width (normal behavior)
+        if self.desiredFaceHeight is None:
+            self.desiredFaceHeight = self.desiredFaceWidth
+
+    def align(self, image, r_eye, l_eye):
+        leftEyeCenter = np.array(l_eye)
+        rightEyeCenter = np.array(r_eye)
+
+        # compute the angle between the eye centroids
+        dY = rightEyeCenter[1] - leftEyeCenter[1]
+        dX = rightEyeCenter[0] - leftEyeCenter[0]
+        angle = np.degrees(np.arctan2(dY, dX))  # - 180
+
+        # compute the desired right eye x-coordinate based on the
+        # desired x-coordinate of the left eye
+        desiredRightEyeX = 1.0 - self.desiredLeftEye[0]
+
+        # determine the scale of the new resulting image by taking
+        # the ratio of the distance between eyes in the *current*
+        # image to the distance between eyes in the *desired* image
+        dist = np.sqrt((dX ** 2) + (dY ** 2))
+        desiredDist = (desiredRightEyeX - self.desiredLeftEye[0])
+        desiredDist *= self.desiredFaceWidth
+        scale = desiredDist / dist
+
+        # compute the center (x, y)-coordinates (i.e., the median point)
+        # between the two eyes in the input image
+        eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) // 2,
+                      (leftEyeCenter[1] + rightEyeCenter[1]) // 2)
+
+        # grab the rotation matrix for rotating and scaling the face
+        M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
+
+        # update the translation component of the matrix
+        tX = self.desiredFaceWidth * 0.5
+        tY = self.desiredFaceHeight * self.desiredLeftEye[1]
+        M[0, 2] += (tX - eyesCenter[0])
+        M[1, 2] += (tY - eyesCenter[1])
+
+        # apply the affine transformation
+        (w, h) = (self.desiredFaceWidth, self.desiredFaceHeight)
+        output = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC)
+
+        # return the aligned face
+        return output
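A minimal usage sketch of FaceAligner (the image path and eye coordinates are hypothetical; in practice they come from a landmark detector such as the dlib shape predictors shipped in this commit):

    import cv2
    from FaceAligner import FaceAligner

    frame = cv2.imread('face_frame.png')         # any face image (hypothetical path)
    aligner = FaceAligner(desiredFaceWidth=256)
    # hypothetical pixel coordinates of the right/left eye centers
    aligned = aligner.align(frame, r_eye=(180, 120), l_eye=(110, 118))
    print(aligned.shape)                         # (256, 256, 3)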
HDI.py ADDED
@@ -0,0 +1,119 @@
+from __future__ import division
+import numpy as np
+import scipy.stats.kde as kde
+
+def calc_min_interval(x, alpha):
+    """Internal method to determine the minimum interval of a given width.
+    Assumes that x is a sorted numpy array.
+    """
+
+    n = len(x)
+    cred_mass = 1.0 - alpha
+
+    interval_idx_inc = int(np.floor(cred_mass * n))
+    n_intervals = n - interval_idx_inc
+    interval_width = x[interval_idx_inc:] - x[:n_intervals]
+
+    if len(interval_width) == 0:
+        raise ValueError('Too few elements for interval calculation')
+
+    min_idx = np.argmin(interval_width)
+    hdi_min = x[min_idx]
+    hdi_max = x[min_idx + interval_idx_inc]
+    return hdi_min, hdi_max
+
+
+def hdi(x, alpha=0.05):
+    """Calculate highest posterior density (HPD) of array for given alpha.
+    The HPD is the minimum width Bayesian credible interval (BCI).
+    :Arguments:
+        x : Numpy array
+            An array containing MCMC samples
+        alpha : float
+            Desired probability of type I error (defaults to 0.05)
+    """
+
+    # Make a copy of trace
+    x = x.copy()
+    # For multivariate node
+    if x.ndim > 1:
+        # Transpose first, then sort
+        tx = np.transpose(x, list(range(x.ndim))[1:] + [0])
+        dims = np.shape(tx)
+        # Container list for intervals
+        intervals = np.resize(0.0, dims[:-1] + (2,))
+
+        # np.ndindex iterates over all index tuples of the leading
+        # dimensions (replaces the undefined make_indices helper)
+        for index in np.ndindex(*dims[:-1]):
+            # Sort trace
+            sx = np.sort(tx[index])
+            # Append to list
+            intervals[index] = calc_min_interval(sx, alpha)
+        # Transpose back before returning
+        return np.array(intervals)
+    else:
+        # Sort univariate node
+        sx = np.sort(x)
+        return np.array(calc_min_interval(sx, alpha))
+
+
+def hdi2(sample, alpha=0.05, roundto=2):
+    """Calculate highest posterior density (HPD) of array for given alpha.
+    The HPD is the minimum width Bayesian credible interval (BCI).
+    The function works for multimodal distributions, returning more than one mode.
+    Parameters
+    ----------
+    sample : Numpy array or python list
+        An array containing MCMC samples
+    alpha : float
+        Desired probability of type I error (defaults to 0.05)
+    roundto: integer
+        Number of digits after the decimal point for the results
+    Returns
+    ----------
+    hpd: list of (lower, upper) bounds, one pair per HPD interval
+    """
+    sample = np.asarray(sample)
+    sample = sample[~np.isnan(sample)]
+    # get upper and lower bounds
+    l = np.min(sample)
+    u = np.max(sample)
+    density = kde.gaussian_kde(sample)
+    x = np.linspace(l, u, 2000)
+    y = density.evaluate(x)
+    # y = density.evaluate(x, l, u)  # waiting for PR to be accepted
+    xy_zipped = zip(x, y / np.sum(y))
+    xy = sorted(xy_zipped, key=lambda x: x[1], reverse=True)
+    xy_cum_sum = 0
+    hdv = []
+
+    for val in xy:
+        xy_cum_sum += val[1]
+        hdv.append(val[0])
+        if xy_cum_sum >= (1 - alpha):
+            break
+    hdv.sort()
+    diff = (u - l) / 20  # differences of 5%
+    hpd = []
+    hpd.append(round(min(hdv), roundto))
+
+    for i in range(1, len(hdv)):
+        if hdv[i] - hdv[i - 1] >= diff:
+            hpd.append(round(hdv[i - 1], roundto))
+            hpd.append(round(hdv[i], roundto))
+    hpd.append(round(max(hdv), roundto))
+    ite = iter(hpd)
+    hpd = list(zip(ite, ite))
+    modes = []
+    for value in hpd:
+        x_hpd = x[(x > value[0]) & (x < value[1])]
+        y_hpd = y[(x > value[0]) & (x < value[1])]
+
+        modes.append(round(x_hpd[np.argmax(y_hpd)], roundto))
+    return hpd, x, y, modes
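A minimal sketch of both estimators on synthetic samples (the arrays below are illustrative, not from the repository):

    import numpy as np
    from HDI import hdi, hdi2

    rng = np.random.default_rng(0)
    unimodal = rng.normal(0, 1, 5000)
    bimodal = np.concatenate([rng.normal(-3, 0.5, 2500), rng.normal(3, 0.5, 2500)])

    print(hdi(unimodal, alpha=0.05))             # one (min, max) credible interval
    intervals, x, y, modes = hdi2(bimodal, alpha=0.05)
    print(intervals, modes)                      # two (lower, upper) pairs and two modes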
SkinColorFilter.py ADDED
@@ -0,0 +1,226 @@
+# BSD 3-Clause “New” or “Revised” License
+#
+# Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
+# Written by Guillaume Heusch <guillaume.heusch@idiap.ch>
+#
+# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# Original authors:
+# Guillaume HEUSCH <guillaume.heusch@idiap.ch> 29/07/2015
+#
+# Contributors:
+# Michele Maione <mikymaione@hotmail.it> 07/03/2019
+
+import numpy
+
+
+class SkinColorFilter():
+    """
+    This class implements a number of functions to perform skin color filtering.
+    It is based on the work published in "Adaptive skin segmentation via feature-based face detection",
+    M.J. Taylor and T. Morris, Proc SPIE Photonics Europe, 2014 [taylor-spie-2014]_
+    Attributes
+    ----------
+    mean: numpy.ndarray | dim: 2
+        the mean skin color
+    covariance: numpy.ndarray | dim: 2x2
+        the covariance matrix of the skin color
+    covariance_inverse: numpy.ndarray | dim: 2x2
+        the inverse covariance matrix of the skin color
+    circular_mask: numpy.ndarray
+        mask of the size of the image, defining a circular region in the center
+    luma_mask: numpy.ndarray
+        mask of the size of the image, defining valid luma values
+    """
+
+    def __init__(self):
+        self.mean = numpy.array([0.0, 0.0])
+        self.covariance = numpy.zeros((2, 2), 'float64')
+        self.covariance_inverse = numpy.zeros((2, 2), 'float64')
+
+    def __generate_circular_mask(self, image, radius_ratio=0.4):
+        # mask is True inside a circle centered in the image,
+        # with radius = radius_ratio * image height
+        w = image.shape[0]
+        h = image.shape[1]
+        radius = radius_ratio * h
+
+        x_center = h / 2
+        y_center = w / 2
+
+        center = [int(x_center), int(y_center)]
+
+        Y, X = numpy.ogrid[:w, :h]
+        dist_from_center = numpy.sqrt((X - center[0])**2 + (Y - center[1])**2)
+
+        mask = dist_from_center <= radius
+
+        self.circular_mask = mask
+
+    '''def __generate_circular_mask(self, image, radius_ratio=0.4):
+        """
+        This function will generate a circular mask to be applied to the image.
+        The mask will be true for the pixels contained in a circle centered in the image center, and with radius equal to radius_ratio * the image's height.
+        Parameters
+        ----------
+        image: numpy.ndarray
+            The face image.
+        radius_ratio: float:
+            The ratio of the image's height to define the radius of the circular region. Defaults to 0.4.
+        """
+        w = image.shape[0]
+        h = image.shape[1]
+
+        x_center = w / 2
+        y_center = h / 2
+
+        # arrays with the image coordinates
+        X = numpy.zeros((w, h))
+
+        import code
+        code.interact(local=locals())
+
+        X[:] = range(0, w)
+
+        Y = numpy.zeros((h, w))
+        Y[:] = range(0, h)
+        Y = numpy.transpose(Y)
+
+        # translate s.t. the center is the origin
+        X -= x_center
+        Y -= y_center
+
+        # condition to be inside of a circle: x^2 + y^2 < r^2
+        radius = radius_ratio * h
+
+        # x ^ 2 + y ^ 2 < r ^ 2
+        cm = (X ** 2 + Y ** 2) < (radius ** 2)  # dim : w x h
+        self.circular_mask = cm'''
+
+    def __remove_luma(self, image):
+        """
+        This function removes pixels with extreme luma values.
+        Some pixels are considered as non-skin if their intensity is either too high or too low.
+        The luma value for all pixels inside a provided circular mask is calculated. Pixels for which the luma value deviates more than 1.5 * standard deviation are pruned.
+        Parameters
+        ----------
+        image: numpy.ndarray
+            The face image.
+        """
+
+        # compute the mean and std of luma values on non-masked pixels only
+        R = 0.299 * image[self.circular_mask, 0]
+        G = 0.587 * image[self.circular_mask, 1]
+        B = 0.114 * image[self.circular_mask, 2]
+
+        luma = R + G + B
+
+        m = numpy.mean(luma)
+        s = numpy.std(luma)
+
+        # apply the filtering to the whole image to get the luma mask
+        R = 0.299 * image[:, :, 0]
+        G = 0.587 * image[:, :, 1]
+        B = 0.114 * image[:, :, 2]
+
+        luma = R + G + B
+
+        # dim : image.x x image.y
+        lm = numpy.logical_and((luma > (m - 1.5 * s)), (luma < (m + 1.5 * s)))
+        self.luma_mask = lm
+
+    def __RG_Mask(self, image, dtype=None):
+        # dim: image.x x image.y
+        channel_sum = image[:, :, 0].astype('float64') + image[:, :, 1] + image[:, :, 2]
+
+        # dim: image.x x image.y
+        nonzero_mask = numpy.logical_or(numpy.logical_or(image[:, :, 0] > 0, image[:, :, 1] > 0), image[:, :, 2] > 0)
+
+        # dim: image.x x image.y
+        R = numpy.zeros((image.shape[0], image.shape[1]), dtype)
+        R[nonzero_mask] = image[nonzero_mask, 0] / channel_sum[nonzero_mask]
+
+        # dim: image.x x image.y
+        G = numpy.zeros((image.shape[0], image.shape[1]), dtype)
+        G[nonzero_mask] = image[nonzero_mask, 1] / channel_sum[nonzero_mask]
+
+        return R, G
+
+    def estimate_gaussian_parameters(self, image):
+        """
+        This function estimates the parameters of the skin color distribution.
+        The mean and covariance matrix of the skin pixels in the normalised rg colorspace are computed.
+        Note that only the pixels for which both the circular and the luma mask are 'True' are considered.
+        Parameters
+        ----------
+        image: numpy.ndarray
+            The face image.
+        """
+        self.__generate_circular_mask(image)
+        self.__remove_luma(image)
+
+        # dim: image.x x image.y
+        mask = numpy.logical_and(self.luma_mask, self.circular_mask)
+
+        # get the mean
+        # R dim: image.x x image.y
+        # G dim: image.x x image.y
+        R, G = self.__RG_Mask(image)
+
+        # dim: 2
+        self.mean = numpy.array([numpy.mean(R[mask]), numpy.mean(G[mask])])
+
+        # get the covariance
+        R_minus_mean = R[mask] - self.mean[0]
+        G_minus_mean = G[mask] - self.mean[1]
+
+        samples = numpy.vstack((R_minus_mean, G_minus_mean))
+        samples = samples.T
+
+        cov = sum([numpy.outer(s, s) for s in samples])  # dim: 2x2
+
+        self.covariance = cov / float(samples.shape[0] - 1)
+
+        # store the inverse covariance matrix (no need to recompute)
+        if numpy.linalg.det(self.covariance) != 0:
+            self.covariance_inverse = numpy.linalg.inv(self.covariance)
+        else:
+            self.covariance_inverse = numpy.zeros_like(self.covariance)
+
+    def get_skin_mask(self, image, threshold=0.5):
+        """
+        This function computes the probability of skin-color for each pixel in the image.
+        Parameters
+        ----------
+        image: numpy.ndarray
+            The face image.
+        threshold: float: 0->1
+            The threshold on the skin color probability. Defaults to 0.5.
+        Returns
+        -------
+        skin_mask: numpy.ndarray
+            The mask where skin color pixels are labeled as True.
+        """
+
+        # get the image in rg colorspace
+        R, G = self.__RG_Mask(image, 'float64')
+
+        # compute the skin probability map
+        R_minus_mean = R - self.mean[0]
+        G_minus_mean = G - self.mean[1]
+
+        n = R.shape[0] * R.shape[1]
+        V = numpy.dstack((R_minus_mean, G_minus_mean))  # dim: image.x x image.y
+        V = V.reshape((n, 2))                           # dim: nx2
+
+        probs = [numpy.dot(k, numpy.dot(self.covariance_inverse, k)) for k in V]
+        probs = numpy.array(probs).reshape(R.shape)  # dim: image.x x image.y
+
+        skin_map = numpy.exp(-0.5 * probs)  # dim: image.x x image.y
+
+        return skin_map > threshold
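A minimal sketch of the filter, assuming `face` is an RGB face crop as an H×W×3 numpy array (the random image merely stands in for a real crop):

    import numpy
    from SkinColorFilter import SkinColorFilter

    face = numpy.random.randint(0, 255, (128, 128, 3), dtype=numpy.uint8)

    sf = SkinColorFilter()
    sf.estimate_gaussian_parameters(face)        # fit mean/covariance in rg space
    mask = sf.get_skin_mask(face, threshold=0.5)
    print(mask.shape, mask.dtype)                # (128, 128) bool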
SkinDetect.py ADDED
@@ -0,0 +1,120 @@
+import numpy as np
+from scipy import signal
+import sys
+import cv2
+from pyVHR.utils.HDI import hdi, hdi2
+
+class SkinDetect():
+
+    def __init__(self, strength=0.2):
+        self.description = 'Skin Detection Module'
+        self.strength = strength
+        self.stats_computed = False
+
+    def compute_stats(self, face):
+
+        assert (self.strength > 0 and self.strength < 1), "'strength' parameter must have values in (0,1)"
+
+        faceColor = cv2.cvtColor(face, cv2.COLOR_RGB2HSV)
+        h = faceColor[:, :, 0].reshape(-1, 1)
+        s = faceColor[:, :, 1].reshape(-1, 1)
+        v = faceColor[:, :, 2].reshape(-1, 1)
+
+        alpha = self.strength  # the higher, the stronger the masking
+
+        hpd_h, x_h, y_h, modes_h = hdi2(np.squeeze(h), alpha=alpha)
+        min_s, max_s = hdi(np.squeeze(s), alpha=alpha)
+        min_v, max_v = hdi(np.squeeze(v), alpha=alpha)
+
+        if len(hpd_h) > 1:
+
+            self.multiple_modes = True
+
+            if len(hpd_h) > 2:
+                print('WARNING!! Found more than 2 HDIs in the Hue channel empirical distribution... Considering only 2')
+                from scipy.spatial.distance import pdist, squareform
+                m = np.array(modes_h).reshape(-1, 1)
+                d = squareform(pdist(m))
+                maxij = np.where(d == d.max())[0]
+                i = maxij[0]
+                j = maxij[1]
+            else:
+                i = 0
+                j = 1
+
+            min_h1 = hpd_h[i][0]
+            max_h1 = hpd_h[i][1]
+            min_h2 = hpd_h[j][0]
+            max_h2 = hpd_h[j][1]
+
+            self.lower1 = np.array([min_h1, min_s, min_v], dtype="uint8")
+            self.upper1 = np.array([max_h1, max_s, max_v], dtype="uint8")
+            self.lower2 = np.array([min_h2, min_s, min_v], dtype="uint8")
+            self.upper2 = np.array([max_h2, max_s, max_v], dtype="uint8")
+
+        elif len(hpd_h) == 1:
+
+            self.multiple_modes = False
+
+            min_h = hpd_h[0][0]
+            max_h = hpd_h[0][1]
+
+            self.lower = np.array([min_h, min_s, min_v], dtype="uint8")
+            self.upper = np.array([max_h, max_s, max_v], dtype="uint8")
+
+        self.stats_computed = True
+
+
+    def get_skin(self, face, filt_kern_size=7, verbose=False, plot=False):
+
+        if not self.stats_computed:
+            raise ValueError("ERROR! You must compute stats at least once")
+
+        faceColor = cv2.cvtColor(face, cv2.COLOR_RGB2HSV)
+
+        if self.multiple_modes:
+            if verbose:
+                print('\nLower1: ' + str(self.lower1))
+                print('Upper1: ' + str(self.upper1))
+                print('\nLower2: ' + str(self.lower2))
+                print('Upper2: ' + str(self.upper2) + '\n')
+
+            skinMask1 = cv2.inRange(faceColor, self.lower1, self.upper1)
+            skinMask2 = cv2.inRange(faceColor, self.lower2, self.upper2)
+            skinMask = np.logical_or(skinMask1, skinMask2).astype(np.uint8) * 255
+
+        else:
+
+            if verbose:
+                # fixed: previously referenced undefined locals 'lower'/'upper'
+                print('\nLower: ' + str(self.lower))
+                print('Upper: ' + str(self.upper) + '\n')
+
+            skinMask = cv2.inRange(faceColor, self.lower, self.upper)
+
+        if filt_kern_size > 0:
+            skinMask = signal.medfilt2d(skinMask, kernel_size=filt_kern_size)
+        skinFace = cv2.bitwise_and(face, face, mask=skinMask)
+
+        if plot:
+
+            h = faceColor[:, :, 0].reshape(-1, 1)
+            s = faceColor[:, :, 1].reshape(-1, 1)
+            v = faceColor[:, :, 2].reshape(-1, 1)
+
+            import matplotlib.pyplot as plt
+            plt.figure()
+            plt.subplot(2, 2, 1)
+            plt.hist(h, 20)
+            plt.title('Hue')
+            plt.subplot(2, 2, 2)
+            plt.hist(s, 20)
+            plt.title('Saturation')
+            plt.subplot(2, 2, 3)
+            plt.hist(v, 20)
+            plt.title('Value')
+            plt.subplot(2, 2, 4)
+            plt.imshow(skinFace)
+            plt.title('Masked Face')
+            plt.show()
+
+        return skinFace
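A minimal sketch, under the same stand-in face crop assumption as above:

    import numpy as np
    from SkinDetect import SkinDetect

    face = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)

    sd = SkinDetect(strength=0.2)
    sd.compute_stats(face)                       # fit HSV ranges via the HDI estimators
    skin = sd.get_skin(face, filt_kern_size=7)   # non-skin pixels are zeroed
    print(skin.shape)                            # (128, 128, 3)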
__init__.py ADDED
@@ -0,0 +1 @@
+# __init__.py
base.py ADDED
@@ -0,0 +1,377 @@
+import numpy as np
+import ast
+import plotly.graph_objects as go
+from scipy.signal import medfilt, detrend
+from abc import ABCMeta, abstractmethod
+from importlib import import_module
+from ..signals.bvp import BVPsignal
+from ..utils import filters, printutils
+from ..utils import detrending
+
+def methodFactory(methodName, *args, **kwargs):
+    try:
+        moduleName = methodName.lower()
+        className = methodName.upper()
+        methodModule = import_module('.methods.' + moduleName, package='pyVHR')
+        classOBJ = getattr(methodModule, className)
+        obj = classOBJ(**kwargs)
+
+    except (AttributeError, ModuleNotFoundError):
+        raise ImportError('{} is not part of the pyVHR method collection!'.format(methodName))
+
+    return obj
+
+class VHRMethod(metaclass=ABCMeta):
+    """
+    Manage VHR approaches (parent class for new approaches)
+    """
+
+    def __init__(self, **kwargs):
+        self.video = kwargs['video']
+        self.verb = kwargs['verb']
+
+    @abstractmethod
+    def apply(self, X):
+        pass
+
+    def runOffline(self, **kwargs):
+
+        # -- parse params
+        startTime, endTime, winSize, timeStep, zeroMeanSTDnorm, BPfilter, minHz, maxHz, detrFilter, \
+            detrMethod, detrLambda = self.__readparams(**kwargs)
+
+        fs = self.video.frameRate
+
+        # -- check times
+        if endTime > self.video.duration:
+            endTime = self.video.duration
+        assert startTime <= endTime, "Time interval error!"
+        assert timeStep > 0, "Time step must be positive!"
+        assert winSize < (endTime - startTime), "Winsize too big!"
+
+        # -- verbose prints
+        if '1' in str(self.verb):
+            self.__verbose(startTime, endTime, winSize)
+
+        if self.video.doEVM is True:
+            self.video.applyEVM()
+        else:
+            self.video.processedFaces = self.video.faces
+
+        timeSteps = np.arange(startTime, endTime, timeStep)
+        T = startTime  # times where bpm are estimated
+        RADIUS = winSize / 2
+
+        bpmES = []    # bpm estimates
+        timesES = []  # times of bpm estimates
+
+        # -- loop on video signal chunks
+        startFrame = int(T * self.video.frameRate)
+        count = 0
+        while T <= endTime:
+            endFrame = np.min([self.video.numFrames, int((T + RADIUS) * self.video.frameRate)])
+
+            # -- extract ROIs on the frame range
+            self.frameSubset = np.arange(startFrame, endFrame)
+
+            self.ROImask = kwargs['ROImask']
+
+            # -- type of signal extractor
+            if self.ROImask == 'rect':
+                rects = ast.literal_eval(kwargs['rectCoords'])
+                self.rectCoords = []
+                for x in rects:
+                    rect = []
+                    for y in x:
+                        rect.append(int(y))
+                    self.rectCoords.append(rect)
+                self.video.setMask(self.ROImask, rectCoords=self.rectCoords)
+            elif self.ROImask == 'skin_adapt':
+                self.video.setMask(self.ROImask, skinThresh_adapt=float(kwargs['skinAdapt']))
+            elif self.ROImask == 'skin_fix':
+                threshs = ast.literal_eval(kwargs['skinFix'])
+                self.threshSkinFix = [int(x) for x in threshs]
+                self.video.setMask(self.ROImask, skinThresh_fix=self.threshSkinFix)
+            else:
+                raise ValueError(self.ROImask + " : Unimplemented Signal Extractor!")
+
+            self.video.extractSignal(self.frameSubset, count)
+
+            # -- RGB computation
+            RGBsig = self.video.getMeanRGB()
+
+            # -- print RGB raw data
+            if '2' in str(self.verb):
+                printutils.multiplot(y=RGBsig, name=['ch B', 'ch R', 'ch G'], title='RGB raw data')
+
+            # -- RGBsig preprocessing
+            if zeroMeanSTDnorm:
+                RGBsig = filters.zeroMeanSTDnorm(RGBsig)
+            if detrFilter:
+                if detrMethod == 'tarvainen':
+                    # TODO: check the Tarvainen detrending
+                    RGBsig[0] = detrending.detrend(RGBsig[0], detrLambda)
+                    RGBsig[1] = detrending.detrend(RGBsig[1], detrLambda)
+                    RGBsig[2] = detrending.detrend(RGBsig[2], detrLambda)
+                else:
+                    RGBsig = detrend(RGBsig)
+            if BPfilter:
+                RGBsig = filters.BPfilter(RGBsig, minHz, maxHz, fs)
+
+            # -- print postprocessed data
+            if '2' in str(self.verb):
+                printutils.multiplot(y=RGBsig, name=['ch B', 'ch R', 'ch G'], title='RGB postprocessing')
+
+            # -- apply the selected method to estimate BVP
+            rPPG = self.apply(RGBsig)
+
+            # BVP postprocessing
+            startTime = np.max([0, T - winSize / self.video.frameRate])
+            bvpChunk = BVPsignal(rPPG, self.video.frameRate, startTime, minHz, maxHz, self.verb)
+
+            # -- post processing: filtering
+
+            # TODO: decide whether to keep this
+            # bvpChunk.data = filters.BPfilter(bvpChunk.data, bvpChunk.minHz, bvpChunk.maxHz, bvpChunk.fs)
+
+            if '2' in str(self.verb):
+                bvpChunk.plot(title='BVP estimate by ' + self.methodName)
+
+            # -- estimate BPM by PSD
+            bvpChunk.PSD2BPM(chooseBest=True)
+
+            # -- save the estimate
+            bpmES.append(bvpChunk.bpm)
+            timesES.append(T)
+
+            # -- define the frame range for each time step
+            T += timeStep
+            startFrame = np.max([0, int((T - RADIUS) * self.video.frameRate)])
+
+            count += 1
+
+        # set final values
+        self.bpm = np.array(bpmES).T
+
+        # TODO: decide whether to apply the following filter
+        # self.bpm = self.bpm_time_filter(self.bpm, 3)
+        self.times = np.array(timesES)
+
+        return self.bpm, self.times
+
+    @staticmethod
+    def makeMethodObject(video, methodName='ICA'):
+        # note: legacy factory; assumes a 'methods' module is imported in scope
+        if methodName == 'CHROM':
+            m = methods.CHROM(video)
+        elif methodName == 'LGI':
+            m = methods.LGI(video)
+        elif methodName == 'SSR':
+            m = methods.SSR(video)
+        elif methodName == 'PBV':
+            m = methods.PBV(video)
+        elif methodName == 'POS':
+            m = methods.POS(video)
+        elif methodName == 'Green':
+            m = methods.Green(video)
+        elif methodName == 'PCA':
+            m = methods.PCA(video)
+        elif methodName == 'ICA':
+            m = methods.ICA(video)
+        else:
+            raise ValueError("Unknown method!")
+        return m
+
+    def __readparams(self, **kwargs):
+
+        # get params from kwargs or set defaults
+        if 'startTime' in kwargs:
+            startTime = float(kwargs['startTime'])
+        else:
+            startTime = 0
+        if 'endTime' in kwargs:
+            if kwargs['endTime'] == 'INF':
+                endTime = np.Inf
+            else:
+                endTime = float(kwargs['endTime'])
+        else:
+            endTime = np.Inf
+        if 'winSize' in kwargs:
+            winSize = int(kwargs['winSize'])
+        else:
+            winSize = 5
+        if 'timeStep' in kwargs:
+            timeStep = float(kwargs['timeStep'])
+        else:
+            timeStep = 1
+        if 'zeroMeanSTDnorm' in kwargs:
+            zeroMeanSTDnorm = int(kwargs['zeroMeanSTDnorm'])
+        else:
+            zeroMeanSTDnorm = 0
+        if 'BPfilter' in kwargs:
+            BPfilter = int(kwargs['BPfilter'])
+        else:
+            BPfilter = 1
+        if 'minHz' in kwargs:
+            minHz = float(kwargs['minHz'])
+        else:
+            minHz = .75
+        if 'maxHz' in kwargs:
+            maxHz = float(kwargs['maxHz'])
+        else:
+            maxHz = 4.
+        # renamed from 'detrending' to avoid shadowing the imported module
+        if 'detrending' in kwargs:
+            detrFilter = int(kwargs['detrending'])
+        else:
+            detrFilter = 0
+        if detrFilter:
+            if 'detrLambda' in kwargs:
+                detrLambda = kwargs['detrLambda']
+            else:
+                detrLambda = 10
+        else:
+            detrLambda = 10
+        if 'detrMethod' in kwargs:
+            detrMethod = kwargs['detrMethod']
+        else:
+            detrMethod = 'tarvainen'
+
+        return startTime, endTime, winSize, timeStep, zeroMeanSTDnorm, BPfilter, minHz, maxHz, \
+            detrFilter, detrMethod, detrLambda
+
+    def RMSEerror(self, bvpGT):
+        """ RMSE: root mean squared error """
+
+        diff = self.__diff(bvpGT)
+        n, m = diff.shape  # n = num channels, m = bpm length
+        df = np.zeros(n)
+        for j in range(m):
+            for c in range(n):
+                df[c] += np.power(diff[c, j], 2)
+
+        # -- final RMSE
+        RMSE = np.sqrt(df / m)
+        return RMSE
+
+    def MAEerror(self, bvpGT):
+        """ MAE: mean absolute error """
+
+        diff = self.__diff(bvpGT)
+        n, m = diff.shape  # n = num channels, m = bpm length
+        df = np.sum(np.abs(diff), axis=1)
+
+        # -- final MAE
+        MAE = df / m
+        return MAE
+
+    def MAXError(self, bvpGT):
+        """ MAX: maximum absolute error """
+
+        diff = self.__diff(bvpGT)
+        n, m = diff.shape  # n = num channels, m = bpm length
+        df = np.max(np.abs(diff), axis=1)
+
+        # -- final MAX
+        MAX = df
+        return MAX
+
+    def PearsonCorr(self, bvpGT):
+        from scipy import stats
+
+        diff = self.__diff(bvpGT)
+        bpmES = self.bpm
+        n, m = diff.shape  # n = num channels, m = bpm length
+        CC = np.zeros(n)
+        for c in range(n):
+            # -- corr
+            r, p = stats.pearsonr(diff[c, :] + bpmES[c, :], bpmES[c, :])
+            CC[c] = r
+        return CC
+
+    def printErrors(self, bvpGT):
+        RMSE = self.RMSEerror(bvpGT)
+        MAE = self.MAEerror(bvpGT)
+        CC = self.PearsonCorr(bvpGT)
+        print('\nErrors:')
+        print('  RMSE: ' + str(RMSE))
+        print('  MAE : ' + str(MAE))
+        print('  CC  : ' + str(CC))
+
+    def displayError(self, bvpGT):
+        bpmGT = bvpGT.bpm
+        timesGT = bvpGT.times
+        bpmES = self.bpm
+        timesES = self.times
+        diff = self.__diff(bvpGT)
+        n, m = diff.shape  # n = num channels, m = bpm length
+        df = np.abs(diff)
+        dfMean = np.around(np.mean(df, axis=1), 1)
+
+        # -- plot errors
+        fig = go.Figure()
+        name = 'Ch 1 (µ = ' + str(dfMean[0]) + ' )'
+        fig.add_trace(go.Scatter(x=timesES, y=df[0, :], name=name, mode='lines+markers'))
+        if n > 1:
+            name = 'Ch 2 (µ = ' + str(dfMean[1]) + ' )'
+            fig.add_trace(go.Scatter(x=timesES, y=df[1, :], name=name, mode='lines+markers'))
+            name = 'Ch 3 (µ = ' + str(dfMean[2]) + ' )'
+            fig.add_trace(go.Scatter(x=timesES, y=df[2, :], name=name, mode='lines+markers'))
+        fig.update_layout(xaxis_title='Times (sec)', yaxis_title='MAE', showlegend=True)
+        fig.show()
+
+        # -- plot bpm GT and ES
+        fig = go.Figure()
+        GTmean = np.around(np.mean(bpmGT), 1)
+        name = 'GT (µ = ' + str(GTmean) + ' )'
+        fig.add_trace(go.Scatter(x=timesGT, y=bpmGT, name=name, mode='lines+markers'))
+        ESmean = np.around(np.mean(bpmES[0, :]), 1)
+        name = 'ES1 (µ = ' + str(ESmean) + ' )'
+        fig.add_trace(go.Scatter(x=timesES, y=bpmES[0, :], name=name, mode='lines+markers'))
+        if n > 1:
+            ESmean = np.around(np.mean(bpmES[1, :]), 1)
+            name = 'ES2 (µ = ' + str(ESmean) + ' )'
+            fig.add_trace(go.Scatter(x=timesES, y=bpmES[1, :], name=name, mode='lines+markers'))
+            ESmean = np.around(np.mean(bpmES[2, :]), 1)
+            name = 'ES3 (µ = ' + str(ESmean) + ' )'
+            fig.add_trace(go.Scatter(x=timesES, y=bpmES[2, :], name=name, mode='lines+markers'))
+
+        fig.update_layout(xaxis_title='Times (sec)', yaxis_title='BPM', showlegend=True)
+        fig.show()
+
+    def __diff(self, bvpGT):
+        bpmGT = bvpGT.bpm
+        timesGT = bvpGT.times
+        bpmES = self.bpm
+        timesES = self.times
+        n, m = bpmES.shape  # n = num channels, m = bpm length
+
+        diff = np.zeros((n, m))
+        for j in range(m):
+            t = timesES[j]
+            i = np.argmin(np.abs(t - timesGT))
+            for c in range(n):
+                diff[c, j] = bpmGT[i] - bpmES[c, j]
+        return diff
+
+    def bpm_time_filter(self, bpm, w_len):
+
+        n_sig = bpm.shape[0]
+        filtered_bpm = []
+
+        for s in range(n_sig):
+            x = bpm[s, :]
+            x = medfilt(x, w_len)
+            filtered_bpm.append(x)
+
+        filtered_bpm = np.vstack(filtered_bpm)
+
+        return filtered_bpm
+
+    def __verbose(self, startTime, endTime, winSize):
+        print("\n * %s params: start time = %.1f, end time = %.1f, winsize = %.1f (sec)"
+              % (self.methodName, startTime, endTime, winSize))
bvp.py ADDED
@@ -0,0 +1,344 @@
+import numpy as np
+from scipy.signal import find_peaks, stft, lfilter, butter, welch
+from plotly.subplots import make_subplots
+from plotly.colors import n_colors
+import plotly.graph_objects as go
+from scipy.interpolate import interp1d
+
+
+class BVPsignal:
+    """
+    Manage (multi-channel, row-wise) BVP signals
+    """
+    nFFT = 2048  # freq. resolution for STFTs
+    step = 1     # step in seconds
+
+    def __init__(self, data, fs, startTime=0, minHz=0.75, maxHz=4., verb=False):
+        if len(data.shape) == 1:
+            self.data = data.reshape(1, -1)  # 2D array, row-wise
+        else:
+            self.data = data
+        self.numChls = self.data.shape[0]  # num channels
+        self.fs = fs                       # sample rate
+        self.startTime = startTime
+        self.verb = verb
+        self.minHz = minHz
+        self.maxHz = maxHz
+
+    def getChunk(self, startTime, winsize=None, numSample=None):
+        # note: added the missing 'self' parameter
+
+        assert startTime >= self.startTime, "Start time error!"
+
+        N = self.data.shape[1]
+        fs = self.fs
+        Nstart = int(fs * startTime)
+
+        # -- winsize > 0
+        if winsize:
+            stopTime = startTime + winsize
+            Nstop = np.min([int(fs * stopTime), N])
+
+        # -- numSample > 0
+        if numSample:
+            Nstop = np.min([numSample, N])
+
+        return self.data[0, Nstart:Nstop]
+
+    def hps(self, spect, d=3):
+        # harmonic product spectrum: multiply the spectrum by its
+        # downsampled (zero-padded) copy to emphasize the fundamental
+
+        if spect.ndim == 2:
+            n_win = spect.shape[1]
+            new_spect = np.zeros_like(spect)
+            for w in range(n_win):
+                curr_w = spect[:, w]
+                w_down_z = np.zeros_like(curr_w)
+                w_down = curr_w[::d]
+                w_down_z[0:len(w_down)] = w_down
+                w_hps = np.multiply(curr_w, w_down_z)
+                new_spect[:, w] = w_hps
+            return new_spect
+
+        elif spect.ndim == 1:
+            s_down_z = np.zeros_like(spect)
+            s_down = spect[::d]
+            s_down_z[0:len(s_down)] = s_down
+            w_hps = np.multiply(spect, s_down_z)
+            return w_hps
+
+        else:
+            raise ValueError("Wrong dimensionality of the spectrogram for the HPS")
+
+    def spectrogram(self, winsize=5, use_hps=False):
+        """
+        Compute the BVP signal spectrogram restricted to the
+        band 45-240 BPM by using winsize (in sec) samples.
+        """
+
+        # -- spect. Z is 3-dim: Z[#chnls, #freqs, #times]
+        F, T, Z = stft(self.data,
+                       self.fs,
+                       nperseg=self.fs * winsize,
+                       noverlap=self.fs * (winsize - self.step),
+                       boundary='even',
+                       nfft=self.nFFT)
+        Z = np.squeeze(Z, axis=0)
+
+        # -- freq subband (0.75 Hz - 4.0 Hz)
+        minHz = 0.75
+        maxHz = 4.0
+        band = np.argwhere((F > minHz) & (F < maxHz)).flatten()
+        self.spect = np.abs(Z[band, :])  # spectrum magnitude
+        self.freqs = 60 * F[band]        # spectrum freq in bpm
+        self.times = T                   # spectrum times
+
+        if use_hps:
+            spect_hps = self.hps(self.spect)
+            # -- BPM estimate by spectrum
+            self.bpm = self.freqs[np.argmax(spect_hps, axis=0)]
+        else:
+            # -- BPM estimate by spectrum
+            self.bpm = self.freqs[np.argmax(self.spect, axis=0)]
+
+    def getBPM(self, winsize=5):
+        self.spectrogram(winsize, use_hps=False)
+        return self.bpm, self.times
+
+    def PSD2BPM(self, chooseBest=True, use_hps=False):
+        """
+        Compute the power spectral density using Welch's method and
+        estimate BPMs from video frames
+        """
+
+        # -- shorter segments for less than 256 samples
+        c, n = self.data.shape
+        if n < 256:
+            seglength = n
+            overlap = int(0.8 * n)  # fixed overlapping
+        else:
+            seglength = 256
+            overlap = 200
+
+        # -- periodogram by Welch
+        F, P = welch(self.data, nperseg=seglength, noverlap=overlap, window='hamming', fs=self.fs, nfft=self.nFFT)
+
+        # -- freq subband (0.75 Hz - 4.0 Hz)
+        band = np.argwhere((F > self.minHz) & (F < self.maxHz)).flatten()
+        self.Pfreqs = 60 * F[band]
+        self.Power = P[:, band]
+
+        # -- if c = 3, choose the channel with the best SNR
+        if chooseBest:
+            winner = 0
+            lobes = self.PDSrippleAnalysis(ch=0)
+            SNR = lobes[-1] / lobes[-2]
+            if c == 3:
+                lobes = self.PDSrippleAnalysis(ch=1)
+                SNR1 = lobes[-1] / lobes[-2]
+                if SNR1 > SNR:
+                    SNR = SNR1
+                    winner = 1
+                lobes = self.PDSrippleAnalysis(ch=2)
+                SNR1 = lobes[-1] / lobes[-2]
+                if SNR1 > SNR:
+                    SNR = SNR1
+                    winner = 2
+            self.Power = self.Power[winner].reshape(1, -1)
+
+        # TODO: remove?
+        if use_hps:
+            p = self.Power[0]
+            phps = self.hps(p)
+            '''import matplotlib.pyplot as plt
+            plt.plot(p)
+            plt.figure()
+            plt.plot(phps)
+            plt.show()'''
+            Pmax = np.argmax(phps)                    # power max
+            self.bpm = np.array([self.Pfreqs[Pmax]])  # freq max
+
+        else:
+            # -- BPM estimate by PSD
+            Pmax = np.argmax(self.Power, axis=1)  # power max
+            self.bpm = self.Pfreqs[Pmax]          # freq max
+
+        if '3' in str(self.verb):
+            lobes = self.PDSrippleAnalysis()
+            self.displayPSD(lobe1=lobes[-1], lobe2=lobes[-2])
+
+    def autocorr(self):
+        import matplotlib.pyplot as plt  # needed by the plots below
+        from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
+
+        # TODO: handle all channels
+        x = self.data[0, :]
+        plot_acf(x)
+        plt.show()
+
+        plot_pacf(x)
+        plt.show()
+
+    def displaySpectrum(self, display=False, dims=3):
+        """Show the spectrogram of the BVP signal"""
+
+        # -- check if bpm exists
+        try:
+            bpm = self.bpm
+        except AttributeError:
+            self.spectrogram()
+            bpm = self.bpm
+
+        t = self.times
+        f = self.freqs
+        S = self.spect
+
+        fig = go.Figure()
+        fig.add_trace(go.Heatmap(z=S, x=t, y=f, colorscale="viridis"))
+        fig.add_trace(go.Scatter(x=t, y=bpm, name='Frequency Domain', line=dict(color='red', width=2)))
+
+        fig.update_layout(autosize=False, height=420, showlegend=True,
+                          title='Spectrogram of the BVP signal',
+                          xaxis_title='Time (sec)',
+                          yaxis_title='BPM (60*Hz)',
+                          legend=dict(
+                              x=0,
+                              y=1,
+                              traceorder="normal",
+                              font=dict(
+                                  family="sans-serif",
+                                  size=12,
+                                  color="black"),
+                              bgcolor="LightSteelBlue",
+                              bordercolor="Black",
+                              borderwidth=2)
+                          )
+
+        fig.show()
+
+    def findPeaks(self, distance=None, height=None):
+
+        # -- take the first channel
+        x = self.data[0].flatten()
+
+        if distance is None:
+            distance = self.fs / 2
+        if height is None:
+            height = np.mean(x)
+
+        # -- find peaks with the specified params
+        self.peaks, _ = find_peaks(x, distance=distance, height=height)
+
+        self.peaksTimes = self.peaks / self.fs
+        self.bpmPEAKS = 60.0 / np.diff(self.peaksTimes)
+
+    def plotBPMPeaks(self, height=None, width=None):
+        """
+        Plot the BVP signal and peak marks
+        """
+
+        # -- find peaks
+        try:
+            peaks = self.peaks
+        except AttributeError:
+            self.findPeaks()
+            peaks = self.peaks
+
+        # -- signals
+        y = self.data[0]
+        n = y.shape[0]
+        startTime = self.startTime
+        stopTime = startTime + n / self.fs
+        x = np.linspace(startTime, stopTime, num=n, endpoint=False)
+
+        fig = go.Figure()
+        fig.add_trace(go.Scatter(x=x, y=y, name="BVP"))
+        fig.add_trace(go.Scatter(x=x[peaks], y=y[peaks], mode='markers', name="Peaks"))
+
+        if not height:
+            height = 400
+        if not width:
+            width = 800
+
+        fig.update_layout(height=height, width=width, title="BVP signal + peaks",
+                          font=dict(
+                              family="Courier New, monospace",
+                              size=14,
+                              color="#7f7f7f"))
+
+        fig.show()
+
+    def plot(self, title="BVP signal", height=400, width=800):
+        """
+        Plot the BVP signal (multiple channels)
+        """
+
+        # -- signals
+        y = self.data
+        c, n = y.shape
+        startTime = self.startTime
+        stopTime = startTime + n / self.fs
+        x = np.linspace(startTime, stopTime, num=n, endpoint=False)
+
+        fig = go.Figure()
+
+        for i in range(c):
+            name = "BVP " + str(i)
+            fig.add_trace(go.Scatter(x=x, y=y[i], name=name))
+
+        fig.update_layout(height=height, width=width, title=title,
+                          font=dict(
+                              family="Courier New, monospace",
+                              size=14,
+                              color="#7f7f7f"))
+        fig.show()
+
+    def displayPSD(self, ch=0, lobe1=None, lobe2=None, GT=None):
+        """Show the periodogram(s) of the BVP signal for channel ch"""
+
+        f = self.Pfreqs
+        P = self.Power[ch]
+
+        fig = go.Figure()
+
+        fig.add_trace(go.Scatter(x=f, y=P, name='PSD'))
+        fig.update_layout(autosize=False, width=500, height=400)
+
+        if lobe1 is not None and lobe2 is not None:
+            L1 = lobe1
+            L2 = lobe2
+            # Add horiz. lobe peak lines
+            fig.add_shape(type="line", x0=f[0], y0=L1, x1=f[-1], y1=L1,
+                          line=dict(color="LightSeaGreen", width=2, dash="dashdot"))
+            fig.add_shape(type="line", x0=f[0], y0=L2, x1=f[-1], y1=L2,
+                          line=dict(color="SeaGreen", width=2, dash="dashdot"))
+            tit = 'SNR = ' + str(np.round(L1 / L2, 2))
+            fig.update_layout(title=tit)
+
+        if GT is not None:
+            # Add vertical GT line
+            fig.add_shape(type="line", x0=GT, y0=0, x1=GT, y1=np.max(P),
+                          line=dict(color="DarkGray", width=2, dash="dash"))
+
+        fig.show()
+
+    def PDSrippleAnalysis(self, ch=0):
+        # -- ripple analysis: collect the local maxima (lobes) of the PSD
+
+        P = self.Power[ch].flatten()
+        dP = np.gradient(P)
+        n = len(dP)
+        I = []
+        i = 0
+        while i < n:
+            m = 0
+            # -- positive gradient
+            while (i < n) and (dP[i] > 0):
+                m = max([m, P[i]])
+                i += 1
+            I.append(m)
+            # -- skip negative gradient
+            while (i < n) and (dP[i] < 0):
+                i += 1
+        lobes = np.sort(I)
+        if len(lobes) < 2:
+            # pad so that lobes[-2] always exists (was a ragged np.array call)
+            lobes = np.concatenate(([0.0], lobes))
+
+        return lobes
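A minimal sketch of the PSD-based estimator on a synthetic 72-BPM sinusoid:

    import numpy as np
    from pyVHR.signals.bvp import BVPsignal

    fs = 30.0                                   # e.g., a video frame rate
    t = np.arange(0, 10, 1 / fs)
    sig = np.sin(2 * np.pi * 1.2 * t)           # 1.2 Hz -> 72 BPM

    bvp = BVPsignal(sig, fs)
    bvp.PSD2BPM(chooseBest=True)                # Welch PSD -> BPM estimate
    print(bvp.bpm)                              # ~[72.]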
chrom.py ADDED
@@ -0,0 +1,33 @@
+from scipy import signal
+import numpy as np
+from .base import VHRMethod
+
+class CHROM(VHRMethod):
+    """ This method is described in the following paper:
+        "Remote heart rate variability for emotional state monitoring"
+        by Y. Benezeth, P. Li, R. Macwan, K. Nakamura, R. Gomez, F. Yang
+    """
+    methodName = 'CHROM'
+
+    def __init__(self, **kwargs):
+        super(CHROM, self).__init__(**kwargs)
+
+    def apply(self, X):
+
+        # self.RGB = self.getMeanRGB()
+        # X = signal.detrend(self.RGB.T)
+
+        # calculation of the new X and Y components
+        Xcomp = 3 * X[0] - 2 * X[1]
+        Ycomp = (1.5 * X[0]) + X[1] - (1.5 * X[2])
+
+        # standard deviations
+        sX = np.std(Xcomp)
+        sY = np.std(Ycomp)
+
+        alpha = sX / sY
+
+        # -- rPPG signal
+        bvp = Xcomp - alpha * Ycomp
+
+        return bvp
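In equation form, with $R$, $G$, $B$ the rows $X[0]$, $X[1]$, $X[2]$, the code above computes

$$X_s = 3R - 2G, \qquad Y_s = 1.5R + G - 1.5B, \qquad \alpha = \frac{\sigma(X_s)}{\sigma(Y_s)}, \qquad S = X_s - \alpha\, Y_s,$$

where $S$ is the returned rPPG (BVP) estimate.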
cohface.py ADDED
@@ -0,0 +1,47 @@
+import h5py
+import numpy as np
+from pyVHR.datasets.dataset import Dataset
+from pyVHR.signals.bvp import BVPsignal
+
+class COHFACE(Dataset):
+    """
+    COHFACE dataset structure:
+    -----------------
+    datasetDIR/
+    |
+    |-- subjDIR_1/
+    |   |-- vidDIR1/
+    |       |-- videoFile1.avi
+    |       |-- ...
+    |       |-- videoFileN.avi
+    |...
+    |   |-- vidDIRM/
+    |       |-- videoFile1.avi
+    |       |-- ...
+    |       |-- videoFileM.avi
+    |...
+    |-- subjDIR_n/
+    |...
+    """
+    name = 'COHFACE'
+    signalGT = 'BVP'          # GT signal type
+    numLevels = 2             # depth of the filesystem collecting video and BVP files
+    numSubjects = 40          # number of subjects
+    video_EXT = 'avi'         # extension of the video files
+    frameRate = 20            # video frame rate
+    VIDEO_SUBSTRING = 'data'  # substring contained in the filename
+    SIG_EXT = 'hdf5'          # extension of the BVP files
+    SIG_SUBSTRING = 'data'    # substring contained in the filename
+    SIG_SampleRate = 256      # sample rate of the BVP files
+    skinThresh = [40, 60]     # thresholds for skin detection
+
+    def readSigfile(self, filename):
+        """ Load the BVP signal.
+        Must return a 1-dim (row array) signal.
+        """
+
+        f = h5py.File(filename, 'r')
+        data = np.array(f['pulse'])        # load the signal
+        data = data.reshape(1, len(data))  # one-dimensional (row) signal
+
+        return BVPsignal(data, self.SIG_SampleRate)
dataset.py ADDED
@@ -0,0 +1,66 @@
+from abc import ABCMeta, abstractmethod
+import os
+from importlib import import_module
+
+# def datasetFactory(datasetName, *args, **kwargs):
+def datasetFactory(datasetName, videodataDIR, BVPdataDIR):
+    try:
+        moduleName = datasetName.lower()
+        className = datasetName.upper()
+        datasetModule = import_module('pyVHR.datasets.' + moduleName)  # , package='pyVHR')
+        classOBJ = getattr(datasetModule, className)
+        # obj = classOBJ(*args, **kwargs)
+        obj = classOBJ(videodataDIR, BVPdataDIR)
+
+    except (AttributeError, ModuleNotFoundError):
+        raise ImportError('{} is not part of the pyVHR dataset collection!'.format(datasetName))
+
+    return obj
+
+class Dataset(metaclass=ABCMeta):
+    """
+    Manage datasets (parent class for new datasets)
+    """
+    def __init__(self, videodataDIR=None, BVPdataDIR=None):
+        # -- load filenames
+        self.videoFilenames = []  # list of all video filenames
+        self.sigFilenames = []    # list of all Sig filenames
+        self.numVideos = 0        # num of videos in the dataset
+        self.videodataDIR = videodataDIR
+        self.BVPdataDIR = BVPdataDIR
+        self.loadFilenames()
+
+    def loadFilenames(self):
+        """Load dataset file names: define vars videoFilenames and BVPFilenames"""
+
+        # -- loop over the dir structure of the dataset getting filenames
+        for root, dirs, files in os.walk(self.videodataDIR):
+            for f in files:
+                filename = os.path.join(root, f)
+                path, name = os.path.split(filename)
+
+                # -- select video
+                if filename.endswith(self.video_EXT) and (name.find(self.VIDEO_SUBSTRING) >= 0):
+                    self.videoFilenames.append(filename)
+
+                # -- select signal
+                if filename.endswith(self.SIG_EXT) and (name.find(self.SIG_SUBSTRING) >= 0):
+                    self.sigFilenames.append(filename)
+
+        # -- number of videos
+        self.numVideos = len(self.videoFilenames)
+
+    def getVideoFilename(self, videoIdx=0):
+        """Get video filename given the progressive index"""
+        return self.videoFilenames[videoIdx]
+
+    def getSigFilename(self, videoIdx=0):
+        """Get Signal filename given the progressive index"""
+        return self.sigFilenames[videoIdx]
+
+    @abstractmethod
+    def readSigfile(self, filename):
+        """ Load signal from file.
+        Return a BVPsignal/ECGsignal object.
+        """
+        pass
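A minimal sketch of the factory (directory paths are placeholders):

    from pyVHR.datasets.dataset import datasetFactory

    dataset = datasetFactory('COHFACE',
                             videodataDIR='/path/to/cohface',
                             BVPdataDIR='/path/to/cohface')
    print(dataset.numVideos)
    video_fn = dataset.getVideoFilename(0)
    bvp_gt = dataset.readSigfile(dataset.getSigFilename(0))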
default_test.cfg ADDED
@@ -0,0 +1,103 @@
+# default_test.cfg - default test configuration file for the TestSuite class
+
+## Default parameters
+#
+# winsize = Duration of the time window to process the video (in seconds)
+# winsizeGT = Duration of the time window to process the ground truth signal (in seconds)
+# timeStep = Time step of the estimation (in seconds)
+# methods = A list of methods to test (['CHROM','Green','ICA','LGI','PBV','PCA','POS','SSR'])
+#
+## Video signal preprocessing
+#
+# zeroMeanSTDnorm = Apply zero mean and unit standard deviation (0/1)
+# detrending = Apply detrending algorithm (0/1)
+# detrMethod = Detrending algorithm (tarvainen/scipy)
+# detLambda = If detrending = 1, regularization parameter of the detrending algorithm
+# BPfilter = Apply band-pass filtering (0/1)
+# minHz = If BPfilter = 1, the lower cut-off frequency (in hertz)
+# maxHz = If BPfilter = 1, the upper cut-off frequency (in hertz)
+
+[DEFAULT]
+winSize = 5
+winSizeGT = 5
+timeStep = 1
+methods = ['POS','CHROM']
+zeroMeanSTDnorm = 0
+detrending = 0
+detLambda = 10
+BPfilter = 1
+minHz = 0.75
+maxHz = 4.0
+
+## Video signal
+#
+# dataset = Name of the dataset to test ('PURE', 'UBFC1', 'UBFC2', 'LGI-PPGI', 'COHFACE', 'MAHNOB')
+# videoIdx = A list of IDs referred to the videos to test (e.g. [0,1,2,...])
+#            or the string 'all' to test on the whole database
+# detector = Method used for face detection (mtcnn, dlib, mtcnn_kalman)
+# extractor = Preferred library to read video files (opencv/skvideo)
+# startTime = Process video file from start time (in seconds)
+# endTime = Process video file until end time (in seconds). If < 0: process until (video length - endTime)
+
+[VIDEO]
+dataset = lgi_ppgi
+videodataDIR = ../sampleDataset/
+BVPdataDIR = ../sampleDataset/
+videoIdx = [0]
+detector = mtcnn
+extractor = skvideo
+startTime = 3
+endTime = -3
+ROImask = skin_fix
+skinFix = [40, 60]
+skinAdapt = 0.2
+rectCoords = [[0, 0, 150, 150]]
+evm = 0
+stat = mean
+
+## Method-specific configurations
+
+[ICA]
+zeroMeanSTDnorm = 1
+detrending = 0
+BPfilter = 1
+ICAmethod = jade
+
+[PCA]
+zeroMeanSTDnorm = 0
+detrending = 0
+BPfilter = 1
+minHz = 0.75
+maxHz = 4.0
+
+[SSR]
+zeroMeanSTDnorm = 0
+detrending = 0
+BPfilter = 0
+
+[CHROM]
+zeroMeanSTDnorm = 0
+detrending = 1
+detrMethod = scipy
+BPfilter = 0
+
+[POS]
+zeroMeanSTDnorm = 0
+detrending = 0
+BPfilter = 0
+
+[LGI]
+zeroMeanSTDnorm = 0
+detrending = 0
+BPfilter = 0
+
+[PBV]
+zeroMeanSTDnorm = 0
+detrending = 0
+BPfilter = 0
+
+[GREEN]
+zeroMeanSTDnorm = 0
+detrending = 1
+detrMethod = scipy
+BPfilter = 0
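A minimal sketch of reading a file in this format with the standard-library configparser (note that keys in [DEFAULT] propagate to every section):

    import ast
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('default_test.cfg')

    methods = ast.literal_eval(cfg['DEFAULT']['methods'])  # ['POS', 'CHROM']
    winSize = cfg['DEFAULT'].getint('winSize')
    dataset = cfg['VIDEO']['dataset']                      # 'lgi_ppgi'
    print(methods, winSize, dataset)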
detrending.py ADDED
@@ -0,0 +1,13 @@
+import numpy as np
+import scipy.sparse
+
+def detrend(X, detLambda=10):
+    # Smoothness priors approach as in the appendix of the paper:
+    # "An advanced detrending method with application to HRV analysis"
+    # by Tarvainen, Ranta-aho and Karjalainen
+    t = X.shape[0]
+    l = t / detLambda  # lambda
+    I = np.identity(t)
+    D2 = scipy.sparse.diags([1, -2, 1], [0, 1, 2], shape=(t-2, t)).toarray()  # this works better than spdiags in python
+    detrendedX = (I - np.linalg.inv(I + l**2 * (np.transpose(D2).dot(D2)))).dot(X)
+    return detrendedX
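The function implements the smoothness-priors detrending of Tarvainen et al. (2002): for a signal $z$ of length $t$, with $D_2$ the second-order difference matrix and regularization parameter $\lambda$ (which this implementation sets to $t/\text{detLambda}$), the stationary (detrended) component is

$$\hat{z}_{stat} = \left(I - \left(I + \lambda^2 D_2^\top D_2\right)^{-1}\right) z.$$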
ecg.py ADDED
@@ -0,0 +1,64 @@
+import numpy as np
+from scipy.signal import find_peaks, stft, lfilter, butter, welch
+from plotly.subplots import make_subplots
+from plotly.colors import n_colors
+import plotly.graph_objects as go
+from biosppy.signals import ecg
+
+
+class ECGsignal:
+    """
+    Manage (multi-channel, row-wise) ECG signals
+    """
+    verb = False   # verbose (True)
+    nFFT = 4*4096  # freq. resolution for STFTs
+    step = 1       # step in seconds
+    minHz = .75    # 45 BPM - min freq.
+    maxHz = 4.     # 240 BPM - max freq.
+
+    def __init__(self, data, fs, startTime=0):
+        # reshape to a 2D row-wise array (added the missing else branch)
+        if len(data.shape) == 1:
+            self.data = data.reshape(1, -1)
+        else:
+            self.data = data
+        self.fs = fs  # sample rate
+        self.startTime = startTime
+
+    def getBPM(self, winsize=5):
+        """
+        Compute BPM from the ECG signal via the biosppy library
+        """
+        # TODO: handle all channels
+        data = self.data[0, :]
+        out = ecg.ecg(signal=data, sampling_rate=self.fs, show=False)
+        self.times = out['heart_rate_ts']
+        self.bpm = out['heart_rate']
+        self.peaksIdX = out['rpeaks']
+
+        return self.bpm, self.times
+
+    def autocorr(self):
+        import matplotlib.pyplot as plt  # needed by the plots below
+        from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
+
+        # TODO: handle all channels
+        x = self.data[0, :]
+        plot_acf(x)
+        plt.show()
+
+        plot_pacf(x)
+        plt.show()
+
+    def plot(self):
+        """
+        Plot the ECG signal (first channel only)
+        """
+        # TODO: handle all channels
+        data = self.data[0, :]
+        N = len(data)
+        times = np.linspace(self.startTime, N / self.fs, num=N, endpoint=False)
+
+        # -- plot the channel
+        fig = make_subplots(rows=1, cols=1)
+        fig.add_trace(go.Scatter(x=times, y=data, name='ECG'), row=1, col=1)
+        fig.add_trace(go.Scatter(x=self.peaksIdX / self.fs, y=data[self.peaksIdX], mode='markers', name='peaks'), row=1, col=1)
+        fig.update_layout(height=600, width=800)
+        fig.show()
elapse.py ADDED
@@ -0,0 +1,19 @@
+import time
+
+def tic(verb=True):
+    """ tic, like the Matlab function """
+
+    global tic_toc_time
+    tic_toc_time = time.time()
+    if verb:
+        print('start...')
+    return tic_toc_time
+
+def toc(verb=True):
+    """ toc, like the Matlab function """
+
+    global tic_toc_time
+    T1 = time.time() - tic_toc_time
+    if verb:
+        print('elapsed = ' + str(T1))
+    return T1
errors.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import numpy as np
import plotly.graph_objects as go
from pyVHR.signals.bvp import BVPsignal

def getErrors(bpmES, bpmGT, timesES, timesGT):
    RMSE = RMSEerror(bpmES, bpmGT, timesES, timesGT)
    MAE = MAEerror(bpmES, bpmGT, timesES, timesGT)
    MAX = MAXError(bpmES, bpmGT, timesES, timesGT)
    PCC = PearsonCorr(bpmES, bpmGT, timesES, timesGT)
    return RMSE, MAE, MAX, PCC

def RMSEerror(bpmES, bpmGT, timesES=None, timesGT=None):
    """ Root Mean Squared Error """

    diff = bpm_diff(bpmES, bpmGT, timesES, timesGT)
    n,m = diff.shape  # n = num channels, m = bpm length
    df = np.zeros(n)
    for j in range(m):
        for c in range(n):
            df[c] += np.power(diff[c,j], 2)

    # -- final RMSE
    RMSE = np.sqrt(df/m)
    return RMSE

def MAEerror(bpmES, bpmGT, timesES=None, timesGT=None):
    """ Mean Absolute Error """

    diff = bpm_diff(bpmES, bpmGT, timesES, timesGT)
    n,m = diff.shape  # n = num channels, m = bpm length
    df = np.sum(np.abs(diff), axis=1)

    # -- final MAE
    MAE = df/m
    return MAE

def MAXError(bpmES, bpmGT, timesES=None, timesGT=None):
    """ Maximum Absolute Error """

    diff = bpm_diff(bpmES, bpmGT, timesES, timesGT)
    n,m = diff.shape  # n = num channels, m = bpm length
    df = np.max(np.abs(diff), axis=1)

    # -- final MAX
    MAX = df
    return MAX

def PearsonCorr(bpmES, bpmGT, timesES=None, timesGT=None):
    from scipy import stats

    diff = bpm_diff(bpmES, bpmGT, timesES, timesGT)
    n,m = diff.shape  # n = num channels, m = bpm length
    CC = np.zeros(n)
    for c in range(n):
        # -- corr between GT (= diff + ES, resampled on the ES time grid) and ES
        r,p = stats.pearsonr(diff[c,:]+bpmES[c,:], bpmES[c,:])
        CC[c] = r
    return CC

def printErrors(RMSE, MAE, MAX, PCC):
    print("\n * Errors: RMSE = %.2f, MAE = %.2f, MAX = %.2f, PCC = %.2f" % (RMSE, MAE, MAX, PCC))

def displayErrors(bpmES, bpmGT, timesES=None, timesGT=None):

    diff = bpm_diff(bpmES, bpmGT, timesES, timesGT)
    n,m = diff.shape  # n = num channels, m = bpm length

    if (timesES is None) or (timesGT is None):
        timesES = np.arange(m)
        timesGT = timesES

    df = np.abs(diff)
    dfMean = np.around(np.mean(df, axis=1), 1)

    # -- plot errors
    fig = go.Figure()
    name = 'Ch 1 (µ = ' + str(dfMean[0]) + ' )'
    fig.add_trace(go.Scatter(x=timesES, y=df[0,:], name=name, mode='lines+markers'))
    if n > 1:
        name = 'Ch 2 (µ = ' + str(dfMean[1]) + ' )'
        fig.add_trace(go.Scatter(x=timesES, y=df[1,:], name=name, mode='lines+markers'))
        name = 'Ch 3 (µ = ' + str(dfMean[2]) + ' )'
        fig.add_trace(go.Scatter(x=timesES, y=df[2,:], name=name, mode='lines+markers'))
    fig.update_layout(xaxis_title='Times (sec)', yaxis_title='MAE', showlegend=True)
    fig.show()

    # -- plot bpm GT and ES
    fig = go.Figure()
    GTmean = np.around(np.mean(bpmGT), 1)
    name = 'GT (µ = ' + str(GTmean) + ' )'
    fig.add_trace(go.Scatter(x=timesGT, y=bpmGT, name=name, mode='lines+markers'))
    ESmean = np.around(np.mean(bpmES[0,:]), 1)
    name = 'ES1 (µ = ' + str(ESmean) + ' )'
    fig.add_trace(go.Scatter(x=timesES, y=bpmES[0,:], name=name, mode='lines+markers'))
    if n > 1:
        ESmean = np.around(np.mean(bpmES[1,:]), 1)
        name = 'ES2 (µ = ' + str(ESmean) + ' )'
        fig.add_trace(go.Scatter(x=timesES, y=bpmES[1,:], name=name, mode='lines+markers'))
        ESmean = np.around(np.mean(bpmES[2,:]), 1)
        name = 'ES3 (µ = ' + str(ESmean) + ' )'
        fig.add_trace(go.Scatter(x=timesES, y=bpmES[2,:], name=name, mode='lines+markers'))

    fig.update_layout(xaxis_title='Times (sec)', yaxis_title='BPM', showlegend=True)
    fig.show()


def bpm_diff(bpmES, bpmGT, timesES=None, timesGT=None):
    """ Per-window difference bpmGT - bpmES, matching each ES time to the nearest GT time """
    n,m = bpmES.shape  # n = num channels, m = bpm length

    if (timesES is None) or (timesGT is None):
        timesES = np.arange(m)
        timesGT = timesES

    diff = np.zeros((n,m))
    for j in range(m):
        t = timesES[j]
        i = np.argmin(np.abs(t-timesGT))
        for c in range(n):
            diff[c,j] = bpmGT[i]-bpmES[c,j]
    return diff
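
A minimal sketch of how these metrics are consumed; the shapes are the key assumption (estimates are row-wise per channel, ground truth is 1-D), and the numbers are synthetic:

import numpy as np

bpmGT = np.array([72., 73., 71., 70.])                 # ground-truth BPM per window
timesGT = np.array([0., 5., 10., 15.])
bpmES = bpmGT[np.newaxis, :] + np.random.randn(1, 4)   # one estimated channel with noise
timesES = timesGT.copy()

RMSE, MAE, MAX, PCC = getErrors(bpmES, bpmGT, timesES, timesGT)
printErrors(RMSE[0], MAE[0], MAX[0], PCC[0])           # scalars for channel 0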
evm.py ADDED
@@ -0,0 +1,142 @@
import numpy as np
import cv2
import scipy.signal as signal
import scipy.fftpack as fftpack


def build_gaussian_pyramid(src, levels=3):
    """
    Function: build_gaussian_pyramid
    --------------------------------
    Builds a Gaussian pyramid

    Args:
    -----
    src: the input image
    levels: the number of levels in the Gaussian pyramid

    Returns:
    --------
    A Gaussian pyramid
    """
    s = src.copy()
    pyramid = [s]
    for i in range(levels):
        s = cv2.pyrDown(s)
        pyramid.append(s)

    return pyramid


def gaussian_video(video, levels=3):
    """
    Function: gaussian_video
    ------------------------
    Generates a Gaussian pyramid for each frame in a video and keeps the top level

    Args:
    -----
    video: the input video array
    levels: the number of levels in the Gaussian pyramid

    Returns:
    --------
    the Gaussian video
    """
    n = video.shape[0]
    for i in range(0, n):
        pyr = build_gaussian_pyramid(video[i], levels=levels)
        gaussian_frame = pyr[-1]
        if i == 0:
            vid_data = np.zeros((n, *gaussian_frame.shape))
        vid_data[i] = gaussian_frame
    return vid_data

def temporal_ideal_filter(arr, low, high, fps, axis=0):
    """
    Function: temporal_ideal_filter
    -------------------------------
    Applies a temporal ideal filter to a numpy array
    Args:
    -----
    arr: a numpy array with shape (N, H, W, C)
        N: number of frames
        H: height
        W: width
        C: channels
    low: the low frequency bound
    high: the high frequency bound
    fps: the video frame rate
    axis: the axis of video, should always be 0
    Returns:
    --------
    the array with the filter applied
    """
    fft = fftpack.fft(arr, axis=axis)
    frequencies = fftpack.fftfreq(arr.shape[0], d=1.0 / fps)
    bound_low = (np.abs(frequencies - low)).argmin()
    bound_high = (np.abs(frequencies - high)).argmin()
    fft[:bound_low] = 0
    fft[bound_high:-bound_high] = 0
    fft[-bound_low:] = 0
    iff = fftpack.ifft(fft, axis=axis)
    return np.abs(iff)


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """
    Function: butter_bandpass_filter
    --------------------------------
    Applies a Butterworth bandpass filter
    Args:
    -----
    data: the input data
    lowcut: the low cut value
    highcut: the high cut value
    fs: the frame rate in frames per second
    order: the order for butter
    Returns:
    --------
    the result of the Butterworth bandpass filter
    """
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = signal.butter(order, [low, high], btype='band')
    y = signal.lfilter(b, a, data, axis=0)
    return y

def reconstruct_video_g(amp_video, original_video, levels=3):
    """
    Function: reconstruct_video_g
    -----------------------------
    Reconstructs a video from a Gaussian pyramid and the original

    Args:
    -----
    amp_video: the amplified Gaussian video
    original_video: the original video
    levels: the levels in the Gaussian video

    Returns:
    --------
    the reconstructed video
    """
    final_video = np.zeros(original_video.shape)
    for i in range(0, amp_video.shape[0]):
        img = amp_video[i]
        for x in range(levels):
            img = cv2.pyrUp(img)

        img = img + original_video[i]
        final_video[i] = img
    return final_video
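
Taken together, the functions above form the Gaussian variant of the EVM pipeline. A minimal sketch, assuming video is an (N, H, W, C) float array whose frame sides are divisible by 2**levels (the clip here is a random placeholder):

import numpy as np

levels, fps = 3, 30.0
video = np.random.rand(120, 256, 256, 3).astype(np.float32)  # placeholder clip

ds = gaussian_video(video, levels=levels)          # spatial low-pass + downsample
band = temporal_ideal_filter(ds, 0.75, 4.0, fps)   # keep the 45-240 BPM band
amplified = 50.0 * band                            # magnify the pulse band
out = reconstruct_video_g(amplified, video, levels=levels)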
evm2.py ADDED
@@ -0,0 +1,133 @@
#!/usr/bin/env python

"""
Eulerian Video Magnification (EVM) Demo
"""

import cv2
import numpy as np
import scipy.fftpack
import skimage.color

def gaussian(image, numlevels):
    """Constructs a Gaussian pyramid

    Arguments:
        image     : Input image (monochrome or color)
        numlevels : Number of levels to compute

    Return:
        List of progressively smaller (i.e. lower frequency) images
    """

    pyramid = [image]
    for level in range(numlevels):
        image = cv2.pyrDown(image)
        pyramid.append(image)

    return pyramid

def temporal_bandpass_filter(data, freq_min, freq_max, fps, axis=0):
    """Applies an ideal band-pass filter to a given video
    Arguments:
        data     : video to be filtered (as a 4-d numpy array (time, height,
                   width, channels))
        freq_min : lower cut-off frequency of band-pass filter
        freq_max : upper cut-off frequency of band-pass filter
        fps      : video frame rate
    Return:
        Temporally filtered video as 4-d array
    """

    # perform FFT on each frame
    fft = scipy.fftpack.fft(data, axis=axis)
    # sampling frequencies, where the step d is 1/samplingRate
    frequencies = scipy.fftpack.fftfreq(data.shape[0], d=1.0 / fps)
    # find the indices of low cut-off frequency
    bound_low = (np.abs(frequencies - freq_min)).argmin()
    # find the indices of high cut-off frequency
    bound_high = (np.abs(frequencies - freq_max)).argmin()
    # band pass filtering
    fft[:bound_low] = 0
    fft[-bound_low:] = 0
    fft[bound_high:-bound_high] = 0
    # perform inverse FFT
    return np.real(scipy.fftpack.ifft(fft, axis=0))

class EVM():
    """Eulerian Video Magnification"""

    def __init__(self, frames, fps):
        """Constructor"""
        self.fps = fps
        self.frames = frames
        self.frameCount = len(frames)
        self.frameHeight = int(frames[0].shape[0])
        self.frameWidth = int(frames[0].shape[1])
        self.numChannels = 3
        # keep the input frames; write the results into a shallow copy so the
        # originals are not clobbered
        self.in_frames = frames
        self.out_frames = list(frames)

    def process(self, numlevels=4, alpha=50., chromAttenuation=1., lowcut=0.5, highcut=1.5):
        """Process video

        Arguments:
            numlevels        : Number of pyramid levels to compute
            alpha            : amplification factor
            chromAttenuation : attenuation of the chrominance channels
            lowcut, highcut  : temporal pass band in Hz
        """
        # compute pyramid on first frame
        pyramid = gaussian(self.in_frames[0], numlevels)
        height, width, _ = pyramid[-1].shape

        # allocate memory for downsampled frames
        self.ds_frames = np.ndarray(shape=(self.frameCount,
                                           height,
                                           width,
                                           self.numChannels),
                                    dtype=np.float32)
        self.ds_frames[0] = pyramid[-1]

        for frameNumber in range(1, self.frameCount):

            # spatial decomposition (specify laplacian or gaussian)
            pyramid = gaussian(self.in_frames[frameNumber], numlevels)

            # store downsampled frame into memory
            self.ds_frames[frameNumber] = pyramid[-1]

        # temporal filtering
        output = temporal_bandpass_filter(self.ds_frames, lowcut, highcut, self.fps)

        # amplification (channel 0 is luminance, 1-2 are chrominance in YIQ)
        output[:,:,:,0] *= alpha
        output[:,:,:,1] *= (alpha * chromAttenuation)
        output[:,:,:,2] *= (alpha * chromAttenuation)

        for i in range(self.frameCount):

            orig = self.in_frames[i]

            filt = output[i].astype(np.float32)

            # enlarge to match size of original frame (keep as 32-bit float)
            filt = cv2.resize(filt, (self.frameWidth, self.frameHeight), interpolation=cv2.INTER_CUBIC)

            filt = filt + orig

            filt = skimage.color.yiq2rgb(filt)

            #filt[filt > 1] = 1
            #filt[filt < 0] = 0

            self.out_frames[i] = filt

        return self.out_frames

def main(frames, fps, alpha, numlevels):
    evm = EVM(frames, fps)
    filt = evm.process(alpha=alpha, numlevels=numlevels)

    return filt
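
A minimal driver sketch for the class above; it assumes the input frames are already YIQ float arrays (yiq2rgb is applied on the way out), and the random frames are a placeholder for a real clip:

import numpy as np

fps = 30.0
# placeholder: 90 YIQ frames of 128x128 (a real driver would convert RGB to YIQ first)
frames = [np.random.rand(128, 128, 3).astype(np.float32) for _ in range(90)]

out = main(frames, fps, alpha=50., numlevels=4)
print(len(out), out[0].shape)   # 90 amplified RGB frames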
filters.py ADDED
@@ -0,0 +1,36 @@
import numpy as np
from scipy.signal import butter, lfilter, filtfilt, freqz
from scipy import signal

def BPfilter(x, minHz, maxHz, fs, order=6):
    """Band-pass filter (on the BPM band)"""

    b, a = butter(order, Wn=[minHz, maxHz], fs=fs, btype='bandpass')
    # TODO: verify whether filtfilt or lfilter is preferable
    #y = lfilter(b, a, x)
    y = filtfilt(b, a, x)
    return y

def zeroMeanSTDnorm(x):
    # -- normalization along rows (1-3 channels)
    mx = x.mean(axis=1).reshape(-1,1)
    sx = x.std(axis=1).reshape(-1,1)
    y = (x - mx) / sx
    return y
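
BPfilter is the band-pass applied ahead of BPM estimation; a quick sketch on a synthetic pulse trace (a 1.2 Hz carrier, i.e. 72 BPM, plus noise):

import numpy as np

fs = 30.0                                        # video frame rate
t = np.arange(0, 20, 1/fs)
x = np.sin(2*np.pi*1.2*t) + 0.5*np.random.randn(t.size)

y = BPfilter(x, minHz=0.75, maxHz=4.0, fs=fs)    # keep the 45-240 BPM band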
green.py ADDED
@@ -0,0 +1,15 @@
from scipy import signal
import numpy as np
from .base import VHRMethod

class GREEN(VHRMethod):
    methodName = 'GREEN'

    def __init__(self, **kwargs):
        super(GREEN, self).__init__(**kwargs)

    def apply(self, X):
        # X holds the RGB traces row-wise; the BVP estimate is simply
        # the green channel (row 1)
        bvp = X[1,:].T

        return bvp
ica.py ADDED
@@ -0,0 +1,43 @@
import numpy as np
from scipy import signal
from .utils.jade import jadeR
from .base import VHRMethod

class ICA(VHRMethod):
    methodName = 'ICA'

    def __init__(self, **kwargs):
        self.tech = kwargs['ICAmethod']
        super(ICA, self).__init__(**kwargs)

    def apply(self, X):
        """ ICA method """

        # -- JADE (ICA)
        if self.tech == 'jade':
            W = self.__jade(X)
            bvp = np.dot(W, X)       # 3-dim signal!!
        elif self.tech == 'fastICA':
            bvp = self.__fastICA(X)  # fastICA already returns the sources

        return bvp


    def __jade(self, X):
        W = np.asarray(jadeR(X, 3, False))
        return W

    def __fastICA(self, X):
        from sklearn.decomposition import FastICA, PCA

        # -- PCA
        pca = PCA(n_components=3)
        Y = pca.fit_transform(X)

        # -- ICA
        ica = FastICA(n_components=3, max_iter=2000)
        S = ica.fit_transform(Y)

        return S.T
jade.py ADDED
@@ -0,0 +1,430 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 22 11:12:29 2017

@author: juliette
"""

#######################################################################
# jade.py -- Blind source separation of real signals
#
# Version 1.8
#
# Copyright 2005, Jean-Francois Cardoso (Original MATLAB code)
# Copyright 2007, Gabriel J.L. Beckers (NumPy translation)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################

"""
jade

This module contains only one function, jadeR, which does blind source
separation of real signals. Hopefully more ICA algorithms will be added in the
future.
"""

from sys import stdout
from numpy import abs, append, arange, arctan2, argsort, array, concatenate, \
    cos, diag, dot, eye, float32, float64, matrix, multiply, ndarray, newaxis, \
    sign, sin, sqrt, zeros
from numpy.linalg import eig, pinv


def jadeR(X, m=None, verbose=True):
    """
    Blind separation of real signals with JADE.

    jadeR implements JADE, an Independent Component Analysis (ICA) algorithm
    developed by Jean-Francois Cardoso. See
    http://www.tsi.enst.fr/~cardoso/guidesepsou.html , and papers cited
    at the end of the source file.

    Translated into NumPy from the original Matlab Version 1.8 (May 2005) by
    Gabriel Beckers, http://gbeckers.nl .

    Parameters:

        X -- an nxT data matrix (n sensors, T samples). May be a numpy array or
             matrix.

        m -- output matrix B has size mxn so that only m sources are
             extracted. This is done by restricting the operation of jadeR
             to the m first principal components. Defaults to None, in which
             case m=n.

        verbose -- print info on progress. Default is True.

    Returns:

        An m*n matrix B (NumPy matrix type), such that Y=B*X are separated
        sources extracted from the n*T data matrix X. If m is omitted, B is a
        square n*n matrix (as many sources as sensors). The rows of B are
        ordered such that the columns of pinv(B) are in order of decreasing
        norm; this has the effect that the `most energetically significant`
        components appear first in the rows of Y=B*X.

    Quick notes (more at the end of this file):

    o This code is for REAL-valued signals. A MATLAB implementation of JADE
      for both real and complex signals is also available from
      http://sig.enst.fr/~cardoso/stuff.html

    o This algorithm differs from the first released implementations of
      JADE in that it has been optimized to deal more efficiently
      1) with real signals (as opposed to complex)
      2) with the case when the ICA model does not necessarily hold.

    o There is a practical limit to the number of independent
      components that can be extracted with this implementation. Note
      that the first step of JADE amounts to a PCA with dimensionality
      reduction from n to m (which defaults to n). In practice m
      cannot be `very large` (more than 40, 50, 60... depending on
      available memory)

    o See more notes, references and revision history at the end of
      this file and more stuff on the WEB
      http://sig.enst.fr/~cardoso/stuff.html

    o For more info on the NumPy translation, see the end of this file.

    o This code is supposed to do a good job! Please report any
      problem relating to the NumPy code gabriel@gbeckers.nl

    Copyright original Matlab code : Jean-Francois Cardoso <cardoso@sig.enst.fr>
    Copyright Numpy translation : Gabriel Beckers <gabriel@gbeckers.nl>
    """

    # GB: we do some checking of the input arguments and copy data to new
    # variables to avoid messing with the original input. We also require double
    # precision (float64) and a numpy matrix type for X.

    assert isinstance(X, ndarray), \
        "X (input data matrix) is of the wrong type (%s)" % type(X)
    origtype = X.dtype  # remember to return matrix B of the same type
    X = matrix(X.astype(float64))
    assert X.ndim == 2, "X has %d dimensions, should be 2" % X.ndim
    assert (verbose == True) or (verbose == False), \
        "verbose parameter should be either True or False"

    [n,T] = X.shape  # GB: n is number of input signals, T is number of samples

    if m is None:
        m = n  # Number of sources defaults to # of sensors
    assert m <= n, \
        "jade -> Do not ask more sources (%d) than sensors (%d) here!!!" % (m, n)

    if verbose:
        print("jade -> Looking for %d sources" % m)
        print("jade -> Removing the mean value")
    X -= X.mean(1)

    # whitening & projection onto signal subspace
    # ===========================================
    if verbose:
        print("jade -> Whitening the data")
    [D,U] = eig((X * X.T) / float(T))  # An eigen basis for the sample covariance matrix
    k = D.argsort()
    Ds = D[k]  # Sort by increasing variances
    PCs = arange(n-1, n-m-1, -1)  # The m most significant princip. comp. by decreasing variance

    # --- PCA ----------------------------------------------------------
    B = U[:,k[PCs]].T  # % At this stage, B does the PCA on m components

    # --- Scaling ------------------------------------------------------
    scales = sqrt(Ds[PCs])  # The scales of the principal components .
    B = diag(1./scales) * B  # Now, B does PCA followed by a rescaling = sphering
    #B[-1,:] = -B[-1,:]  # GB: to make it compatible with octave
    # --- Sphering ------------------------------------------------------
    X = B * X  # %% We have done the easy part: B is a whitening matrix and X is white.

    del U, D, Ds, k, PCs, scales

    # NOTE: At this stage, X is a PCA analysis in m components of the real data, except that
    # all its entries now have unit variance. Any further rotation of X will preserve the
    # property that X is a vector of uncorrelated components. It remains to find the
    # rotation matrix such that the entries of X are not only uncorrelated but also `as
    # independent as possible". This independence is measured by correlations of order
    # higher than 2. We have defined such a measure of independence which
    # 1) is a reasonable approximation of the mutual information
    # 2) can be optimized by a `fast algorithm"
    # This measure of independence also corresponds to the `diagonality" of a set of
    # cumulant matrices. The code below finds the `missing rotation " as the matrix which
    # best diagonalizes a particular set of cumulant matrices.


    # Estimation of the cumulant matrices.
    # ====================================
    if verbose:
        print("jade -> Estimating cumulant matrices")

    # Reshaping of the data, hoping to speed up things a little bit...
    X = X.T
    dimsymm = int((m * (m + 1)) / 2)  # Dim. of the space of real symm matrices
    nbcm = dimsymm  # number of cumulant matrices
    CM = matrix(zeros([m,m*nbcm], dtype=float64))  # Storage for cumulant matrices
    R = matrix(eye(m, dtype=float64))
    Qij = matrix(zeros([m,m], dtype=float64))  # Temp for a cum. matrix
    Xim = zeros(m, dtype=float64)  # Temp
    Xijm = zeros(m, dtype=float64)  # Temp
    #Uns = numpy.ones([1,m], dtype=numpy.uint32)  # for convenience
    # GB: we don't translate that one because NumPy doesn't need Tony's rule

    # I am using a symmetry trick to save storage. I should write a short note one of these
    # days explaining what is going on here.
    Range = arange(m)  # will index the columns of CM where to store the cum. mats.

    for im in range(m):
        Xim = X[:,im]
        Xijm = multiply(Xim, Xim)
        # Note to myself: the -R on next line can be removed: it does not affect
        # the joint diagonalization criterion
        Qij = multiply(Xijm, X).T * X / float(T) \
            - R - 2 * dot(R[:,im], R[:,im].T)
        CM[:,Range] = Qij
        Range = Range + m
        for jm in range(im):
            Xijm = multiply(Xim, X[:,jm])
            Qij = sqrt(2) * multiply(Xijm, X).T * X / float(T) \
                - R[:,im] * R[:,jm].T - R[:,jm] * R[:,im].T
            CM[:,Range] = Qij
            Range = Range + m

    # Now we have nbcm = m(m+1)/2 cumulants matrices stored in a big m x m*nbcm array.

    V = matrix(eye(m, dtype=float64))

    Diag = zeros(m, dtype=float64)
    On = 0.0
    Range = arange(m)
    for im in range(nbcm):
        Diag = diag(CM[:,Range])
        On = On + (Diag*Diag).sum(axis=0)
        Range = Range + m
    Off = (multiply(CM,CM).sum(axis=0)).sum(axis=0) - On

    seuil = 1.0e-6 / sqrt(T)  # % A statistically scaled threshold on `small" angles
    encore = True
    sweep = 0    # % sweep number
    updates = 0  # % Total number of rotations
    upds = 0     # % Number of rotations in a given sweep
    g = zeros([2,nbcm], dtype=float64)
    gg = zeros([2,2], dtype=float64)
    G = zeros([2,2], dtype=float64)
    c = 0
    s = 0
    ton = 0
    toff = 0
    theta = 0
    Gain = 0

    # Joint diagonalization proper

    if verbose:
        print("jade -> Contrast optimization by joint diagonalization")

    while encore:
        encore = False
        if verbose:
            print("jade -> Sweep #%3d" % sweep)
        sweep = sweep + 1
        upds = 0
        Vkeep = V

        for p in range(m-1):
            for q in range(p+1, m):

                Ip = arange(p, m*nbcm, m)
                Iq = arange(q, m*nbcm, m)

                # computation of Givens angle
                g = concatenate([CM[p,Ip] - CM[q,Iq], CM[p,Iq] + CM[q,Ip]])
                gg = dot(g, g.T)
                ton = gg[0,0] - gg[1,1]
                toff = gg[0,1] + gg[1,0]
                theta = 0.5 * arctan2(toff, ton + sqrt(ton * ton + toff * toff))
                Gain = (sqrt(ton * ton + toff * toff) - ton) / 4.0

                # Givens update
                if abs(theta) > seuil:
                    encore = True
                    upds = upds + 1
                    c = cos(theta)
                    s = sin(theta)
                    G = matrix([[c, -s], [s, c]])
                    pair = array([p,q])
                    V[:,pair] = V[:,pair] * G
                    CM[pair,:] = G.T * CM[pair,:]
                    CM[:,concatenate([Ip,Iq])] = \
                        append(c*CM[:,Ip]+s*CM[:,Iq], -s*CM[:,Ip]+c*CM[:,Iq], \
                               axis=1)
                    On = On + Gain
                    Off = Off - Gain

        if verbose:
            print("completed in %d rotations" % upds)
        updates = updates + upds
    if verbose:
        print("jade -> Total of %d Givens rotations" % updates)

    # A separating matrix
    # ===================

    B = V.T * B

    # Permute the rows of the separating matrix B to get the most energetic components first.
    # Here the **signals** are normalized to unit variance. Therefore, the sort is
    # according to the norm of the columns of A = pinv(B)

    if verbose:
        print("jade -> Sorting the components")

    A = pinv(B)
    keys = array(argsort(multiply(A,A).sum(axis=0)[0]))[0]
    B = B[keys,:]
    B = B[::-1,:]  # % Is this smart ?


    if verbose:
        print("jade -> Fixing the signs")
    b = B[:,0]
    signs = array(sign(sign(b)+0.1).T)[0]  # just a trick to deal with sign=0
    B = diag(signs) * B

    return B.astype(origtype)


# Revision history of MATLAB code:
#
#- V1.8, May 2005
#  - Added some commented code to explain the cumulant computation tricks.
#  - Added reference to the Neural Comp. paper.
#
#- V1.7, Nov. 16, 2002
#  - Reverted the mean removal code to an earlier version (not using
#    repmat) to keep the code octave-compatible. Now less efficient,
#    but does not make any significant difference wrt the total
#    computing cost.
#  - Remove some cruft (some debugging figures were created. What
#    was this stuff doing there???)
#
#
#- V1.6, Feb. 24, 1997
#  - Mean removal is better implemented.
#  - Transposing X before computing the cumulants: small speed-up
#  - Still more comments to emphasize the relationship to PCA
#
#- V1.5, Dec. 24 1997
#  - The sign of each row of B is determined by letting the first element be positive.
#
#- V1.4, Dec. 23 1997
#  - Minor clean up.
#  - Added a verbose switch
#  - Added the sorting of the rows of B in order to fix in some reasonable way the
#    permutation indetermination. See note 2) below.
#
#- V1.3, Nov. 2 1997
#  - Some clean up. Released in the public domain.
#
#- V1.2, Oct. 5 1997
#  - Changed random picking of the cumulant matrix used for initialization to a
#    deterministic choice. This is not because of a better rationale but to make the
#    output (almost surely) deterministic.
#  - Rewrote the joint diag. to take more advantage of Matlab's tricks.
#  - Created more dummy variables to combat Matlab's loose memory management.
#
#- V1.1, Oct. 29 1997.
#  Made the estimation of the cumulant matrices more regular. This also corrects a
#  buglet...
#
#- V1.0, Sept. 9 1997. Created.
#
# Main references:
# @article{CS-iee-94,
#  title = "Blind beamforming for non {G}aussian signals",
#  author = "Jean-Fran\c{c}ois Cardoso and Antoine Souloumiac",
#  HTML = "ftp://sig.enst.fr/pub/jfc/Papers/iee.ps.gz",
#  journal = "IEE Proceedings-F",
#  month = dec, number = 6, pages = {362-370}, volume = 140, year = 1993}
#
#
#@article{JADE:NC,
#  author = "Jean-Fran\c{c}ois Cardoso",
#  journal = "Neural Computation",
#  title = "High-order contrasts for independent component analysis",
#  HTML = "http://www.tsi.enst.fr/~cardoso/Papers.PS/neuralcomp_2ppf.ps",
#  year = 1999, month = jan, volume = 11, number = 1, pages = "157-192"}
#
#
# Notes:
# ======
#
# Note 1) The original Jade algorithm/code deals with complex signals in Gaussian noise
# white and exploits an underlying assumption that the model of independent components
# actually holds. This is a reasonable assumption when dealing with some narrowband
# signals. In this context, one may i) seriously consider dealing precisely with the
# noise in the whitening process and ii) expect to use the small number of significant
# eigenmatrices to efficiently summarize all the 4th-order information. All this is done
# in the JADE algorithm.
#
# In *this* implementation, we deal with real-valued signals and we do NOT expect the ICA
# model to hold exactly. Therefore, it is pointless to try to deal precisely with the
# additive noise and it is very unlikely that the cumulant tensor can be accurately
# summarized by its first n eigen-matrices. Therefore, we consider the joint
# diagonalization of the *whole* set of eigen-matrices. However, in such a case, it is
# not necessary to compute the eigenmatrices at all because one may equivalently use
# `parallel slices" of the cumulant tensor. This part (computing the eigen-matrices) of
# the computation can be saved: it suffices to jointly diagonalize a set of cumulant
# matrices. Also, since we are dealing with real signals, it becomes easier to exploit
# the symmetries of the cumulants to further reduce the number of matrices to be
# diagonalized. These considerations, together with other cheap tricks lead to this
# version of JADE which is optimized (again) to deal with real mixtures and to work
# `outside the model'. As the original JADE algorithm, it works by minimizing a `good
# set' of cumulants.
#
# Note 2) The rows of the separating matrix B are resorted in such a way that the columns
# of the corresponding mixing matrix A=pinv(B) are in decreasing order of (Euclidean)
# norm. This is a simple, `almost canonical" way of fixing the indetermination of
# permutation. It has the effect that the first rows of the recovered signals (ie the
# first rows of B*X) correspond to the most energetic *components*. Recall however that
# the source signals in S=B*X have unit variance. Therefore, when we say that the
# observations are unmixed in order of decreasing energy, this energetic signature is to
# be found as the norm of the columns of A=pinv(B) and not as the variances of the
# separated source signals.
#
# Note 3) In experiments where JADE is run as B=jadeR(X,m) with m varying in range of
# values, it is nice to be able to test the stability of the decomposition. In order to
# help in such a test, the rows of B can be sorted as described above. We have also
# decided to fix the sign of each row in some arbitrary but fixed way. The convention is
# that the first element of each row of B is positive.
#
# Note 4) Contrary to many other ICA algorithms, JADE (or least this version) does not
# operate on the data themselves but on a statistic (the full set of 4th order cumulant).
# This is represented by the matrix CM below, whose size grows as m^2 x m^2 where m is
# the number of sources to be extracted (m could be much smaller than n). As a
# consequence, (this version of) JADE will probably choke on a `large' number of sources.
# Here `large' depends mainly on the available memory and could be something like 40 or
# so. One of these days, I will prepare a version of JADE taking the `data' option
# rather than the `statistic' option.

# Notes on translation (GB):
# =========================
#
# Note 1) The function jadeR is a relatively literal translation from the original MATLAB
# code. I haven't really looked into optimizing it for NumPy. If you have any time to look
# at this and good ideas, let me know.
#
# Note 2) A test module that compares NumPy output with Octave (MATLAB
# clone) output of the original MATLAB script is available
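
A quick self-check of jadeR on a synthetic mixture; everything here (sources, mixing matrix) is illustrative:

import numpy as np

rng = np.random.default_rng(1)
S = np.vstack([np.sign(np.sin(np.linspace(0, 40, 2000))),  # square-ish source
               rng.laplace(size=2000)])                    # super-Gaussian source
A = rng.random((2, 2))                                     # random mixing matrix
X = A @ S                                                  # observed mixtures

B = np.asarray(jadeR(X, verbose=False))                    # 2x2 unmixing matrix
Y = B @ X                                                  # recovered sources (up to sign/order)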
lgi.py ADDED
@@ -0,0 +1,25 @@
import numpy as np
from scipy import signal
from .base import VHRMethod

class LGI(VHRMethod):
    methodName = 'LGI'

    def __init__(self, **kwargs):
        super(LGI, self).__init__(**kwargs)

    def apply(self, X):

        U,_,_ = np.linalg.svd(X)

        S = U[:,0].reshape(1,-1)               # first left-singular vector, 2D array of shape (1,3)
        P = np.identity(3) - np.matmul(S.T,S)  # projection onto its orthogonal complement

        Y = np.dot(P,X)
        bvp = Y[1,:]

        return bvp
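
The projection removes the dominant direction found by the SVD (mostly illumination and motion energy); a small numeric check, assuming row-wise RGB traces:

import numpy as np

X = np.random.rand(3, 300)       # placeholder RGB traces
U, _, _ = np.linalg.svd(X)
S = U[:, 0].reshape(1, -1)
P = np.identity(3) - S.T @ S

Y = P @ X
print(np.allclose(S @ Y, 0))     # True: the dominant direction is gone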
lgi_ppgi.py ADDED
@@ -0,0 +1,61 @@
import xml.etree.ElementTree as ET
import numpy as np
from os import path
from pyVHR.datasets.dataset import Dataset
from pyVHR.signals.bvp import BVPsignal

class LGI_PPGI(Dataset):
    """
    LGI-PPGI dataset structure:
    -----------------
        datasetDIR/
        |
        |-- vidDIR1/
        |   |-- videoFile1.avi
        |
        |...
        |
        |-- vidDIRM/
            |-- videoFile1.avi
    """
    name = 'LGI_PPGI'
    signalGT = 'BVP'               # GT signal type
    numLevels = 2                  # depth of the filesystem collecting video and BVP files
    numSubjects = 4                # number of subjects
    video_EXT = 'avi'              # extension of the video files
    frameRate = 25                 # video frame rate
    VIDEO_SUBSTRING = 'cv_camera'  # substring contained in the filename
    SIG_EXT = 'xml'                # extension of the BVP files
    SIG_SUBSTRING = 'cms50'        # substring contained in the filename
    SIG_SampleRate = 60            # sample rate of the BVP files

    def readSigfile(self, filename):
        """
        Load the BVP signal. Must return a 1-dim (row array) signal
        """

        tree = ET.parse(filename)
        # get all bvp elements and their values
        bvp_elements = tree.findall('.//*/value2')
        bvp = [int(item.text) for item in bvp_elements]

        n_bvp_samples = len(bvp)
        last_bvp_time = int((n_bvp_samples*1000)/self.SIG_SampleRate)

        vid_xml_filename = path.join(path.dirname(filename), 'cv_camera_sensor_timer_stream_handler.xml')
        tree = ET.parse(vid_xml_filename)

        root = tree.getroot()
        last_vid_time = int(float(root[-1].find('value1').text))

        # offset (in seconds) between the end of the BVP trace and the video
        diff = ((last_bvp_time - last_vid_time)/1000)

        assert diff >= 0, 'Unusable data.'

        print("Skipping %.2f seconds..." % diff)

        diff_samples = round(diff*self.SIG_SampleRate)

        data = np.array(bvp[diff_samples:])

        return BVPsignal(data, self.SIG_SampleRate)
mahnob.py ADDED
@@ -0,0 +1,40 @@

import numpy as np
import pybdf
from biosppy.signals import ecg
from pyVHR.datasets.dataset import Dataset
from pyVHR.signals.ecg import ECGsignal

class MAHNOB(Dataset):
    """
    Mahnob dataset structure:
    -----------------
        datasetDIR/
        |
        |-- vidDIR1/
        |   |-- videoFile.avi
        |   |-- physioFile.bdf
        |...
        |...
    """
    name = 'MAHNOB'
    signalGT = 'ECG'             # GT signal type
    numLevels = 2                # depth of the filesystem collecting video and ECG files
    numSubjects = 40             # number of subjects
    video_EXT = 'avi'            # extension of the video files
    frameRate = 20               # video frame rate
    VIDEO_SUBSTRING = 'Section'  # substring contained in the filename
    SIG_EXT = 'bdf'              # extension of the ECG files
    SIG_SUBSTRING = 'emotion'    # substring contained in the filename
    SIG_SampleRate = 256         # sample rate of the ECG files

    def readSigfile(self, filename):
        """ Load the ECG signal.
            Return a 2-dim signal (t, bpm(t))
        """
        bdfRec = pybdf.bdfRecording(filename)
        rec = bdfRec.getData(channels=[33])
        self.SIG_SampleRate = bdfRec.sampRate[33]
        data = np.array(rec['data'][0])[rec['eventTable']['idx'][2]:]
        return ECGsignal(data, self.SIG_SampleRate)
multi_dataset_analysis_SPLIT.py ADDED
@@ -0,0 +1,283 @@
import sys
sys.path.append("..")
import pandas as pd
import numpy as np
import os
import re
import matplotlib.pyplot as plt
import scipy.stats as ss
import scikit_posthocs as sp

def sort_nicely(l):
    """ Sort the given list in the way that humans expect.
    """
    convert = lambda text: int(text) if text.isdigit() else text
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    l.sort(key=alphanum_key)

    return l


DATASETS = ['PURE', 'UBFC1', 'UBFC2', 'LGI-PPGI']
#DATASETS = ['PURE', 'UBFC1', 'UBFC2', 'LGI-PPGI', 'Cohface', 'Mahnob']
#DATASETS = ['Cohface', 'Mahnob']
all_methods = ['CHROM','Green','ICA','LGI','PBV','PCA','POS','SSR']
metrics = ['CC', 'MAE']

avg_type = 'mean'
#avg_type = 'median'

data_CC = []
data_MAE = []

for r, DATASET in enumerate(DATASETS):

    # Experiment path
    exp_path = '../results/' + DATASET + '/'
    files = sort_nicely(os.listdir(exp_path))

    #---------------- Produce box plots for each method on a given dataset -----------

    win_to_use = 10

    f_to_use = [i for i in files if 'winSize'+str(win_to_use) in i][0]
    path = exp_path + f_to_use
    res = pd.read_hdf(path)

    print('\n\n\t\t' + DATASET + '\n\n')

    if DATASET == 'UBFC1' or DATASET == 'UBFC2' or DATASET == 'Mahnob' or DATASET == 'UBFC_ALL':

        all_vals_CC = []
        all_vals_MAE = []
        curr_dataCC = np.zeros(len(all_methods))
        curr_dataMAE = np.zeros(len(all_methods))

        for metric in metrics:
            for method in all_methods:
                raw_values = res[res['method'] == method][metric]
                values = []
                for v in raw_values:
                    if metric == 'CC':
                        values.append(v[np.argmax(v)])
                    else:
                        values.append(v[np.argmin(v)])

                if metric == 'CC':
                    all_vals_CC.append(np.array(values))
                if metric == 'MAE':
                    all_vals_MAE.append(np.array(values))

        for c in range(len(all_vals_CC)):  # for each method
            if avg_type == 'median':
                curr_dataCC[c] = np.median(all_vals_CC[c])
                curr_dataMAE[c] = np.median(all_vals_MAE[c])
            else:
                curr_dataCC[c] = np.mean(all_vals_CC[c])
                curr_dataMAE[c] = np.mean(all_vals_MAE[c])

        data_CC.append(curr_dataCC)
        data_MAE.append(curr_dataMAE)


    elif DATASET == 'PURE':

        cases = {'01':'steady', '02':'talking', '03':'slow_trans', '04':'fast_trans', '05':'small_rot', '06':'fast_rot'}
        all_CC = {'01':[], '02':[], '03':[], '04':[], '05':[], '06':[]}
        all_MAE = {'01':[], '02':[], '03':[], '04':[], '05':[], '06':[]}
        CC_allcases = []
        MAE_allcases = []
        curr_dataCC = np.zeros(len(all_methods))
        curr_dataMAE = np.zeros(len(all_methods))

        for metric in metrics:
            for method in all_methods:
                for curr_case in cases.keys():

                    curr_res = res[res['videoName'].str.split('/').str[5].str.split('-').str[1] == curr_case]
                    raw_values = curr_res[curr_res['method'] == method][metric]

                    values = []
                    for v in raw_values:
                        if metric == 'CC':
                            values.append(v[np.argmax(v)])
                        else:
                            values.append(v[np.argmin(v)])

                    if metric == 'CC':
                        all_CC[curr_case].append(np.array(values))
                    if metric == 'MAE':
                        all_MAE[curr_case].append(np.array(values))

        for curr_case in cases.keys():

            all_vals_CC = all_CC[curr_case]
            all_vals_MAE = all_MAE[curr_case]

            for c in range(len(all_vals_CC)):  # for each method
                if avg_type == 'median':
                    curr_dataCC[c] = np.median(all_vals_CC[c])
                    curr_dataMAE[c] = np.median(all_vals_MAE[c])
                else:
                    curr_dataCC[c] = np.mean(all_vals_CC[c])
                    curr_dataMAE[c] = np.mean(all_vals_MAE[c])

            data_CC.append(curr_dataCC.copy())
            data_MAE.append(curr_dataMAE.copy())

    elif DATASET == 'Cohface':

        CC_allcases = []
        MAE_allcases = []
        curr_dataCC = np.zeros(len(all_methods))
        curr_dataMAE = np.zeros(len(all_methods))

        for metric in metrics:
            for method in all_methods:
                raw_values = res[res['method'] == method][metric]

                values = []
                for v in raw_values:
                    if metric == 'CC':
                        values.append(v[np.argmax(v)])
                    else:
                        values.append(v[np.argmin(v)])

                if metric == 'CC':
                    CC_allcases.append(np.array(values))
                if metric == 'MAE':
                    MAE_allcases.append(np.array(values))

        for c in range(len(CC_allcases)):  # for each method
            if avg_type == 'median':
                curr_dataCC[c] = np.median(CC_allcases[c])
                curr_dataMAE[c] = np.median(MAE_allcases[c])
            else:
                curr_dataCC[c] = np.mean(CC_allcases[c])
                curr_dataMAE[c] = np.mean(MAE_allcases[c])

        data_CC.append(curr_dataCC)
        data_MAE.append(curr_dataMAE)


    elif DATASET == 'LGI-PPGI':

        cases = ['gym', 'resting', 'rotation', 'talk']
        #cases = ['resting']
        all_CC = {'gym':[], 'resting':[], 'rotation':[], 'talk':[]}
        all_MAE = {'gym':[], 'resting':[], 'rotation':[], 'talk':[]}
        CC_allcases = []
        MAE_allcases = []
        curr_dataCC = np.zeros(len(all_methods))
        curr_dataMAE = np.zeros(len(all_methods))

        for metric in metrics:
            for method in all_methods:
                for curr_case in cases:

                    curr_res = res[res['videoName'].str.split('/').str[6].str.split('_').str[1] == curr_case]
                    raw_values = curr_res[curr_res['method'] == method][metric]

                    values = []
                    for v in raw_values:
                        if metric == 'CC':
                            values.append(v[np.argmax(v)])
                        else:
                            values.append(v[np.argmin(v)])

                    if metric == 'CC':
                        all_CC[curr_case].append(np.array(values))
                    if metric == 'MAE':
                        all_MAE[curr_case].append(np.array(values))

        for curr_case in cases:
            all_vals_CC = all_CC[curr_case]
            all_vals_MAE = all_MAE[curr_case]

            for c in range(len(all_vals_CC)):  # for each method
                if avg_type == 'median':
                    curr_dataCC[c] = np.median(all_vals_CC[c])
                    curr_dataMAE[c] = np.median(all_vals_MAE[c])
                else:
                    curr_dataCC[c] = np.mean(all_vals_CC[c])
                    curr_dataMAE[c] = np.mean(all_vals_MAE[c])

            data_CC.append(curr_dataCC.copy())
            data_MAE.append(curr_dataMAE.copy())


data_CC = np.vstack(data_CC)
data_MAE = np.vstack(data_MAE)
n_datasets = data_CC.shape[0]
alpha = '0.05'

plt.figure()

plt.subplot(1,2,1)
plt.title('CC Multi Dataset')
plt.boxplot(data_CC, showfliers=True)
plt.xticks(np.arange(1,len(all_methods)+1), all_methods)

plt.subplot(1,2,2)
plt.title('MAE Multi Dataset')
plt.boxplot(data_MAE, showfliers=True)
plt.xticks(np.arange(1,len(all_methods)+1), all_methods)

from nonparametric_tests import friedman_aligned_ranks_test as ft
import Orange

data_MAE_df = pd.DataFrame(data_MAE, columns=all_methods)
print('\nFriedman Test MAE:')
#print(ss.friedmanchisquare(*data_MAE.T))
#print(' ')
t,p,ranks_mae,piv_mae = ft(data_MAE[:,0], data_MAE[:,1], data_MAE[:,2], data_MAE[:,3], data_MAE[:,4], data_MAE[:,5], data_MAE[:,6], data_MAE[:,7])
avranksMAE = list(np.divide(ranks_mae, n_datasets))

print('statistic: ' + str(t))
print('pvalue: ' + str(p))
print(' ')
pc = sp.posthoc_nemenyi_friedman(data_MAE_df)
cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}

plt.figure()
sp.sign_plot(pc, **heatmap_args)
plt.title('Nemenyi Test MAE')


data_CC_df = pd.DataFrame(data_CC, columns=all_methods)
print('\nFriedman Test CC:')
#print(ss.friedmanchisquare(*data_CC.T))
#print(' ')
t,p,ranks_cc,piv_cc = ft(data_CC[:,0], data_CC[:,1], data_CC[:,2], data_CC[:,3], data_CC[:,4], data_CC[:,5], data_CC[:,6], data_CC[:,7])
avranksCC = list(np.divide(ranks_cc, n_datasets))

print('statistic: ' + str(t))
print('pvalue: ' + str(p))
print(' ')
pc = sp.posthoc_nemenyi_friedman(data_CC_df)
cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}

plt.figure()
sp.sign_plot(pc, **heatmap_args)
plt.title('Nemenyi Test CC')

cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha)  # tested on 30 datasets
Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
plt.title('CD Diagram MAE')

cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha)  # tested on 30 datasets
Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
plt.title('CD Diagram CC')

print(data_MAE_df)
print(' ')
print(data_CC_df)

plt.show()
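
For reference, the Friedman machinery the script relies on can also be exercised on its own; a small sketch with synthetic per-dataset scores (6 datasets by 4 hypothetical methods):

import numpy as np
from nonparametric_tests import friedman_aligned_ranks_test

rng = np.random.default_rng(0)
scores = rng.random((6, 4))      # rows = datasets, columns = methods

T, p, ranks, pivots = friedman_aligned_ranks_test(
    scores[:, 0], scores[:, 1], scores[:, 2], scores[:, 3])
print(T, p)                      # chi-square statistic and p-value
print(ranks)                     # average aligned rank per method (lower = better for errors)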
nonparametric_tests.py ADDED
@@ -0,0 +1,678 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ import numpy as np
4
+ import scipy as sp
5
+ import scipy.stats as st
6
+ import itertools as it
7
+
8
+
9
+ def binomial_sign_test(*args):
10
+ """
11
+ Performs a binomial sign test for two dependent samples.
12
+ Tests the hypothesis that the two dependent samples represent two different populations.
13
+
14
+ Parameters
15
+ ----------
16
+ sample1, sample2: array_like
17
+ The sample measurements for each group.
18
+
19
+ Returns
20
+ -------
21
+ B-value : float
22
+ The computed B-value of the test.
23
+ p-value : float
24
+ The associated p-value from the B-distribution.
25
+
26
+ References
27
+ ----------
28
+ D.J. Sheskin, Handbook of parametric and nonparametric statistical procedures. crc Press, 2003, Test 19: The Binomial Sign Test for Two Dependent Samples
29
+ """
30
+ k = len(args)
31
+ if k != 2: raise ValueError('The test needs two samples')
32
+ n = len(args[0])
33
+
34
+ d_plus = 0
35
+ d_minus = 0
36
+ for i in range(n):
37
+ # Zero differences are eliminated
38
+ if args[0][i] < args[1][i]:
39
+ d_plus = d_plus+1
40
+ elif args[0][i] > args[1][i]:
41
+ d_minus = d_minus+1
42
+
43
+ x = max(d_plus, d_minus)
44
+ n = d_plus + d_minus
45
+
46
+ p_value = 2*(1 - st.binom.cdf(x, n, 0.5)) # Two-tailed of the smallest p-value
47
+
48
+ return x, p_value
49
+
50
+
51
+
52
+ def friedman_test(*args):
53
+ """
54
+ Performs a Friedman ranking test.
55
+ Tests the hypothesis that in a set of k dependent samples groups (where k >= 2)
56
+ at least two of the groups represent populations with different median values.
57
+
58
+ Parameters
59
+ ----------
60
+ sample1, sample2, ... : array_like
61
+ The sample measurements for each group.
62
+
63
+ Returns
64
+ -------
65
+ F-value : float
66
+ The computed F-value of the test.
67
+ p-value : float
68
+ The associated p-value from the F-distribution.
69
+ rankings : array_like
70
+ The ranking for each group.
71
+ pivots : array_like
72
+ The pivotal quantities for each group.
73
+
74
+ References
75
+ ----------
76
+ M. Friedman, The use of ranks to avoid the assumption of normality implicit in the
77
+ analysis of variance, Journal of the American Statistical Association 32 (1937) 674–701.
78
+ D.J. Sheskin, Handbook of parametric and nonparametric statistical procedures.
79
+ crc Press, 2003, Test 25: The Friedman Two-Way Analysis of Variance by Ranks
80
+ """
81
+ k = len(args)
82
+ if k < 2: raise ValueError('Less than 2 levels')
83
+ n = len(args[0])
84
+ if len(set([len(v) for v in args])) != 1: raise ValueError('Unequal number of samples')
85
+
86
+ rankings = []
87
+ for i in range(n):
88
+ row = [col[i] for col in args]
89
+ row_sort = sorted(row)
90
+ rankings.append([row_sort.index(v) + 1 + (row_sort.count(v)-1)/2. for v in row])
91
+
92
+ rankings_avg = [sp.mean([case[j] for case in rankings]) for j in range(k)]
93
+ rankings_cmp = [r/sp.sqrt(k*(k+1)/(6.*n)) for r in rankings_avg]
94
+
95
+ chi2 = ((12*n)/float((k*(k+1))))*((sp.sum(r**2 for r in rankings_avg))-((k*(k+1)**2)/float(4)))
96
+ iman_davenport = ((n-1)*chi2)/float((n*(k-1)-chi2))
97
+
98
+ p_value = 1 - st.f.cdf(iman_davenport, k-1, (k-1)*(n-1))
99
+
100
+ return iman_davenport, p_value, rankings_avg, rankings_cmp
101
+
102
+
103
+
104
+ def friedman_aligned_ranks_test(*args):
105
+ """
106
+ Performs a Friedman aligned ranks ranking test.
107
+ Tests the hypothesis that in a set of k dependent samples groups
108
+ (where k >= 2) at least two of the groups represent populations
109
+ with different median values.
110
+ The difference with a friedman test is that it uses the median of
111
+ each group to construct the ranking, which is useful when the number
112
+ of samples is low.
113
+
114
+ Parameters
115
+ ----------
116
+ sample1, sample2, ... : array_like
117
+ The sample measurements for each group.
118
+
119
+ Returns
120
+ -------
121
+ Chi2-value : float
122
+ The computed Chi2-value of the test.
123
+ p-value : float
124
+ The associated p-value from the Chi2-distribution.
125
+ rankings : array_like
126
+ The ranking for each group.
127
+ pivots : array_like
128
+ The pivotal quantities for each group.
129
+
130
+ References
131
+ ----------
132
+ J.L. Hodges, E.L. Lehmann, Ranks methods for combination of independent
133
+ experiments in analysis of variance, Annals of Mathematical Statistics 33 (1962) 482–497.
134
+ """
135
+
136
+
137
+ k = len(args)
138
+
139
+ if k < 2: raise ValueError('Less than 2 levels')
140
+ n = len(args[0])
141
+
142
+ if len(set([len(v) for v in args])) != 1: raise ValueError('Unequal number of samples')
143
+
144
+ aligned_observations = []
145
+ for i in range(n):
146
+ loc = sp.mean([col[i] for col in args])
147
+ aligned_observations.extend([col[i] - loc for col in args])
148
+
149
+ aligned_observations_sort = sorted(aligned_observations)
150
+
151
+ aligned_ranks = []
152
+ for i in range(n):
153
+ row = []
154
+ for j in range(k):
155
+ v = aligned_observations[i*k+j]
156
+ row.append(aligned_observations_sort.index(v) + 1 + (aligned_observations_sort.count(v)-1)/2.)
157
+ aligned_ranks.append(row)
158
+
159
+ rankings_avg = [sp.mean([case[j] for case in aligned_ranks]) for j in range(k)]
160
+ rankings_cmp = [r/sp.sqrt(k*(n*k+1)/6.) for r in rankings_avg]
161
+
162
+ r_i = [np.sum(case) for case in aligned_ranks]
163
+ r_j = [np.sum([case[j] for case in aligned_ranks]) for j in range(k)]
164
+ T = (k-1) * (sp.sum(v**2 for v in r_j) - (k*n**2/4.) * (k*n+1)**2) / float(((k*n*(k*n+1)*(2*k*n+1))/6.) - (1./float(k))*sp.sum(v**2 for v in r_i))
165
+
166
+ p_value = 1 - st.chi2.cdf(T, k-1)
167
+
168
+ return T, p_value, rankings_avg, rankings_cmp
169
+
170
+
171
+
172
+ def quade_test(*args):
173
+ """
174
+ Performs a Quade ranking test.
175
+ Tests the hypothesis that in a set of k dependent samples groups (where k >= 2) at least two of the groups represent populations with different median values.
176
+ The difference with a friedman test is that it uses the median for each sample to wiehgt the ranking.
177
+
178
+ Parameters
179
+ ----------
180
+ sample1, sample2, ... : array_like
181
+ The sample measurements for each group.
182
+
183
+ Returns
184
+ -------
185
+ F-value : float
186
+ The computed F-value of the test.
187
+ p-value : float
188
+ The associated p-value from the F-distribution.
189
+ rankings : array_like
190
+ The ranking for each group.
191
+ pivots : array_like
192
+ The pivotal quantities for each group.
193
+
194
+ References
195
+ ----------
196
+ D. Quade, Using weighted rankings in the analysis of complete blocks with additive block effects, Journal of the American Statistical Association 74 (1979) 680–683.
197
+ """
198
+ k = len(args)
199
+ if k < 2: raise ValueError('Less than 2 levels')
200
+ n = len(args[0])
201
+ if len(set([len(v) for v in args])) != 1: raise ValueError('Unequal number of samples')
202
+
203
+ rankings = []
204
+ ranges = []
205
+ for i in range(n):
206
+ row = [col[i] for col in args]
207
+ ranges.append(max(row) - min(row))
208
+ row_sort = sorted(row)
209
+ rankings.append([row_sort.index(v) + 1 + (row_sort.count(v)-1)/2. for v in row])
210
+
211
+ ranges_sort = sorted(ranges)
212
+ ranking_cases = [ranges_sort.index(v) + 1 + (ranges_sort.count(v)-1)/2. for v in ranges]
213
+
214
+ S = []
215
+ W = []
216
+ for i in range(n):
217
+ S.append([ranking_cases[i] * (r - (k + 1)/2.) for r in rankings[i]])
218
+ W.append([ranking_cases[i] * r for r in rankings[i]])
219
+
220
+ Sj = [np.sum(row[j] for row in S) for j in range(k)]
221
+ Wj = [np.sum(row[j] for row in W) for j in range(k)]
222
+
223
+ rankings_avg = [w / (n*(n+1)/2.) for w in Wj]
224
+ rankings_cmp = [r/sp.sqrt(k*(k+1)*(2*n+1)*(k-1)/(18.*n*(n+1))) for r in rankings_avg]
225
+
226
+ A = sp.sum(S[i][j]**2 for i in range(n) for j in range(k))
227
+ B = sp.sum(s**2 for s in Sj)/float(n)
228
+ F = (n-1)*B/(A-B)
229
+
230
+ p_value = 1 - st.f.cdf(F, k-1, (k-1)*(n-1))
231
+
232
+ return F, p_value, rankings_avg, rankings_cmp
233
+
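A quick sketch of calling quade_test with k = 3 groups measured over n = 4 blocks (values fabricated):

    F, p, rankings, pivots = quade_test([1.2, 0.9, 1.1, 1.4],
                                        [1.5, 1.4, 1.6, 1.7],
                                        [2.0, 1.8, 2.2, 2.1])
    # under H0, F follows an F(k-1, (k-1)(n-1)) distribution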
234
+ def bonferroni_dunn_test(ranks, control=None):
235
+ """
236
+ Performs a Bonferroni-Dunn post-hoc test using the pivot quantities obtained by a ranking test.
237
+ Tests the hypothesis that the ranking of the control method is different to each of the other methods.
238
+
239
+ Parameters
240
+ ----------
241
+ ranks : dictionary_like
242
+ A dictionary with format 'groupname':'pivotal quantity'
243
+ control : string optional
244
+ The name of the control method (one vs all); if None, the group with the minimum ranking is used
245
+
246
+ Returns
247
+ ----------
248
+ Comparisons : array-like
249
+ String identifiers of each comparison, with format 'group_i vs group_j'
250
+ Z-values : array-like
251
+ The computed Z-value statistic for each comparison.
252
+ p-values : array-like
253
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
254
+ Adjusted p-values : array-like
255
+ The associated adjusted p-values, which can be compared with a significance level
256
+
257
+ References
258
+ ----------
259
+ O.J. Dunn, Multiple comparisons among means, Journal of the American Statistical Association 56 (1961) 52–64.
260
+ """
261
+ k = len(ranks)
262
+ values = list(ranks.values()) # materialize: dict views have no .index() in Python 3
263
+ keys = list(ranks.keys())
264
+ if not control :
265
+ control_i = values.index(min(values))
266
+ else:
267
+ control_i = keys.index(control)
268
+
269
+ comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
270
+ z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
271
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
272
+ # Sort values by p_value so that p_0 < p_1
273
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
274
+ adj_p_values = [min((k-1)*p_value,1) for p_value in p_values]
275
+
276
+ return comparisons, z_values, p_values, adj_p_values
277
+
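All of the post-hoc helpers in this file take a dict mapping group names to the pivotal quantities returned by a ranking test; a minimal sketch with made-up pivots:

    pivots = {'POS': 1.2, 'CHROM': 2.9, 'GREEN': 4.1}
    comp, z, p, adj_p = bonferroni_dunn_test(pivots, control='POS')
    for c, ap in zip(comp, adj_p):
        print(c, '-> significant at 0.05:', ap < 0.05)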
278
+
279
+ def holm_test(ranks, control=None):
280
+ """
281
+ Performs a Holm post-hoc test using the pivot quantities obtained by a ranking test.
282
+ Tests the hypothesis that the ranking of the control method is different to each of the other methods.
283
+
284
+ Parameters
285
+ ----------
286
+ ranks : dictionary_like
287
+ A dictionary with format 'groupname':'pivotal quantity'
288
+ control : string optional
289
+ The name of the control method (one vs all); if None, the group with the minimum ranking is used
290
+
291
+ Returns
292
+ ----------
293
+ Comparisons : array-like
294
+ String identifiers of each comparison, with format 'group_i vs group_j'
295
+ Z-values : array-like
296
+ The computed Z-value statistic for each comparison.
297
+ p-values : array-like
298
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
299
+ Adjusted p-values : array-like
300
+ The associated adjusted p-values, which can be compared with a significance level
301
+
302
+ References
303
+ ----------
304
+ S. Holm, A simple sequentially rejective multiple test procedure, Scandinavian Journal of Statistics 6 (1979) 65–70.
305
+ """
306
+ k = len(ranks)
307
+ values = list(ranks.values())
308
+ keys = list(ranks.keys())
309
+ if not control :
310
+ control_i = values.index(min(values))
311
+ else:
312
+ control_i = keys.index(control)
313
+
314
+ comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
315
+ z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
316
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
317
+ # Sort values by p_value so that p_0 < p_1
318
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
319
+ adj_p_values = [min(max((k-(j+1))*p_values[j] for j in range(i+1)), 1) for i in range(k-1)]
320
+
321
+ return comparisons, z_values, p_values, adj_p_values
322
+
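As a concrete check of the step-down adjustment above: with k = 4 groups (three comparisons) and sorted p-values [0.01, 0.02, 0.20], the adjusted values are [3*0.01, max(0.03, 2*0.02), max(0.04, 1*0.20)] = [0.03, 0.04, 0.20], each entry being the running maximum of (k-(j+1))*p_j, capped at 1.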
323
+
324
+ def hochberg_test(ranks, control=None):
325
+ """
326
+ Performs a Hochberg post-hoc test using the pivot quantities obtained by a ranking test.
327
+ Tests the hypothesis that the ranking of the control method is different to each of the other methods.
328
+
329
+ Parameters
330
+ ----------
331
+ ranks : dictionary_like
332
+ A dictionary with format 'groupname':'pivotal quantity'
333
+ control : string optional
334
+ The name of the control method, default the group with minimum ranking
335
+
336
+ Returns
337
+ ----------
338
+ Comparisons : array-like
339
+ String identifiers of each comparison, with format 'group_i vs group_j'
340
+ Z-values : array-like
341
+ The computed Z-value statistic for each comparison.
342
+ p-values : array-like
343
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
344
+ Adjusted p-values : array-like
345
+ The associated adjusted p-values, which can be compared with a significance level
346
+
347
+ References
348
+ ----------
349
+ Y. Hochberg, A sharper Bonferroni procedure for multiple tests of significance, Biometrika 75 (1988) 800–803.
350
+ """
351
+ k = len(ranks)
352
+ values = list(ranks.values())
353
+ keys = list(ranks.keys())
354
+ if not control :
355
+ control_i = values.index(min(values))
356
+ else:
357
+ control_i = keys.index(control)
358
+
359
+ comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
360
+ z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
361
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
362
+ # Sort values by p_value so that p_0 < p_1
363
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
364
+ adj_p_values = [min(max((k-j)*p_values[j-1] for j in range(k-1, i, -1)), 1) for i in range(k-1)]
365
+
366
+ return comparisons, z_values, p_values, adj_p_values
367
+
368
+ def li_test(ranks, control=None):
369
+ """
370
+ Performs a Li post-hoc test using the pivot quantities obtained by a ranking test.
371
+ Tests the hypothesis that the ranking of the control method is different to each of the other methods.
372
+
373
+ Parameters
374
+ ----------
375
+ ranks : dictionary_like
376
+ A dictionary with format 'groupname':'pivotal quantity'
377
+ control : string optional
378
+ The name of the control method, default the group with minimum ranking
379
+
380
+ Returns
381
+ ----------
382
+ Comparisons : array-like
383
+ String identifiers of each comparison, with format 'group_i vs group_j'
384
+ Z-values : array-like
385
+ The computed Z-value statistic for each comparison.
386
+ p-values : array-like
387
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
388
+ Adjusted p-values : array-like
389
+ The associated adjusted p-values, which can be compared with a significance level
390
+
391
+ References
392
+ ----------
393
+ J. Li, A two-step rejection procedure for testing multiple hypotheses, Journal of Statistical Planning and Inference 138 (2008) 1521–1527.
394
+ """
395
+ k = len(ranks)
396
+ values = list(ranks.values())
397
+ keys = list(ranks.keys())
398
+ if not control :
399
+ control_i = values.index(min(values))
400
+ else:
401
+ control_i = keys.index(control)
402
+
403
+ comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
404
+ z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
405
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
406
+ # Sort values by p_value so that p_0 < p_1
407
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
408
+ adj_p_values = [p_values[i]/(p_values[i]+1-p_values[-1]) for i in range(k-1)]
409
+
410
+ return comparisons, z_values, p_values, adj_p_values
411
+
412
+ def finner_test(ranks, control=None):
413
+ """
414
+ Performs a Finner post-hoc test using the pivot quantities obtained by a ranking test.
415
+ Tests the hypothesis that the ranking of the control method is different to each of the other methods.
416
+
417
+ Parameters
418
+ ----------
419
+ ranks : dictionary_like
420
+ A dictionary with format 'groupname':'pivotal quantity'
421
+ control : string optional
422
+ The name of the control method, default the group with minimum ranking
423
+
424
+ Returns
425
+ ----------
426
+ Comparisons : array-like
427
+ String identifiers of each comparison, with format 'group_i vs group_j'
428
+ Z-values : array-like
429
+ The computed Z-value statistic for each comparison.
430
+ p-values : array-like
431
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
432
+ Adjusted p-values : array-like
433
+ The associated adjusted p-values, which can be compared with a significance level
434
+
435
+ References
436
+ ----------
437
+ H. Finner, On a monotonicity problem in step-down multiple test procedures, Journal of the American Statistical Association 88 (1993) 920–923.
438
+ """
439
+ k = len(ranks)
440
+ values = list(ranks.values())
441
+ keys = list(ranks.keys())
442
+ if not control :
443
+ control_i = values.index(min(values))
444
+ else:
445
+ control_i = keys.index(control)
446
+
447
+ comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
448
+ z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
449
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
450
+ # Sort values by p_value so that p_0 < p_1
451
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
452
+ adj_p_values = [min(max(1-(1-p_values[j])**((k-1)/float(j+1)) for j in range(i+1)), 1) for i in range(k-1)]
453
+
454
+ return comparisons, z_values, p_values, adj_p_values
455
+
456
+
457
+ def nemenyi_multitest(ranks):
458
+ """
459
+ Performs a Nemenyi post-hoc test using the pivot quantities obtained by a ranking test.
460
+ Tests the hypothesis that the rankings of each pair of groups are different.
461
+
462
+ Parameters
463
+ ----------
464
+ ranks : dictionary_like
465
+ A dictionary with format 'groupname':'pivotal quantity'
466
+
467
+ Returns
468
+ ----------
469
+ Comparisons : array-like
470
+ String identifiers of each comparison, with format 'group_i vs group_j'
471
+ Z-values : array-like
472
+ The computed Z-value statistic for each comparison.
473
+ p-values : array-like
474
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
475
+ Adjusted p-values : array-like
476
+ The associated adjusted p-values, which can be compared with a significance level
477
+
478
+ References
479
+ ----------
480
+ Bonferroni-Dunn: O.J. Dunn, Multiple comparisons among means, Journal of the American Statistical Association 56 (1961) 52–64.
481
+ """
482
+ k = len(ranks)
483
+ values = list(ranks.values())
484
+ keys = list(ranks.keys())
485
+ versus = list(it.combinations(range(k), 2))
486
+
487
+ comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
488
+ z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
489
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
490
+ # Sort values by p_value so that p_0 < p_1
491
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
492
+ m = int(k*(k-1)/2.)
493
+ adj_p_values = [min(m*p_value,1) for p_value in p_values]
494
+
495
+ return comparisons, z_values, p_values, adj_p_values
496
+
497
+
498
+ def holm_multitest(ranks):
499
+ """
500
+ Performs a Holm post-hoc test using the pivot quantities obtained by a ranking test.
501
+ Tests the hypothesis that the rankings of each pair of groups are different.
502
+
503
+ Parameters
504
+ ----------
505
+ ranks : dictionary_like
506
+ A dictionary with format 'groupname':'pivotal quantity'
507
+
508
+ Returns
509
+ ----------
510
+ Comparisons : array-like
511
+ String identifiers of each comparison, with format 'group_i vs group_j'
512
+ Z-values : array-like
513
+ The computed Z-value statistic for each comparison.
514
+ p-values : array-like
515
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
516
+ Adjusted p-values : array-like
517
+ The associated adjusted p-values, which can be compared with a significance level
518
+
519
+ References
520
+ ----------
521
+ S. Holm, A simple sequentially rejective multiple test procedure, Scandinavian Journal of Statistics 6 (1979) 65–70.
522
+ """
523
+ k = len(ranks)
524
+ values = list(ranks.values())
525
+ keys = list(ranks.keys())
526
+ versus = list(it.combinations(range(k), 2))
527
+
528
+ comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
529
+ z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
530
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
531
+ # Sort values by p_value so that p_0 < p_1
532
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
533
+ m = int(k*(k-1)/2.)
534
+ adj_p_values = [min(max((m-j)*p_values[j] for j in range(i+1)), 1) for i in range(m)]
535
+
536
+ return comparisons, z_values, p_values, adj_p_values
537
+
538
+
539
+ def hochberg_multitest(ranks):
540
+ """
541
+ Performs a Hochberg post-hoc test using the pivot quantities obtained by a ranking test.
542
+ Tests the hypothesis that the rankings of each pair of groups are different.
543
+
544
+ Parameters
545
+ ----------
546
+ ranks : dictionary_like
547
+ A dictionary with format 'groupname':'pivotal quantity'
548
+
549
+ Returns
550
+ ----------
551
+ Comparisons : array-like
552
+ String identifiers of each comparison, with format 'group_i vs group_j'
553
+ Z-values : array-like
554
+ The computed Z-value statistic for each comparison.
555
+ p-values : array-like
556
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
557
+ Adjusted p-values : array-like
558
+ The associated adjusted p-values, which can be compared with a significance level
559
+
560
+ References
561
+ ----------
562
+ Y. Hochberg, A sharper Bonferroni procedure for multiple tests of significance, Biometrika 75 (1988) 800–803.
563
+ """
564
+ k = len(ranks)
565
+ values = list(ranks.values())
566
+ keys = list(ranks.keys())
567
+ versus = list(it.combinations(range(k), 2))
568
+
569
+ comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
570
+ z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
571
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
572
+ # Sort values by p_value so that p_0 < p_1
573
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
574
+ m = int(k*(k-1)/2.)
575
+ adj_p_values = [max((m+1-j)*p_values[j-1] for j in range(m, i, -1))for i in range(m)]
576
+
577
+ return comparisons, z_values, p_values, adj_p_values
578
+
579
+
580
+ def finner_multitest(ranks):
581
+ """
582
+ Performs a Finner post-hoc test using the pivot quantities obtained by a ranking test.
583
+ Tests the hypothesis that the rankings of each pair of groups are different.
584
+
585
+ Parameters
586
+ ----------
587
+ ranks : dictionary_like
588
+ A dictionary with format 'groupname':'pivotal quantity'
589
+
590
+ Returns
591
+ ----------
592
+ Comparisons : array-like
593
+ String identifiers of each comparison, with format 'group_i vs group_j'
594
+ Z-values : array-like
595
+ The computed Z-value statistic for each comparison.
596
+ p-values : array-like
597
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
598
+ Adjusted p-values : array-like
599
+ The associated adjusted p-values, which can be compared with a significance level
600
+
601
+ References
602
+ ----------
603
+ H. Finner, On a monotonicity problem in step-down multiple test procedures, Journal of the American Statistical Association 88 (1993) 920–923.
604
+ """
605
+ k = len(ranks)
606
+ values = list(ranks.values())
607
+ keys = list(ranks.keys())
608
+ versus = list(it.combinations(range(k), 2))
609
+
610
+ comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
611
+ z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
612
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
613
+ # Sort values by p_value so that p_0 < p_1
614
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
615
+ m = int(k*(k-1)/2.)
616
+ adj_p_values = [min(max(1-(1-p_values[j])**(m/float(j+1)) for j in range(i+1)), 1) for i in range(m)]
617
+
618
+ return comparisons, z_values, p_values, adj_p_values
619
+
620
+
621
+ def _S(k):
622
+ """
623
+ Helper function for the Shaffer test.
624
+ It computes the set of attainable numbers of true hypotheses in an all-vs-all comparison of k groups (Shaffer's S(k)).
625
+ """
626
+ if k == 0 or k == 1:
627
+ return {0}
628
+ else:
629
+ result = set()
630
+ for j in reversed(range(1, k+1)):
631
+ tmp = _S(k - j) # recursive call to this helper
632
+ for s in tmp:
633
+ result = result.union({sp.special.binom(j, 2) + s})
634
+ return list(result)
635
+
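A sanity check for the helper; the expected set follows from the partitions of k = 4 groups into equal-valued subsets:

    assert sorted(_S(4)) == [0, 1, 2, 3, 6]   # attainable numbers of true hypotheses among m = 6 pairs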
636
+
637
+ def shaffer_multitest(ranks):
638
+ """
639
+ Performs a Shaffer post-hoc test using the pivot quantities obtained by a ranking test.
640
+ Tests the hypothesis that the rankings of each pair of groups are different.
641
+
642
+ Parameters
643
+ ----------
644
+ ranks : dictionary_like
645
+ A dictionary with format 'groupname':'pivotal quantity'
646
+
647
+ Returns
648
+ ----------
649
+ Comparisons : array-like
650
+ String identifiers of each comparison, with format 'group_i vs group_j'
651
+ Z-values : array-like
652
+ The computed Z-value statistic for each comparison.
653
+ p-values : array-like
654
+ The associated p-value from the Z-distribution, which depends on the index of the comparison
655
+ Adjusted p-values : array-like
656
+ The associated adjusted p-values, which can be compared with a significance level
657
+
658
+ References
659
+ ----------
660
+ J.P. Shaffer, Modified sequentially rejective multiple test procedures, Journal of the American Statistical Association 81 (1986) 826–831.
661
+ """
662
+ k = len(ranks)
663
+ values = list(ranks.values())
664
+ keys = list(ranks.keys())
665
+ versus = list(it.combinations(range(k), 2))
666
+
667
+ m = int(k*(k-1)/2.)
668
+ A = _S(int((1 + sp.sqrt(1+4*m*2))/2))
669
+ t = [max([a for a in A if a <= m-i]) for i in range(m)]
670
+
671
+ comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
672
+ z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
673
+ p_values = [2*(1-st.norm.cdf(abs(z))) for z in z_values]
674
+ # Sort values by p_value so that p_0 < p_1
675
+ p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
676
+ adj_p_values = [min(max(t[j]*p_values[j] for j in range(i+1)), 1) for i in range(m)]
677
+
678
+ return comparisons, z_values, p_values, adj_p_values
pbv.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy import signal
3
+ from .base import VHRMethod
4
+
5
+ class PBV(VHRMethod):
6
+ methodName = 'PBV'
7
+
8
+ def __init__(self, **kwargs):
9
+ super(PBV, self).__init__(**kwargs)
10
+
11
+ def apply(self, X):
12
+
13
+ r_mean = X[0,:]/np.mean(X[0,:])
14
+ g_mean = X[1,:]/np.mean(X[1,:])
15
+ b_mean = X[2,:]/np.mean(X[2,:])
16
+
17
+ pbv_n = np.array([np.std(r_mean), np.std(g_mean), np.std(b_mean)])
18
+ pbv_d = np.sqrt(np.var(r_mean) + np.var(g_mean) + np.var(b_mean))
19
+ pbv = pbv_n / pbv_d
20
+
21
+ C = np.array([r_mean, g_mean, b_mean])
22
+ Q = np.matmul(C ,np.transpose(C))
23
+ W = np.linalg.solve(Q,pbv)
24
+
25
+ bvp = np.matmul(C.T,W)/(np.matmul(pbv.T,W))
26
+
27
+ return bvp
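A self-contained sketch of the same PBV projection on a synthetic RGB trace; X here is a fabricated 3xN array of per-frame mean RGB values, mirroring what apply() receives:

    import numpy as np

    rng = np.random.default_rng(1)
    t = np.linspace(0, 10, 300)
    pulse = 0.01 * np.sin(2 * np.pi * 1.2 * t)            # ~72 bpm component
    X = np.vstack([1 + 0.3 * pulse, 1 + 1.0 * pulse, 1 + 0.5 * pulse])
    X += 0.001 * rng.standard_normal(X.shape)             # sensor noise

    C = X / X.mean(axis=1, keepdims=True)                 # normalize each channel
    pbv = C.std(axis=1) / np.sqrt(C.var(axis=1).sum())    # blood-volume signature
    W = np.linalg.solve(C @ C.T, pbv)                     # per-channel weights
    bvp = (C.T @ W) / (pbv @ W)                           # extracted pulse signal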
pca.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sklearn import decomposition
2
+ from numpy import vstack
3
+ from .base import VHRMethod
4
+
5
+ class PCA(VHRMethod):
6
+ methodName = 'PCA'
7
+
8
+ def __init__(self, **kwargs):
9
+ super(PCA, self).__init__(**kwargs)
10
+
11
+ def apply(self, X):
12
+
13
+ # TODO: preproc
14
+ #X = self.__preprocess(X.T)
15
+
16
+ bvp = decomposition.PCA(n_components=3).fit_transform(X.T).T
17
+
18
+ return bvp
19
+
20
+
21
+ def __preprocess(self, X):
22
+ # NOTE: unused for now (see the TODO in apply); BVPsignal and BPfilter
+ # must be imported into this module before this helper can run.
23
+ R = X[:,0].copy()
24
+ G = X[:,1].copy()
25
+ B = X[:,2].copy()
26
+
27
+ # -- BP pre-filtering of RGB channels
28
+ minHz = BVPsignal.minHz
29
+ maxHz = BVPsignal.maxHz
30
+ fs = self.video.frameRate
31
+
32
+ # -- filter
33
+ filteredR = BPfilter(R, minHz, maxHz, fs)
34
+ filteredG = BPfilter(G, minHz, maxHz, fs)
35
+ filteredB = BPfilter(B, minHz, maxHz, fs)
36
+
37
+ X = vstack([filteredR, filteredG, filteredB])
38
+
39
+ return X
pos.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy import signal
3
+ from .base import VHRMethod
4
+
5
+ class POS(VHRMethod):
6
+ """
7
+ POS algorithm described in "Algorithmic Principles of Remote PPG"
8
+ (https://ieeexplore.ieee.org/document/7565547 )
9
+ Numbers in brackets refer to the line numbers in the "Algorithm 1" of the paper
10
+ """
11
+
12
+ methodName = 'POS'
13
+ projection = np.array([[0, 1, -1], [-2, 1, 1]])
14
+
15
+ def __init__(self, **kwargs):
16
+ super(POS, self).__init__(**kwargs)
17
+
18
+ def apply(self, X):
19
+ # Run the pos algorithm on the RGB color signal c with sliding window length wlen
20
+ # Recommended value for wlen is 32 for a 20 fps camera (1.6 s)
21
+
22
+ wlen = int(1.6*self.video.frameRate)
23
+
24
+ # Initialize (1)
25
+ h = np.zeros(X.shape[1])
26
+ for n in range(X.shape[1]):
27
+ # Start index of sliding window (4)
28
+ m = n - wlen + 1
29
+ if m >= 0:
30
+ # Temporal normalization (5)
31
+ cn = X[:, m:(n+1)]
32
+ cn = np.dot(self.__get_normalization_matrix(cn), cn)
33
+ # Projection (6)
34
+ s = np.dot(self.projection, cn)
35
+ # Tuning (7)
36
+ hn = np.add(s[0, :], np.std(s[0, :])/np.std(s[1, :])*s[1, :])
37
+ # Overlap-adding (8)
38
+ h[m:(n+1)] = np.add(h[m:(n+1)], hn - np.mean(hn))
39
+ return h
40
+
41
+
42
+ def __get_normalization_matrix(self, x):
43
+ # Compute a diagonal matrix n such that the mean of n*x is a vector of ones
44
+ d = 0 if (len(x.shape) < 2) else 1
45
+ m = np.mean(x, d)
46
+ n = np.array([[1/m[i] if i == j and m[i] else 0 for i in range(len(m))] for j in range(len(m))])
47
+ return n
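For reference, the same sliding-window loop as a standalone function; rgb is assumed to be a 3xN array of mean RGB values and fps the frame rate (a sketch, not the class API):

    import numpy as np

    def pos_bvp(rgb, fps):
        P = np.array([[0, 1, -1], [-2, 1, 1]])
        w = int(1.6 * fps)                                # ~1.6 s window
        h = np.zeros(rgb.shape[1])
        for n in range(w - 1, rgb.shape[1]):
            m = n - w + 1
            cn = rgb[:, m:n + 1]
            cn = cn / cn.mean(axis=1, keepdims=True)      # temporal normalization (5)
            s = P @ cn                                    # projection (6)
            hn = s[0] + (s[0].std() / s[1].std()) * s[1]  # tuning (7)
            h[m:n + 1] += hn - hn.mean()                  # overlap-adding (8)
        return h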
printutils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import plotly.graph_objects as go
2
+ import numpy as np
3
+
4
+ # Print iterations progress
5
+ def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
6
+ """
7
+ Call in a loop to create terminal progress bar
8
+ @params:
9
+ iteration - Required : current iteration (Int)
10
+ total - Required : total iterations (Int)
11
+ prefix - Optional : prefix string (Str)
12
+ suffix - Optional : suffix string (Str)
13
+ decimals - Optional : positive number of decimals in percent complete (Int)
14
+ length - Optional : character length of bar (Int)
15
+ fill - Optional : bar fill character (Str)
16
+ printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
17
+ """
18
+ percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
19
+ filledLength = int(length * iteration // total)
20
+ bar = fill * filledLength + '-' * (length - filledLength)
21
+ print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = printEnd)
22
+ # Print New Line on Complete
23
+ if iteration == total:
24
+ print()
25
+
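Typical call pattern for the progress bar (the sleep stands in for real work):

    import time
    total = 50
    printProgressBar(0, total, prefix='Progress:', suffix='Complete', length=40)
    for i in range(total):
        time.sleep(0.01)
        printProgressBar(i + 1, total, prefix='Progress:', suffix='Complete', length=40)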
26
+
27
+ def multiplot(x=None, y=None, name=None, zeroMean=True, title="Signal", height=400, width=800):
28
+
29
+ fig = go.Figure()
30
+
31
+ if not np.any(y):
32
+ return
33
+ else:
34
+ if y.ndim == 1:
35
+ c = 1
36
+ n = y.shape[0]
37
+ z = y - y.mean() if zeroMean else y # fall back to the raw signal when zeroMean is False
39
+ if not np.any(x):
40
+ x = np.linspace(0,n-1,n)
41
+ fig.add_trace(go.Scatter(x=x,y=z,name=name))
42
+ else:
43
+ c,n = y.shape
44
+ if not np.any(x):
45
+ x = np.linspace(0,n-1,n)
46
+ for i in range(c):
47
+ z = y[i]
48
+ if name:
49
+ s = name[i]
50
+ else:
51
+ s = "sig" + str(i)
52
+ if zeroMean:
53
+ z = y[i]-y[i].mean()
54
+
55
+ fig.add_trace(go.Scatter(x=x,y=z,name=s))
56
+
57
+ fig.update_layout(height=height, width=width, title=title,
58
+ font=dict(
59
+ family="Courier New, monospace",
60
+ size=14,
61
+ color="#7f7f7f")
62
+ )
63
+
64
+ fig.show()
65
+
66
+ return fig
pure.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import numpy as np
3
+ import os
4
+ from pyVHR.datasets.dataset import Dataset
5
+ from pyVHR.signals.bvp import BVPsignal
6
+
7
+ class PURE(Dataset):
8
+ """
9
+ PURE dataset structure:
10
+ -----------------
11
+ datasetDIR/
12
+ |
13
+ |-- 01-01/
14
+ |---- Image...1.png
15
+ |---- Image.....png
16
+ |---- Image...n.png
17
+ |-- 01-01.json
18
+ |...
19
+ |...
20
+ |-- nn-nn/
21
+ |---- Image...1.png
22
+ |---- Image.....png
23
+ |---- Image...n.png
24
+ |-- nn-nn.json
25
+ |...
26
+ """
27
+ name = 'PURE'
28
+ signalGT = 'BVP' # GT signal type
29
+ numLevels = 1 # depth of the filesystem collecting video and BVP files
30
+ numSubjects = 10 # number of subjects
31
+ video_EXT = 'png' # extension of the video files
32
+ frameRate = 30 # video frame rate
33
+ VIDEO_SUBSTRING = '-' # substring contained in the filename
34
+ SIG_EXT = 'json' # extension of the BVP files
35
+ SIG_SUBSTRING = '-' # substring contained in the filename
36
+ SIG_SampleRate = 60 # sample rate of the BVP files
37
+ skinThresh = [40,60] # thresholds for skin detection
38
+
39
+ def readSigfile(self, filename):
40
+ """ Load BVP signal.
41
+ Must return a 1-dim (row array) signal
42
+ """
43
+ bvp = []
44
+ with open(filename) as json_file:
45
+ json_data = json.load(json_file)
46
+ for p in json_data['/FullPackage']:
47
+ bvp.append(p['Value']['waveform'])
48
+
49
+ data = np.array(bvp)
50
+
51
+ return BVPsignal(data, self.SIG_SampleRate)
52
+
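For reference, a minimal sketch of the JSON layout readSigfile expects; only the keys actually read above are shown, and real PURE files carry additional fields:

    {
      "/FullPackage": [
        {"Value": {"waveform": 512}},
        {"Value": {"waveform": 518}}
      ]
    }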
53
+
54
+ def loadFilenames(self):
55
+ """
56
+ Load dataset file names and directories of frames:
57
+ define vars videoFilenames and BVPFilenames
58
+ """
59
+
60
+ # -- loop on the dir struct of the dataset getting directories and filenames
61
+ for root, dirs, files in os.walk(self.videodataDIR):
62
+
63
+ for f in files:
64
+ filename = os.path.join(root, f)
65
+ path, name = os.path.split(filename)
66
+
67
+ # -- select signal
68
+ if name.endswith(self.SIG_EXT) and (name.find(self.SIG_SUBSTRING)>=0):
69
+ self.sigFilenames.append(filename)
70
+ self.videoFilenames.append(filename[:-5] + '/file.' + self.video_EXT)
71
+
72
+ # -- number of videos
73
+ self.numVideos = len(self.videoFilenames)
74
+
pyramid.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ #import scipy.fftpack as fftpack
4
+ import scipy
5
+
6
+
7
+ def gaussian_video(video, pyramid_levels):
8
+ """Create a gaussian representation of a video"""
9
+ vid_data = None
10
+ for x in range(0, video.shape[0]):
11
+ frame = video[x]
12
+ gauss_copy = np.ndarray(shape=frame.shape, dtype="float")
13
+ gauss_copy[:] = frame
14
+ for i in range(pyramid_levels):
15
+ gauss_copy = gausPyrDown(gauss_copy)
16
+ if x == 0:
17
+ vid_data = np.zeros((video.shape[0], gauss_copy.shape[0], gauss_copy.shape[1], 3))
18
+ vid_data[x] = gauss_copy
19
+ return vid_data
20
+
21
+ def gausPyrDown(frame,sz=5,sigma=1):
22
+ height, width, channel = frame.shape
23
+ convFrame = np.zeros(shape=(height, width,channel), dtype="float")
24
+ kernel = gausKernel(sz=sz,sigma=sigma)
25
+ for channel_i in range(channel):
26
+ convFrame[:,:,channel_i] = scipy.ndimage.convolve(frame[:,:,channel_i],kernel)
27
+ downFrame = convFrame[::2,::2,:]
28
+ return downFrame
29
+
30
+ def gausPyrUp(frame,sz=5,sigma=1):
31
+ height, width, channel = frame.shape
32
+ upFrame = np.zeros(shape=(2*height, 2*width,channel), dtype="float")
33
+ kernel = gausKernel(sz=sz,sigma=sigma)
34
+ for channel_i in range(channel):
35
+ upFrame[::2,::2,channel_i] = frame[:,:,channel_i]
36
+ upFrame[:,:,channel_i] = scipy.ndimage.convolve(upFrame[:,:,channel_i],kernel*4)
37
+ return upFrame
38
+
39
+ # Define the Gaussian kernel
40
+ def gausKernel(sz = 5,sigma=1):
41
+ kernel = np.zeros((sz,sz))
42
+ ### STUDENT: Implement the Gaussian kernel
43
+ sz_1 = int(sz/2)
44
+ for x in (np.arange(sz)-sz_1):
45
+ for y in (np.arange(sz)-sz_1):
46
+ ix,iy = x+sz_1,y+sz_1
47
+ kernel[ix,iy] = np.exp(-(x**2+y**2)/(2.0*sigma**2))
48
+ kernel = kernel / np.sum(kernel)
49
+ ### STUDENT END
50
+ return kernel
51
+
52
+ # We will use your previously implemented temporal bandpass filter, so make sure it works!
53
+ def temporal_bandpass_filter(data, fps, freq_min=0.833, freq_max=1, axis=0):
54
+ # Inputs:
55
+ # data: video data of shape #frames x height x width x #channel (3,RGB)
56
+ # fps: frames per second (30)
57
+ # freq_min, freq_max: cut-off frequencies for bandpass filter
58
+ # axis: dimension along which to apply FFT (default:0,
59
+ # time domain <->for a single pixel along all frames)
60
+ # Output:
61
+ # Band-passed video data, with only frequency components (absolute value)
62
+ # between freq_min and freq_max preserved
63
+ # of shape #frames x height x width x #channel (3,RGB)
64
+ data_process = np.zeros(data.shape)
65
+ sample_interval = 1.0/fps
66
+ for x in range(data.shape[1]):
67
+ for y in range(data.shape[2]):
68
+ for z in range(data.shape[3]):
69
+ # the bandpass_filter is YOUR implementation!
70
+ data_process[:,x,y,z] = bandpass_filter(data[:,x,y,z], sample_interval, freq_min, freq_max)
71
+ return data_process
72
+
73
+
74
+
75
+ # Implement the temporal bandpass filter
76
+ def bandpass_filter(x, sample_interval, freq_min, freq_max):
77
+ # Inputs:
78
+ # x: temporal signal of shape (N,)
79
+ # sample_interval: the temporal sampling interval.
80
+ # freq_min, freq_max: cut-off frequencies for bandpass filter
81
+ # Output:
82
+ # Band-passed signal, with only frequency components (absolute value)
83
+ # between freq_min and freq_max preserved
84
+
85
+ ### STUDENT: Implement the bandpass filter.
86
+ ### Feel free to use numpy.fft.fft, numpy.fft.fftfreq, numpy.fft.ifft
87
+ X = np.fft.fft(x)
88
+ frequencies = np.fft.fftfreq(len(x), d=sample_interval)
89
+ bound_low = (np.abs(frequencies - freq_min)).argmin()
90
+ bound_high = (np.abs(frequencies - freq_max)).argmin()
91
+ X[:bound_low] = 0
92
+ X[bound_high:-bound_high] = 0
93
+ X[-bound_low:] = 0
94
+ band_pass_signal = np.abs(np.fft.ifft(X, axis=0))
95
+ ### STUDENT END
96
+ return band_pass_signal
97
+
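A usage sketch for the filter above; note it returns the magnitude of the inverse FFT, so the output is non-negative:

    import numpy as np
    fs = 30.0
    t = np.arange(0, 10, 1 / fs)
    x = np.sin(2 * np.pi * 1.0 * t) + 0.5 * np.sin(2 * np.pi * 5.0 * t)
    y = bandpass_filter(x, 1 / fs, freq_min=0.833, freq_max=2.0)
    # the ~1 Hz component survives; the 5 Hz component is suppressed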
98
+
99
+ # Utility function: used to convert a numpy array to conform with video format
100
+ def convertScaleAbs(frame):
101
+ outFrame = np.ndarray(shape=frame.shape, dtype="uint8")
102
+ for channel_i in range(3):
103
+ outFrame[:,:,channel_i] = np.clip(np.abs(frame[:,:,channel_i]),0,255).astype(np.uint8).copy()
104
+ return outFrame
105
+
106
+
107
+ def combine_pyramid_and_save(g_video, orig_video, enlarge_multiple, fps):
108
+ """Combine a gaussian video representation with the original"""
109
+
110
+ width, height = orig_video.shape[2], orig_video.shape[1]
111
+ mag_data = np.zeros(orig_video.shape, dtype='uint8')
112
+ for x in range(0, g_video.shape[0]):
113
+ img = np.ndarray(shape=g_video[x].shape, dtype='float')
114
+ img[:] = g_video[x]
115
+ for i in range(enlarge_multiple):
116
+ img = gausPyrUp(img)
117
+ img[:height, :width] = img[:height, :width] + orig_video[x]
118
+ res = convertScaleAbs(img[:height, :width])
119
+ mag_data[x] = res
120
+ return mag_data
121
+
122
+
sample.cfg ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # sample.cfg - sample test configuration file for the TestSuite class
2
+
3
+ ## Default parameters
4
+ #
5
+ # winsize = Duration of the time window to process the video (in seconds)
6
+ # winsizeGT = Duration of the time window to process the ground truth signal (in seconds)
7
+ # timeStep = Time step of the estimation (in seconds)
8
+ # methods = A list of methods to test (['CHROM','Green','ICA','LGI','PBV','PCA','POS','SSR'])
9
+ #
10
+ ## Video signal Preprocessing
11
+ #
12
+ # zeroMeanSTDnorm = Apply Zero Mean and Unit Standard Deviation (0/1)
13
+ # detrending = Apply detrenting algorithm (0/1)
14
+ # detrMethod = Detrenting algorithm (tarvainen/scipy)
15
+ # detLambda = If detrending = 1, regularization parameter of detrending algorithm
16
+ # BPfilter = Apply band pass filtering (0/1)
17
+ # minHz = If BPfilter = 1, the lower cut-off frequency (in hertz)
18
+ # maxHz = If BPfilter = 1, the upper cut-off frequency (in hertz)
19
+
20
+ [DEFAULT]
21
+ winSize = 5
22
+ winSizeGT = 5
23
+ timeStep = 1
24
+ methods = ['POS','CHROM']
25
+ zeroMeanSTDnorm = 0
26
+ detrending = 0
27
+ detLambda = 10
28
+ BPfilter = 1
29
+ minHz = 0.75
30
+ maxHz = 4.0
31
+
32
+ ## Video signal
33
+ #
34
+ # dataset = Name of the dataset to test ('PURE', 'UBFC1', 'UBFC2', 'LGI-PPGI', 'COHFACE', 'MAHNOB')
35
+ # videoIdx = A list of IDs referring to the videos to test (e.g. [0,1,2,...])
36
+ # or the string 'all' to test on the whole database
37
+ # detector = Method used for face detection (mtcnn, dlib, mtcnn_kalman)
38
+ # extractor = Preferred library to read video files (opencv/skvideo)
39
+ # startTime = Process video file from start time (in seconds)
40
+ # endTime = Process video file until end time (in seconds). If < 0: process until (video length - endTime)
41
+
42
+ [VIDEO]
43
+ dataset = lgi_ppgi
44
+ videodataDIR= ../sampleDataset/
45
+ BVPdataDIR = ../sampleDataset/
46
+ videoIdx = [0]
47
+ detector = mtcnn
48
+ extractor = skvideo
49
+ startTime = 3
50
+ endTime = -3
51
+ ROImask = skin_fix
52
+ skinFix = [40, 60]
53
+ skinAdapt = 0.2
54
+ rectCoords= [[0, 0, 150, 150]]
55
+ evm = 0
56
+ stat = mean
57
+
58
+ ## Method specific configurations
59
+
60
+ [CHROM]
61
+ zeroMeanSTDnorm = 0
62
+ detrending = 1
63
+ detrMethod = scipy
64
+ BPfilter = 0
65
+
66
+ [POS]
67
+ zeroMeanSTDnorm = 0
68
+ detrending = 0
69
+ BPfilter = 0
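A hedged sketch of consuming this file with the standard-library configparser (section and option names as defined above; [DEFAULT] values propagate into every section unless overridden):

    import ast
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read('sample.cfg')
    winSize = cfg['DEFAULT'].getint('winSize')              # -> 5
    methods = ast.literal_eval(cfg['DEFAULT']['methods'])   # -> ['POS', 'CHROM']
    chrom_bp = cfg['CHROM'].getboolean('BPfilter')          # -> False (overridden)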
sample.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import xml.etree.ElementTree as ET
2
+ import numpy as np
3
+ from os import path
4
+ from pyVHR.datasets.dataset import Dataset
5
+ from pyVHR.signals.bvp import BVPsignal
6
+
7
+ class SAMPLE(Dataset):
8
+ """
9
+ Sample dataset structure:
10
+ -----------------
11
+ datasetDIR/
12
+ |
13
+ |-- videoSample.avi
14
+ |-- signalGT.xml
15
+ """
16
+ name = 'SAMPLE'
17
+ videodataDIR = '../sampleDataset/' # path relative to notebook dir in pyVHR filesystem on GitHub
18
+ BVPdataDIR = '../sampleDataset/' # path relative to notebook dir in pyVHR filesystem on GitHub
19
+ signalGT = 'BVP' # GT signal type
20
+ numLevels = 1 # depth of the filesystem collecting video and BVP files
21
+ numSubjects = 4 # number of subjects
22
+ video_EXT = 'avi' # extension of the video files
23
+ frameRate = 25 # video frame rate
24
+ VIDEO_SUBSTRING = 'cv_camera' # substring contained in the filename
25
+ SIG_EXT = 'xml' # extension of the BVP files
26
+ SIG_SUBSTRING = 'cms50' # substring contained in the filename
27
+ SIG_SampleRate = 60 # sample rate of the BVP files
28
+
29
+ def readSigfile(self, filename):
30
+ """
31
+ Load BVP signal. Must return a 1-dim (row array) signal
32
+ """
33
+
34
+ tree = ET.parse(filename)
35
+ # get all bvp elements and their values
36
+ bvp_elements = tree.findall('.//*/value2')
37
+ bvp = [int(item.text) for item in bvp_elements]
38
+
39
+ n_bvp_samples = len(bvp)
40
+ last_bvp_time = int((n_bvp_samples*1000)/self.SIG_SampleRate)
41
+
42
+ vid_xml_filename = path.join(path.dirname(filename), 'cv_camera_sensor_timer_stream_handler.xml')
43
+ tree = ET.parse(vid_xml_filename)
44
+
45
+ root = tree.getroot()
46
+ last_vid_time = int(float(root[-1].find('value1').text))
47
+
48
+ diff = ((last_bvp_time - last_vid_time)/1000)
49
+
50
+ assert diff >= 0, 'Unusable data.'
51
+
52
+ print("Skipping %.2f seconds..." % diff)
53
+
54
+ diff_samples = round(diff*self.SIG_SampleRate)
55
+
56
+ data = np.array(bvp[diff_samples:])
57
+
58
+ return BVPsignal(data, self.SIG_SampleRate)
shape_predictor_5_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4b1e9804792707d3a405c2c16a80a20269e6675021f64a41d30fffafbc41888
3
+ size 9150489
shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
3
+ size 99693937
single_dataset_analysis.py ADDED
@@ -0,0 +1,501 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ sys.path.append("..")
3
+ import pandas as pd
4
+ import numpy as np
5
+ import os
6
+ import re
7
+ import matplotlib.pyplot as plt
8
+ import scipy.stats as ss
9
+ import scikit_posthocs as sp
11
+ from nonparametric_tests import friedman_aligned_ranks_test as ft
12
+ import Orange
13
+
14
+ def sort_nicely(l):
15
+ """ Sort the given list in the way that humans expect.
16
+ """
17
+ convert = lambda text: int(text) if text.isdigit() else text
18
+ alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
19
+ l.sort( key=alphanum_key )
20
+
21
+ return l
22
+
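For example, the helper keeps numeric suffixes in human order:

    >>> sort_nicely(['winSize10', 'winSize2', 'winSize1'])
    ['winSize1', 'winSize2', 'winSize10']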
23
+ #Dataset on which to perform the analysis
24
+ #DATASET = 'LGI-PPGI'
25
+ #DATASET = 'PURE'
26
+ DATASET = 'UBFC1'
27
+ #DATASET = 'UBFC2'
28
+ #DATASET = 'Cohface'
29
+ #DATASET = 'Mahnob'
30
+ #DATASET = 'UBFC_ALL'
31
+
32
+ CASE = 'full'
33
+ #CASE = 'split'
34
+
35
+ alpha = '0.05'
36
+
37
+ if DATASET == 'UBFC_ALL':
38
+ exp_path1 = '../../results/' + 'UBFC1' + '/'
39
+ files1 = sort_nicely(os.listdir(exp_path1))
40
+ exp_path2 = '../../results/' + 'UBFC2' + '/'
41
+ files2 = sort_nicely(os.listdir(exp_path2))
42
+ else:
43
+ #Experiment Path
44
+ exp_path = '../../results/' + DATASET + '/'
45
+ files = sort_nicely(os.listdir(exp_path))
46
+
47
+ #All rPPG methods used
48
+ all_methods = ['CHROM','Green','ICA','LGI','PBV','PCA','POS','SSR']
49
+
50
+ #Method(s) for the visualization of the performance vs winSize
51
+ #methods = ['POS', 'CHROM', 'LGI']
52
+
53
+ #Metrics to Visualize
54
+ #metrics = ['CC', 'MAE', 'RMSE']
55
+ metrics = ['MAE']
56
+
57
+
58
+ print(all_methods)
59
+
60
+ #---------------- Produce Box plots for each method on a given dataset -----------
61
+
62
+ win_to_use = 10
63
+
64
+ if DATASET == 'UBFC_ALL':
65
+ f_to_use = [i for i in files1 if 'winSize'+str(win_to_use) in i][0]
66
+ path = exp_path1 + f_to_use
67
+ res1 = pd.read_hdf(path)
68
+ f_to_use = [i for i in files2 if 'winSize'+str(win_to_use) in i][0]
69
+ path = exp_path2 + f_to_use
70
+ res2 = pd.read_hdf(path)
71
+ res = res1.append(res2)
72
+ else:
73
+ f_to_use = [i for i in files if 'winSize'+str(win_to_use) in i][0]
74
+ path = exp_path + f_to_use
75
+ res = pd.read_hdf(path)
76
+
77
+ print('\n\n\t\t' + DATASET + '\n\n')
78
+
79
+ if DATASET == 'UBFC1' or DATASET == 'UBFC2' or DATASET == 'Mahnob' or DATASET == 'UBFC_ALL' or DATASET == 'Cohface':
80
+
81
+ all_vals_CC = []
82
+ all_vals_MAE = []
83
+ all_vals_RMSE = []
84
+
85
+ for metric in metrics:
86
+ for method in all_methods:
87
+ #print(method)
88
+ mean_v = []
89
+ raw_values = res[res['method'] == method][metric]
90
+ print(raw_values)
91
+ values = []
92
+ for v in raw_values:
93
+
94
+
95
+
96
+ if metric == 'CC':
97
+ values.append(v[np.argmax(v)])
98
+ else:
99
+ values.append(v[np.argmin(v)])
100
+
101
+ if metric == 'CC':
102
+ all_vals_CC.append(np.array(values))
103
+ if metric == 'MAE':
104
+ all_vals_MAE.append(np.array(values))
105
+
106
+
107
+
108
+
109
+ data_MAE = np.zeros([len(all_vals_MAE[0]), len(all_vals_MAE)])
110
+ for i,m in enumerate(all_vals_MAE):
111
+ data_MAE[:,i] = m
112
+
113
+ print(data_MAE)
114
+
115
+ '''data_MAE_df = pd.DataFrame(data_MAE, columns=all_methods)
116
+ print('\nFriedman Test MAE:')
117
+ print(ss.friedmanchisquare(*data_MAE.T))
118
+ print(' ')'''
119
+
120
+ '''pc = sp.posthoc_nemenyi_friedman(data_MAE_df)
121
+ cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
122
+ heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
123
+ plt.figure()
124
+ sp.sign_plot(pc, **heatmap_args)
125
+ plt.title('Nemenyi Test MAE')'''
126
+
127
+ n_datasets = data_MAE.shape[0]
128
+
129
+ t,p,ranks_mae,piv_mae = ft(data_MAE[:,0], data_MAE[:,1], data_MAE[:,2], data_MAE[:,3],
130
+ data_MAE[:,4], data_MAE[:,5], data_MAE[:,6], data_MAE[:,7])
131
+ avranksMAE = list(np.divide(ranks_mae, n_datasets))
132
+ print('statistic: ' + str(t))
133
+ print('pvalue: ' + str(p))
134
+ print(' ')
135
+
136
+ data_CC = np.zeros([len(all_vals_CC[0]), len(all_vals_CC)])
137
+ for i,m in enumerate(all_vals_CC):
138
+ data_CC[:,i] = m
139
+
140
+ '''data_CC_df = pd.DataFrame(data_CC, columns=all_methods)
141
+ print('\nFriedman Test MAE:')
142
+ print(ss.friedmanchisquare(*data_CC.T))
143
+ print(' ')
144
+ pc = sp.posthoc_nemenyi_friedman(data_CC_df)
145
+ cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
146
+ heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
147
+ plt.figure()
148
+ sp.sign_plot(pc, **heatmap_args)
149
+ plt.title('Nemenyi Test CC')'''
150
+
151
+ t,p,ranks_cc,piv_cc = ft(data_CC[:,0], data_CC[:,1], data_CC[:,2], data_CC[:,3], data_CC[:,4],
152
+ data_CC[:,5], data_CC[:,6], data_CC[:,7])
153
+ avranksCC = list(np.divide(ranks_cc, n_datasets))
154
+ print('statistic: ' + str(t))
155
+ print('pvalue: ' + str(p))
156
+ print(' ')
157
+
158
+ #plt.figure()
159
+ #plt.subplot(1,2,1)
160
+ #plt.title('CC')
161
+ #plt.boxplot(all_vals_CC, showfliers=False)
162
+ #plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
163
+
164
+ #plt.subplot(1,2,2)
165
+ #plt.title('MAE')
166
+ #plt.boxplot(all_vals_MAE, showfliers=False)
167
+ #plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
168
+
169
+ cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha) #tested on 30 datasets
170
+ Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
171
+ #plt.title('CD Diagram MAE')
172
+
173
+ cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha) #tested on 30 datasets
174
+ Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
175
+ #plt.title('CD Diagram CC')
176
+
177
+ #plt.show()
178
+
179
+ elif DATASET == 'PURE':
180
+
181
+ cases = {'01':'steady', '02':'talking', '03':'slow_trans', '04':'fast_trans', '05':'small_rot', '06':'fast_rot'}
182
+ all_CC = {'01':[], '02':[], '03':[], '04':[], '05':[], '06':[]}
183
+ all_MAE = {'01':[], '02':[], '03':[], '04':[], '05':[], '06':[]}
184
+
185
+ if CASE == 'split':
186
+
187
+ for metric in metrics:
188
+ for method in all_methods:
189
+ #print(method)
190
+ for curr_case in cases.keys():
191
+
192
+ curr_res = res[res['videoName'].str.split('/').str[5].str.split('-').str[1] == curr_case]
193
+ raw_values = curr_res[curr_res['method'] == method][metric]
194
+
195
+ values = []
196
+ for v in raw_values:
197
+ if metric == 'CC':
198
+ values.append(v[np.argmax(v)])
199
+ else:
200
+ values.append(v[np.argmin(v)])
201
+
202
+ if metric == 'CC':
203
+ all_CC[curr_case].append(np.array(values))
204
+ if metric == 'MAE':
205
+ all_MAE[curr_case].append(np.array(values))
206
+
207
+ for curr_case in cases.keys():
208
+ '''plt.figure()
209
+
210
+ plt.subplot(1,2,1)
211
+ plt.title('CC ' + cases[curr_case])
212
+ plt.boxplot(all_CC[curr_case], showfliers=False)
213
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
214
+
215
+ plt.subplot(1,2,2)
216
+ plt.title('MAE ' + cases[curr_case])
217
+ plt.boxplot(all_MAE[curr_case], showfliers=False)
218
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)'''
219
+
220
+ print('\n' + curr_case + '\n')
221
+
222
+ data_MAE = np.zeros([len(all_MAE[curr_case][0]), len(all_MAE[curr_case])])
223
+ for i,m in enumerate(all_MAE[curr_case]):
224
+ data_MAE[:,i] = m
225
+
226
+ n_datasets = data_MAE.shape[0]
227
+
228
+ data_CC = np.zeros([len(all_CC[curr_case][0]), len(all_CC[curr_case])])
229
+ for i,m in enumerate(all_CC[curr_case]):
230
+ data_CC[:,i] = m
231
+
232
+ t,p,ranks_mae,piv_mae = ft(data_MAE[:,0], data_MAE[:,1], data_MAE[:,2], data_MAE[:,3],
233
+ data_MAE[:,4], data_MAE[:,5], data_MAE[:,6], data_MAE[:,7])
234
+ avranksMAE = list(np.divide(ranks_mae, n_datasets))
235
+ print('statistic: ' + str(t))
236
+ print('pvalue: ' + str(p))
237
+ print(' ')
238
+
239
+ t,p,ranks_cc,piv_cc = ft(data_CC[:,0], data_CC[:,1], data_CC[:,2], data_CC[:,3],
240
+ data_CC[:,4], data_CC[:,5], data_CC[:,6], data_CC[:,7])
241
+ avranksCC = list(np.divide(ranks_cc, n_datasets))
242
+ print('statistic: ' + str(t))
243
+ print('pvalue: ' + str(p))
244
+ print(' ')
245
+
246
+ cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha) #tested on 30 datasets
247
+ Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
248
+ plt.title('CD Diagram MAE')
249
+
250
+ cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha) #tested on 30 datasets
251
+ Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
252
+ plt.title('CD Diagram CC')
253
+
254
+ plt.show()
255
+
256
+ elif CASE == 'full':
257
+
258
+ CC_allcases = []
259
+ MAE_allcases = []
260
+ for metric in metrics:
261
+ for method in all_methods:
262
+ raw_values = res[res['method'] == method][metric]
263
+
264
+ values = []
265
+ for v in raw_values:
266
+ if metric == 'CC':
267
+ values.append(v[np.argmax(v)])
268
+ else:
269
+ values.append(v[np.argmin(v)])
270
+
271
+ if metric == 'CC':
272
+ CC_allcases.append(np.array(values))
273
+ if metric == 'MAE':
274
+ MAE_allcases.append(np.array(values))
275
+
276
+ data_MAE = np.zeros([len(MAE_allcases[0]), len(MAE_allcases)])
277
+ for i,m in enumerate(MAE_allcases):
278
+ data_MAE[:,i] = m
279
+
280
+ n_datasets = data_MAE.shape[0]
281
+
282
+ '''data_MAE_df = pd.DataFrame(data_MAE, columns=all_methods)
283
+ print('\nFriedman Test MAE:')
284
+ print(ss.friedmanchisquare(*data_MAE.T))
285
+ print(' ')
286
+ pc = sp.posthoc_nemenyi_friedman(data_MAE_df)
287
+ cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
288
+ heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
289
+ plt.figure()
290
+ sp.sign_plot(pc, **heatmap_args)
291
+ plt.title('Nemenyi Test MAE')'''
292
+
293
+ t,p,ranks_mae,piv_mae = ft(data_MAE[:,0], data_MAE[:,1], data_MAE[:,2], data_MAE[:,3], data_MAE[:,4], data_MAE[:,5], data_MAE[:,6], data_MAE[:,7])
294
+ avranksMAE = list(np.divide(ranks_mae, n_datasets))
295
+ print('statistic: ' + str(t))
296
+ print('pvalue: ' + str(p))
297
+ print(' ')
298
+
299
+ data_CC = np.zeros([len(CC_allcases[0]), len(CC_allcases)])
300
+ for i,m in enumerate(CC_allcases):
301
+ data_CC[:,i] = m
302
+
303
+ '''data_CC_df = pd.DataFrame(data_CC, columns=all_methods)
304
+ print('\nFriedman Test MAE:')
305
+ print(ss.friedmanchisquare(*data_CC.T))
306
+ print(' ')
307
+ pc = sp.posthoc_nemenyi_friedman(data_CC_df)
308
+ cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
309
+ heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
310
+ plt.figure()
311
+ sp.sign_plot(pc, **heatmap_args)
312
+ plt.title('Nemenyi Test CC')'''
313
+
314
+ t,p,ranks_cc,piv_cc = ft(data_CC[:,0], data_CC[:,1], data_CC[:,2], data_CC[:,3], data_CC[:,4], data_CC[:,5], data_CC[:,6], data_CC[:,7])
315
+ avranksCC = list(np.divide(ranks_cc, n_datasets))
316
+ print('statistic: ' + str(t))
317
+ print('pvalue: ' + str(p))
318
+ print(' ')
319
+
320
+ '''plt.figure()
321
+ plt.subplot(1,2,1)
322
+ plt.title('CC')
323
+ plt.boxplot(CC_allcases, showfliers=False)
324
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
325
+
326
+ plt.subplot(1,2,2)
327
+ plt.title('MAE')
328
+ plt.boxplot(MAE_allcases, showfliers=False)
329
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)'''
330
+
331
+ cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha) #tested on 30 datasets
332
+ Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
333
+ plt.title('CD Diagram MAE')
334
+
335
+ cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha) #tested on 30 datasets
336
+ Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
337
+ plt.title('CD Diagram CC')
338
+
339
+ plt.show()
340
+
341
+
342
+ elif DATASET == 'LGI-PPGI':
343
+
344
+ cases = ['gym', 'resting', 'rotation', 'talk']
345
+ all_CC = {'gym':[], 'resting':[], 'rotation':[], 'talk':[]}
346
+ all_MAE = {'gym':[], 'resting':[], 'rotation':[], 'talk':[]}
347
+
348
+ if CASE == 'split':
349
+
350
+ for metric in metrics:
351
+ for method in all_methods:
352
+ #print(method)
353
+ for curr_case in cases:
354
+
355
+ curr_res = res[res['videoName'].str.split('/').str[6].str.split('_').str[1] == curr_case]
356
+ raw_values = curr_res[curr_res['method'] == method][metric]
357
+
358
+ values = []
359
+ for v in raw_values:
360
+ if metric == 'CC':
361
+ values.append(v[np.argmax(v)])
362
+ else:
363
+ values.append(v[np.argmin(v)])
364
+
365
+ if metric == 'CC':
366
+ all_CC[curr_case].append(np.array(values))
367
+ if metric == 'MAE':
368
+ all_MAE[curr_case].append(np.array(values))
369
+
370
+ for curr_case in cases:
371
+
372
+ plt.figure()
373
+
374
+ plt.subplot(1,2,1)
375
+ plt.title('CC ' + curr_case)
376
+ plt.boxplot(all_CC[curr_case], showfliers=False)
377
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
378
+
379
+ plt.subplot(1,2,2)
380
+ plt.title('MAE ' + curr_case)
381
+ plt.boxplot(all_MAE[curr_case], showfliers=False)
382
+ plt.xticks(np.arange(1,len(all_methods)+1), all_methods)
383
+
384
+ print('\n' + curr_case + '\n')
385
+
386
+ data_MAE = np.zeros([len(all_MAE[curr_case][0]), len(all_MAE[curr_case])])
387
+ for i,m in enumerate(all_MAE[curr_case]):
388
+ data_MAE[:,i] = m
389
+
390
+ n_datasets = data_MAE.shape[0]
391
+
392
+ data_CC = np.zeros([len(all_CC[curr_case][0]), len(all_CC[curr_case])])
393
+ for i,m in enumerate(all_CC[curr_case]):
394
+ data_CC[:,i] = m
395
+
396
+ t,p,ranks_mae,piv_mae = ft(data_MAE[:,0], data_MAE[:,1], data_MAE[:,2], data_MAE[:,3], data_MAE[:,4], data_MAE[:,5], data_MAE[:,6], data_MAE[:,7])
397
+ avranksMAE = list(np.divide(ranks_mae, n_datasets))
398
+ print('statistic: ' + str(t))
399
+ print('pvalue: ' + str(p))
400
+ print(' ')
401
+
402
+ t,p,ranks_cc,piv_cc = ft(data_CC[:,0], data_CC[:,1], data_CC[:,2], data_CC[:,3], data_CC[:,4], data_CC[:,5], data_CC[:,6], data_CC[:,7])
403
+ avranksCC = list(np.divide(ranks_cc, n_datasets))
404
+ print('statistic: ' + str(t))
405
+ print('pvalue: ' + str(p))
406
+ print(' ')
407
+
408
+ cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha) #tested on 30 datasets
409
+ Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
    plt.title('CD Diagram MAE')

    cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha)  # tested on 30 datasets
    Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
    plt.title('CD Diagram CC')

    plt.show()

elif CASE == 'full':

    CC_allcases = []
    MAE_allcases = []
    for metric in metrics:
        for method in all_methods:
            raw_values = res[res['method'] == method][metric]

            values = []
            for v in raw_values:
                if metric == 'CC':
                    values.append(v[np.argmax(v)])
                else:
                    values.append(v[np.argmin(v)])

            if metric == 'CC':
                CC_allcases.append(np.array(values))
            if metric == 'MAE':
                MAE_allcases.append(np.array(values))

    data_MAE = np.zeros([len(MAE_allcases[0]), len(MAE_allcases)])
    for i, m in enumerate(MAE_allcases):
        data_MAE[:, i] = m

    n_datasets = data_MAE.shape[0]

    data_MAE_df = pd.DataFrame(data_MAE, columns=all_methods)
    print('\nFriedman Test MAE:')
    print(ss.friedmanchisquare(*data_MAE.T))
    print(' ')
    pc = sp.posthoc_nemenyi_friedman(data_MAE_df)
    cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
    heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False,
                    'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
    plt.figure()
    sp.sign_plot(pc, **heatmap_args)
    plt.title('Nemenyi Test MAE')

    t, p, ranks_mae, piv_mae = ft(data_MAE[:, 0], data_MAE[:, 1], data_MAE[:, 2], data_MAE[:, 3],
                                  data_MAE[:, 4], data_MAE[:, 5], data_MAE[:, 6], data_MAE[:, 7])
    avranksMAE = list(np.divide(ranks_mae, n_datasets))
    print('statistic: ' + str(t))
    print('pvalue: ' + str(p))
    print(' ')

    data_CC = np.zeros([len(CC_allcases[0]), len(CC_allcases)])
    for i, m in enumerate(CC_allcases):
        data_CC[:, i] = m

    data_CC_df = pd.DataFrame(data_CC, columns=all_methods)
    print('\nFriedman Test CC:')
    print(ss.friedmanchisquare(*data_CC.T))
    print(' ')
    pc = sp.posthoc_nemenyi_friedman(data_CC_df)
    cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
    heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5', 'clip_on': False,
                    'square': True, 'cbar_ax_bbox': [0.80, 0.35, 0.04, 0.3]}
    plt.figure()
    sp.sign_plot(pc, **heatmap_args)
    plt.title('Nemenyi Test CC')

    t, p, ranks_cc, piv_cc = ft(data_CC[:, 0], data_CC[:, 1], data_CC[:, 2], data_CC[:, 3],
                                data_CC[:, 4], data_CC[:, 5], data_CC[:, 6], data_CC[:, 7])
    avranksCC = list(np.divide(ranks_cc, n_datasets))
    print('statistic: ' + str(t))
    print('pvalue: ' + str(p))
    print(' ')

    plt.figure()
    plt.subplot(1, 2, 1)
    plt.title('CC')
    plt.boxplot(CC_allcases, showfliers=False)
    plt.xticks(np.arange(1, len(all_methods) + 1), all_methods)

    plt.subplot(1, 2, 2)
    plt.title('MAE')
    plt.boxplot(MAE_allcases, showfliers=False)
    plt.xticks(np.arange(1, len(all_methods) + 1), all_methods)

    cd = Orange.evaluation.compute_CD(avranksMAE, n_datasets, alpha=alpha)  # tested on 30 datasets
    Orange.evaluation.graph_ranks(avranksMAE, all_methods, cd=cd, width=6, textspace=1.5, reverse=True)
    plt.title('CD Diagram MAE')

    cd = Orange.evaluation.compute_CD(avranksCC, n_datasets, alpha=alpha)  # tested on 30 datasets
    Orange.evaluation.graph_ranks(avranksCC, all_methods, cd=cd, width=6, textspace=1.5)
    plt.title('CD Diagram CC')

    plt.show()
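
For reference, the critical difference returned by Orange.evaluation.compute_CD above follows Demsar's Nemenyi formula; a minimal sketch of the same computation (the constant 3.031 is the alpha=0.05 Studentized-range value for k=8 methods, quoted here as an assumption, not taken from this repo):

import numpy as np

def nemenyi_cd(k, n, q_alpha=3.031):
    # CD = q_alpha * sqrt(k(k+1) / (6n)): two methods whose average ranks
    # differ by more than this are significantly different at the given alpha
    return q_alpha * np.sqrt(k * (k + 1) / (6.0 * n))

print(nemenyi_cd(k=8, n=30))  # ~1.92 rank units for 8 methods over 30 cases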
ssr.py ADDED
@@ -0,0 +1,164 @@
import numpy as np
from .utils.SkinColorFilter import SkinColorFilter
from .base import VHRMethod

class SSR(VHRMethod):
    methodName = 'SSR'

    def __init__(self, **kwargs):
        super(SSR, self).__init__(**kwargs)

    def apply(self, X):

        K = len(self.video.faceSignal)
        l = self.video.frameRate

        P = np.zeros(K)  # 1 | dim: K
        # store the eigenvalues Λ and the eigenvectors U at each frame
        L = np.zeros((3, K), dtype='float64')     # dim: 3xK
        U = np.zeros((3, 3, K), dtype='float64')  # dim: 3x3xK

        for k in range(K):
            n_roi = len(self.video.faceSignal[k])
            VV = []

            for r in range(n_roi):
                V = self.video.faceSignal[k][r].astype(np.float64)
                idx = V != 0
                idx2 = np.logical_and(np.logical_and(idx[:, :, 0], idx[:, :, 1]), idx[:, :, 2])
                V_skin_only = V[idx2]
                VV.append(V_skin_only)

            VV = np.vstack(VV)

            C = self.__build_correlation_matrix(VV)  # dim: 3x3

            # get: eigenvalues Λ, eigenvectors U
            L[:, k], U[:, :, k] = self.__eigs(C)  # dim Λ: 3 | dim U: 3x3

            # build p and add it to the pulse signal P
            if k >= l:  # 5
                tau = k - l  # 5
                p = self.__build_p(tau, k, l, U, L)  # 6, 7, 8, 9, 10, 11 | dim: l
                P[tau:k] += p  # 11

            if np.isnan(np.sum(P)):
                print('NAN')
                print(self.video.faceSignal[k])

        bvp = P

        return bvp

    def __build_p(self, τ, k, l, U, Λ):
        """
        builds P
        Parameters
        ----------
        τ: int
            The starting frame index (k - l)
        k: int
            The frame index
        l: int
            The temporal stride to use
        U: numpy.ndarray
            The eigenvectors of the c matrix (for all frames up to counter).
        Λ: numpy.ndarray
            The eigenvalues of the c matrix (for all frames up to counter).
        Returns
        -------
        p: numpy.ndarray
            The p signal to add to the pulse.
        """
        # SR'
        SR = np.zeros((3, l), 'float64')  # dim: 3xl
        z = 0

        for t in range(τ, k, 1):  # 6, 7
            a = Λ[0, t]
            b = Λ[1, τ]
            c = Λ[2, τ]
            d = U[:, 0, t].T
            e = U[:, 1, τ]
            f = U[:, 2, τ]
            g = U[:, 1, τ].T
            h = U[:, 2, τ].T
            x1 = a / b
            x2 = a / c
            x3 = np.outer(e, g)
            x4 = np.dot(d, x3)
            x5 = np.outer(f, h)
            x6 = np.dot(d, x5)
            x7 = np.sqrt(x1)
            x8 = np.sqrt(x2)
            x9 = x7 * x4
            x10 = x8 * x6
            x11 = x9 + x10
            SR[:, z] = x11  # 8 | dim: 3
            z += 1

        # build p and add it to the final pulse signal
        s0 = SR[0, :]  # dim: l
        s1 = SR[1, :]  # dim: l
        p = s0 - ((np.std(s0) / np.std(s1)) * s1)  # 10 | dim: l
        p = p - np.mean(p)  # 11
        return p  # dim: l

    def __get_skin_pixels(self, skin_filter, face, do_skininit):
        """
        Extract the skin-colored pixels of a face ROI as an (n_pixels x 3) RGB
        matrix, optionally (re-)estimating the skin filter parameters first.
        Parameters
        ----------
        skin_filter: SkinColorFilter
            The skin color filter to apply.
        face: numpy.ndarray
            The face ROI.
        do_skininit: bool
            Whether to re-estimate the gaussian parameters of the filter.
        Returns
        -------
        V: numpy.ndarray
            The RGB values of the skin-colored pixels, scaled to [0, 1].
        """
        ROI = face

        if do_skininit:
            skin_filter.estimate_gaussian_parameters(ROI)

        skin_mask = skin_filter.get_skin_mask(ROI)  # dim: wxh

        V = ROI[skin_mask]  # dim: (w×h)x3
        V = V.astype('float64') / 255.0

        return V


    def __build_correlation_matrix(self, V):
        # V dim: (W×H)x3
        #V = np.unique(V, axis=0)
        V_T = V.T  # dim: 3x(W×H)
        N = V.shape[0]
        # build the correlation matrix
        C = np.dot(V_T, V)  # dim: 3x3
        C = C / N

        return C

    def __eigs(self, C):
        """
        get eigenvalues and eigenvectors, sort them.
        Parameters
        ----------
        C: numpy.ndarray
            The 3x3 correlation matrix of the skin-colored pixels.
        Returns
        -------
        Λ: numpy.ndarray
            The eigenvalues of the correlation matrix
        U: numpy.ndarray
            The (sorted) eigenvectors of the correlation matrix
        """
        # get eigenvectors and sort them according to eigenvalues (largest first)
        L, U = np.linalg.eig(C)  # dim Λ: 3 | dim U: 3x3
        idx = L.argsort()   # ascending order
        idx = idx[::-1]     # reverse: largest eigenvalue first
        L_ = L[idx]         # dim: 3
        U_ = U[:, idx]      # dim: 3x3

        return L_, U_
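
The heart of SSR above is the per-frame eigen-decomposition of the 3x3 skin-pixel correlation matrix; a NumPy-only sketch of that single step (the random V is a hypothetical stand-in for the skin-pixel matrix that apply() assembles):

import numpy as np

V = np.random.rand(1000, 3)      # hypothetical (n_pixels x 3) RGB skin values in [0, 1]
C = np.dot(V.T, V) / V.shape[0]  # 3x3 correlation matrix, as in __build_correlation_matrix
L, U = np.linalg.eig(C)
idx = L.argsort()[::-1]          # largest eigenvalue first, as in __eigs
L, U = L[idx], U[:, idx]
print(L[0] / L[1])               # for real skin pixels the first eigenvalue dominates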
stats.py ADDED
@@ -0,0 +1,344 @@
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import scipy.stats as ss
import scikit_posthocs as sp
from IPython.display import display
from .stattests import friedman_aligned_ranks_test as ft
import Orange

class StatAnalysis():
    """ Statistics analysis for multiple datasets and multiple VHR methods"""

    def __init__(self, filepath='default'):

        if os.path.isdir(filepath):
            self.multidataset = True
            self.path = filepath + "/"
            self.datasetsList = os.listdir(filepath)
        elif os.path.isfile(filepath):
            self.multidataset = False
            self.datasetsList = [filepath]
            self.path = ""
        else:
            raise ValueError("Error: filepath is wrong!")

        # -- get common methods
        self.__getMethods()
        self.metricSort = {'MAE': 'min', 'RMSE': 'min', 'CC': 'max', 'PCC': 'max'}
        self.scale = {'MAE': 'log', 'RMSE': 'log', 'CC': 'linear', 'PCC': 'linear'}

    def FriedmanTest(self, methods=None, metric='MAE'):

        # -- method(s)
        if methods is None:
            methods = self.methods
        else:
            if not set(methods) <= set(self.methods):
                raise ValueError("Some method is wrong!")
            self.methods = methods

        # -- set metric
        self.metric = metric
        self.mag = self.metricSort[metric]

        # -- get data from dataset(s)
        # return Y = mat(n-datasets, k-methods)
        if self.multidataset:
            Y = self.__getData()
        else:
            Y = self.__getDataMono()
        self.ndataset = Y.shape[0]

        # -- Friedman test
        t, p, ranks, piv = ft(Y)
        self.avranks = list(np.divide(ranks, self.ndataset))

        return t, p, ranks, piv, self.ndataset

    def SignificancePlot(self, methods=None, metric='MAE'):

        # -- method(s)
        if methods is None:
            methods = self.methods
        else:
            if not set(methods) <= set(self.methods):
                raise ValueError("Some method is wrong!")
            self.methods = methods

        # -- set metric
        self.metric = metric
        self.mag = self.metricSort[metric]

        # -- get data from dataset(s)
        if self.multidataset:
            Y = self.__getData()
        else:
            Y = self.__getDataMono()

        # -- significance plot: a heatmap of p-values
        methodNames = [x.upper() for x in self.methods]
        Ypd = pd.DataFrame(Y, columns=methodNames)
        ph = sp.posthoc_nemenyi_friedman(Ypd)
        cmap = ['1', '#fb6a4a', '#08306b', '#4292c6', '#c6dbef']
        heatmap_args = {'cmap': cmap, 'linewidths': 0.25, 'linecolor': '0.5',
                        'clip_on': False, 'square': True, 'cbar_ax_bbox': [0.85, 0.35, 0.04, 0.3]}

        plt.figure(figsize=(5, 4))
        sp.sign_plot(ph, cbar=True, **heatmap_args)
        plt.title('p-vals')

        fname = 'SP_' + self.metric + '.pdf'
        plt.savefig(fname)
        plt.show()

    def computeCD(self, avranks=None, numDatasets=None, alpha='0.05', display=True):
        """
        Returns the critical difference for the Nemenyi or Bonferroni-Dunn test
        according to the given alpha (either alpha='0.05' or alpha='0.1'), the
        average ranks and the number of tested datasets N. The test can be either
        'nemenyi' for the two-tailed Nemenyi test or 'bonferroni-dunn' for the
        Bonferroni-Dunn test. See the Orange package docs.
        """
        if not numDatasets:
            numDatasets = self.ndataset
        if not avranks:
            avranks = self.avranks

        cd = Orange.evaluation.compute_CD(avranks, numDatasets, alpha=alpha)  # tested on 30 datasets

        if self.mag == 'min':
            reverse = True
        else:
            reverse = False

        methodNames = [x.upper() for x in self.methods]
        if display:
            Orange.evaluation.graph_ranks(avranks, methodNames, cd=cd, width=6, textspace=1.5, reverse=reverse)
            name = 'CD Diagram (metric: ' + self.metric + ')'
            plt.title(name)
            fname = 'CD_' + self.metric + '.pdf'
            plt.savefig(fname)

            plt.show()
        return cd

    def displayBoxPlot(self, methods=None, metric='MAE', scale=None, title=True):

        # -- method(s)
        if methods is None:
            methods = self.methods
        else:
            if not set(methods) <= set(self.methods):
                raise ValueError("Some method is wrong!")
            self.methods = methods

        # -- set metric
        self.metric = metric
        self.mag = self.metricSort[metric]
        if scale is None:
            scale = self.scale[metric]

        # -- get data from dataset(s)
        if self.multidataset:
            Y = self.__getData()
        else:
            Y = self.__getDataMono()

        # -- display box plot
        self.boxPlot(methods, metric, Y, scale=scale, title=title)

    def boxPlot(self, methods, metric, Y, scale, title):

        # Y = mat(n-datasets, k-methods)

        k = len(methods)

        if k != Y.shape[1]:
            raise ValueError("Y must have one column per method!")

        offset = 50
        fig = go.Figure()

        methodNames = [x.upper() for x in self.methods]
        for i in range(k):
            yd = Y[:, i]
            name = methodNames[i]
            # -- set color for box
            if metric == 'MAE' or metric == 'RMSE':
                med = np.median(yd)
                col = str(min(200, 5 * int(med) + offset))
            if metric == 'CC' or metric == 'PCC':
                med = 1 - np.abs(np.median(yd))
                col = str(int(200 * med) + offset)

            # -- add box
            fig.add_trace(go.Box(
                y=yd,
                name=name,
                boxpoints='all',
                jitter=.7,
                #whiskerwidth=0.2,
                fillcolor="rgba(" + col + "," + col + "," + col + ",0.5)",
                line_color="rgba(0,0,255,0.5)",
                marker_size=2,
                line_width=2)
            )

        gwidth = np.max(Y) / 10

        if title:
            tit = "Metric: " + metric
            top = 40
        else:
            tit = ''
            top = 10

        fig.update_layout(
            title=tit,
            yaxis_type=scale,
            xaxis_type="category",
            yaxis=dict(
                autorange=True,
                showgrid=True,
                zeroline=True,
                #dtick=gwidth,
                gridcolor='rgb(255,255,255)',
                gridwidth=.1,
                zerolinewidth=2,
                titlefont=dict(size=30)
            ),
            font=dict(
                family="monospace",
                size=16,
                color='rgb(20,20,20)'
            ),
            margin=dict(
                l=20,
                r=10,
                b=20,
                t=top,
            ),
            paper_bgcolor='rgb(250, 250, 250)',
            plot_bgcolor='rgb(243, 243, 243)',
            showlegend=False
        )

        fig.show()

    def saveStatsData(self, methods=None, metric='MAE', outfilename='statsData.csv'):
        Y = self.getStatsData(methods=methods, metric=metric, printTable=False)
        np.savetxt(outfilename, Y)

    def getStatsData(self, methods=None, metric='MAE', printTable=True):
        # -- method(s)
        if methods is None:
            methods = self.methods
        else:
            if not set(methods) <= set(self.methods):
                raise ValueError("Some method is wrong!")
            self.methods = methods

        # -- set metric
        self.metric = metric
        self.mag = self.metricSort[metric]

        # -- get data from dataset(s)
        # return Y = mat(n-datasets, k-methods)
        if self.multidataset:
            Y = self.__getData()
        else:
            Y = self.__getDataMono()

        # -- add median and IQR
        I = ss.iqr(Y, axis=0)
        M = np.median(Y, axis=0)
        Y = np.vstack((Y, M))
        Y = np.vstack((Y, I))

        if printTable:
            methodNames = [x.upper() for x in self.methods]
            datasetNames = list(self.datasetNames)  # copy, so repeated calls don't grow the list
            datasetNames.append('Median')
            datasetNames.append('IQR')
            df = pd.DataFrame(Y, columns=methodNames, index=datasetNames)
            display(df)

        return Y

    def __getDataMono(self):
        mag = self.mag
        metric = self.metric
        methods = self.methods

        frame = self.dataFrame[0]
        # -- loop on methods
        Y = []
        for method in methods:
            vals = frame[frame['method'] == method][metric]
            if mag == 'min':
                data = [v[np.argmin(v)] for v in vals]
            else:
                data = [v[np.argmax(v)] for v in vals]
            Y.append(data)

        return np.array(Y).T

    def __getData(self):

        mag = self.mag
        metric = self.metric
        methods = self.methods

        # -- loop on datasets
        Y = []
        for frame in self.dataFrame:

            # -- loop on methods
            y = []
            for method in methods:
                vals = frame[frame['method'] == method][metric]
                if mag == 'min':
                    data = [v[np.argmin(v)] for v in vals]
                else:
                    data = [v[np.argmax(v)] for v in vals]

                y.append(data)

            y = np.array(y)
            Y.append(np.mean(y, axis=1))
        return np.array(Y)

    def __getMethods(self):

        mets = []
        dataFrame = []
        N = len(self.datasetsList)

        # -- load dataframes
        self.datasetNames = []
        for file in self.datasetsList:
            filename = self.path + file
            self.datasetNames.append(file)
            data = pd.read_hdf(filename)
            mets.append(set(list(data['method'])))
            dataFrame.append(data)

        # -- method names intersection among datasets
        methods = set(mets[0])
        if N > 1:
            for m in range(1, N):
                methods = methods.intersection(mets[m])

        methods = list(methods)
        methods.sort()
        self.methods = methods
        self.dataFrame = dataFrame
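
A hedged usage sketch of StatAnalysis (the results/ directory and the package path are assumptions; each .h5 file inside it should hold the per-dataset dataframe written by TestSuite.saveResults):

from pyVHR.analysis.stats import StatAnalysis

st = StatAnalysis(filepath='results/')        # one .h5 results file per dataset
t, p, ranks, piv, n = st.FriedmanTest(metric='MAE')
if p < 0.05:                                  # at least two methods differ
    st.computeCD(alpha='0.05')                # CD diagram over average ranks
    st.SignificancePlot(metric='MAE')         # Nemenyi p-value heatmap
st.displayBoxPlot(metric='MAE')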
stattests.py ADDED
@@ -0,0 +1,614 @@
import numpy as np
import scipy as sp
import scipy.special
import scipy.stats as st
import itertools as it

def binomial_sign_test(*args):
    """
    Performs a binomial sign test for two dependent samples.
    Tests the hypothesis that the two dependent samples represent two different populations.

    Parameters
    ----------
    sample1, sample2: array_like
        The sample measurements for each group.

    Returns
    -------
    B-value : float
        The computed B-value of the test.
    p-value : float
        The associated p-value from the binomial distribution.

    References
    ----------
    D.J. Sheskin, Handbook of parametric and nonparametric statistical procedures.
    CRC Press, 2003, Test 19: The Binomial Sign Test for Two Dependent Samples
    """
    k = len(args)
    if k != 2: raise ValueError('The test needs two samples')
    n = len(args[0])

    d_plus = 0
    d_minus = 0
    for i in range(n):
        # Zero differences are eliminated
        if args[0][i] < args[1][i]:
            d_plus = d_plus + 1
        elif args[0][i] > args[1][i]:
            d_minus = d_minus + 1

    x = max(d_plus, d_minus)
    n = d_plus + d_minus

    p_value = 2 * (1 - st.binom.cdf(x, n, 0.5))  # Two-tailed of the smallest p-value

    return x, p_value

def friedman_aligned_ranks_test(S):
    """
    Performs a Friedman aligned ranks ranking test.
    Tests the hypothesis that in a set of k dependent samples groups (where k >= 2)
    at least two of the groups represent populations with different median values.
    The difference with a Friedman test is that it aligns each block around its
    location before ranking, which is useful when the number of samples is low.

    Parameters
    ----------
    S = [sample1, sample2, ... sample_k] : matrix(n,k)
        The sample measurements (matrix columns) for each group.

    Returns
    -------
    Chi2-value : float
        The computed Chi2-value of the test.
    p-value : float
        The associated p-value from the Chi2-distribution.
    rankings : array_like
        The ranking for each group.
    pivots : array_like
        The pivotal quantities for each group.

    References
    ----------
    J.L. Hodges, E.L. Lehmann, Rank methods for combination of independent experiments
    in analysis of variance, Annals of Mathematical Statistics 33 (1962) 482–497.
    """

    # modified from the original: takes the (n, k) sample matrix directly
    n, k = S.shape  # n samples, k groups

    aligned_observations = []
    for i in range(n):
        loc = np.mean(S[i])
        aligned_observations.extend(S[i] - loc)

    aligned_observations_sort = sorted(aligned_observations)

    aligned_ranks = []
    for i in range(n):
        row = []
        for j in range(k):
            v = aligned_observations[i * k + j]
            row.append(aligned_observations_sort.index(v) + 1 + (aligned_observations_sort.count(v) - 1) / 2.)
        aligned_ranks.append(row)

    rankings_avg = [np.mean([case[j] for case in aligned_ranks]) for j in range(k)]
    rankings_cmp = [r / np.sqrt(k * (n * k + 1) / 6.) for r in rankings_avg]

    r_i = [np.sum(case) for case in aligned_ranks]
    r_j = [np.sum([case[j] for case in aligned_ranks]) for j in range(k)]
    T = (k - 1) * (np.sum([v ** 2 for v in r_j]) - (k * n ** 2 / 4.) * (k * n + 1) ** 2) / float(((k * n * (k * n + 1) * (2 * k * n + 1)) / 6.) - (1. / float(k)) * np.sum([v ** 2 for v in r_i]))

    p_value = 1 - st.chi2.cdf(T, k - 1)

    return T, p_value, rankings_avg, rankings_cmp

def quade_test(*args):
    """
    Performs a Quade ranking test.
    Tests the hypothesis that in a set of k dependent samples groups (where k >= 2) at least two of the groups represent populations with different median values.
    The difference with a Friedman test is that it uses the range of each sample to weight the ranking.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.
    rankings : array_like
        The ranking for each group.
    pivots : array_like
        The pivotal quantities for each group.

    References
    ----------
    D. Quade, Using weighted rankings in the analysis of complete blocks with additive block effects, Journal of the American Statistical Association 74 (1979) 680–683.
    """
    k = len(args)
    if k < 2: raise ValueError('Less than 2 levels')
    n = len(args[0])
    if len(set([len(v) for v in args])) != 1: raise ValueError('Unequal number of samples')

    rankings = []
    ranges = []
    for i in range(n):
        row = [col[i] for col in args]
        ranges.append(max(row) - min(row))
        row_sort = sorted(row)
        rankings.append([row_sort.index(v) + 1 + (row_sort.count(v) - 1) / 2. for v in row])

    ranges_sort = sorted(ranges)
    ranking_cases = [ranges_sort.index(v) + 1 + (ranges_sort.count(v) - 1) / 2. for v in ranges]

    S = []
    W = []
    for i in range(n):
        S.append([ranking_cases[i] * (r - (k + 1) / 2.) for r in rankings[i]])
        W.append([ranking_cases[i] * r for r in rankings[i]])

    Sj = [np.sum([row[j] for row in S]) for j in range(k)]
    Wj = [np.sum([row[j] for row in W]) for j in range(k)]

    rankings_avg = [w / (n * (n + 1) / 2.) for w in Wj]
    rankings_cmp = [r / np.sqrt(k * (k + 1) * (2 * n + 1) * (k - 1) / (18. * n * (n + 1))) for r in rankings_avg]

    A = np.sum([S[i][j] ** 2 for i in range(n) for j in range(k)])
    B = np.sum([s ** 2 for s in Sj]) / float(n)
    F = (n - 1) * B / (A - B)

    p_value = 1 - st.f.cdf(F, k - 1, (k - 1) * (n - 1))

    return F, p_value, rankings_avg, rankings_cmp

def bonferroni_dunn_test(ranks, control=None):
    """
    Performs a Bonferroni-Dunn post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the ranking of the control method is different to each of the other methods.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'
    control : string optional
        The name of the control method (one vs all), default None (the group with minimum ranking)

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    O.J. Dunn, Multiple comparisons among means, Journal of the American Statistical Association 56 (1961) 52–64.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    if not control:
        control_i = values.index(min(values))
    else:
        control_i = keys.index(control)

    comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
    z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [min((k - 1) * p_value, 1) for p_value in p_values]

    return comparisons, z_values, p_values, adj_p_values

def holm_test(ranks, control=None):
    """
    Performs a Holm post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the ranking of the control method is different to each of the other methods.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'
    control : string optional
        The name of the control method (one vs all), default None (the group with minimum ranking)

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    S. Holm, A simple sequentially rejective multiple test procedure, Scandinavian
    Journal of Statistics 6 (1979) 65–70.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    if not control:
        control_i = values.index(min(values))
    else:
        control_i = keys.index(control)

    comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
    z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [min(max((k - (j + 1)) * p_values[j] for j in range(i + 1)), 1) for i in range(k - 1)]

    return comparisons, z_values, p_values, adj_p_values

def hochberg_test(ranks, control=None):
    """
    Performs a Hochberg post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the ranking of the control method is different to each of the other methods.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'
    control : string optional
        The name of the control method, default the group with minimum ranking

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    Y. Hochberg, A sharper Bonferroni procedure for multiple tests of significance,
    Biometrika 75 (1988) 800–803.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    if not control:
        control_i = values.index(min(values))
    else:
        control_i = keys.index(control)

    comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
    z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [min(max((k - j) * p_values[j - 1] for j in range(k - 1, i, -1)), 1) for i in range(k - 1)]

    return comparisons, z_values, p_values, adj_p_values

def li_test(ranks, control=None):
    """
    Performs a Li post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the ranking of the control method is different to each of the other methods.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'
    control : string optional
        The name of the control method, default the group with minimum ranking

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    J. Li, A two-step rejection procedure for testing multiple hypotheses, Journal of
    Statistical Planning and Inference 138 (2008) 1521–1527.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    if not control:
        control_i = values.index(min(values))
    else:
        control_i = keys.index(control)

    comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
    z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [p_values[i] / (p_values[i] + 1 - p_values[-1]) for i in range(k - 1)]

    return comparisons, z_values, p_values, adj_p_values

def finner_test(ranks, control=None):
    """
    Performs a Finner post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the ranking of the control method is different to each of the other methods.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'
    control : string optional
        The name of the control method, default the group with minimum ranking

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    H. Finner, On a monotonicity problem in step-down multiple test procedures, Journal
    of the American Statistical Association 88 (1993) 920–923.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    if not control:
        control_i = values.index(min(values))
    else:
        control_i = keys.index(control)

    comparisons = [keys[control_i] + " vs " + keys[i] for i in range(k) if i != control_i]
    z_values = [abs(values[control_i] - values[i]) for i in range(k) if i != control_i]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [min(max(1 - (1 - p_values[j]) ** ((k - 1) / float(j + 1)) for j in range(i + 1)), 1) for i in range(k - 1)]

    return comparisons, z_values, p_values, adj_p_values

def nemenyi_multitest(ranks):
    """
    Performs a Nemenyi post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the rankings of each pair of groups are different.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    P.B. Nemenyi, Distribution-free multiple comparisons, PhD thesis,
    Princeton University, 1963.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    versus = list(it.combinations(range(k), 2))

    comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
    z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    m = int(k * (k - 1) / 2.)
    adj_p_values = [min(m * p_value, 1) for p_value in p_values]

    return comparisons, z_values, p_values, adj_p_values

def holm_multitest(ranks):
    """
    Performs a Holm post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the rankings of each pair of groups are different.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    S. Holm, A simple sequentially rejective multiple test procedure, Scandinavian Journal
    of Statistics 6 (1979) 65–70.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    versus = list(it.combinations(range(k), 2))

    comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
    z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    m = int(k * (k - 1) / 2.)
    adj_p_values = [min(max((m - j) * p_values[j] for j in range(i + 1)), 1) for i in range(m)]

    return comparisons, z_values, p_values, adj_p_values

def hochberg_multitest(ranks):
    """
    Performs a Hochberg post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the rankings of each pair of groups are different.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    Y. Hochberg, A sharper Bonferroni procedure for multiple tests of significance, Biometrika 75 (1988) 800–803.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    versus = list(it.combinations(range(k), 2))

    comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
    z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    m = int(k * (k - 1) / 2.)
    adj_p_values = [max((m + 1 - j) * p_values[j - 1] for j in range(m, i, -1)) for i in range(m)]

    return comparisons, z_values, p_values, adj_p_values

def finner_multitest(ranks):
    """
    Performs a Finner post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the rankings of each pair of groups are different.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    H. Finner, On a monotonicity problem in step-down multiple test procedures, Journal of the
    American Statistical Association 88 (1993) 920–923.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    versus = list(it.combinations(range(k), 2))

    comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
    z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    m = int(k * (k - 1) / 2.)
    adj_p_values = [min(max(1 - (1 - p_values[j]) ** (m / float(j + 1)) for j in range(i + 1)), 1) for i in range(m)]

    return comparisons, z_values, p_values, adj_p_values

def _S(k):
    """
    Helper function for the Shaffer test.
    It obtains the number of independent test hypotheses when using an All vs
    All strategy using the number of groups to be compared.
    """
    if k == 0 or k == 1:
        return {0}
    else:
        result = set()
        for j in reversed(range(1, k + 1)):
            tmp = _S(k - j)
            for s in tmp:
                result = result.union({sp.special.binom(j, 2) + s})
        return list(result)

def shaffer_multitest(ranks):
    """
    Performs a Shaffer post-hoc test using the pivot quantities obtained by a ranking test.
    Tests the hypothesis that the rankings of each pair of groups are different.

    Parameters
    ----------
    ranks : dictionary_like
        A dictionary with format 'groupname':'pivotal quantity'

    Returns
    ----------
    Comparisons : array-like
        Strings identifier of each comparison with format 'group_i vs group_j'
    Z-values : array-like
        The computed Z-value statistic for each comparison.
    p-values : array-like
        The associated p-value from the Z-distribution which depends on the index of the comparison
    Adjusted p-values : array-like
        The associated adjusted p-values which can be compared with a significance level

    References
    ----------
    J.P. Shaffer, Modified sequentially rejective multiple test procedures, Journal of
    the American Statistical Association 81 (1986) 826–831.
    """
    k = len(ranks)
    values = list(ranks.values())
    keys = list(ranks.keys())
    versus = list(it.combinations(range(k), 2))

    m = int(k * (k - 1) / 2.)
    A = _S(int((1 + np.sqrt(1 + 4 * m * 2)) / 2))
    t = [max([a for a in A if a <= m - i]) for i in range(m)]

    comparisons = [keys[vs[0]] + " vs " + keys[vs[1]] for vs in versus]
    z_values = [abs(values[vs[0]] - values[vs[1]]) for vs in versus]
    p_values = [2 * (1 - st.norm.cdf(abs(z))) for z in z_values]
    # Sort values by p_value so that p_0 < p_1
    p_values, z_values, comparisons = map(list, zip(*sorted(zip(p_values, z_values, comparisons), key=lambda t: t[0])))
    adj_p_values = [min(max(t[j] * p_values[j] for j in range(i + 1)), 1) for i in range(m)]

    return comparisons, z_values, p_values, adj_p_values
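
A quick self-contained check of the ranking test plus one post-hoc on synthetic data (the import path is assumed from the repo layout; importing the module any other way works the same):

import numpy as np
from pyVHR.analysis.stattests import friedman_aligned_ranks_test, holm_test

rng = np.random.RandomState(0)
data = rng.rand(12, 4)                 # 12 datasets x 4 methods (e.g. MAE values)
data[:, 0] -= 0.3                      # make method 0 clearly better (lower error)
T, p, avg_ranks, pivots = friedman_aligned_ranks_test(data)
print('chi2 = %.3f, p = %.4f' % (T, p))

ranks = {'m%d' % j: pivots[j] for j in range(4)}  # pivotal quantity per method
print(holm_test(ranks))                # control defaults to the best-ranked method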
testsuite.py ADDED
@@ -0,0 +1,203 @@
import configparser
import ast
import pandas as pd
import numpy as np
from ..datasets.dataset import datasetFactory
from ..methods.base import methodFactory
from ..signals.video import Video
from ..utils.errors import getErrors, printErrors, displayErrors

class TestSuite():
    """ Test suite for a given video dataset and multiple VHR methods"""

    def __init__(self, configFilename='default'):
        if configFilename == 'default':
            configFilename = '../pyVHR/analysis/default_test.cfg'
        self.parse_cfg(configFilename)

    def start(self, saveResults=True, outFilename=None, verb=0):
        """ Runs the tests as specified in the loaded config file.

        verbose degree:
            0 - not verbose
            1 - show the main steps
            2 - display graphic
            3 - display spectra
            4 - display errors
            (use also combinations, e.g. verb=21, verb=321)
        """

        # -- verbose prints
        if '1' in str(verb):
            self.__verbose('a')

        # -- dataset & cfg params
        dataset = datasetFactory(self.videodict['dataset'], videodataDIR=self.videodict['videodataDIR'], BVPdataDIR=self.videodict['BVPdataDIR'])

        # -- object collecting the results
        res = TestResult()

        # -- loop on methods
        for m in self.methods:

            # -- loop on videos
            if self.videoIdx == 'all':
                self.videoIdx = np.arange(0, dataset.numVideos)
            for v in self.videoIdx:

                # -- verbose prints
                if '1' in str(verb):
                    print("\n**** Using Method: %s on videoID: %d" % (m, v))

                # -- catch data
                res.newDataSerie()
                res.addData('method', m)
                res.addData('dataset', dataset.name)
                res.addData('videoIdx', v)

                # -- video object
                videoFilename = dataset.getVideoFilename(v)
                video = Video(videoFilename, verb)
                video.getCroppedFaces(detector=self.videodict['detector'],
                                      extractor=self.videodict['extractor'])
                etime = float(self.videodict['endTime'])
                if etime < 0:
                    self.videodict['endTime'] = str(video.duration - etime)
                # -- catch data
                res.addData('videoFilename', videoFilename)

                # -- ground-truth signal
                fname = dataset.getSigFilename(v)
                sigGT = dataset.readSigfile(fname)
                winSizeGT = int(self.methodsdict[m]['winSizeGT'])
                bpmGT, timesGT = sigGT.getBPM(winSizeGT)
                # -- catch data
                res.addData('sigFilename', fname)
                res.addData('bpmGT', sigGT.bpm)
                res.addData('timeGT', sigGT.times)

                # -- method object
                # load params of m
                self.methodsdict[m]['video'] = video
                self.methodsdict[m]['verb'] = verb
                # merge the video parameters dict into the method parameters dict before calling the method
                self.__merge(self.methodsdict[m], self.videodict)
                method = methodFactory(m, **self.methodsdict[m])
                bpmES, timesES = method.runOffline(**self.methodsdict[m])
                # -- catch data
                res.addData('bpmES', bpmES)
                res.addData('timeES', timesES)

                # -- error metrics
                RMSE, MAE, MAX, PCC = getErrors(bpmES, bpmGT, timesES, timesGT)
                # -- catch data
                res.addData('RMSE', RMSE)
                res.addData('MAE', MAE)
                res.addData('PCC', PCC)
                res.addData('MAX', MAX)
                res.addDataSerie()

                if '1' in str(verb):
                    printErrors(RMSE, MAE, MAX, PCC)
                if '4' in str(verb):
                    displayErrors(bpmES, bpmGT, timesES, timesGT)

        # -- save results on a file
        if saveResults:
            res.saveResults(outFilename)

        return res


    def parse_cfg(self, configFilename):
        """ parses the given config file for experiments. """

        self.parser = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
        self.parser.optionxform = str
        if not self.parser.read(configFilename):
            raise FileNotFoundError(configFilename)

        # checks: the DEFAULT section must be present
        assert self.parser.defaults(), "ERROR... DEFAULT section is mandatory!"

        # load default params
        self.defaultdict = dict(self.parser['DEFAULT'].items())

        # load video params
        self.videodict = dict(self.parser['VIDEO'].items())

        # video idx list extraction
        if self.videodict['videoIdx'] == 'all':
            self.videoIdx = 'all'
        else:
            svid = ast.literal_eval(self.videodict['videoIdx'])
            self.videoIdx = [int(v) for v in svid]

        # load parameters for each method
        self.methodsdict = {}
        self.methods = ast.literal_eval(self.defaultdict['methods'])
        for x in self.methods:
            self.methodsdict[x] = dict(self.parser[x].items())

    def __merge(self, dict1, dict2):
        for key in dict2:
            if key not in dict1:
                dict1[key] = dict2[key]

    def __verbose(self, verb):
        if verb == 'a':
            print("** Run the test with the following config:")
            print("      dataset: " + self.videodict['dataset'].upper())
            print("      methods: " + str(self.methods))


class TestResult():
    """ Manage the results of a test for a given video dataset and multiple VHR methods"""

    def __init__(self, filename=None):

        if filename is None:
            self.dataFrame = pd.DataFrame()
        else:
            self.dataFrame = pd.read_hdf(filename)
        self.dict = None

    def addDataSerie(self):
        # -- store the current serie
        if self.dict is not None:
            self.dataFrame = self.dataFrame.append(self.dict, ignore_index=True)

    def newDataSerie(self):
        # -- new dict
        D = {}
        D['method'] = ''
        D['dataset'] = ''
        D['videoIdx'] = ''        # video index within the dataset
        D['sigFilename'] = ''     # GT signal filename
        D['videoFilename'] = ''   # video filename
        D['EVM'] = False          # True if EVM is used, False otherwise
        D['mask'] = ''            # mask used
        D['RMSE'] = ''
        D['MAE'] = ''
        D['PCC'] = ''
        D['MAX'] = ''
        D['telapse'] = ''
        D['bpmGT'] = ''           # GT bpm
        D['bpmES'] = ''
        D['timeGT'] = ''          # GT times
        D['timeES'] = ''
        self.dict = D

    def addData(self, key, value):
        self.dict[key] = value

    def saveResults(self, outFilename=None):
        if outFilename is None:
            outFilename = "testResults.h5"
        self.outFilename = outFilename

        # -- save data
        self.dataFrame.to_hdf(outFilename, key='df', mode='w')
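
A hedged usage sketch of the suite (the package path is an assumption based on the layout above; sample.cfg is the example config shipped with the repo, whose dataset paths must point at real data):

from pyVHR.analysis.testsuite import TestSuite

ts = TestSuite(configFilename='sample.cfg')   # DEFAULT/VIDEO sections + one section per method
res = ts.start(saveResults=True, outFilename='testResults.h5', verb=1)
print(res.dataFrame[['method', 'videoIdx', 'MAE', 'RMSE', 'PCC']])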
ubfc1.py ADDED
@@ -0,0 +1,52 @@
import csv
import numpy as np
from pyVHR.datasets.dataset import Dataset
from pyVHR.signals.bvp import BVPsignal

class UBFC1(Dataset):
    """
    UBFC dataset structure:
    -----------------
    datasetDIR/
    |-- SubjDIR1/
    |   |-- vid.avi
    |...
    |-- SubjDIRM/
    |   |-- vid.avi
    """
    name = 'UBFC1'
    signalGT = 'BVP'       # GT signal type
    numLevels = 2          # depth of the filesystem collecting video and BVP files
    numSubjects = 8        # number of subjects
    video_EXT = 'avi'      # extension of the video files
    frameRate = 30         # video frame rate
    VIDEO_SUBSTRING = ''   # substring contained in the filename
    SIG_EXT = 'xmp'        # extension of the BVP files
    SIG_SUBSTRING = ''     # substring contained in the filename
    SIG_SampleRate = 62    # sample rate of the BVP files
    skinThresh = [40, 60]  # thresholds for skin detection

    def readSigfile(self, filename):
        """ Load BVP signal.
        Must return a 1-dim (row array) signal
        """
        gtTrace = []
        gtTime = []
        gtHR = []
        with open(filename, 'r') as csvfile:
            xmp = csv.reader(csvfile)
            for row in xmp:
                gtTrace.append(float(row[3]))
                gtTime.append(float(row[0]) / 1000.)
                gtHR.append(float(row[1]))

        data = np.array(gtTrace)
        time = np.array(gtTime)
        hr = np.array(gtHR)
        self.SIG_SampleRate = np.round(1 / np.mean(np.diff(time)))

        return BVPsignal(data, self.SIG_SampleRate)
ubfc2.py ADDED
@@ -0,0 +1,55 @@
import numpy as np
from pyVHR.datasets.dataset import Dataset
from pyVHR.signals.bvp import BVPsignal

class UBFC2(Dataset):
    """
    UBFC dataset structure:
    -----------------
    datasetDIR/
    |-- SubjDIR1/
    |   |-- vid.avi
    |...
    |-- SubjDIRM/
    |   |-- vid.avi
    """
    name = 'UBFC2'
    signalGT = 'BVP'       # GT signal type
    numLevels = 2          # depth of the filesystem collecting video and BVP files
    numSubjects = 26       # number of subjects
    video_EXT = 'avi'      # extension of the video files
    frameRate = 30         # video frame rate
    VIDEO_SUBSTRING = ''   # substring contained in the filename
    SIG_EXT = 'txt'        # extension of the BVP files
    SIG_SUBSTRING = ''     # substring contained in the filename
    SIG_SampleRate = 30    # sample rate of the BVP files
    skinThresh = [40, 60]  # thresholds for skin detection

    def readSigfile(self, filename):
        """ Load BVP signal.
        Must return a 1-dim (row array) signal
        """
        with open(filename, 'r') as f:
            x = f.readlines()

        # line 0: BVP trace, line 1: heart rate, line 2: timestamps
        s = x[0].split(' ')
        s = list(filter(lambda a: a != '', s))
        gtTrace = np.array(s).astype(np.float64)

        t = x[2].split(' ')
        t = list(filter(lambda a: a != '', t))
        gtTime = np.array(t).astype(np.float64)

        hr = x[1].split(' ')
        hr = list(filter(lambda a: a != '', hr))
        gtHR = np.array(hr).astype(np.float64)

        data = np.array(gtTrace)
        time = np.array(gtTime)
        self.SIG_SampleRate = np.round(1 / np.mean(np.diff(time)))

        return BVPsignal(data, self.SIG_SampleRate)
utils.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A utils module used in the actual evm module performing such tasks as
3
+ pyramid construction, video io and filter application
4
+
5
+ functions were originally written by flyingzhao but adapted for this module
6
+ """
7
+
8
+ import cv2
9
+ import numpy as np
10
+ import scipy.signal as signal
11
+ import scipy.fftpack as fftpack
12
+
13
+ def build_gaussian_pyramid(src, levels=3):
14
+ """
15
+ Function: build_gaussian_pyramid
16
+ --------------------------------
17
+ Builds a gaussian pyramid
18
+
19
+ Args:
20
+ -----
21
+ src: the input image
22
+ levels: the number levels in the gaussian pyramid
23
+
24
+ Returns:
25
+ --------
26
+ A gaussian pyramid
27
+ """
28
+ s=src.copy()
29
+ pyramid=[s]
30
+ for i in range(levels):
31
+ s=cv2.pyrDown(s)
32
+ pyramid.append(s)
33
+ return pyramid
34
+
35
+
36
+ def gaussian_video(video, levels=3):
37
+ """
38
+ Function: gaussian_video
39
+ ------------------------
40
+ generates a gaussian pyramid for each frame in a video
41
+
42
+ Args:
43
+ -----
44
+ video: the input video array
45
+ levels: the number of levels in the gaussian pyramid
46
+
47
+ Returns:
48
+ --------
49
+ the gaussian video
50
+ """
51
+ n = video.shape[0]
52
+ for i in range(0, n):
53
+ pyr = build_gaussian_pyramid(video[i], levels=levels)
54
+ gaussian_frame=pyr[-1]
55
+ if i==0:
56
+ vid_data = np.zeros((n, *gaussian_frame.shape))
57
+ vid_data[i] = gaussian_frame
58
+ return vid_data
59
+
60
+
61
+ def reconstruct_video_g(amp_video, original_video, levels=3):
62
+ """
63
+ Function: reconstruct_video_g
64
+ -----------------------------
65
+ reconstructs a video from a gaussian pyramid and the original
66
+
67
+ Args:
68
+ -----
69
+ amp_video: the amplified gaussian video
70
+ original_video: the original video
71
+ levels: the levels in the gaussian video
72
+
73
+ Returns:
74
+ --------
75
+ the reconstructed video
76
+ """
77
+ final_video = np.zeros(original_video.shape)
78
+ for i in range(0, amp_video.shape[0]):
79
+ img = amp_video[i]
80
+ for x in range(levels):
81
+ img = cv2.pyrUp(img)
82
+ img = img + original_video[i]
83
+ final_video[i] = img
84
+ return final_video
85
+
86
+
87
+ def build_laplacian_pyramid(src,levels=3):
88
+ """
89
+ Function: build_laplacian_pyramid
90
+ ---------------------------------
91
+ Builds a Laplacian Pyramid
92
+
93
+ Args:
94
+ -----
95
+ src: the input image
96
+ levels: the number levels in the laplacian pyramid
97
+
98
+ Returns:
99
+ --------
100
+ A Laplacian pyramid
101
+ """
102
+ gaussianPyramid = build_gaussian_pyramid(src, levels)
103
+ pyramid=[]
104
+ for i in range(levels,0,-1):
105
+ GE=cv2.pyrUp(gaussianPyramid[i])
106
+ L=cv2.subtract(gaussianPyramid[i-1],GE)
107
+ pyramid.append(L)
108
+ return pyramid
109
+
110
+
111
+ def laplacian_video(video, levels=3):
112
+ """
113
+ Function: laplacian_video
114
+ -------------------------
115
+ generates a laplaican pyramid for each frame in a video
116
+
117
+ Args:
118
+ -----
119
+ video: the input video array
120
+ levels: the number of levels for each laplacian pyramid
121
+
122
+ Returns:
123
+ --------
124
+ The laplacian video
125
+ """
126
+ tensor_list=[]
127
+ n = video.shape[0]
128
+ for i in range(0, n):
129
+ frame=video[i]
130
+ pyr = build_laplacian_pyramid(frame,levels=levels)
131
+ if i==0:
132
+ for k in range(levels):
133
+ tensor_list.append(np.zeros((n, *pyr[k].shape)))
134
+ for n in range(levels):
135
+ tensor_list[n][i] = pyr[n]
136
+ return tensor_list
137
+
138
+
139
+ def reconstruct_video_l(lap_pyr, levels=3):
140
+ """
141
+ Function: reconstruct_video_l
142
+ -----------------------------
143
+ reconstructs a video from a laplacian pyramid and the original
144
+
145
+ Args:
146
+ -----
147
+ lap_pyr: the amplified laplacian pyramid
148
+ levels: the levels in the laplacian video
149
+
150
+ Returns:
151
+ --------
152
+ the reconstructed video
153
+ """
154
+ final = np.zeros(lap_pyr[-1].shape)
155
+ for i in range(lap_pyr[0].shape[0]):
156
+ up = lap_pyr[0][i]
157
+ for n in range(levels-1):
158
+ up = cv2.pyrUp(up) + lap_pyr[n + 1][i]
159
+ final[i] = up
160
+ return final
161
+
162
+
163
+ def save_video(video, filename='out.avi'):
164
+ """
165
+ Function: save_video
166
+ --------------------
167
+ saves a video to a file
168
+
169
+ Args:
170
+ -----
171
+ video: the numpy array representing the video
172
+ filename: the name of the output file
173
+
174
+ Returns:
175
+ None
176
+ """
177
+ fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
178
+ n, h, w, _ = video.shape
179
+ writer = cv2.VideoWriter(filename, fourcc, 30, (w, h), 1)
180
+ for i in range(0, n):
181
+ writer.write(cv2.convertScaleAbs(video[i]))
182
+ writer.release()
183
+
184
+
def load_video(video_filename):
    """
    Function: load_video
    --------------------
    Loads a video from a file

    Args:
    -----
    video_filename: the name of the video file

    Returns:
    --------
    a numpy array with shape (num_frames, height, width, channels)
    the frame rate of the video
    """
    cap = cv2.VideoCapture(video_filename)

    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    # note: OpenCV reads frames in BGR channel order
    video = np.zeros((frame_count, height, width, 3), dtype='float')
    x = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret is True:
            video[x] = frame
            x += 1
        else:
            break
    cap.release()
    return video, fps

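# Usage sketch (illustrative): a round trip with the two I/O helpers above,
# assuming 'in.avi' exists; frames are kept in OpenCV's BGR order and
# save_video writes at a hardcoded 30 fps:
#
#   vid, fps = load_video('in.avi')
#   save_video(vid, filename='out.avi')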
def temporal_ideal_filter(arr, low, high, fps, axis=0):
    """
    Function: temporal_ideal_filter
    -------------------------------
    Applies a temporal ideal filter to a numpy array

    Args:
    -----
    arr: a numpy array with shape (N, H, W, C)
        N: number of frames
        H: height
        W: width
        C: channels
    low: the low frequency bound (Hz)
    high: the high frequency bound (Hz)
    fps: the video frame rate
    axis: the axis of video, should always be 0

    Returns:
    --------
    the array with the filter applied
    """
    fft = fftpack.fft(arr, axis=axis)
    frequencies = fftpack.fftfreq(arr.shape[0], d=1.0 / fps)
    bound_low = (np.abs(frequencies - low)).argmin()
    bound_high = (np.abs(frequencies - high)).argmin()
    # zero out everything outside the [low, high] band (both spectrum halves)
    fft[:bound_low] = 0
    fft[bound_high:-bound_high] = 0
    fft[-bound_low:] = 0
    iff = fftpack.ifft(fft, axis=axis)
    return np.abs(iff)

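# Usage sketch (illustrative): keeping only the 0.8-4 Hz band of a 30 fps video
# tensor (roughly 48-240 bpm, the EVM defaults used in video.py):
#
#   filtered = temporal_ideal_filter(vid, low=0.8, high=4, fps=30)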
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """
    Function: butter_bandpass_filter
    --------------------------------
    applies a Butterworth bandpass filter

    Args:
    -----
    data: the input data
    lowcut: the low cut value (Hz)
    highcut: the high cut value (Hz)
    fs: the frame rate in frames per second
    order: the order of the Butterworth filter

    Returns:
    --------
    the result of the Butterworth bandpass filter
    """
    omega = 0.5 * fs
    low = lowcut / omega
    high = highcut / omega
    b, a = signal.butter(order, [low, high], btype='band')
    y = signal.lfilter(b, a, data, axis=0)
    return y
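# Usage sketch (illustrative): band-passing a 1-D rPPG trace sampled at 30 fps;
# lowcut/highcut are in Hz and `raw_signal` is a hypothetical input array:
#
#   bvp = butter_bandpass_filter(raw_signal, lowcut=0.8, highcut=4, fs=30, order=5)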
video.py ADDED
@@ -0,0 +1,703 @@
import os
import re
import warnings

import dlib
import matplotlib.pyplot as plt
import skvideo
import skvideo.io
from matplotlib import patches

from .pyramid import *
from ..utils import printutils
from ..utils.SkinDetect import SkinDetect

class Video:
    """
    Basic class for extracting ROIs from video frames
    """

    facePadding = 0.2                        # dlib param for padding
    filenameCompressed = "croppedFaces.npz"  # filename to store on disk
    saveCropFaces = True                     # enable the storage on disk of the cropped faces
    loadCropFaces = True                     # enable the loading of cropped faces from disk

    def __init__(self, filename, verb=0):
        self.filename = filename
        self.faces = np.array([])           # empty array of cropped faces (RGB)
        self.processedFaces = np.array([])
        self.faceSignal = np.array([])      # empty array of face signals (RGB) after roi/skin extraction

        self.verb = verb
        self.cropSize = [150, 150]  # param for cropping
        self.typeROI = 'rect'       # type of roi among ['rect', 'skin_fix', 'skin_adapt']
        self.detector = 'mtcnn'
        self.time_vid_start = 0

        self.doEVM = False
        self.EVMalpha = 20
        self.EVMlevels = 3
        self.EVMlow = .8
        self.EVMhigh = 4

        self.rectCoords = [[0, 0, self.cropSize[0], self.cropSize[1]]]  # default 'rect' roi coordinates
        self.skinThresh_fix = [40, 80]  # default min values of Saturation and Value (HSV) for 'skin' roi
        self.skinThresh_adapt = 0.2

    def getCroppedFaces(self, detector='mtcnn', extractor='skvideo', fps=30):
        """ Extracts cropped faces from the video; fps is used only when the
        video is stored as raw frames in a directory (time is in seconds) """

        # -- check if cropped faces already exist on disk
        path, name = os.path.split(self.filename)
        filenamez = path + '/' + self.filenameCompressed

        self.detector = detector
        self.extractor = extractor

        # -- if compressed exists... load it
        if self.loadCropFaces and os.path.isfile(filenamez):
            self.cropped = True
            data = np.load(filenamez, allow_pickle=True)
            self.faces = data['a']
            self.numFrames = int(data['b'])
            self.frameRate = int(data['c'])
            self.height = int(data['d'])
            self.width = int(data['e'])
            self.duration = float(data['f'])
            self.codec = data['g']
            self.detector = data['h']
            self.extractor = data['i']
            self.cropSize = self.faces[0].shape

            if self.detector != detector:
                warnings.warn("\nWARNING!! Requested detector method is different from the saved one\n")

        # -- if compressed does not exist, load orig. video and extract faces
        else:
            self.cropped = False

            # if the video signal is stored in a video container
            if os.path.isfile(self.filename):
                # -- metadata
                metadata = skvideo.io.ffprobe(self.filename)
                self.numFrames = int(eval(metadata["video"]["@nb_frames"]))
                self.height = int(eval(metadata["video"]["@height"]))
                self.width = int(eval(metadata["video"]["@width"]))
                self.frameRate = int(np.round(eval(metadata["video"]["@avg_frame_rate"])))
                self.duration = float(eval(metadata["video"]["@duration"]))
                self.codec = metadata["video"]["@codec_name"]
                # -- load video on a ndarray with skvideo or openCV
                video = None
                if extractor == 'opencv':
                    video = self.__opencvRead()
                else:
                    video = skvideo.io.vread(self.filename)

            # else if the video signal is stored as single frames
            else:  # elif os.path.isdir(self.filename):
                # -- load frames on a ndarray
                self.path = path
                video = self.__loadFrames()
                self.numFrames = len(video)
                self.height = video[0].shape[0]
                self.width = video[0].shape[1]
                self.frameRate = fps    # ###### <<<<----- TO SET MANUALLY ####
                self.duration = self.numFrames / self.frameRate
                self.codec = 'raw'

            # -- extract faces and resize
            print('\n\n' + detector + '\n\n')
            self.__extractFace(video, method=detector)

            # -- store cropped faces on disk
            if self.saveCropFaces:
                np.savez_compressed(filenamez, a=self.faces,
                                    b=self.numFrames, c=self.frameRate,
                                    d=self.height, e=self.width,
                                    f=self.duration, g=self.codec,
                                    h=self.detector, i=self.extractor)

        if '1' in str(self.verb):
            self.printVideoInfo()
            if not self.cropped:
                print(' Extracted faces: not found! Detecting...')
            else:
                print(' Extracted faces: found! Loading...')

    def setMask(self, typeROI='rect',
                rectCoords=None, rectRegions=None,
                skinThresh_fix=None, skinThresh_adapt=None):
        self.typeROI = typeROI
        if self.typeROI == 'rect':
            if rectCoords is not None:
                # List of rectangular ROIs: [[x0,y0,w0,h0],...,[xk,yk,wk,hk]]
                self.rectCoords = rectCoords
            elif rectRegions is not None:
                # List of rectangular regions: ['forehead', 'lcheek', 'rcheek', 'nose']
                self.rectCoords = self.__rectRegions2Coord(rectRegions)
        elif self.typeROI == 'skin_adapt' and skinThresh_adapt is not None:
            # Skin limits for HSV
            self.skinThresh_adapt = skinThresh_adapt
        elif self.typeROI == 'skin_fix' and skinThresh_fix is not None:
            # Skin limits for HSV
            self.skinThresh_fix = skinThresh_fix
        else:
            raise ValueError('Unrecognized type of ROI provided.')

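    # Usage sketch (illustrative, not part of the original file): selecting
    # named rectangular regions or an adaptive skin mask on a Video instance `v`:
    #
    #   v.setMask(typeROI='rect', rectRegions=['forehead', 'lcheek', 'rcheek'])
    #   v.setMask(typeROI='skin_adapt', skinThresh_adapt=0.2)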
    def extractSignal(self, frameSubset, count=None):
        if self.typeROI == 'rect':
            return self.__extractRectSignal(frameSubset)

        elif self.typeROI == 'skin_adapt' or self.typeROI == 'skin_fix':
            return self.__extractSkinSignal(frameSubset, count)

    def setEVM(self, enable=True, alpha=20, levels=3, low=.8, high=4):
        """Eulerian Video Magnification"""

        # rawFaces = self.faces
        # gaussFaces = gaussian_video(rawFaces, levels=levels)
        # filtered = temporal_ideal_filter(gaussFaces, low, high, self.frameRate)
        # amplified = alpha * filtered
        # self.faces = reconstruct_video_g(amplified, rawFaces, levels=levels)
        self.doEVM = enable

        if enable is True:
            self.EVMalpha = alpha
            self.EVMlevels = levels
            self.EVMlow = low
            self.EVMhigh = high

    def applyEVM(self):
        vid_data = gaussian_video(self.faces, self.EVMlevels)
        vid_data = temporal_bandpass_filter(vid_data, self.frameRate,
                                            freq_min=self.EVMlow,
                                            freq_max=self.EVMhigh)
        vid_data *= self.EVMalpha
        self.processedFaces = combine_pyramid_and_save(vid_data,
                                                       self.faces,
                                                       enlarge_multiple=3,
                                                       fps=self.frameRate)

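    # Usage sketch (illustrative): enabling Eulerian magnification and filling
    # self.processedFaces from self.faces on a Video instance `v`:
    #
    #   v.setEVM(enable=True, alpha=20, levels=3, low=.8, high=4)
    #   v.applyEVM()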
    def getMeanRGB(self):
        """ Compute the mean R, G, B signal over the ROIs of each frame """

        n_frames = len(self.faceSignal)
        n_roi = len(self.faceSignal[0])
        rgb = np.zeros([3, n_frames])

        for i in range(n_frames):
            mean_rgb = 0

            for roi in self.faceSignal[i]:
                # consider only non-zero (non-masked) pixels
                idx = roi != 0
                idx2 = np.logical_and(np.logical_and(idx[:, :, 0], idx[:, :, 1]), idx[:, :, 2])
                roi = roi[idx2]
                if len(roi) == 0:
                    mean_rgb += 0
                else:
                    mean_rgb += np.mean(roi, axis=0)

            rgb[:, i] = mean_rgb / n_roi
        return rgb

    def printVideoInfo(self):
        print('\n * Video filename: %s' % self.filename)
        print('   Total frames: %s' % self.numFrames)
        print('   Duration: %s (sec)' % np.round(self.duration, 2))
        print('   Frame rate: %s (fps)' % self.frameRate)
        print('   Codec: %s' % self.codec)

        printOK = 1
        try:
            f = self.numFrames
        except AttributeError:
            printOK = 0

        if printOK:
            print('   Num frames: %s' % self.numFrames)
            print('   Height: %s' % self.height)
            print('   Width: %s' % self.width)
            print('   Detector: %s' % self.detector)
            print('   Extractor: %s' % self.extractor)

    def printROIInfo(self):
        print('   ROI type: ' + self.typeROI)
        if self.typeROI == 'rect':
            print('   Rect coords: ' + str(self.rectCoords))
        elif self.typeROI == 'skin_fix':
            print('   Skin thresh: ' + str(self.skinThresh_fix))
        elif self.typeROI == 'skin_adapt':
            print('   Skin thresh: ' + str(self.skinThresh_adapt))

    def showVideo(self):
        from ipywidgets import interact
        import ipywidgets as widgets

        n = self.numFrames

        def view_image(frame):

            idx = frame - 1

            if self.processedFaces.size == 0:
                face = self.faces[idx]
            else:
                face = self.processedFaces[idx]

            if self.typeROI == 'rect':
                plt.imshow(face, interpolation='nearest')

                ax = plt.gca()

                for coord in self.rectCoords:
                    rect = patches.Rectangle((coord[0], coord[1]),
                                             coord[2], coord[3], linewidth=1, edgecolor='y', facecolor='none')
                    ax.add_patch(rect)

            elif self.typeROI == 'skin_fix':
                lower = np.array([0, self.skinThresh_fix[0], self.skinThresh_fix[1]], dtype="uint8")
                upper = np.array([20, 255, 255], dtype="uint8")
                converted = cv2.cvtColor(face, cv2.COLOR_RGB2HSV)
                skinMask = cv2.inRange(converted, lower, upper)
                skinFace = cv2.bitwise_and(face, face, mask=skinMask)
                plt.imshow(skinFace, interpolation='nearest')

            elif self.typeROI == 'skin_adapt':
                sd = SkinDetect(strength=self.skinThresh_adapt)
                sd.compute_stats(face)
                skinFace = sd.get_skin(face, filt_kern_size=7, verbose=False, plot=False)
                plt.imshow(skinFace, interpolation='nearest')

        interact(view_image, frame=widgets.IntSlider(min=1, max=n, step=1, value=1))

    def __opencvRead(self):
        vid = cv2.VideoCapture(self.filename)
        frames = []
        retval, frame = vid.read()
        while retval:
            frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            retval, frame = vid.read()
        vid.release()
        return np.asarray(frames)

    def __extractRectSignal(self, frameSubset):
        """ Extract R,G,B values on all ROIs of a frame subset """

        assert self.processedFaces.size > 0, "Faces are not processed yet! Please call runOffline first"

        self.faceSignal = []

        for r in frameSubset:
            face = self.processedFaces[r]
            H = face.shape[0]
            W = face.shape[1]

            # take frame-level rois (extents are clipped to the face crop)
            rois = []
            for roi in self.rectCoords:
                x = roi[0]
                y = roi[1]
                w = min(x + roi[2], W)
                h = min(y + roi[3], H)
                rois.append(face[y:h, x:w, :])

            # take all rois of the frame
            self.faceSignal.append(rois)

    def __extractSkinSignal(self, frameSubset, count=None, frameByframe=False):
        """ Extract R,G,B values from skin-based roi of a frame subset """

        assert self.processedFaces.size > 0, "Faces are not processed yet! Please call runOffline first"

        self.faceSignal = []

        cp = self.cropSize
        skinFace = np.zeros([cp[0], cp[1], 3], dtype='uint8')

        # -- loop on frames
        for i, r in enumerate(frameSubset):
            face = self.processedFaces[r]

            if self.typeROI == 'skin_fix':
                assert len(self.skinThresh_fix) == 2, "Please provide 2 values for Fixed Skin Detector"
                lower = np.array([0, self.skinThresh_fix[0], self.skinThresh_fix[1]], dtype="uint8")
                upper = np.array([20, 255, 255], dtype="uint8")
                converted = cv2.cvtColor(face, cv2.COLOR_RGB2HSV)
                skinMask = cv2.inRange(converted, lower, upper)
                skinFace = cv2.bitwise_and(face, face, mask=skinMask)
                self.faceSignal.append([skinFace])

            elif self.typeROI == 'skin_adapt':
                # compute skin statistics on the first frame only (unless frameByframe)
                if count == 0 and i == 0:
                    self.sd = SkinDetect(strength=self.skinThresh_adapt)
                    self.sd.compute_stats(face)

                if frameByframe and i > 0:
                    self.sd.compute_stats(face)

                skinFace = self.sd.get_skin(face, filt_kern_size=0, verbose=False, plot=False)

                self.faceSignal.append([skinFace])

    def __extractFace(self, video, method, t_downsample_rate=2):

        # -- save on GPU
        # self.facesGPU = cp.asarray(self.faces)  # move the data to the current device.

        if method == 'dlib':
            # -- dlib detector
            detector = dlib.get_frontal_face_detector()
            if os.path.exists("resources/shape_predictor_68_face_landmarks.dat"):
                file_predict = "resources/shape_predictor_68_face_landmarks.dat"
            elif os.path.exists("../resources/shape_predictor_68_face_landmarks.dat"):
                file_predict = "../resources/shape_predictor_68_face_landmarks.dat"
            predictor = dlib.shape_predictor(file_predict)

            # -- loop on frames
            cp = self.cropSize
            self.faces = np.zeros([self.numFrames, cp[0], cp[1], 3], dtype='uint8')
            for i in range(self.numFrames):
                frame = video[i, :, :, :]
                # -- Detect face using dlib
                self.numFaces = 0
                facesRect = detector(frame, 0)
                if len(facesRect) > 0:
                    # -- process only the first face
                    self.numFaces += 1
                    rect = facesRect[0]
                    x0 = rect.left()
                    y0 = rect.top()
                    w = rect.width()
                    h = rect.height()

                    # -- extract cropped faces
                    shape = predictor(frame, rect)
                    f = dlib.get_face_chip(frame, shape, size=self.cropSize[0], padding=self.facePadding)
                    self.faces[i, :, :, :] = f.astype('uint8')

                    if self.verb: printutils.printProgressBar(i, self.numFrames, prefix='Processing:', suffix='Complete', length=50)

                else:
                    print("No face detected at frame %s" % i)

        elif method == 'mtcnn_kalman':
            # mtcnn detector
            from mtcnn import MTCNN
            detector = MTCNN()

            h0 = None
            w0 = None
            crop = np.zeros([2, 2, 2])
            skipped_frames = 0

            while crop.shape[:2] != (h0, w0):
                if skipped_frames > 0:
                    print("\nWARNING! Strange Face Crop... Skipping frame " + str(skipped_frames) + '...')
                frame = video[skipped_frames, :, :, :]
                detection = detector.detect_faces(frame)

                # skip frames with no detection (as in the 'mtcnn' branch below)
                if len(detection) == 0:
                    skipped_frames += 1
                    continue

                if len(detection) > 1:
                    # keep the detection with the largest bounding-box area
                    areas = []
                    for det in detection:
                        areas.append(det['box'][2] * det['box'][3])
                    areas = np.array(areas)
                    ia = np.argsort(areas)
                    [x0, y0, w0, h0] = detection[ia[-1]]['box']
                else:
                    [x0, y0, w0, h0] = detection[0]['box']

                w0 = 2 * (int(w0 / 2))
                h0 = 2 * (int(h0 / 2))
                # Cropping face
                crop = frame[y0:y0 + h0, x0:x0 + w0, :]

                skipped_frames += 1

            self.cropSize = crop.shape[:2]

            if skipped_frames > 1:
                self.numFrames = self.numFrames - skipped_frames
                new_time_vid_start = skipped_frames / self.frameRate

                if new_time_vid_start > self.time_vid_start:
                    self.time_vid_start = new_time_vid_start
                    print("\tVideo now starts at " + str(self.time_vid_start) + " seconds\n")

            self.faces = np.zeros([self.numFrames, self.cropSize[0], self.cropSize[1], 3], dtype='uint8')
            self.faces[0, :, :, :] = crop

            # set the initial tracking window
            state = np.array([int(x0 + w0 / 2), int(y0 + h0 / 2), 0, 0], dtype='float64')  # initial position

            # Setting up Kalman Filter (constant-velocity model, position measurements)
            kalman = cv2.KalmanFilter(4, 2, 0)
            kalman.transitionMatrix = np.array([[1., 0., .1, 0.],
                                                [0., 1., 0., .1],
                                                [0., 0., 1., 0.],
                                                [0., 0., 0., 1.]])
            kalman.measurementMatrix = 1. * np.eye(2, 4)
            kalman.processNoiseCov = 1e-5 * np.eye(4, 4)
            kalman.measurementNoiseCov = 1e-3 * np.eye(2, 2)
            kalman.errorCovPost = 1e-1 * np.eye(4, 4)
            kalman.statePost = state
            measurement = np.array([int(x0 + w0 / 2), int(y0 + h0 / 2)], dtype='float64')

            for i in range(skipped_frames, self.numFrames):
                frame = video[i, :, :, :]

                if i % t_downsample_rate == 0:
                    detection = detector.detect_faces(frame)
                    if len(detection) != 0:
                        areas = []
                        if len(detection) > 1:
                            for det in detection:
                                areas.append(det['box'][2] * det['box'][3])
                            areas = np.array(areas)
                            ia = np.argsort(areas)

                            [x0, y0, w, h] = detection[ia[-1]]['box']
                        else:
                            [x0, y0, w, h] = detection[0]['box']

                        not_found = False
                    else:
                        not_found = True

                prediction = kalman.predict()  # prediction

                if i % t_downsample_rate == 0 and not not_found:
                    measurement = np.array([x0 + w / 2, y0 + h / 2], dtype='float64')
                    posterior = kalman.correct(measurement)
                    # flatten the (4,1) state vector before unpacking
                    [cx0, cy0, wn, hn] = posterior.astype(int).flatten()
                else:
                    [cx0, cy0, wn, hn] = prediction.astype(int).flatten()

                # Cropping with the new bounding box
                crop = frame[int(cy0 - h0 / 2):int(cy0 + h0 / 2), int(cx0 - w0 / 2):int(cx0 + w0 / 2), :]

                if crop.shape[:2] != self.faces.shape[1:3]:
                    print("WARNING! Strange face crop: video frame " + str(i) + " probably does not contain the whole face... Reshaping Crop\n")
                    crop = cv2.resize(crop, (self.faces.shape[2], self.faces.shape[1]))

                self.faces[i, :, :, :] = crop.astype('uint8')

        elif method == 'mtcnn':
            # mtcnn detector
            from mtcnn import MTCNN
            # from utils.FaceAligner import FaceAligner
            detector = MTCNN()

            print("\nPerforming face detection...")

            h0 = None
            w0 = None
            crop = np.zeros([2, 2, 2])
            skipped_frames = 0

            while crop.shape[:2] != (h0, w0):
                if skipped_frames > 0:
                    print("\nWARNING! Strange Face Crop... Skipping frame " + str(skipped_frames) + '...')
                frame = video[skipped_frames, :, :, :]
                detection = detector.detect_faces(frame)

                if len(detection) == 0:
                    skipped_frames += 1
                    continue

                if len(detection) > 1:
                    # keep the detection with the largest bounding-box area
                    areas = []
                    for det in detection:
                        areas.append(det['box'][2] * det['box'][3])
                    areas = np.array(areas)
                    ia = np.argsort(areas)
                    [x0, y0, w0, h0] = detection[ia[-1]]['box']
                    nose = detection[ia[-1]]['keypoints']['nose']
                    r_eye = detection[ia[-1]]['keypoints']['right_eye']
                    l_eye = detection[ia[-1]]['keypoints']['left_eye']
                else:
                    [x0, y0, w0, h0] = detection[0]['box']
                    nose = detection[0]['keypoints']['nose']
                    r_eye = detection[0]['keypoints']['right_eye']
                    l_eye = detection[0]['keypoints']['left_eye']

                w0 = 2 * (int(w0 / 2))
                h0 = 2 * (int(h0 / 2))
                # crop around the barycenter of the nose and the two eyes
                barycenter = (np.array(nose) + np.array(r_eye) + np.array(l_eye)) / 3.
                cy0 = barycenter[1]
                cx0 = barycenter[0]
                # Cropping face
                crop = frame[int(cy0 - h0 / 2):int(cy0 + h0 / 2), int(cx0 - w0 / 2):int(cx0 + w0 / 2), :]

                skipped_frames += 1

            # fa = FaceAligner(desiredLeftEye=(0.3, 0.3), desiredFaceWidth=w0, desiredFaceHeight=h0)
            # crop_align = fa.align(frame, r_eye, l_eye)

            self.cropSize = crop.shape[:2]

            if skipped_frames > 1:
                self.numFrames = self.numFrames - skipped_frames
                new_time_vid_start = skipped_frames / self.frameRate

                if new_time_vid_start > self.time_vid_start:
                    self.time_vid_start = new_time_vid_start
                    print("\tVideo now starts at " + str(self.time_vid_start) + " seconds\n")

            self.faces = np.zeros([self.numFrames, self.cropSize[0], self.cropSize[1], 3], dtype='uint8')
            self.faces[0, :, :, :] = crop

            old_detection = detection
            for i in range(skipped_frames, self.numFrames):
                # print('\tFrame ' + str(i) + ' of ' + str(self.numFrames))
                frame = video[i, :, :, :]

                new_detection = detector.detect_faces(frame)
                areas = []

                # fall back to the previous detection if the face is lost
                if len(new_detection) == 0:
                    new_detection = old_detection

                if len(new_detection) > 1:
                    for det in new_detection:
                        areas.append(det['box'][2] * det['box'][3])
                    areas = np.array(areas)
                    ia = np.argsort(areas)

                    [x0, y0, w, h] = new_detection[ia[-1]]['box']
                    nose = new_detection[ia[-1]]['keypoints']['nose']
                    r_eye = new_detection[ia[-1]]['keypoints']['right_eye']
                    l_eye = new_detection[ia[-1]]['keypoints']['left_eye']
                else:
                    [x0, y0, w, h] = new_detection[0]['box']
                    nose = new_detection[0]['keypoints']['nose']
                    r_eye = new_detection[0]['keypoints']['right_eye']
                    l_eye = new_detection[0]['keypoints']['left_eye']

                barycenter = (np.array(nose) + np.array(r_eye) + np.array(l_eye)) / 3.
                cy0 = barycenter[1]
                cx0 = barycenter[0]
                # Cropping with the new bounding box
                crop = frame[int(cy0 - h0 / 2):int(cy0 + h0 / 2), int(cx0 - w0 / 2):int(cx0 + w0 / 2), :]

                if crop.shape[:2] != self.faces.shape[1:3]:
                    print("WARNING! Strange face crop: video frame " + str(i) + " probably does not contain the whole face... Reshaping Crop\n")
                    crop = cv2.resize(crop, (self.faces.shape[2], self.faces.shape[1]))

                self.faces[i, :, :, :] = crop.astype('uint8')
                old_detection = new_detection

                # if self.verb:
                printutils.printProgressBar(i, self.numFrames, prefix='Processing:', suffix='Complete', length=50)
        else:
            raise ValueError('Unrecognized face detection method. Please use "dlib", "mtcnn" or "mtcnn_kalman"')

    def __rectRegions2Coord(self, rectRegions):

        # regions: 'forehead', 'lcheek', 'rcheek', 'nose'
        assert len(self.faces) > 0, "Faces not found, please run getCroppedFaces first!"

        w = self.faces[0].shape[1]
        h = self.faces[0].shape[0]

        coords = []

        for roi in rectRegions:
            if roi == 'forehead':
                if self.detector == 'dlib':
                    x_f = int(w * .34)
                    y_f = int(h * .05)
                    w_f = int(w * .32)
                    h_f = int(h * .05)

                elif (self.detector == 'mtcnn') or (self.detector == 'mtcnn_kalman'):
                    x_f = int(w * .20)
                    y_f = int(h * .10)
                    w_f = int(w * .60)
                    h_f = int(h * .12)

                coords.append([x_f, y_f, w_f, h_f])

            elif roi == 'lcheek':
                if self.detector == 'dlib':
                    x_c = int(w * .22)
                    y_c = int(h * .40)
                    w_c = int(w * .14)
                    h_c = int(h * .11)

                elif (self.detector == 'mtcnn') or (self.detector == 'mtcnn_kalman'):
                    x_c = int(w * .15)
                    y_c = int(h * .54)
                    w_c = int(w * .15)
                    h_c = int(h * .11)

                coords.append([x_c, y_c, w_c, h_c])

            elif roi == 'rcheek':
                if self.detector == 'dlib':
                    x_c = int(w * .64)
                    y_c = int(h * .40)
                    w_c = int(w * .14)
                    h_c = int(h * .11)

                elif (self.detector == 'mtcnn') or (self.detector == 'mtcnn_kalman'):
                    x_c = int(w * .70)
                    y_c = int(h * .54)
                    w_c = int(w * .15)
                    h_c = int(h * .11)

                coords.append([x_c, y_c, w_c, h_c])

            elif roi == 'nose':
                if self.detector == 'dlib':
                    x_c = int(w * .40)
                    y_c = int(h * .35)
                    w_c = int(w * .20)
                    h_c = int(h * .05)

                elif (self.detector == 'mtcnn') or (self.detector == 'mtcnn_kalman'):
                    x_c = int(w * .35)
                    y_c = int(h * .50)
                    w_c = int(w * .30)
                    h_c = int(h * .08)

                coords.append([x_c, y_c, w_c, h_c])

            else:
                raise ValueError('Unrecognized rect region name.')

        return coords

    def __sort_nicely(self, l):
        """ Sort the given list in the way that humans expect. """
        convert = lambda text: int(text) if text.isdigit() else text
        alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
        l.sort(key=alphanum_key)
        return l

    def __loadFrames(self):

        # -- delete the compressed file if it exists
        cmpFile = os.path.join(self.path, self.filenameCompressed)
        if os.path.exists(cmpFile):
            os.remove(cmpFile)

        # -- get filenames within dir
        f_names = self.__sort_nicely(os.listdir(self.path))
        frames = []
        for n in range(len(f_names)):
            filename = os.path.join(self.path, f_names[n])
            frames.append(cv2.imread(filename)[:, :, ::-1])  # BGR -> RGB

        frames = np.array(frames)
        return frames
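
# Usage sketch (illustrative, not part of the original file): a typical rPPG
# pipeline with this class, assuming 'subject.avi' exists; assigning
# processedFaces = faces stands in for applyEVM() when EVM is disabled:
#
#   v = Video('subject.avi', verb=1)
#   v.getCroppedFaces(detector='mtcnn', extractor='skvideo')
#   v.setMask(typeROI='skin_adapt', skinThresh_adapt=0.2)
#   v.processedFaces = v.faces
#   v.extractSignal(range(v.numFrames), count=0)
#   rgb = v.getMeanRGB()   # (3, N) mean R,G,B traces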