Juliojuse committed
Commit
fa926f8
1 Parent(s): 8d9b681
.gitignore ADDED
@@ -0,0 +1 @@
1
+ /code/shape_predictor_68_face_landmarks.dat
README.md CHANGED
@@ -1,13 +1 @@
1
- ---
2
- title: Human Health Gradio
3
- emoji: 🏢
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.29.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ ## Human-Health Gradio implementation
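A minimal sketch of the pipeline this Space wires together (video → rPPG → physiological indicators), mirroring `code/app.py`; the placeholder path `input.mp4`, running from the `code/` directory, and having the ContrastPhys weights and dependencies installed are assumptions, not part of this commit:

```python
# Sketch only: mirrors the flow in code/app.py; 'input.mp4' is a placeholder path.
from model import PhysNet_Model
from gradio_utils import read_video, rppg_to_physiological_indicators
from utils_sig import face_detection_ROI
from face_detection import FaceDetection

detector = FaceDetection()                                   # dlib-based face/ROI detector
model = PhysNet_Model('./contrast_phys/model_weights.pt')    # ContrastPhys weights shipped in this commit

video = read_video('input.mp4')                              # (T, H, W, C) array of frames
window = video[:128]                                         # one 128-frame window, as in app.py
faces, ROI1, ROI2, status, face_region = face_detection_ROI(detector, window)
rppg, _ = model.predict(faces)                               # 80-sample rPPG segment
HR, RR, BP, HRV, BO = rppg_to_physiological_indicators(rppg, ROI1, ROI2)
print(HR, RR, HRV, BO)
```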
code/__pycache__/face_detection.cpython-37.pyc ADDED
Binary file (2.66 kB). View file
 
code/__pycache__/face_detection.cpython-38.pyc ADDED
Binary file (2.66 kB). View file
 
code/__pycache__/gradio_utils.cpython-37.pyc ADDED
Binary file (2.41 kB). View file
 
code/__pycache__/model.cpython-37.pyc ADDED
Binary file (6.73 kB). View file
 
code/__pycache__/model.cpython-38.pyc ADDED
Binary file (6.35 kB). View file
 
code/__pycache__/physiological_indicators.cpython-37.pyc ADDED
Binary file (3.16 kB). View file
 
code/__pycache__/utils_sig.cpython-37.pyc ADDED
Binary file (6.14 kB). View file
 
code/__pycache__/utils_sig.cpython-38.pyc ADDED
Binary file (6.05 kB). View file
 
code/app.py ADDED
@@ -0,0 +1,118 @@
1
+ import numpy as np
2
+ import gradio as gr
3
+ import cv2
4
+ from model import PhysNet_Model,DeepPhys_Model
5
+ import pandas as pd
6
+ import time
7
+ from physiological_indicators import PhysiologicalIndicators
8
+ from face_detection import FaceDetection
9
+
10
+ from utils_sig import *
11
+ from gradio_utils import rppg_to_physiological_indicators,fake_diffusion
12
+ from gradio_utils import read_video
13
+
14
+
15
+
16
+ fece_detection = FaceDetection()
17
+ csv_url = './ippg_predict.csv'
18
+
19
+
20
+ def video_to_rppg_dynamic(model_choice,path):
21
+ '''
22
+ Dynamically convert a video into an rPPG signal (processed window by window)
23
+ path: video file path
24
+ #C:\\Users\\74314\\AppData\\Local\\Temp\\gradio\\f50cc35ca9bac3568e36f1f7277a72d5e252ad37\\e0faab5f1b8c05eda32648dd635a97c4.mp4
25
+ '''
26
+ print(model_choice,"=======================================")
27
+ if model_choice=="ContrastPhys":
28
+ model = PhysNet_Model('./contrast_phys/model_weights.pt')
29
+ elif model_choice=="DeepPhys":
30
+ model = DeepPhys_Model('./contrast_phys/PURE_PURE_UBFC_deepphys_Epoch29.pth')
31
+ else:
32
+ model = None
33
+
34
+ # read video from file path as frames list
35
+ print("===============================",path)
36
+ video = read_video(path)
37
+ # print(video.shape)
38
+
39
+ HR_list = []
40
+ RR_list = []
41
+ HRV_list = []
42
+ BO_list = []
43
+
44
+ for i in range(0,video.shape[0]-128,30):
45
+ video_input = video[i:i+128,:,:,:]
46
+ video_input, ROI1, ROI2, status, face_region = face_detection_ROI(fece_detection, video_input)
47
+
48
+ # print(video_input.shape)
49
+ ippg, face_list = model.predict(video_input)
50
+ HR,RR,BP,HRV,BO = rppg_to_physiological_indicators(ippg, ROI1, ROI2)
51
+ HR_list.append(HR)
52
+ RR_list.append(RR)
53
+ HRV_list.append(HRV)
54
+ BO_list.append(BO)
55
+
56
+ ippg_pd = pd.DataFrame({
57
+ "index":range(0,80),
58
+ "rppg":ippg,
59
+ })
60
+ HR_pd = pd.DataFrame({"index":range(0,len(HR_list)),"HR":HR_list})
61
+ RR_pd = pd.DataFrame({"index":range(0,len(RR_list)),"RR":RR_list})
62
+ BP_pd = pd.DataFrame({"index":range(0,len(BP)),"BP":BP})
63
+ HRV_pd = pd.DataFrame({"index":range(0,len(HRV_list)),"HRV":HRV_list})
64
+ BO_pd = pd.DataFrame({"index":range(0,len(BO_list)),"BO":BO_list})
65
+ ippg_pd.to_csv(csv_url,index=False)
66
+ print(len(ippg))
67
+
68
+ yield ippg_pd,HR_pd,RR_pd,BP_pd,HRV_pd,BO_pd
69
+
70
+
71
+ def get_data():
72
+ return pd.read_csv(csv_url)
73
+
74
+ with gr.Blocks() as demo:
75
+ gr.Markdown("## Calculate rppg from video.") # render a Markdown heading
76
+ with gr.Tab("Video"): # create a tab
77
+ with gr.Row(): # arrange components in one row
78
+ model_select_video = gr.Dropdown(["ContrastPhys","DeepPhys"],label="Model",info="Select the rPPG detection model") # dropdown menu
79
+ with gr.Row(): # arrange components in one row
80
+ video_input = gr.Video()
81
+ # video_output = gr.Video()
82
+ rppg_output_video = gr.LinePlot(x="index", y="rppg", y_title="rppg signal", width=600, height=300,label="rppg signal")
83
+ with gr.Row(): # arrange components in one row
84
+ HR_output_video = gr.LinePlot(x="index", y="HR", y_title="heart rate", width=170, height=150,label="heart rate")
85
+ RR_output_video = gr.LinePlot(x="index", y="RR", y_title="respiration", width=170, height=150,label="respiration")
86
+ BP_output_video = gr.LinePlot(x="index", y="BP", y_title="blood pressure", width=170, height=150,label="blood pressure")
87
+ HRV_output_video = gr.LinePlot(x="index", y="HRV", y_title="heart rate variability", width=170, height=150,label="heart rate variability")
88
+ BO_output_video = gr.LinePlot(x="index", y="BO", y_title="blood oxygen", width=170, height=150,label="blood oxygen")
89
+ text_button = gr.Button("Start",label="Start")
90
+ with gr.Tab("Webcam"): # create a tab
91
+ with gr.Row(): # arrange components in one row
92
+ model_select_webcam = gr.Dropdown(["ContrastPhys","DeepPhys"],label="Model",info="Select the rPPG detection model") # dropdown menu
93
+ with gr.Row(): # arrange components in one row
94
+ webcam_input = gr.Video(source="webcam")
95
+ # video_output = gr.Video()
96
+ rppg_output_webcam = gr.LinePlot(x="index", y="rppg", y_title="rppg signal", width=600, height=300,label="rppg signal")
97
+ with gr.Row(): # arrange components in one row
98
+ HR_output_webcam = gr.LinePlot(x="index", y="HR", y_title="heart rate", width=170, height=150,label="heart rate")
99
+ RR_output_webcam = gr.LinePlot(x="index", y="RR", y_title="respiration", width=170, height=150,label="respiration")
100
+ BP_output_webcam = gr.LinePlot(x="index", y="BP", y_title="blood pressure", width=170, height=150,label="blood pressure")
101
+ HRV_output_webcam = gr.LinePlot(x="index", y="HRV", y_title="heart rate variability", width=170, height=150,label="heart rate variability")
102
+ BO_output_webcam = gr.LinePlot(x="index", y="BO", y_title="blood oxygen", width=170, height=150,label="blood oxygen")
103
+ image_button = gr.Button("Start")
104
+
105
+ with gr.Accordion("Open for More!"): # 可折叠的组件
106
+ gr.Markdown("Look at me...")
107
+
108
+ text_button.click(video_to_rppg_dynamic,
109
+ inputs=[model_select_video,video_input],
110
+ outputs=[rppg_output_video,HR_output_video,RR_output_video,BP_output_video,HRV_output_video,BO_output_video]) # bind the button to its callback
111
+ image_button.click(video_to_rppg_dynamic,
112
+ inputs=[model_select_webcam,webcam_input],
113
+ outputs=[rppg_output_webcam,HR_output_webcam,RR_output_webcam,BP_output_webcam,HRV_output_webcam,BO_output_webcam]) # bind the button to its callback
114
+
115
+ # define queue - required for generators
116
+ demo.queue()
117
+
118
+ demo.launch(debug=True,share=True)
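`video_to_rppg_dynamic` is a generator, so each `yield` pushes fresh DataFrames into the `LinePlot` components, and `demo.queue()` must be called before launch for that streaming to work. A stripped-down, self-contained sketch of the same pattern (dummy sine data instead of an rPPG model) might look like:

```python
import time
import numpy as np
import pandas as pd
import gradio as gr

def stream_fake_signal():
    # Generator callback: yield a growing DataFrame so the plot updates incrementally.
    values = []
    for step in range(10):
        values.append(float(np.sin(step / 2)))
        time.sleep(0.5)
        yield pd.DataFrame({"index": range(len(values)), "rppg": values})

with gr.Blocks() as demo:
    plot = gr.LinePlot(x="index", y="rppg", label="rppg signal")
    gr.Button("Start").click(stream_fake_signal, inputs=None, outputs=plot)

demo.queue()   # required so generator outputs are streamed to the client
demo.launch()
```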
code/contrast_phys/DeepPhysModel.py ADDED
@@ -0,0 +1,125 @@
1
+ """DeepPhys - 2D Convolutional Attention Network.
2
+ DeepPhys: Video-Based Physiological Measurement Using Convolutional Attention Networks
3
+ ECCV, 2018
4
+ Weixuan Chen, Daniel McDuff
5
+ """
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+
11
+ class Attention_mask(nn.Module):
12
+ def __init__(self):
13
+ super(Attention_mask, self).__init__()
14
+
15
+ def forward(self, x):
16
+ xsum = torch.sum(x, dim=2, keepdim=True)
17
+ xsum = torch.sum(xsum, dim=3, keepdim=True)
18
+ xshape = tuple(x.size())
19
+ return x / xsum * xshape[2] * xshape[3] * 0.5
20
+
21
+ def get_config(self):
22
+ """May be generated manually. """
23
+ config = super(Attention_mask, self).get_config()
24
+ return config
25
+
26
+
27
+ class DeepPhys(nn.Module):
28
+
29
+ def __init__(self, in_channels=3, nb_filters1=32, nb_filters2=64, kernel_size=3, dropout_rate1=0.25,
30
+ dropout_rate2=0.5, pool_size=(2, 2), nb_dense=128, img_size=36):
31
+ """Definition of DeepPhys.
32
+ Args:
33
+ in_channels: the number of input channel. Default: 3
34
+ img_size: height/width of each frame. Default: 36.
35
+ Returns:
36
+ DeepPhys model.
37
+ """
38
+ super(DeepPhys, self).__init__()
39
+ self.in_channels = in_channels
40
+ self.kernel_size = kernel_size
41
+ self.dropout_rate1 = dropout_rate1
42
+ self.dropout_rate2 = dropout_rate2
43
+ self.pool_size = pool_size
44
+ self.nb_filters1 = nb_filters1
45
+ self.nb_filters2 = nb_filters2
46
+ self.nb_dense = nb_dense
47
+ # Motion branch convs
48
+ self.motion_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size, padding=(1, 1),
49
+ bias=True)
50
+ self.motion_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, bias=True)
51
+ self.motion_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size, padding=(1, 1),
52
+ bias=True)
53
+ self.motion_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, bias=True)
54
+ # Appearance branch convs
55
+ self.apperance_conv1 = nn.Conv2d(self.in_channels, self.nb_filters1, kernel_size=self.kernel_size,
56
+ padding=(1, 1), bias=True)
57
+ self.apperance_conv2 = nn.Conv2d(self.nb_filters1, self.nb_filters1, kernel_size=self.kernel_size, bias=True)
58
+ self.apperance_conv3 = nn.Conv2d(self.nb_filters1, self.nb_filters2, kernel_size=self.kernel_size,
59
+ padding=(1, 1), bias=True)
60
+ self.apperance_conv4 = nn.Conv2d(self.nb_filters2, self.nb_filters2, kernel_size=self.kernel_size, bias=True)
61
+ # Attention layers
62
+ self.apperance_att_conv1 = nn.Conv2d(self.nb_filters1, 1, kernel_size=1, padding=(0, 0), bias=True)
63
+ self.attn_mask_1 = Attention_mask()
64
+ self.apperance_att_conv2 = nn.Conv2d(self.nb_filters2, 1, kernel_size=1, padding=(0, 0), bias=True)
65
+ self.attn_mask_2 = Attention_mask()
66
+ # Avg pooling
67
+ self.avg_pooling_1 = nn.AvgPool2d(self.pool_size)
68
+ self.avg_pooling_2 = nn.AvgPool2d(self.pool_size)
69
+ self.avg_pooling_3 = nn.AvgPool2d(self.pool_size)
70
+ # Dropout layers
71
+ self.dropout_1 = nn.Dropout(self.dropout_rate1)
72
+ self.dropout_2 = nn.Dropout(self.dropout_rate1)
73
+ self.dropout_3 = nn.Dropout(self.dropout_rate1)
74
+ self.dropout_4 = nn.Dropout(self.dropout_rate2)
75
+ # Dense layers
76
+ if img_size == 36:
77
+ self.final_dense_1 = nn.Linear(3136, self.nb_dense, bias=True)
78
+ elif img_size == 72:
79
+ self.final_dense_1 = nn.Linear(16384, self.nb_dense, bias=True)
80
+ elif img_size == 96:
81
+ self.final_dense_1 = nn.Linear(30976, self.nb_dense, bias=True)
82
+ else:
83
+ raise Exception('Unsupported image size')
84
+ self.final_dense_2 = nn.Linear(self.nb_dense, 1, bias=True)
85
+
86
+ def forward(self, inputs, params=None):
87
+
88
+ diff_input = inputs[:, :3, :, :]
89
+ raw_input = inputs[:, 3:, :, :]
90
+
91
+ d1 = torch.tanh(self.motion_conv1(diff_input))
92
+ d2 = torch.tanh(self.motion_conv2(d1))
93
+
94
+ r1 = torch.tanh(self.apperance_conv1(raw_input))
95
+ r2 = torch.tanh(self.apperance_conv2(r1))
96
+
97
+ g1 = torch.sigmoid(self.apperance_att_conv1(r2))
98
+ g1 = self.attn_mask_1(g1)
99
+ gated1 = d2 * g1
100
+
101
+ d3 = self.avg_pooling_1(gated1)
102
+ d4 = self.dropout_1(d3)
103
+
104
+ r3 = self.avg_pooling_2(r2)
105
+ r4 = self.dropout_2(r3)
106
+
107
+ d5 = torch.tanh(self.motion_conv3(d4))
108
+ d6 = torch.tanh(self.motion_conv4(d5))
109
+
110
+ r5 = torch.tanh(self.apperance_conv3(r4))
111
+ r6 = torch.tanh(self.apperance_conv4(r5))
112
+
113
+ g2 = torch.sigmoid(self.apperance_att_conv2(r6))
114
+ g2 = self.attn_mask_2(g2)
115
+ gated2 = d6 * g2
116
+
117
+ d7 = self.avg_pooling_3(gated2)
118
+ d8 = self.dropout_3(d7)
119
+ d9 = d8.reshape(d8.size(0), -1)
120
+ d10 = torch.tanh(self.final_dense_1(d9))
121
+ d11 = self.dropout_4(d10)
122
+ out = self.final_dense_2(d11)
123
+
124
+ return out
125
+
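A quick shape check for the network above, on random dummy data; the 6-channel input (channels 0-2 feed the motion branch, 3-5 the appearance branch, matching the split in `forward`) and the 72x72 resolution follow the configuration used by `model.py` in this commit:

```python
import torch
from contrast_phys.DeepPhysModel import DeepPhys  # assumes code/ is on sys.path

model = DeepPhys(img_size=72).eval()
frames = torch.randn(4, 6, 72, 72)     # dummy batch: 4 frames, 3 diff + 3 raw channels
with torch.no_grad():
    out = model(frames)
print(out.shape)                        # torch.Size([4, 1]): one rPPG sample per frame
```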
code/contrast_phys/PURE_PURE_UBFC_contrastphy_Epoch10.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d0b97a71f5509b6ec69691754abafeab3f78e5779601d71b8650fe953c53fa2
3
+ size 3467843
code/contrast_phys/PURE_PURE_UBFC_deepphys_Epoch29.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0dded770839af3bd6bb47f3d21b322d453b53a7eaad7d4c782c9180dd7b2226
3
+ size 8923597
code/contrast_phys/PURE_PURE_UBFC_physnet_diffnormalized_Epoch29.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1138ff94761db7425e0c35581777a2c2215704dd467e353f956f217ed7aa894b
3
+ size 3108222
code/contrast_phys/PhysNetModel.py ADDED
@@ -0,0 +1,115 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+
7
+ # -------------------------------------------------------------------------------------------------------------------
8
+ # PhysNet model
9
+ #
10
+ # the output is an ST-rPPG block rather than a rPPG signal.
11
+ # -------------------------------------------------------------------------------------------------------------------
12
+ class PhysNet(nn.Module):
13
+ def __init__(self, S=2, in_ch=3):
14
+ super().__init__()
15
+
16
+ self.S = S # S is the spatial dimension of ST-rPPG block
17
+
18
+ self.start = nn.Sequential(
19
+ nn.Conv3d(in_channels=in_ch, out_channels=32, kernel_size=(1, 5, 5), stride=1, padding=(0, 2, 2)),
20
+ nn.BatchNorm3d(32),
21
+ nn.ELU()
22
+ )
23
+
24
+ # 1x
25
+ self.loop1 = nn.Sequential(
26
+ nn.AvgPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0),
27
+ nn.Conv3d(in_channels=32, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
28
+ nn.BatchNorm3d(64),
29
+ nn.ELU(),
30
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
31
+ nn.BatchNorm3d(64),
32
+ nn.ELU()
33
+ )
34
+
35
+ # encoder
36
+ self.encoder1 = nn.Sequential(
37
+ nn.AvgPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=0),
38
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
39
+ nn.BatchNorm3d(64),
40
+ nn.ELU(),
41
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
42
+ nn.BatchNorm3d(64),
43
+ nn.ELU(),
44
+ )
45
+ self.encoder2 = nn.Sequential(
46
+ nn.AvgPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=0),
47
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
48
+ nn.BatchNorm3d(64),
49
+ nn.ELU(),
50
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
51
+ nn.BatchNorm3d(64),
52
+ nn.ELU()
53
+ )
54
+
55
+ #
56
+ self.loop4 = nn.Sequential(
57
+ nn.AvgPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2), padding=0),
58
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
59
+ nn.BatchNorm3d(64),
60
+ nn.ELU(),
61
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 3, 3), stride=1, padding=(1, 1, 1)),
62
+ nn.BatchNorm3d(64),
63
+ nn.ELU()
64
+ )
65
+
66
+ # decoder to reach back initial temporal length
67
+ self.decoder1 = nn.Sequential(
68
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
69
+ nn.BatchNorm3d(64),
70
+ nn.ELU(),
71
+ )
72
+ self.decoder2 = nn.Sequential(
73
+ nn.Conv3d(in_channels=64, out_channels=64, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0)),
74
+ nn.BatchNorm3d(64),
75
+ nn.ELU()
76
+ )
77
+
78
+
79
+ self.end = nn.Sequential(
80
+ nn.AdaptiveAvgPool3d((None, S, S)),
81
+ nn.Conv3d(in_channels=64, out_channels=1, kernel_size=(1, 1, 1), stride=1, padding=(0, 0, 0))
82
+ )
83
+
84
+ def forward(self, x):
85
+ print("physet shape = ====================",x.shape)
86
+ means = torch.mean(x, dim=(2, 3, 4), keepdim=True)
87
+ stds = torch.std(x, dim=(2, 3, 4), keepdim=True)
88
+ x = (x - means) / stds # (B, C, T, 128, 128)
89
+
90
+ parity = []
91
+ x = self.start(x) # (B, C, T, 128, 128)
92
+ x = self.loop1(x) # (B, 64, T, 64, 64)
93
+ parity.append(x.size(2) % 2)
94
+ x = self.encoder1(x) # (B, 64, T/2, 32, 32)
95
+ parity.append(x.size(2) % 2)
96
+ x = self.encoder2(x) # (B, 64, T/4, 16, 16)
97
+ x = self.loop4(x) # (B, 64, T/4, 8, 8)
98
+
99
+ x = F.interpolate(x, scale_factor=(2, 1, 1)) # (B, 64, T/2, 8, 8)
100
+ x = self.decoder1(x) # (B, 64, T/2, 8, 8)
101
+ x = F.pad(x, (0,0,0,0,0,parity[-1]), mode='replicate')
102
+ x = F.interpolate(x, scale_factor=(2, 1, 1)) # (B, 64, T, 8, 8)
103
+ x = self.decoder2(x) # (B, 64, T, 8, 8)
104
+ x = F.pad(x, (0,0,0,0,0,parity[-2]), mode='replicate')
105
+ x = self.end(x) # (B, 1, T, S, S), ST-rPPG block
106
+
107
+ x_list = []
108
+ for a in range(self.S):
109
+ for b in range(self.S):
110
+ x_list.append(x[:,:,:,a,b]) # (B, 1, T)
111
+
112
+ x = sum(x_list)/(self.S*self.S) # (B, 1, T)
113
+ X = torch.cat(x_list+[x], 1) # (B, N, T), flatten all spatial signals to the second dimension
114
+ print("physet shape output = ====================",X.shape)
115
+ return X
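For reference, a dummy forward pass through the ST-rPPG block above with S=2; `model.py` takes the last output channel (the spatially averaged signal) as the rPPG waveform:

```python
import torch
from contrast_phys.PhysNetModel import PhysNet  # assumes code/ is on sys.path

model = PhysNet(S=2).eval()
clip = torch.randn(1, 3, 128, 128, 128)   # (B, C, T, H, W) dummy clip
with torch.no_grad():
    out = model(clip)                      # (B, S*S + 1, T) = (1, 5, 128)
rppg = out[:, -1, :]                       # spatially averaged rPPG signal, as used in model.py
print(out.shape, rppg.shape)
```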
code/contrast_phys/__pycache__/DeepPhysModel.cpython-37.pyc ADDED
Binary file (3.92 kB). View file
 
code/contrast_phys/__pycache__/DeepPhysModel.cpython-38.pyc ADDED
Binary file (3.97 kB). View file
 
code/contrast_phys/__pycache__/PhysNetModel.cpython-37.pyc ADDED
Binary file (2.59 kB). View file
 
code/contrast_phys/__pycache__/PhysNetModel.cpython-38.pyc ADDED
Binary file (2.62 kB). View file
 
code/contrast_phys/model_weights.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb1e88a2db11d715a8ec41886a2f66fa40f6e4c9e031ecfd88648b14e563b135
3
+ size 3465714
code/face_detection.py ADDED
@@ -0,0 +1,144 @@
1
+ import cv2
2
+ import numpy as np
3
+ import dlib
4
+ from imutils import face_utils
5
+ import imutils
6
+ import joblib
7
+
8
+ class FaceDetection(object):
9
+ def __init__(self):
10
+ self.detector = dlib.get_frontal_face_detector()
11
+ self.predictor = joblib.load('./shape_predictor_68_face_landmarks_predictor.pkl')
12
+ self.fa = face_utils.FaceAligner(self.predictor, desiredFaceWidth=256)
13
+
14
+ def face_detect(self, frame):
15
+ #frame = imutils.resize(frame, width=400)
16
+ face_frame = np.zeros((10, 10, 3), np.uint8)
17
+ mask = np.zeros((10, 10, 3), np.uint8)
18
+ ROI1 = np.zeros((10, 10, 3), np.uint8)
19
+ ROI2 = np.zeros((10, 10, 3), np.uint8)
20
+ face_region = np.zeros((10, 10, 3), np.uint8)
21
+ #ROI3 = np.zeros((10, 10, 3), np.uint8)
22
+ status = False
23
+
24
+ if frame is None:
25
+ return
26
+
27
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
28
+ # detect faces in the grayscale image
29
+ rects = self.detector(gray, 0)
30
+
31
+
32
+ # loop over the face detections
33
+ #for (i, rect) in enumerate(rects):
34
+ # determine the facial landmarks for the face region, then
35
+ # convert the facial landmark (x, y)-coordinates to a NumPy
36
+ # array
37
+
38
+ # assumption: only 1 face is detected
39
+ if len(rects)>0:
40
+ status = True
41
+ # shape = self.predictor(gray, rects[0])
42
+ # shape = face_utils.shape_to_np(shape)
43
+
44
+ # convert dlib's rectangle to a OpenCV-style bounding box
45
+ # [i.e., (x, y, w, h)], then draw the face bounding box
46
+ (x, y, w, h) = face_utils.rect_to_bb(rects[0])
47
+ cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
48
+ if y<0:
49
+ print("a")
50
+ return frame, face_frame, ROI1, ROI2, status, mask
51
+ #if i==0:
52
+ face_frame = frame[y:y+h,x:x+w]
53
+ face_region = face_frame.copy()
54
+ # show the face number
55
+ #cv2.putText(frame, "Face #{}".format(i + 1), (x - 10, y - 10),
56
+ # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
57
+ # loop over the (x, y)-coordinates for the facial landmarks
58
+ # and draw them on the image
59
+
60
+ # for (x, y) in shape:
61
+ # cv2.circle(frame, (x, y), 1, (0, 0, 255), -1) #draw facial landmarks
62
+ if(face_frame.shape[:2][1] != 0):
63
+ face_frame = imutils.resize(face_frame,width=256)
64
+
65
+ #print(frame,'|||',gray,'|||',rects[0],type(np.array(rects[0])))
66
+ face_frame = self.fa.align(frame,gray,rects[0]) # align face
67
+
68
+ grayf = cv2.cvtColor(face_frame, cv2.COLOR_BGR2GRAY)
69
+ rectsf = self.detector(grayf, 0)
70
+
71
+ if len(rectsf) >0:
72
+ shape = self.predictor(grayf, rectsf[0])
73
+ shape = face_utils.shape_to_np(shape)
74
+
75
+ for (a, b) in shape:
76
+ cv2.circle(face_frame, (a, b), 1, (0, 0, 255), -1) #draw facial landmarks
77
+
78
+ cv2.rectangle(face_frame,(shape[54][0], shape[29][1]), #draw rectangle on right and left cheeks
79
+ (shape[12][0],shape[33][1]), (0,255,0), 0)
80
+ cv2.rectangle(face_frame, (shape[4][0], shape[29][1]),
81
+ (shape[48][0],shape[33][1]), (0,255,0), 0)
82
+
83
+ ROI1 = face_frame[shape[29][1]:shape[33][1], #right cheek
84
+ shape[54][0]:shape[12][0]]
85
+
86
+ ROI2 = face_frame[shape[29][1]:shape[33][1], #left cheek
87
+ shape[4][0]:shape[48][0]]
88
+
89
+ # ROI3 = face_frame[shape[29][1]:shape[33][1], #nose
90
+ # shape[31][0]:shape[35][0]]
91
+
92
+ #get the shape of face for color amplification
93
+ rshape = np.zeros_like(shape)
94
+ rshape = self.face_remap(shape)
95
+ mask = np.zeros((face_frame.shape[0], face_frame.shape[1]))
96
+
97
+ cv2.fillConvexPoly(mask, rshape[0:27], 1)
98
+ # mask = np.zeros((face_frame.shape[0], face_frame.shape[1],3),np.uint8)
99
+ # cv2.fillConvexPoly(mask, shape, 1)
100
+
101
+ #cv2.imshow("face align", face_frame)
102
+
103
+ # cv2.rectangle(frame,(shape[54][0], shape[29][1]), #draw rectangle on right and left cheeks
104
+ # (shape[12][0],shape[54][1]), (0,255,0), 0)
105
+ # cv2.rectangle(frame, (shape[4][0], shape[29][1]),
106
+ # (shape[48][0],shape[48][1]), (0,255,0), 0)
107
+
108
+ # ROI1 = frame[shape[29][1]:shape[54][1], #right cheek
109
+ # shape[54][0]:shape[12][0]]
110
+
111
+ # ROI2 = frame[shape[29][1]:shape[54][1], #left cheek
112
+ # shape[4][0]:shape[48][0]]
113
+
114
+ else:
115
+ cv2.putText(frame, "No face detected",
116
+ (200,200), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255),2)
117
+ status = False
118
+ return frame, face_frame, ROI1, ROI2, status, mask, face_region
119
+
120
+ # some points in the facial landmarks need to be re-ordered
121
+ def face_remap(self,shape):
122
+ remapped_image = shape.copy()
123
+ # left eye brow
124
+ remapped_image[17] = shape[26]
125
+ remapped_image[18] = shape[25]
126
+ remapped_image[19] = shape[24]
127
+ remapped_image[20] = shape[23]
128
+ remapped_image[21] = shape[22]
129
+ # right eye brow
130
+ remapped_image[22] = shape[21]
131
+ remapped_image[23] = shape[20]
132
+ remapped_image[24] = shape[19]
133
+ remapped_image[25] = shape[18]
134
+ remapped_image[26] = shape[17]
135
+ # neatening
136
+ remapped_image[27] = shape[0]
137
+
138
+ remapped_image = cv2.convexHull(shape)
139
+ return remapped_image
140
+
141
+
142
+
143
+
144
+
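A minimal usage sketch for the detector above; it assumes dlib, imutils, and the pickled landmark predictor (`shape_predictor_68_face_landmarks_predictor.pkl`) are available in the working directory, and `frame.jpg` is a placeholder path:

```python
import cv2
from face_detection import FaceDetection

detector = FaceDetection()        # loads the pickled dlib 68-landmark predictor
frame = cv2.imread('frame.jpg')   # any BGR frame containing a single face
frame, face_frame, ROI1, ROI2, status, mask, face_region = detector.face_detect(frame)
if status:
    print('face found, cheek ROIs:', ROI1.shape, ROI2.shape)
```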
code/gradio_utils.py ADDED
@@ -0,0 +1,85 @@
1
+ import numpy as np
2
+ import gradio as gr
3
+ import cv2
4
+ from model import PhysNet_Model
5
+ import pandas as pd
6
+ import time
7
+ from physiological_indicators import PhysiologicalIndicators
8
+ from utils_sig import *
9
+ from face_detection import FaceDetection
10
+
11
+ ## rppg signal to HR,RR,BP,HRV,BO
12
+ def rppg_to_physiological_indicators(rppg,ROI1, ROI2):
13
+ '''
14
+ input: rppg signal(list)
15
+ output:HR,RR,BP,HRV,BO
16
+ '''
17
+ indicators = PhysiologicalIndicators()
18
+ HR,RR = indicators.calculate_heart_rate(rppg,30)
19
+ # RR = rppg#indicators.calculate_respiratory_rate(rppg,30)
20
+ BP, max_BP, min_BP = indicators.calculate_blood_pressure(rppg)
21
+ HRV = indicators.calculate_heart_rate_variability(rppg,30)
22
+ BO = indicators.calculate_SpO2(ROI1, ROI2)
23
+ return HR,RR,BP,HRV,BO
24
+
25
+
26
+ def fake_diffusion(steps):
27
+ for _ in range(steps):
28
+ time.sleep(1)
29
+ image = np.random.random((600, 600, 3))
30
+ yield image
31
+ image = "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg"
32
+ yield image
33
+
34
+
35
+ def video_to_rppg_statistic(path):
36
+ '''
37
+ Statically convert a video into an rPPG signal (whole clip processed at once)
38
+ path: video file path
39
+ #C:\\Users\\74314\\AppData\\Local\\Temp\\gradio\\f50cc35ca9bac3568e36f1f7277a72d5e252ad37\\e0faab5f1b8c05eda32648dd635a97c4.mp4
40
+ '''
41
+ # read video from file path as frames list
42
+ frames = []
43
+ cap = cv2.VideoCapture(path)
44
+ ret = True
45
+ while ret:
46
+ ret, img = cap.read() # read one frame from the 'capture' object; img is (H, W, C)
47
+ if ret:
48
+ frames.append(img)
49
+ video = np.stack(frames, axis=0) # dimensions (T, H, W, C)
50
+ print(video.shape)
51
+
52
+ face, ROI1, ROI2, status, face_region = face_detection_ROI(fece_detection, video)
53
+
54
+ video_input = face
55
+ print(video_input.shape)
56
+ ippg, len_ippg,face_list = model.predict_statistic(video_input)
57
+ ippg_pd = pd.DataFrame({
58
+ "index":range(0,len_ippg),
59
+ "rppg":ippg,
60
+ })
61
+ ippg_pd.to_csv(csv_url,index=False)
62
+ HR,RR,BP,HRV,BO = rppg_to_physiological_indicators(ippg, ROI1, ROI2)
63
+ return ippg_pd,HR,RR,BP,HRV,BO
64
+
65
+
66
+ def read_video(path):
67
+ '''
68
+ input: video file path
69
+ output: video frames list
70
+ '''
71
+ # read video from file path as frames list
72
+ frames = []
73
+ cap = cv2.VideoCapture(path)
74
+ ret = True
75
+ while ret:
76
+ ret, img = cap.read() # read one frame from the 'capture' object; img is (H, W, C)
77
+ if ret:
78
+ frames.append(img)
79
+ print(img.shape)
80
+ video = np.stack(frames, axis=0) # dimensions (T, H, W, C)
81
+ # import skvideo.io
82
+ # video = skvideo.io.vread(path)
83
+ # print(video.shape)
84
+
85
+ return video
code/ippg_predict.csv ADDED
@@ -0,0 +1,81 @@
1
+ index,rppg
2
+ 0,0.41405636
3
+ 1,0.2000318
4
+ 2,0.0820328
5
+ 3,-0.028137699
6
+ 4,-0.049714215
7
+ 5,-0.064972766
8
+ 6,-0.09421185
9
+ 7,-0.114180595
10
+ 8,-0.09591034
11
+ 9,-0.073588
12
+ 10,-0.04313855
13
+ 11,0.03085187
14
+ 12,0.0934888
15
+ 13,0.3231313
16
+ 14,0.47150603
17
+ 15,0.69849265
18
+ 16,0.77421856
19
+ 17,0.83780587
20
+ 18,0.83160996
21
+ 19,0.77588594
22
+ 20,0.70812327
23
+ 21,0.530922
24
+ 22,0.42441338
25
+ 23,0.31190425
26
+ 24,0.28862673
27
+ 25,0.34351623
28
+ 26,0.39518428
29
+ 27,0.47460616
30
+ 28,0.48288664
31
+ 29,0.3444392
32
+ 30,0.22368154
33
+ 31,0.013961956
34
+ 32,-0.13188864
35
+ 33,-0.4489758
36
+ 34,-0.6084776
37
+ 35,-0.73801184
38
+ 36,-0.69461787
39
+ 37,-0.42097837
40
+ 38,-0.2688645
41
+ 39,0.017156467
42
+ 40,0.18636954
43
+ 41,0.42036015
44
+ 42,0.4912194
45
+ 43,0.5223713
46
+ 44,0.46916124
47
+ 45,0.21872395
48
+ 46,0.085381635
49
+ 47,-0.071113326
50
+ 48,-0.08085425
51
+ 49,0.063260555
52
+ 50,0.21006191
53
+ 51,0.45567837
54
+ 52,0.58702725
55
+ 53,0.81640875
56
+ 54,0.8822088
57
+ 55,0.9162681
58
+ 56,0.86282206
59
+ 57,0.6050979
60
+ 58,0.4452159
61
+ 59,0.2106748
62
+ 60,0.12560174
63
+ 61,0.09135161
64
+ 62,0.09115855
65
+ 63,0.16783619
66
+ 64,0.23296665
67
+ 65,0.33295977
68
+ 66,0.35504892
69
+ 67,0.35514745
70
+ 68,0.30651757
71
+ 69,0.11715238
72
+ 70,0.006680727
73
+ 71,-0.14071405
74
+ 72,-0.1679501
75
+ 73,-0.083684474
76
+ 74,-0.012030022
77
+ 75,0.14085057
78
+ 76,0.22960266
79
+ 77,0.34104726
80
+ 78,0.3676189
81
+ 79,0.37382895
code/model.py ADDED
@@ -0,0 +1,185 @@
1
+ from contrast_phys.PhysNetModel import PhysNet
2
+ from utils_sig import *
3
+ import matplotlib.pyplot as plt
4
+
5
+
6
+ class PhysNet_Model:
7
+ def __init__(self, model_path):
8
+ self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
9
+ print("device",self.device)
10
+ self.model = PhysNet(S=2).to(self.device).eval()
11
+ # self.model.load_state_dict(torch.load(model_path, map_location=self.device))
12
+ self.rppg = []
13
+ self.fps = 30 # default: 30 fps
14
+
15
+ if "pt" in model_path:
16
+ print("Testing uses pt!")
17
+ weight = torch.load(model_path, map_location=self.device)
18
+ import collections
19
+ new_dict = collections.OrderedDict((old_key.replace("module.",""), value) for (old_key, value) in weight.items())
20
+ self.model.load_state_dict(new_dict)
21
+
22
+ def predict(self, frame_list):
23
+ # run model inference
24
+ print("model processing")
25
+ face_list = self.load_data(frame_list[-128:])
26
+ face_list = self.standardized_data(face_list)
27
+ face_list_t = torch.tensor(face_list.astype('float32')).to(self.device)
28
+ print("+++++face_list_t++++++++",face_list_t.shape) # need [1, 3, 128, 128, 128]
29
+ rppg = self.model(face_list_t)[:,-1, :]
30
+ rppg = rppg[0].detach().cpu().numpy()[20:100]
31
+
32
+ print("model done")
33
+ return rppg, face_list
34
+
35
+ def predict_statistic(self, frame_list):
36
+ # run model inference
37
+ print("model processing")
38
+ face_list = self.load_data(frame_list)
39
+ face_list = self.standardized_data(face_list)
40
+ face_list_t = torch.tensor(face_list.astype('float32')).to(self.device)
41
+ print("+++++face_list_t++++++++",face_list_t.shape) # need [1, 3, 128, 128, 128]
42
+ rppg = self.model(face_list_t)[:,-1, :]
43
+ rppg = rppg[0].detach().cpu().numpy()
44
+
45
+ rppg = rppg[20:len(rppg)-20]
46
+
47
+ print("model done")
48
+ return rppg, len(rppg), face_list
49
+
50
+ def load_data(self,frame_list):
51
+ # preprocess the input frame_list
52
+ # face_list = face_detection(frame_list)
53
+ face_list = []
54
+ for frame in frame_list:
55
+ face_frame = cv2.resize(frame.astype('float32'), (128, 128), interpolation=cv2.INTER_AREA)
56
+ face_list.append(face_frame)
57
+ face_list = np.array(face_list) # (D, H, W, C) (N , C, D, H, W)
58
+ print("============= face_list shape ==============",face_list.shape) # (180, 128, 128, 3)
59
+ face_list = np.transpose(face_list, (3, 0, 1, 2)) # (C, D, H, W)
60
+ face_list = np.array(face_list)[np.newaxis]
61
+ # face_list = torch.tensor(face_list.astype('float32')).to(device)
62
+
63
+ return face_list
64
+
65
+ def plot(self):
66
+ # build the Matplotlib figure for the pulse waveform
67
+ hr, psd_y, psd_x = hr_fft(self.rppg, fs= self.fps)
68
+
69
+ fig, (ax1, ax2) = plt.subplots(2, figsize=(20,10))
70
+ ax1.plot(np.arange(len(self.rppg))/self.fps, self.rppg)
71
+ ax1.set_xlabel('time (sec)')
72
+ ax1.grid('on')
73
+ ax1.set_title('rPPG waveform')
74
+
75
+ ax2.plot(psd_x, psd_y)
76
+ ax2.set_xlabel('heart rate (bpm)')
77
+ ax2.set_xlim([40,200])
78
+ ax2.grid('on')
79
+ ax2.set_title('PSD')
80
+ return fig
81
+
82
+ def show(self):
83
+ # show the pulse waveform figure
84
+ plt.show()
85
+
86
+ def standardized_data(self,data):
87
+ """Z-score standardization for video data."""
88
+ data = data - np.mean(data)
89
+ data = data / np.std(data)
90
+ data[np.isnan(data)] = 0
91
+ return data
92
+
93
+ from contrast_phys.DeepPhysModel import DeepPhys
94
+ class DeepPhys_Model:
95
+ def __init__(self, model_path):
96
+ self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
97
+ self.model = DeepPhys(img_size=72).to(self.device).eval()
98
+ # self.model.load_state_dict(torch.load(model_path, map_location=self.device))
99
+ self.rppg = []
100
+ self.fps = 30 # default: 30 fps
101
+
102
+ if "pt" in model_path:
103
+ print("Testing uses pt!")
104
+ weight = torch.load(model_path, map_location=self.device)
105
+ import collections
106
+ new_dict = collections.OrderedDict((old_key.replace("module.",""), value) for (old_key, value) in weight.items())
107
+ self.model.load_state_dict(new_dict)
108
+
109
+ def predict(self, frame_list):
110
+ # run model inference
111
+ print("model processing")
112
+ face_list = self.load_data(frame_list[-180:])
113
+ face_list_t = torch.tensor(face_list.astype('float32')).to(self.device)
114
+ print("face_list_t shape =============",face_list_t.shape) # [120, 3, 72, 72] need [4*180, 6, 72, 72])
115
+ rppg = self.model(face_list_t).flatten()
116
+ print("++++++++++++++rppg++++++++++++++++",rppg)
117
+ rppg = rppg.detach().cpu().numpy()[20:100]
118
+
119
+ print("model done")
120
+ return rppg, face_list
121
+
122
+ def load_data(self,frame_list):
123
+ # preprocess the input frame_list
124
+ # face_list = face_detection(frame_list)
125
+ face_list = []
126
+ for frame in frame_list:
127
+ face_frame = cv2.resize(frame, (72, 72))
128
+ face_list.append(face_frame)
129
+ face_list = np.array(face_list) # (N, H, W, C) # (180, 72, 72, 3)
130
+
131
+ frame_list_standardized = self.standardized_data(face_list) # (180, 72, 72, 3)
132
+ frame_list_diff_normalize = self.diff_normalize_data(face_list)
133
+ # concat frame_list_standardized and frame_list_diff_normalize at axis 3
134
+ face_list = np.concatenate((frame_list_standardized, frame_list_diff_normalize), axis=3) # (180, 72, 72, 6)
135
+
136
+ N, H, W, C = face_list.shape
137
+ # face_list = face_list.view(N * 1, C, H, W)
138
+
139
+ face_list = np.transpose(face_list, (0, 3, 1, 2)) # (N, C, H, W)
140
+ # face_list = np.array(face_list)[np.newaxis]
141
+ # face_list = torch.tensor(face_list.astype('float32')).to(device)
142
+
143
+ return face_list
144
+
145
+ def plot(self):
146
+ # build the Matplotlib figure for the pulse waveform
147
+ hr, psd_y, psd_x = hr_fft(self.rppg, fs=self.fps)
148
+
149
+ fig, (ax1, ax2) = plt.subplots(2, figsize=(20,10))
150
+ ax1.plot(np.arange(len(self.rppg))/self.fps, self.rppg)
151
+ ax1.set_xlabel('time (sec)')
152
+ ax1.grid('on')
153
+ ax1.set_title('rPPG waveform')
154
+
155
+ ax2.plot(psd_x, psd_y)
156
+ ax2.set_xlabel('heart rate (bpm)')
157
+ ax2.set_xlim([40,200])
158
+ ax2.grid('on')
159
+ ax2.set_title('PSD')
160
+ return fig
161
+
162
+ def show(self):
163
+ # show the pulse waveform figure
164
+ plt.show()
165
+
166
+ def standardized_data(self,data):
167
+ """Z-score standardization for video data."""
168
+ data = data - np.mean(data)
169
+ data = data / np.std(data)
170
+ data[np.isnan(data)] = 0
171
+ return data
172
+
173
+ def diff_normalize_data(self,data):
174
+ """Calculate discrete difference in video data along the time-axis and nornamize by its standard deviation."""
175
+ n, h, w, c = data.shape
176
+ diffnormalized_len = n - 1
177
+ diffnormalized_data = np.zeros((diffnormalized_len, h, w, c), dtype=np.float32)
178
+ diffnormalized_data_padding = np.zeros((1, h, w, c), dtype=np.float32)
179
+ for j in range(diffnormalized_len - 1):
180
+ diffnormalized_data[j, :, :, :] = (data[j + 1, :, :, :] - data[j, :, :, :]) / (
181
+ data[j + 1, :, :, :] + data[j, :, :, :] + 1e-7)
182
+ diffnormalized_data = diffnormalized_data / np.std(diffnormalized_data)
183
+ diffnormalized_data = np.append(diffnormalized_data, diffnormalized_data_padding, axis=0)
184
+ diffnormalized_data[np.isnan(diffnormalized_data)] = 0
185
+ return diffnormalized_data
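A self-contained numpy illustration of the diff-normalization above (not a call into the class): each output frame is (x[t+1] - x[t]) / (x[t+1] + x[t]), the result is scaled by its overall standard deviation, and a zero frame is appended so the length matches the input. Note the loop above stops one step early (`range(diffnormalized_len - 1)`), so its last computed frame stays zero, whereas the vectorized form below uses all T-1 differences:

```python
import numpy as np

clip = np.random.rand(8, 4, 4, 3).astype(np.float32)            # tiny dummy clip (T, H, W, C)

diff = (clip[1:] - clip[:-1]) / (clip[1:] + clip[:-1] + 1e-7)    # frame-to-frame normalized difference
diff = diff / np.std(diff)                                        # scale by the global std
diff = np.concatenate([diff, np.zeros_like(clip[:1])], axis=0)    # pad back to T frames
diff[np.isnan(diff)] = 0

print(diff.shape)   # (8, 4, 4, 3), same shape as the input clip
```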
code/model_weight/lgb_model_ppg2bp.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7117d11a361a5fb3fc2fae6ef90b0971d59ff5661323d40c6fc45f2c435803f6
3
+ size 1648716
code/model_weight/lgb_model_threechanel2HR.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98c628828e8c28810798e2d87f619bc5e0e2e1861063f34ceaf6bde1981c0dec
3
+ size 1743173
code/model_weight/lgb_model_threechanel2spo2.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a9db82a01900021c3aaad7b903a7aa2eab1207c22999b5d18b63ff4d127c333
3
+ size 520838
code/physiological_indicators.py ADDED
@@ -0,0 +1,106 @@
1
+ from utils_sig import *
2
+ import joblib
3
+ import numpy as np
4
+ from lightgbm import LGBMRegressor
5
+
6
+ import heartpy as hp
7
+ import scipy.signal as sig
8
+
9
+ class PhysiologicalIndicators:
10
+ def __init__(self):
11
+ self.heart_rate = 0
12
+ self.respiratory_rate = 0
13
+ self.heart_rate_variability = 0
14
+ self.SpO2 = 0
15
+ self.blood_pressure = 0
16
+
17
+ def calculate_heart_rate(self, ippg_data, fps):
18
+ # compute heart rate (and respiration rate)
19
+ print("HR processing")
20
+ self.heart_rate, self.respiratory_rate = hr_fft_2(ippg_data, fps)
21
+ # ippg = butter_bandpass(ippg_data, lowcut=0.6, highcut=4, fs=fps)
22
+ # self.heart_rate, psd_y, psd_x = hr_fft(ippg, fs=fps)
23
+ print("HR done")
24
+ return self.heart_rate, self.respiratory_rate
25
+
26
+ def calculate_heart_rate_variability(self, ippg_data, fps):
27
+ # compute heart rate variability
28
+ # TODO: implement the HRV calculation
29
+ self.heart_rate_variability = calculate_hrv(ippg_data, fps)
30
+ return self.heart_rate_variability
31
+
32
+ def calculate_SpO2(self, ROI_list, ROI2_list):
33
+ # compute blood oxygen saturation
34
+ # TODO: implement the SpO2 calculation
35
+ ROI1_SpO2 = RGB_SpO2(ROI_list)
36
+ ROI2_SpO2 = RGB_SpO2(ROI2_list)
37
+ self.SpO2 = (ROI1_SpO2 + ROI2_SpO2) / 2
38
+ return self.SpO2
39
+
40
+ def calculate_blood_pressure(self, ippg_data):
41
+ # estimate blood pressure
42
+ ippg_data = np.array(ippg_data).reshape(len(ippg_data),1)
43
+
44
+ bp_pred = []
45
+ model_list = joblib.load( './model_weight/lgb_model_ppg2bp.pkl')
46
+ for model in model_list:
47
+ result = model.predict(ippg_data)
48
+ bp_pred.append(result+10)
49
+ bp_list = np.mean(bp_pred, axis=0)
50
+ return bp_list,np.max(bp_list),np.min(bp_list)-15
51
+
52
+ def calculate_HR(self, ROI_list, ROI2_list):
53
+ # compute HR from the two ROIs
54
+ ROI1_HR = RGB_HR(ROI_list)
55
+ ROI2_HR = RGB_HR(ROI2_list)
56
+ print("ROI1_HR",ROI1_HR,"ROI2_HR",ROI2_HR)
57
+ HR = (ROI1_HR + ROI2_HR) / 2
58
+ return HR
59
+
60
+
61
+ # helper: compute heart rate from a PPG signal
62
+ def calculate_heart_rate_2(self,ppg_signal, sampling_rate):
63
+ # band-pass the signal with a Butterworth filter to remove noise
64
+ nyquist_frequency = sampling_rate / 2.0
65
+ low_cutoff_frequency = 0.5
66
+ high_cutoff_frequency = 5.0
67
+ filter_order = 2
68
+
69
+ b, a = sig.butter(filter_order, [low_cutoff_frequency/nyquist_frequency, high_cutoff_frequency/nyquist_frequency], btype='band')
70
+ filtered_signal = sig.filtfilt(b, a, ppg_signal)
71
+
72
+ # compute heart rate
73
+ window_length = int(sampling_rate * 0.75)
74
+ step_size = int(sampling_rate * 0.1)
75
+ threshold = 0.4
76
+
77
+ # use peak detection to locate the pulse peaks
78
+ peak_indexes, _ = sig.find_peaks(filtered_signal, distance=10)
79
+ print("============================",peak_indexes,sampling_rate)
80
+ # compute inter-peak intervals and derive the heart rate
81
+ # time_intervals = np.diff(peak_indexes) / float(sampling_rate)
82
+ time_intervals = np.diff(peak_indexes) * 0.045
83
+ heart_rate = 60.0 / np.mean(time_intervals)
84
+
85
+ return heart_rate
86
+
87
+ # helper: compute heart rate from an rPPG signal with heartpy
88
+ def calculate_heart_rate_3(self,signal):
89
+ wd, m = hp.process(signal, sample_rate = 100.0)
90
+
91
+ return wd, m
92
+
93
+
94
+
95
+ # def calculate_SPO2(self, ippg_chanel_data):
96
+ # # estimate SpO2
97
+ # ippg_chanel_data = np.array(ippg_chanel_data).reshape(len(ippg_chanel_data),6)
98
+
99
+ # SPO2_pred = []
100
+ # model_list = joblib.load( './model_weight/lgb_model_threechanel2spo2.pkl')
101
+ # for model in model_list:
102
+ # result = model.predict(ippg_chanel_data)
103
+ # SPO2_pred.append(result)
104
+ # SPO2_list = np.mean(SPO2_pred, axis=0)
105
+ # return SPO2_list
106
+
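A self-contained worked example of the peak-interval approach used in `calculate_heart_rate_2`, on a synthetic 1.2 Hz (72 bpm) pulse sampled at 30 fps; unlike the method above, it converts intervals with 1/fs rather than the hard-coded 0.045 s factor:

```python
import numpy as np
import scipy.signal as sig

fs = 30.0                                    # sampling rate in frames per second
t = np.arange(0, 20, 1 / fs)                 # 20 s of signal
ppg = np.sin(2 * np.pi * 1.2 * t)            # synthetic pulse at 1.2 Hz = 72 bpm

peaks, _ = sig.find_peaks(ppg, distance=10)  # same minimum peak distance as above
intervals = np.diff(peaks) / fs              # inter-beat intervals in seconds
print(60.0 / np.mean(intervals))             # ~72 bpm
```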
code/shape_predictor_68_face_landmarks_predictor.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:243bb6b31457d69cb814885f8878a4b7c709169b347b51d4e6b0fe7cd8cc6a6c
3
+ size 99693979
code/utils_sig.py ADDED
@@ -0,0 +1,247 @@
1
+ import numpy as np
2
+ import math
3
+ import cv2
4
+ import torch
5
+ from scipy import signal
6
+ from scipy.fft import fft
7
+ from scipy.signal import butter, filtfilt
8
+ # from facenet_pytorch import MTCNN
9
+ from face_detection import FaceDetection
10
+ import joblib
11
+
12
+
13
+ def butter_bandpass(sig, lowcut, highcut, fs, order=2):
14
+ # butterworth bandpass filter
15
+
16
+ sig = np.reshape(sig, -1)
17
+ nyq = 0.5 * fs
18
+ low = lowcut / nyq
19
+ high = highcut / nyq
20
+ b, a = butter(order, [low, high], btype='band')
21
+
22
+ y = filtfilt(b, a, sig)
23
+ return y
24
+
25
+
26
+ def face_detection(video_list):
27
+ device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
28
+ mtcnn = MTCNN(device=device)
29
+
30
+ face_list = []
31
+ for t, frame in enumerate(video_list):
32
+ if t == 0:
33
+ boxes, _, = mtcnn.detect(
34
+ frame) # we only detect face bbox in the first frame, keep it in the following frames.
35
+ if t == 0:
36
+ box_len = np.max([boxes[0, 2] - boxes[0, 0], boxes[0, 3] - boxes[0, 1]])
37
+ box_half_len = np.round(box_len / 2 * 1.1).astype('int')
38
+ box_mid_y = np.round((boxes[0, 3] + boxes[0, 1]) / 2).astype('int')
39
+ box_mid_x = np.round((boxes[0, 2] + boxes[0, 0]) / 2).astype('int')
40
+ cropped_face = frame[box_mid_y - box_half_len:box_mid_y + box_half_len,
41
+ box_mid_x - box_half_len:box_mid_x + box_half_len]
42
+ cropped_face = cv2.resize(cropped_face, (128, 128))
43
+ face_list.append(cropped_face)
44
+
45
+ print('face detection %2d' % (100 * (t + 1) / len(video_list)), '%', end='\r', flush=True)
46
+
47
+ face_list = np.array(face_list) # (T, H, W, C)
48
+ face_list = np.transpose(face_list, (3, 0, 1, 2)) # (C, T, H, W)
49
+ face_list = np.array(face_list)[np.newaxis]
50
+
51
+ return face_list
52
+
53
+
54
+ def face_detection_ROI(face_detection, frame_list):
55
+ face_frame_list = []
56
+ ROI1_list = []
57
+ ROI2_list = []
58
+
59
+ for i in range(0, frame_list.shape[0]):
60
+ frame = frame_list[i]
61
+ frame, face_frame, ROI1, ROI2, status, mask, face_region = FaceDetection.face_detect(face_detection, frame)
62
+ face_frame_list.append(face_frame)
63
+ ROI1_list.append(ROI1)
64
+ ROI2_list.append(ROI2)
65
+ return np.array(face_frame_list), np.array(ROI1_list), np.array(ROI2_list), status, face_region
66
+
67
+
68
+ def butter_bandpass(sig, lowcut, highcut, fs, order=2):
69
+ # butterworth bandpass filter
70
+
71
+ sig = np.reshape(sig, -1)
72
+ nyq = 0.5 * fs
73
+ low = lowcut / nyq
74
+ high = highcut / nyq
75
+ b, a = butter(order, [low, high], btype='band')
76
+
77
+ y = filtfilt(b, a, sig)
78
+ return y
79
+
80
+ def hr_fft(sig, fs, harmonics_removal=True):
81
+ # get heart rate by FFT
82
+ # return both heart rate and PSD
83
+
84
+ sig = sig.reshape(-1)
85
+ sig = sig * signal.windows.hann(sig.shape[0])
86
+ sig_f = np.abs(fft(sig))
87
+ low_idx = np.round(0.6 / fs * sig.shape[0]).astype('int')
88
+ high_idx = np.round(4 / fs * sig.shape[0]).astype('int')
89
+ sig_f_original = sig_f.copy()
90
+
91
+ sig_f[:low_idx] = 0
92
+ sig_f[high_idx:] = 0
93
+
94
+ peak_idx, _ = signal.find_peaks(sig_f)
95
+ sort_idx = np.argsort(sig_f[peak_idx])
96
+ sort_idx = sort_idx[::-1]
97
+
98
+ peak_idx1 = peak_idx[sort_idx[0]]
99
+ peak_idx2 = peak_idx[sort_idx[1]]
100
+
101
+ f_hr1 = peak_idx1 / sig_f.shape[0] * fs
102
+ hr1 = f_hr1 * 60
103
+
104
+ f_hr2 = peak_idx2 / sig_f.shape[0] * fs
105
+ hr2 = f_hr2 * 60
106
+ if harmonics_removal:
107
+ if np.abs(hr1-2*hr2)<10:
108
+ hr = hr2
109
+ else:
110
+ hr = hr1
111
+ else:
112
+ hr = hr1
113
+
114
+ x_hr = np.arange(len(sig_f))/len(sig_f)*fs*60
115
+ return hr, sig_f_original, x_hr
116
+
117
+
118
+ def hr_fft_2(processed, fps):
119
+ L = len(processed)
120
+ fps = 30#float(L) / (times[-1] - times[-L]) # calculate HR using the true fps of the processor, not the fps the camera provides
121
+ LEN = int(fps * 1.55)
122
+ # even_times = np.linspace(times[-L], times[-1], LEN)
123
+
124
+ processed = signal.detrend(processed) # detrend the signal to avoid interference of light change
125
+ # interpolated = np.interp(even_times, times[-L:], processed) # interpolation by 1
126
+ interpolated = processed
127
+ # interpolated = np.hamming(LEN) * interpolated # make the signal become more periodic (advoid spectral leakage)
128
+ norm = (interpolated - np.mean(interpolated))/np.std(interpolated)#normalization
129
+
130
+ # norm = interpolated / np.linalg.norm(interpolated)
131
+ raw = np.fft.rfft(norm * 30) # do real fft with the normalization multiplied by 10
132
+ raw_r = raw.copy()
133
+
134
+ freqs = float(fps) / LEN * np.arange(LEN / 2 + 1)
135
+ freqs = 60. * freqs
136
+
137
+ idx_remove = np.where((freqs < 50) & (freqs > 180))
138
+ raw[idx_remove] = 0
139
+
140
+ fft = np.abs(raw) ** 2 # get amplitude spectrum
141
+
142
+ idx = np.where((freqs > 50) & (freqs < 180)) # the range of frequency that HR is supposed to be within
143
+ pruned = fft[idx]
144
+ pfreq = freqs[idx]
145
+
146
+ # freqs = pfreq
147
+ fft = pruned
148
+
149
+ idx2 = np.argmax(pruned) # max in the range can be HR
150
+ bpm = pfreq[idx2]
151
+
152
+ # calculate respiratory rate
153
+ idx_remove = np.where((freqs < 5) & (freqs > 60))
154
+ raw_r[idx_remove] = 0
155
+ fft_r = np.abs(raw_r) ** 2
156
+ idx = np.where((freqs > 5) & (freqs < 60))
157
+ pruned_r = fft_r[idx]
158
+ pfreq = freqs[idx]
159
+ idx3 = np.argmax(pruned_r)
160
+ pruned_r[idx3] = 0
161
+ idx3 = np.argmax(pruned_r)
162
+ respiratory_rate = pfreq[idx3]
163
+
164
+ return bpm, respiratory_rate
165
+
166
+
167
+ def calc_rr(peaklist, sample_rate, working_data={}):
168
+ peaklist = np.array(peaklist) #cast numpy array to be sure or correct array type
169
+ working_data['peaklist'] = peaklist # Make sure, peaklist is always an np.array
170
+
171
+ rr_list = (np.diff(peaklist) / sample_rate) * 1000.0
172
+ rr_indices = [(peaklist[i], peaklist[i+1]) for i in range(len(peaklist) - 1)]
173
+ rr_diff = np.abs(np.diff(rr_list))
174
+ rr_sqdiff = np.power(rr_diff, 2)
175
+ working_data['RR_list'] = rr_list
176
+ working_data['RR_indices'] = rr_indices
177
+ working_data['RR_diff'] = rr_diff
178
+ working_data['RR_sqdiff'] = rr_sqdiff
179
+ return working_data
180
+
181
+
182
+ def calculate_hrv(ippg, fps):
183
+ peak_array, _ = signal.find_peaks(ippg[-200::2]) # down sample rate: 2
184
+ peak_list = peak_array.tolist()
185
+ result = calc_rr(peak_list, fps/2)
186
+ # print(peak_list)
187
+ # RR_list = result['RR_list'].tolist()
188
+ # RR_diff = result['RR_diff'].tolist()
189
+ # print(RR_list)
190
+ # print(RR_diff)
191
+ RR_std = 0
192
+ if len(result['RR_list']) > 0:
193
+ RR_std = np.std(result['RR_list'], ddof=1) # calculate RR interval standard deviation
194
+ if math.isnan(RR_std):
195
+ RR_std = 0
196
+ return RR_std
197
+
198
+ def RGB_SpO2(ROI_list):
199
+ roi_avg = []
200
+ roi_std = []
201
+ for i in range(len(ROI_list)):
202
+ roi_avg.append(np.average(ROI_list[i], axis=(0, 1)))
203
+ roi_std.append(np.std(ROI_list[i], axis=(0, 1), ddof=1))
204
+ roi_avg = np.array(roi_avg)
205
+ roi_std = np.array(roi_std)
206
+ mean_red = roi_avg[:, 0]
207
+ std_red = roi_std[:, 0]
208
+ mean_green = roi_avg[:, 1]
209
+ std_green = roi_std[:, 1]
210
+ mean_blue = roi_avg[:, 2]
211
+ std_blue = roi_std[:, 2]
212
+ A = -11.2
213
+ B = 109.3
214
+ R = (np.average(mean_red) / np.average(std_red)) / (np.average(mean_blue) / np.average(std_blue))
215
+ SpO2 = A * R + B
216
+ return SpO2
217
+
218
+
219
+ def RGB_HR(ROI_list):
220
+ roi_avg = []
221
+ roi_std = []
222
+ for i in range(len(ROI_list)):
223
+ roi_avg.append(np.average(ROI_list[i], axis=(0, 1)))
224
+ roi_std.append(np.std(ROI_list[i], axis=(0, 1), ddof=1))
225
+ roi_avg = np.array(roi_avg)
226
+ roi_std = np.array(roi_std)
227
+ mean_red = roi_avg[:, 0]
228
+ std_red = roi_std[:, 0]
229
+ mean_green = roi_avg[:, 1]
230
+ std_green = roi_std[:, 1]
231
+ mean_blue = roi_avg[:, 2]
232
+ std_blue = roi_std[:, 2]
233
+
234
+
235
+
236
+ ippg_chanel_data = np.array((mean_red,std_red,mean_green,std_green,mean_blue,std_blue)).T
237
+ print(ippg_chanel_data )
238
+ # ippg_chanel_data = np.array(ippg_chanel_data).reshape(len(ippg_chanel_data),6)
239
+
240
+ HR_pred = []
241
+ model_list = joblib.load( './model_weight/lgb_model_threechanel2HR.pkl')
242
+ for model in model_list:
243
+ result = model.predict(ippg_chanel_data)
244
+ HR_pred.append(result+10)
245
+ HR = np.mean(HR_pred, axis=0)
246
+
247
+ return np.mean(HR)