import numpy as np
import gradio as gr
import cv2
from model import PhysNet_Model, DeepPhys_Model
import pandas as pd
import time
from physiological_indicators import PhysiologicalIndicators
from face_detection import FaceDetection

from utils_sig import *
from gradio_utils import rppg_to_physiological_indicators, fake_diffusion
from gradio_utils import read_video
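
# Gradio demo for remote photoplethysmography (rPPG): given an uploaded video or
# a webcam recording, the selected model (ContrastPhys / DeepPhys) extracts an
# rPPG signal and derived indicators (heart rate, respiration rate, blood
# pressure, HRV, blood oxygen), which are streamed to line plots while the video
# is being processed.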



face_detector = FaceDetection()
csv_url = './code/ippg_predict.csv'  # CSV cache for the most recent rPPG prediction


def video_to_rppg_dynamic(model_choice, path):
    '''
    Incrementally convert a video into an rPPG signal and derived indicators.

    model_choice: name of the rPPG model to use ("ContrastPhys" or "DeepPhys")
    path: video file path, e.g. the temporary file Gradio creates for an upload
          (C:\\Users\\74314\\AppData\\Local\\Temp\\gradio\\f50cc35ca9bac3568e36f1f7277a72d5e252ad37\\e0faab5f1b8c05eda32648dd635a97c4.mp4)

    A 128-frame window is slid over the video with a stride of 30 frames, and the
    updated signal and indicators are yielded after each window so the Gradio
    plots refresh while the video is still being processed.
    '''
    print(model_choice)
    # instantiate the selected rPPG model
    if model_choice == "ContrastPhys":
        model = PhysNet_Model('./code/contrast_phys/model_weights.pt')
    elif model_choice == "DeepPhys":
        model = DeepPhys_Model('./code/contrast_phys/PURE_PURE_UBFC_deepphys_Epoch29.pth')
    else:
        raise ValueError(f"Unknown model choice: {model_choice}")

    # read the video from the file path as an array of frames (time axis first)
    print(path)
    video = read_video(path)

    HR_list = []
    RR_list = []
    HRV_list = []
    BO_list = []

    # slide a 128-frame window over the video with a stride of 30 frames
    for i in range(0, video.shape[0] - 128, 30):
        video_input = video[i:i + 128, :, :, :]
        video_input, ROI1, ROI2, status, face_region = face_detection_ROI(face_detector, video_input)

        # estimate the rPPG signal for this window, then derive the indicators
        ippg, face_list = model.predict(video_input)
        HR, RR, BP, HRV, BO = rppg_to_physiological_indicators(ippg, ROI1, ROI2)
        HR_list.append(HR)
        RR_list.append(RR)
        HRV_list.append(HRV)
        BO_list.append(BO)

        # wrap the latest results as DataFrames so gr.LinePlot can render them
        ippg_pd = pd.DataFrame({
            "index": range(len(ippg)),
            "rppg": ippg,
        })
        HR_pd = pd.DataFrame({"index": range(len(HR_list)), "HR": HR_list})
        RR_pd = pd.DataFrame({"index": range(len(RR_list)), "RR": RR_list})
        BP_pd = pd.DataFrame({"index": range(len(BP)), "BP": BP})
        HRV_pd = pd.DataFrame({"index": range(len(HRV_list)), "HRV": HRV_list})
        BO_pd = pd.DataFrame({"index": range(len(BO_list)), "BO": BO_list})
        # cache the latest rPPG signal so it can be re-read via get_data()
        ippg_pd.to_csv(csv_url, index=False)

        # yield partial results so the Gradio UI updates after every window
        yield ippg_pd, HR_pd, RR_pd, BP_pd, HRV_pd, BO_pd


def get_data():
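    # helper that re-reads the rPPG CSV cached by video_to_rppg_dynamic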
    return pd.read_csv(csv_url)

with gr.Blocks() as demo:
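    # UI layout: two tabs (uploaded video and webcam); each has a model dropdown,
    # an input component, the rPPG signal plot, and five indicator plots.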
    gr.Markdown("## Calculate rppg from video.") # 使用 Markdown 输出一句话
    with gr.Tab("Video"): # 新建一个 Tab
        with gr.Row(): # 同一行排列
            model_select_video = gr.Dropdown(["ContrastPhys","DeepPhys"],label="Model",info="选择rppg检测模型") # 下拉菜单
        with gr.Row(): # 同一行排列
            video_input = gr.Video()
            # video_output = gr.Video()
            rppg_output_video =  gr.LinePlot(x="index", y="rppg", y_title="rppg singal", width=600, height=300,label="rppg singal")
        with gr.Row(): # 同一行排列
            HR_output_video =  gr.LinePlot(x="index", y="HR", y_title="心率", width=170, height=150,label="心率")
            RR_output_video =  gr.LinePlot(x="index", y="RR", y_title="呼吸", width=170, height=150,label="呼吸")
            BP_output_video =  gr.LinePlot(x="index", y="BP", y_title="血压", width=170, height=150,label="血压")
            HRV_output_video =  gr.LinePlot(x="index", y="HRV", y_title="心率变异性", width=170, height=150,label="心率变异性")
            BO_output_video =  gr.LinePlot(x="index", y="BO", y_title="血氧", width=170, height=150,label="血氧")
        text_button = gr.Button("Start",label="开始")
    with gr.Tab("Webcam"): # 新建一个 Tab
        with gr.Row(): # 同一行排列
            model_select_webcam = gr.Dropdown(["ContrastPhys","DeepPhys"],label="Model",info="选择rppg检测模型") # 下拉菜单
        with gr.Row(): # 同一行排列
            webcam_input = gr.Video(source="webcam")
            # video_output = gr.Video()
            rppg_output_webcam =  gr.LinePlot(x="index", y="rppg", y_title="rppg singal", width=600, height=300,label="rppg singal")
        with gr.Row(): # 同一行排列
            HR_output_webcam =  gr.LinePlot(x="index", y="HR", y_title="心率", width=170, height=150,label="心率")
            RR_output_webcam =  gr.LinePlot(x="index", y="RR", y_title="呼吸", width=170, height=150,label="呼吸")
            BP_output_webcam =  gr.LinePlot(x="index", y="BP", y_title="血压", width=170, height=150,label="血压")
            HRV_output_webcam =  gr.LinePlot(x="index", y="HRV", y_title="心率变异性", width=170, height=150,label="心率变异性")
            BO_output_webcam =  gr.LinePlot(x="index", y="BO", y_title="血氧", width=170, height=150,label="血氧")
        image_button = gr.Button("Start")
 
    with gr.Accordion("Open for More!"): # collapsible section
        gr.Markdown("Look at me...")
 
    text_button.click(video_to_rppg_dynamic,
                      inputs=[model_select_video, video_input],
                      outputs=[rppg_output_video, HR_output_video, RR_output_video, BP_output_video, HRV_output_video, BO_output_video]) # bind the Start button to the generator; plots update on every yield
    image_button.click(video_to_rppg_dynamic,
                       inputs=[model_select_webcam, webcam_input],
                       outputs=[rppg_output_webcam, HR_output_webcam, RR_output_webcam, BP_output_webcam, HRV_output_webcam, BO_output_webcam]) # same binding for the webcam tab
 
# enable the request queue - required because the click handlers are generators
demo.queue()
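
# demo.launch() below serves the app on a local URL; passing share=True to
# launch() would additionally create a temporary public Gradio link.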

demo.launch()