human_health_gradio / code /gradio_utils.py
Juliojuse's picture
init
fa926f8
raw
history blame
No virus
2.63 kB
import numpy as np
import gradio as gr
import cv2
from model import PhysNet_Model
import pandas as pd
import time
from physiological_indicators import PhysiologicalIndicators
from utils_sig import *
from face_detection import FaceDetection
## rppg signal to HR,RR,BP,HRV,BO
def rppg_to_physiological_indicators(rppg, ROI1, ROI2):
    '''
    Derive the physiological indicators from an rPPG signal.

    input: rppg signal (list), plus the two face ROIs used for SpO2
    output: HR, RR, BP, HRV, BO
    '''
    ind = PhysiologicalIndicators()
    # Heart rate and respiratory rate come from one call (30 = sampling rate, fps — TODO confirm)
    heart_rate, resp_rate = ind.calculate_heart_rate(rppg, 30)
    # Only the mean BP estimate is propagated; the max/min values are discarded.
    blood_pressure, _bp_max, _bp_min = ind.calculate_blood_pressure(rppg)
    hrv = ind.calculate_heart_rate_variability(rppg, 30)
    # SpO2 is computed from the raw ROI pixels, not from the rPPG signal.
    spo2 = ind.calculate_SpO2(ROI1, ROI2)
    return heart_rate, resp_rate, blood_pressure, hrv, spo2
def fake_diffusion(steps):
    '''
    Demo generator that mimics a diffusion process for the Gradio UI.

    Yields `steps` random 600x600 RGB noise images (one per second),
    then yields a final demo image URL.
    '''
    emitted = 0
    while emitted < steps:
        time.sleep(1)  # simulate per-step inference latency
        yield np.random.random((600, 600, 3))
        emitted += 1
    yield "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg"
def video_to_rppg_statistic(path):
    '''
    Static (whole-file) video -> rPPG conversion pipeline.

    path: video file path (e.g. a Gradio temp-file path)
    returns: (ippg_pd, HR, RR, BP, HRV, BO) where ippg_pd is a DataFrame
             with columns "index" and "rppg"; the signal is also written
             to the module-level `csv_url` path as a side effect.

    NOTE(review): relies on module-level globals `fece_detection` (sic),
    `model`, `csv_url` and the helper `face_detection_ROI` — confirm they
    are defined before this is called.
    '''
    # Read the whole video into memory as a list of (H, W, C) frames.
    frames = []
    cap = cv2.VideoCapture(path)
    try:
        ret = True
        while ret:
            ret, img = cap.read()
            if ret:
                frames.append(img)
    finally:
        cap.release()  # fix: capture handle was previously leaked
    video = np.stack(frames, axis=0)  # dimensions (T, H, W, C)
    print(video.shape)
    # Detect the face and the two ROIs used later for SpO2 estimation.
    face, ROI1, ROI2, status, face_region = face_detection_ROI(fece_detection, video)
    video_input = face
    print(video_input.shape)  # fix: was print(print(...)), which also printed None
    ippg, len_ippg, face_list = model.predict_statistic(video_input)
    # Persist the raw rPPG signal for download/inspection.
    ippg_pd = pd.DataFrame({
        "index": range(0, len_ippg),
        "rppg": ippg,
    })
    ippg_pd.to_csv(csv_url, index=False)
    HR, RR, BP, HRV, BO = rppg_to_physiological_indicators(ippg, ROI1, ROI2)
    return ippg_pd, HR, RR, BP, HRV, BO
def read_video(path):
    '''
    Read a video file into a single frame array.

    input: video file path
    output: numpy array of frames with dimensions (T, H, W, C)

    Raises ValueError (from np.stack) if the file yields no frames.
    '''
    frames = []
    cap = cv2.VideoCapture(path)
    try:
        ret = True
        while ret:
            ret, img = cap.read()  # img is (H, W, C); ret is False at end of stream
            if ret:
                frames.append(img)
                print(img.shape)  # debug trace, kept from original behavior
    finally:
        cap.release()  # fix: capture handle was previously leaked
    video = np.stack(frames, axis=0)  # dimensions (T, H, W, C)
    return video