import numpy as np
import gradio as gr
import cv2
from model import PhysNet_Model
import pandas as pd
import time
from physiological_indicators import PhysiologicalIndicators
from utils_sig import *
from face_detection import FaceDetection
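
# NOTE (assumption): the functions below rely on three module-level objects
# that are assumed to be initialised elsewhere at app start-up:
#   model          - a PhysNet_Model instance used for rPPG prediction
#   fece_detection - a FaceDetection instance passed to face_detection_ROI
#   csv_url        - destination path for the exported rPPG CSV, e.g. "./rppg.csv"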

## rPPG signal -> HR, RR, BP, HRV, BO (blood oxygen / SpO2)
def rppg_to_physiological_indicators(rppg, ROI1, ROI2):
    '''
    input: rppg signal (list), plus the two face ROIs used for SpO2 estimation
    output: HR, RR, BP, HRV, BO
    '''
    indicators = PhysiologicalIndicators()
    HR, RR = indicators.calculate_heart_rate(rppg, 30)  # 30 fps sampling rate
    # RR = indicators.calculate_respiratory_rate(rppg, 30)  # alternative RR estimate (disabled)
    BP, max_BP, min_BP = indicators.calculate_blood_pressure(rppg)
    HRV = indicators.calculate_heart_rate_variability(rppg, 30)
    BO = indicators.calculate_SpO2(ROI1, ROI2)
    return HR, RR, BP, HRV, BO
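

# For intuition only (a sketch, independent of the PhysiologicalIndicators
# implementation): heart rate corresponds to the dominant frequency of the
# rPPG signal inside the plausible heart-rate band.
def dominant_bpm(signal, fs=30):
    # FFT of the mean-removed signal; pick the strongest peak in 0.7-3 Hz
    # (roughly 42-180 bpm) and convert the frequency to beats per minute.
    signal = np.asarray(signal, dtype=float)
    spectrum = np.abs(np.fft.rfft(signal - signal.mean()))
    freqs = np.fft.rfftfreq(len(signal), d=1.0 / fs)
    band = (freqs >= 0.7) & (freqs <= 3.0)
    return freqs[band][np.argmax(spectrum[band])] * 60
    # e.g. dominant_bpm(np.sin(2 * np.pi * 1.2 * np.arange(300) / 30)) ~ 72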


def fake_diffusion(steps):
    # Demo generator: yield one random frame per step, then a final sample image URL.
    for _ in range(steps):
        time.sleep(1)
        image = np.random.random((600, 600, 3))
        yield image
    image = "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg"
    yield image
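

# A minimal streaming sketch (an assumption, not wiring from the original app):
# Gradio runs a generator function as a streaming output, so each yielded frame
# replaces the previous one in the Image component.
def build_diffusion_demo():
    # Only builds the interface; launching is left to the caller, so importing
    # this module stays side-effect free.
    return gr.Interface(
        fn=fake_diffusion,
        inputs=gr.Slider(1, 10, value=3, step=1, label="steps"),
        outputs=gr.Image(label="generated image"),
    )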


def video_to_rppg_statistic(path):
    '''
    Offline (non-streaming) conversion of a video file to an rPPG signal and
    the derived physiological indicators.
    path: video file path (e.g. the temporary file Gradio stores for an upload)
    '''
    # Read the video from the file path as a list of frames.
    frames = []
    cap = cv2.VideoCapture(path)
    ret = True
    while ret:
        ret, img = cap.read()  # read one frame; img is (H, W, C)
        if ret:
            frames.append(img)
    cap.release()
    video = np.stack(frames, axis=0)  # dimensions (T, H, W, C)
    print(video.shape)

    # fece_detection is the module-level FaceDetection instance (see note above).
    face, ROI1, ROI2, status, face_region = face_detection_ROI(fece_detection, video)

    video_input = face
    print(video_input.shape)
    ippg, len_ippg, face_list = model.predict_statistic(video_input)
    ippg_pd = pd.DataFrame({
        "index": range(len_ippg),
        "rppg": ippg,
    })
    ippg_pd.to_csv(csv_url, index=False)  # csv_url is set at module level
    HR, RR, BP, HRV, BO = rppg_to_physiological_indicators(ippg, ROI1, ROI2)
    return ippg_pd, HR, RR, BP, HRV, BO


def read_video(path):
    '''
    input: video file path
    output: video frames as a numpy array with dimensions (T, H, W, C)
    '''
    # Read the video from the file path as a list of frames.
    frames = []
    cap = cv2.VideoCapture(path)
    ret = True
    while ret:
        ret, img = cap.read()  # read one frame; img is (H, W, C)
        if ret:
            frames.append(img)
    cap.release()
    video = np.stack(frames, axis=0)  # dimensions (T, H, W, C)
    print(video.shape)
    # Alternative reader:
    # import skvideo.io
    # video = skvideo.io.vread(path)
    # print(video.shape)

    return video
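

# A minimal end-to-end wiring sketch (illustrative, not part of the original
# app; assumes the module-level model / fece_detection / csv_url noted above
# are initialised): feed an uploaded video through video_to_rppg_statistic and
# show the rPPG trace plus the derived indicators.
if __name__ == "__main__":
    demo = gr.Interface(
        fn=video_to_rppg_statistic,
        inputs=gr.Video(label="face video"),
        outputs=[
            gr.Dataframe(label="rPPG signal"),
            gr.Textbox(label="HR"),
            gr.Textbox(label="RR"),
            gr.Textbox(label="BP"),
            gr.Textbox(label="HRV"),
            gr.Textbox(label="SpO2"),
        ],
    )
    demo.launch()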