{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "776f8011",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入包\n",
    "import scipy.io as sio\n",
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib import font_manager\n",
    "from scipy.ndimage import label\n",
    "from scipy.stats import zscore\n",
    "from scipy import signal\n",
    "from scipy.integrate import trapz\n",
    "from scipy.interpolate import interp1d\n",
    "\n",
    "import tsfresh as tsf\n",
    "from tsfresh import extract_features, select_features\n",
    "from tsfresh.utilities.dataframe_functions import impute\n",
    "from tsfresh.feature_extraction.feature_calculators import set_property\n",
    "\n",
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "976332f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot(signal):\n",
    "    \"\"\"Plot one raw BCG trace with Chinese-labelled axes.\n",
    "\n",
    "    NOTE(review): the parameter name shadows the scipy `signal` module\n",
    "    imported at the top of the notebook (harmless inside this function).\n",
    "    \"\"\"\n",
    "    # Load the Microsoft YaHei font so the Chinese labels render.\n",
    "    cjk_font = font_manager.FontProperties(fname=\"msyh.ttc\")\n",
    "    cjk_font.set_size(10)\n",
    "    step = 500  # tick spacing used by the (disabled) xticks call below\n",
    "    plt.figure(figsize=(20, 10))\n",
    "    plt.plot(signal, color=\"#51A6D8\")\n",
    "    # plt.xticks(range(0, signal.size, step), [\"%d\" % x for x in range(0, signal.size, step)], fontproperties=cjk_font)\n",
    "    plt.title(\"BCG 信号\", fontproperties=cjk_font)\n",
    "    plt.xlabel(\"时间(0.01s)\", fontproperties=cjk_font)\n",
    "    plt.ylabel(\"振幅(任意单位)\", fontproperties=cjk_font)\n",
    "    plt.grid()\n",
    "    \n",
    "# Sine-wave template-matching peak detector (finds J-peak candidates).\n",
    "def mokuai(df,threshold):\n",
    "    # Expects `df` as a single-row DataFrame (one recording per row); it is\n",
    "    # transposed below so the samples run down a column.\n",
    "    def detect_peaks(BCG_signal, threshold=0.3, qrs_filter=None):\n",
    "        # Return the indices whose normalised cross-correlation with the\n",
    "        # template exceeds `threshold`, plus the similarity trace itself.\n",
    "        if qrs_filter is None:\n",
    "            # Default QRS template: the positive lobe of one sine period.\n",
    "            t = np.linspace(1.5 * np.pi, 3.5 * np.pi, 50)\n",
    "            qrs_filter = np.sin(t)[np.sin(t)>0.4]\n",
    "\n",
    "        # calculate cross correlation\n",
    "        similarity = np.correlate(BCG_signal, qrs_filter, mode=\"same\")\n",
    "        similarity = similarity / np.max(similarity)\n",
    "\n",
    "        # return peaks (values in ms) using threshold\n",
    "        return BCG_signal[similarity > threshold].index, similarity\n",
    "\n",
    "    def get_plot_ranges(start=10, end=20, n=20):\n",
    "        # Debug helper yielding (start, end) plot windows; unused below.\n",
    "        distance = end - start\n",
    "        for i in np.arange(start, end, np.floor(distance/n)):\n",
    "            yield (int(i), int(np.minimum(end, np.floor(distance/n) + i)))\n",
    "\n",
    "    def group_peaks(p, threshold=30):\n",
    "        # Collapse runs of nearby above-threshold samples (gaps < threshold\n",
    "        # samples) into one peak located at the group's amplitude maximum.\n",
    "        # initialize output\n",
    "        output = np.empty(0)\n",
    "        # label groups of sample that belong to the same peak\n",
    "        peak_groups, num_groups = label(np.diff(p) < threshold)\n",
    "        # iterate through groups and keep one representative index per group\n",
    "        for i in np.unique(peak_groups)[1:]:\n",
    "            peak_group = p[np.where(peak_groups == i)]\n",
    "            # keep the sample with the largest signal amplitude in the group\n",
    "            output = np.append(output, peak_group[np.argmax(df.T[1][peak_group])])\n",
    "        return output\n",
    "    # df.T[1] gives the recording as a Series (column shape) indexable by\n",
    "    # sample position; the row label is forced to 1 first.\n",
    "    # NOTE(review): this mutates the caller's DataFrame index in place.\n",
    "    df.index=[1]\n",
    "    peaks, similarity = detect_peaks(df.T[1], threshold)\n",
    "    grouped_peaks = group_peaks(np.array(peaks,dtype=int))\n",
    "    return grouped_peaks\n",
    "\n",
    "# 提取JJ间期的算法函数\n",
    "def jj_extract(grouped_peaks):\n",
    "    jj = np.diff(grouped_peaks)\n",
    "    jj_corrected = jj.copy()\n",
    "    jj_corrected[np.abs(zscore(jj)) > 2] = np.median(jj)\n",
    "    return jj_corrected\n",
    "\n",
    "# 提取时域特征函数\n",
    "'''\n",
    "均值-mean_jj:\n",
    "    JJ间隔的平均值\n",
    "标准差-std_jj:\n",
    "    连续JJ间隔的标准\n",
    "RMSSD:\n",
    "    连续JJ间隔的均方根长度\n",
    "PJJ50:\n",
    "    大于50毫秒的JJ间隔的百分比\n",
    "'''\n",
    "def timedomain(jj):\n",
    "    results = {}\n",
    "    results['mean_jj'] = np.mean(jj)\n",
    "    results['STD_jj'] = np.std(jj)\n",
    "    results['RMSSD'] = np.sqrt(np.mean(np.square(np.diff(jj))))\n",
    "    results['PJJ50'] = 100 * np.sum((np.abs(np.diff(jj)) > 5)*1) / len(jj)\n",
    "    return results\n",
    "\n",
    "# Frequency-domain HRV feature extraction.\n",
    "def frequency_domain(jj_intervals, fs=0.4):\n",
    "    \"\"\"Resample the JJ series evenly, estimate its spectrum with Welch's\n",
    "    method, and integrate power in the standard VLF/LF/HF bands.\n",
    "\n",
    "    NOTE(review): cumsum/1000 implies jj_intervals are in milliseconds --\n",
    "    confirm against the units produced by the peak detector.\n",
    "    \"\"\"\n",
    "    # sample rate for interpolation\n",
    "    steps = 1 / fs\n",
    "    # Build an interpolation function over the cumulative beat times (s).\n",
    "    x = np.cumsum(jj_intervals) / 1000.0\n",
    "    f = interp1d(x, jj_intervals, kind='cubic')\n",
    "    # now we can sample from interpolation function\n",
    "    xx = np.arange(np.min(x), np.max(x), steps)\n",
    "    jj_interpolated = f(xx)\n",
    "    \n",
    "    \n",
    "    # Estimate the spectral density using Welch's method\n",
    "    fxx, pxx = signal.welch(x=jj_interpolated, fs=fs)\n",
    "    \n",
    "    '''\n",
    "    Segement found frequencies in the bands \n",
    "     - Very Low Frequency (VLF): 0-0.04Hz \n",
    "     - Low Frequency (LF): 0.04-0.15Hz \n",
    "     - High Frequency (HF): 0.15-0.4Hz\n",
    "    '''\n",
    "    cond_vlf = (fxx >= 0) & (fxx < 0.04)\n",
    "    cond_lf = (fxx >= 0.04) & (fxx < 0.15)\n",
    "    cond_hf = (fxx >= 0.15) & (fxx < 0.4)\n",
    "    \n",
    "    # calculate power in each band by integrating the spectral density \n",
    "    # NOTE(review): scipy.integrate.trapz was removed in SciPy 1.14; newer\n",
    "    # stacks should use numpy.trapz / scipy.integrate.trapezoid instead.\n",
    "    vlf = trapz(pxx[cond_vlf], fxx[cond_vlf])\n",
    "    lf = trapz(pxx[cond_lf], fxx[cond_lf])\n",
    "    hf = trapz(pxx[cond_hf], fxx[cond_hf])\n",
    "    \n",
    "    # sum these up to get total power\n",
    "    total_power = vlf + lf + hf\n",
    "\n",
    "#     # find which frequency has the most power in each band\n",
    "#     peak_vlf = fxx[cond_vlf][np.argmax(pxx[cond_vlf])]\n",
    "#     peak_lf = fxx[cond_lf][np.argmax(pxx[cond_lf])]\n",
    "#     peak_hf = fxx[cond_hf][np.argmax(pxx[cond_hf])]\n",
    "\n",
    "#     # fraction of lf and hf\n",
    "#     lf_nu = 100 * lf / (lf + hf)\n",
    "#     hf_nu = 100 * hf / (lf + hf)\n",
    "    \n",
    "    results = {}\n",
    "    results['Power VLF (ms2)'] = vlf\n",
    "    results['Power LF (ms2)'] = lf\n",
    "    results['Power HF (ms2)'] = hf   \n",
    "    results['Power Total (ms2)'] = total_power\n",
    "\n",
    "    # NOTE(review): yields inf/NaN if the HF band integrates to zero.\n",
    "    results['LF/HF'] = (lf/hf)\n",
    "    return results\n",
    "\n",
    "def HRV_extract(signal,threshold=0.1):\n",
    "    \"\"\"Take one raw single-row BCG DataFrame and return a dict of HRV\n",
    "    features (time-domain only; the frequency-domain set is disabled).\"\"\"\n",
    "    grouped = mokuai(signal, threshold)\n",
    "    intervals = jj_extract(grouped)\n",
    "    features = dict(timedomain(intervals))\n",
    "#     fd = frequency_domain(intervals, fs=0.4)\n",
    "#     features.update(fd)\n",
    "    return features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a5c6758",
   "metadata": {},
   "outputs": [],
   "source": [
    "all_bcg_data=pd.read_csv('./one_bcg.csv')\n",
    "# Split the wide CSV: columns 1..5000 are recording 1, 5001..10000 are\n",
    "# recording 2, and the remaining columns hold the labels.\n",
    "first_bcg_oneperson = all_bcg_data.iloc[:,1:5001]\n",
    "first_bcg_oneperson = pd.DataFrame(first_bcg_oneperson.values)\n",
    "second_bcg_oneperson = all_bcg_data.iloc[:,5001:10001]\n",
    "second_bcg_oneperson = pd.DataFrame(second_bcg_oneperson.values)\n",
    "label_oneperson = all_bcg_data.iloc[:,10001:]\n",
    "label_oneperson = pd.DataFrame(label_oneperson.values)\n",
    "\n",
    "\n",
    "# Build the target feature-table skeleton.\n",
    "sample=HRV_extract(first_bcg_oneperson[0:1])\n",
    "# Pre-allocate a 2-D array. NOTE(review): 137 rows is hard-coded --\n",
    "# presumably the dataset size; confirm it equals first_bcg_oneperson.shape[0].\n",
    "arr1 = np.zeros(137*4).reshape(137,4)\n",
    "frame1 = pd.DataFrame(arr1)\n",
    "# Rebuild the column index from the sample feature names.\n",
    "first_bcg_oneperson_HRV = frame1.reindex(columns =sample.keys())\n",
    "second_bcg_oneperson_HRV = frame1.reindex(columns =sample.keys())\n",
    "\n",
    "# HRV feature extraction. NOTE(review): assigning features.values() relies\n",
    "# on dict insertion order matching the reindexed columns (holds on CPython\n",
    "# 3.6+), and df.index=[1] mutates a slice of the source frame in place.\n",
    "for i in np.arange(first_bcg_oneperson.shape[0]):\n",
    "    # Re-label the single row so mokuai's df.T[1] lookup works.\n",
    "    df=first_bcg_oneperson[i:i+1]\n",
    "    df.index=[1]\n",
    "    features=HRV_extract(df,0.1)\n",
    "    first_bcg_oneperson_HRV[i:i+1]=features.values()\n",
    "for i in np.arange(second_bcg_oneperson.shape[0]):\n",
    "    # Re-label the single row so mokuai's df.T[1] lookup works.\n",
    "    df=second_bcg_oneperson[i:i+1]\n",
    "    df.index=[1]\n",
    "    features=HRV_extract(df,0.1)\n",
    "    second_bcg_oneperson_HRV[i:i+1]=features.values()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdc1b50c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the long-format table tsfresh expects: one row per (recording,\n",
    "# sample) with columns id, time, bcgsignal, label1, label2.\n",
    "arr1 = np.zeros(600*5000*5).reshape(600*5000,5)\n",
    "frame1 = pd.DataFrame(arr1)\n",
    "frame1.rename(columns={0:'id',1:'time',2:'bcgsignal',3:'label1',4:'label2'},inplace=True)\n",
    "\n",
    "bcg = pd.concat([first_bcg_oneperson,second_bcg_oneperson],axis=0).reset_index(drop=True).values.reshape(600*5000,1)\n",
    "# PERF FIX: the original recomputed the label concat twice per sample inside\n",
    "# a 3,000,000-iteration Python loop and wrote element-wise through\n",
    "# frame1.values; hoist the concat once and assign whole columns vectorised.\n",
    "labels = pd.concat([label_oneperson,label_oneperson],axis=0).reset_index(drop=True).values\n",
    "rec_ids = np.repeat(np.arange(600), 5000)       # recording index k for each row\n",
    "frame1['id'] = rec_ids\n",
    "frame1['time'] = np.tile(np.arange(5000), 600)  # sample index j within a recording\n",
    "frame1['bcgsignal'] = bcg[:, 0]\n",
    "frame1['label1'] = labels[rec_ids, 0]\n",
    "frame1['label2'] = labels[rec_ids, 1]\n",
    "frame1['id'] = frame1[['id']].values.astype(np.int64)\n",
    "frame1['time'] = frame1[['time']].values.astype(np.int64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c4a4e32",
   "metadata": {},
   "outputs": [],
   "source": [
    "from tsfresh.feature_extraction import extract_features\n",
    "\n",
    "# Automatic feature extraction over the long-format table (one feature row\n",
    "# per recording id).\n",
    "train_features = extract_features(frame1[['id','time','bcgsignal']], column_id='id', column_sort='time')\n",
    "\n",
    "from tsfresh.utilities.dataframe_functions import impute\n",
    "# Replace NaN/inf produced during extraction so selection can run.\n",
    "impute(train_features)\n",
    "\n",
    "from tsfresh import select_features\n",
    "\n",
    "# Select features by relevance to the first label column (strict FDR level\n",
    "# 0.005; the tsfresh default is 0.05). NOTE(review): the imports in this\n",
    "# cell duplicate the top-of-notebook import cell and could be removed.\n",
    "train_features_filtered = select_features(train_features, pd.concat([label_oneperson,label_oneperson],axis=0).reset_index(drop=True)[0],fdr_level=0.005)\n",
    "\n",
    "\n",
    "# Split back into the two recordings (first 300 ids vs the rest).\n",
    "first_tsfresh_festures = train_features_filtered[0:300].reset_index(drop=True)\n",
    "second_tsfresh_festures = train_features_filtered[300:].reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7317b8af",
   "metadata": {},
   "outputs": [],
   "source": [
    "def logarithmic_n(min_n, max_n, factor):\n",
    "    \"\"\"\n",
    "    Creates a list of values by successively multiplying a minimum value min_n by\n",
    "    a factor > 1 until a maximum value max_n is reached.\n",
    "\n",
    "    Non-integer results are rounded down.\n",
    "\n",
    "    Args:\n",
    "    min_n (float): minimum value (must be < max_n)\n",
    "    max_n (float): maximum value (must be > min_n)\n",
    "    factor (float): factor used to increase min_n (must be > 1)\n",
    "\n",
    "    Returns:\n",
    "    list of integers: min_n, min_n * factor, min_n * factor^2, ... min_n * factor^i < max_n\n",
    "                      without duplicates\n",
    "    \"\"\"\n",
    "    assert max_n > min_n\n",
    "    assert factor > 1\n",
    "    \n",
    "    # stop condition: min * f^x = max\n",
    "    # => f^x = max/min\n",
    "    # => x = log(max/min) / log(f)\n",
    "    \n",
    "    max_i = int(np.floor(np.log(1.0 * max_n / min_n) / np.log(factor)))\n",
    "    ns = [min_n]\n",
    "    \n",
    "    for i in range(max_i+1):\n",
    "        n = int(np.floor(min_n * (factor ** i)))\n",
    "        if n > ns[-1]:\n",
    "            ns.append(n)\n",
    "            \n",
    "    return ns\n",
    "@set_property(\"fctype\", \"simple\")\n",
    "def dfa(data, nvals= None, overlap=True, order=1, debug_plot=False, plot_file=None):\n",
    "    \"\"\"Detrended fluctuation analysis: return the scaling exponent (slope\n",
    "    of log fluctuation versus log window size).\n",
    "\n",
    "    data: 1-D series. nvals: window sizes (default: logarithmic from 4 up\n",
    "    to 0.1*len(data); the assert inside logarithmic_n fires unless\n",
    "    len(data) > 40). overlap: half-overlapping windows when True.\n",
    "    order: detrending polynomial order.\n",
    "    NOTE(review): debug_plot=True calls plot_reg, which is not defined\n",
    "    anywhere in this notebook -- it would raise NameError.\n",
    "    \"\"\"\n",
    "\n",
    "    total_N = len(data)\n",
    "    if nvals is None:\n",
    "        nvals = logarithmic_n(4, 0.1*total_N, 1.2)\n",
    "        \n",
    "    # create the signal profile (cumulative sum of deviations from the mean => \"walk\")\n",
    "    walk = np.nancumsum(data - np.nanmean(data))\n",
    "    fluctuations = []\n",
    "    \n",
    "    for n in nvals:\n",
    "        # subdivide data into chunks of size n\n",
    "        if overlap:\n",
    "            # step size n/2 instead of n\n",
    "            d = np.array([walk[i:i+n] for i in range(0,len(walk)-n,n//2)])\n",
    "        else:\n",
    "            # non-overlapping windows => we can simply do a reshape\n",
    "            d = walk[:total_N-(total_N % n)]\n",
    "            d = d.reshape((total_N//n, n))\n",
    "            \n",
    "        # calculate local trends as polynomes\n",
    "        x = np.arange(n)\n",
    "        tpoly = np.array([np.polyfit(x, d[i], order) for i in range(len(d))])\n",
    "        trend = np.array([np.polyval(tpoly[i], x) for i in range(len(d))])\n",
    "        \n",
    "        # calculate standard deviation (\"fluctuation\") of walks in d around trend\n",
    "        flucs = np.sqrt(np.nansum((d - trend) ** 2, axis=1) / n)\n",
    "        \n",
    "        # calculate mean fluctuation over all subsequences\n",
    "        f_n = np.nansum(flucs) / len(flucs)\n",
    "        fluctuations.append(f_n)\n",
    "        \n",
    "        \n",
    "    fluctuations = np.array(fluctuations)\n",
    "    # filter zeros from fluctuations\n",
    "    nonzero = np.where(fluctuations != 0)\n",
    "    nvals = np.array(nvals)[nonzero]\n",
    "    fluctuations = fluctuations[nonzero]\n",
    "    if len(fluctuations) == 0:\n",
    "        # all fluctuations are zero => we cannot fit a line\n",
    "        poly = [np.nan, np.nan]\n",
    "    else:\n",
    "        # slope of the log-log fit is the DFA exponent\n",
    "        poly = np.polyfit(np.log(nvals), np.log(fluctuations), 1)\n",
    "    if debug_plot:\n",
    "        plot_reg(np.log(nvals), np.log(fluctuations), poly, \"log(n)\", \"std(X,n)\", fname=plot_file)\n",
    "        \n",
    "    return poly[0]\n",
    "\n",
    "def ZeroCR(waveData,frameSize=360000,overLap=0):\n",
    "    wlen = len(waveData)\n",
    "    step = frameSize - overLap\n",
    "    frameNum = math.ceil(wlen/step)\n",
    "    zcr = np.zeros((frameNum,1))\n",
    "    for i in range(frameNum):\n",
    "        curFrame = waveData[np.arange(i*step,min(i*step+frameSize,wlen))]\n",
    "    #To avoid DC bias, usually we need to perform mean subtraction on each frame\n",
    "    #ref: http://neural.cs.nthu.edu.tw/jang/books/audiosignalprocessing/basicFeatureZeroCrossingRate.asp\n",
    "        curFrame = curFrame - np.mean(curFrame) # zero-justified\n",
    "        zcr[i] = sum(curFrame[0:-1]*curFrame[1::]<=0)\n",
    "    return zcr[0][0]/360000\n",
    "\n",
    "@set_property(\"fctype\", \"simple\")\n",
    "def SampEn(x, m, r=0.15):\n",
    "    \"\"\"\n",
    "    Sample entropy of a 1-D series.\n",
    "    m: sliding-window (template) length\n",
    "    r: tolerance coefficient, typically 0.1~0.25 (threshold = r*std(x))\n",
    "    \"\"\"\n",
    "    # Coerce to ndarray.\n",
    "    x = np.array(x)\n",
    "    # The series must be one-dimensional.\n",
    "    if x.ndim != 1:\n",
    "        raise ValueError(\"x的维度不是一维\")\n",
    "    # Need at least m+1 samples to form templates of length m+1.\n",
    "    if len(x) < m+1:\n",
    "        raise ValueError(\"len(x)小于m+1\")\n",
    "    entropy = 0  # running estimate over the two template lengths\n",
    "    # Two passes: template length m (temp=0), then m+1 (temp=1).\n",
    "    for temp in range(2):\n",
    "        X = []\n",
    "        for i in range(len(x)-m+1-temp):\n",
    "            X.append(x[i:i+m+temp])\n",
    "        X = np.array(X)\n",
    "        # Chebyshev distance between every pair of distinct templates.\n",
    "        D_value = []  # one row per template, self-comparison excluded\n",
    "        for index1, i in enumerate(X):\n",
    "            sub = []\n",
    "            for index2, j in enumerate(X):\n",
    "                if index1 != index2:\n",
    "                    sub.append(max(np.abs(i-j)))\n",
    "            D_value.append(sub)\n",
    "        # Tolerance threshold.\n",
    "        F = r*np.std(x, ddof=1)\n",
    "        # BUG FIX: D_value is a plain list of lists and `list < float`\n",
    "        # raises TypeError on Python 3 -- convert to an ndarray first.\n",
    "        # NOTE(review): the divisor len(X)-m+1-temp is preserved as written,\n",
    "        # though textbook SampEn normalises by the comparison count.\n",
    "        num = np.sum(np.array(D_value)<F, axis=1)/(len(X)-m+1-temp)\n",
    "        # Log-average of the match fractions; epsilon guards log(0).\n",
    "        epsilon = 1e-5\n",
    "        Lm = np.average(np.log(num + epsilon))\n",
    "        entropy = abs(entropy) - Lm\n",
    "\n",
    "    return entropy\n",
    "\n",
    "def other_extract(signal):\n",
    "    \"\"\"Take one raw BCG recording (1-D array) and return a dict holding\n",
    "    the DFA exponent and zero-crossing rate (SampEn currently disabled).\"\"\"\n",
    "    results = {}\n",
    "    results['dfa'] = dfa(signal)\n",
    "    # calculate Zero Cross Rate\n",
    "    results['ZeroCR'] = ZeroCR(signal)\n",
    "#     results['SampEn'] = SampEn(signal, m=2, r=0.15)\n",
    "    return results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "792912ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the target feature-table skeleton for the dfa/ZeroCR features.\n",
    "sample=other_extract(first_bcg_oneperson.values[0])\n",
    "# Pre-allocate. NOTE(review): 137 rows is hard-coded -- confirm it matches\n",
    "# first_bcg_oneperson.shape[0].\n",
    "arr1 = np.zeros(137*2).reshape(137,2)\n",
    "frame1 = pd.DataFrame(arr1)\n",
    "# Rebuild the column index from the sample feature names.\n",
    "first_other_features_oneperson = frame1.reindex(columns =sample.keys())\n",
    "second_other_features_oneperson = frame1.reindex(columns =sample.keys())\n",
    "\n",
    "# Feature extraction. NOTE(review): assigning features.values() relies on\n",
    "# dict insertion order matching the reindexed columns.\n",
    "for i in np.arange(first_bcg_oneperson.shape[0]):\n",
    "    # one recording per row, passed as a 1-D array\n",
    "    df=first_bcg_oneperson.values[i]\n",
    "    features=other_extract(df)\n",
    "    first_other_features_oneperson[i:i+1]=features.values()\n",
    "for i in np.arange(second_bcg_oneperson.shape[0]):\n",
    "    # one recording per row, passed as a 1-D array\n",
    "    df=second_bcg_oneperson.values[i]\n",
    "    features=other_extract(df)\n",
    "    second_other_features_oneperson[i:i+1]=features.values()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1c98d8cf",
   "metadata": {},
   "outputs": [],
   "source": [
    "first_features_oneperson = pd.concat([first_bcg_oneperson_HRV,first_tsfresh_festures,first_other_features_oneperson],axis=1)\n",
    "second_features_oneperson = pd.concat([second_bcg_oneperson_HRV,second_tsfresh_festures,first_other_features_oneperson],axis=1)\n",
    "\n",
    "first_features_oneperson.to_csv('first_features_oneperson.csv', index=0)\n",
    "second_features_oneperson.to_csv('second_features_oneperson.csv', index=0)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
