# Split the dataset: partition patients into train/vali/test and cut each PCG recording into 3 s segments

import argparse
import os
import shutil
from patient_information import find_patient_files,load_patient_data,get_grade,get_murmur
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import wave
import librosa
import soundfile


def stratified_test_vali_split(
    stratified_features: list,
    data_directory: str,  # path to the raw dataset
    out_directory: str,
    test_size: float,
    vali_size: float,
    random_state: int,
):
    """Split patients into stratified train/validation/test sets and export them.

    One row per patient is built from the metadata .txt files (patients whose
    murmur label is 'Unknown' are excluded entirely), stratified over the
    one-hot grade columns in ``stratified_features``, and each split's
    recordings are cut into 3 s clips via :func:`cut_copy_files`.

    Args:
        stratified_features: label columns to stratify on, e.g. ["Absent", "Soft", "Loud"].
        data_directory: folder holding the raw patient .txt/.wav files.
        out_directory: output folder; aborts if it already exists.
        test_size: fraction of all patients reserved for the test set.
        vali_size: fraction of ALL patients reserved for validation (rescaled
            internally because it is carved out of the train remainder).
        random_state: seed making both splits reproducible.

    Side effects: creates ``out_directory`` with ``train_data``/``vali_data``/
    ``test_data`` subfolders plus a ``split_details.txt`` manifest.
    """
    # Refuse to overwrite an existing split rather than silently deleting it.
    # (The original called bare exit() with no explanation.)
    if os.path.exists(out_directory):
        raise SystemExit(f"Output directory already exists, aborting: {out_directory}")
    os.makedirs(out_directory)

    # Collect one (ID, one-hot grade) row per patient, skipping 'Unknown'.
    patient_files = find_patient_files(data_directory)  # sorted patient .txt files
    murmur_classes = ["Absent", "Soft", "Loud"]
    num_murmur_classes = len(murmur_classes)
    murmurs = list()
    patient_ID = list()
    for patient_file in tqdm(patient_files):  # progress bar over patient .txt files
        current_patient_data = load_patient_data(patient_file)  # raw text content
        if get_murmur(current_patient_data) == 'Unknown':
            continue  # ambiguous labels are excluded from every split
        patient_ID.append(current_patient_data.split(" ")[0])  # first token is the patient ID
        current_murmur = np.zeros(num_murmur_classes, dtype=int)
        grade = get_grade(current_patient_data)  # murmur grade label
        if grade in murmur_classes:
            current_murmur[murmur_classes.index(grade)] = 1  # e.g. "Absent" -> [1,0,0]
        murmurs.append(current_murmur)
    print(len(murmurs))  # number of patients kept after filtering out 'Unknown'

    # Build a (ID, Absent, Soft, Loud) table plus a combined stratification key.
    patients_pd = pd.DataFrame(np.vstack(patient_ID), columns=['ID'])
    murmurs_pd = pd.DataFrame(np.vstack(murmurs), columns=murmur_classes)
    complete_pd = pd.concat([patients_pd, murmurs_pd], axis=1)
    complete_pd["ID"] = complete_pd["ID"].astype(int).astype(str)
    complete_pd["stratify_column"] = (
        complete_pd[stratified_features].astype(str).agg("-".join, axis=1)  # e.g. "1-0-0"
    )
    print(complete_pd)

    # First carve off the test set, then take the validation set from the
    # remainder; vali_size is rescaled so it stays a fraction of the whole.
    complete_pd_train, complete_pd_test = train_test_split(
        complete_pd,
        test_size=test_size,
        random_state=random_state,
        stratify=complete_pd["stratify_column"],
    )
    vali_split = vali_size / (1 - test_size)
    complete_pd_train, complete_pd_val = train_test_split(
        complete_pd_train,
        test_size=vali_split,
        random_state=random_state + 1,
        stratify=complete_pd_train["stratify_column"],
    )

    # Record which features the split was stratified over.
    with open(os.path.join(out_directory, "split_details.txt"), "w") as text_file:
        text_file.write("This data split is stratified over the following features: \n")
        for feature in stratified_features:
            text_file.write(feature + ", ")

    # Export each split: copy metadata and cut every recording into 3 s clips.
    # (Use copy_files instead of cut_copy_files here to export uncut recordings.)
    splits = (
        (complete_pd_train, "train_data"),
        (complete_pd_val, "vali_data"),
        (complete_pd_test, "test_data"),
    )
    for split_pd, subdir in splits:
        split_dir = os.path.join(out_directory, subdir)
        os.makedirs(split_dir)
        for ident in split_pd["ID"]:
            cut_copy_files(data_directory, ident, split_dir)


def copy_files(data_directory: str, ident: str, out_directory: str) -> None:
    """Copy every file in ``data_directory`` whose name starts with ``ident``
    into ``out_directory`` (which must already exist)."""
    for filename in os.listdir(data_directory):
        if not filename.startswith(ident):
            continue
        shutil.copy(os.path.join(data_directory, filename), out_directory)
# Cut recordings into 3-second segments while copying
def cut_copy_files(data_directory: str, ident: str, out_directory: str) -> None:
    """Copy a patient's metadata and split each recording into 3 s clips.

    For patient ``ident``: the ``.txt`` metadata file is copied verbatim to
    ``out_directory``; every ``.wav`` recording is loaded at 4 kHz and cut into
    consecutive, non-overlapping 3-second segments written as
    ``<ID>_<location>_<grade>_<n>.wav`` (a trailing partial segment is dropped).

    NOTE(review): files are matched by prefix, so ident "123" would also match
    "1234_AV.wav" — same behavior as before; confirm IDs never prefix-collide.
    """
    segment_seconds = 3
    for f in os.listdir(data_directory):
        if not f.startswith(ident):
            continue
        root, extension = os.path.splitext(f)
        if extension == '.txt':
            shutil.copy(os.path.join(data_directory, f), out_directory)
        elif extension == '.wav':
            # Patient ID and murmur grade come from the companion .txt file;
            # the auscultation location is encoded in the wav name (ID_LOC.wav).
            with open(os.path.join(data_directory, ident + '.txt'), 'r') as txt_f:
                txt_data = txt_f.read()
            patient_ID = txt_data.split('\n')[0].split()[0]
            grade = get_grade(txt_data)
            location = root.split('_')[1]
            recording, fs = librosa.load(os.path.join(data_directory, f), sr=4000)
            # Segment length derives from fs (was hard-coded 3*4000, which
            # would silently break if the resample rate were ever changed).
            seg_len = segment_seconds * fs
            num_cut = len(recording) // seg_len  # full segments only
            for num in range(num_cut):
                segment = recording[num * seg_len:(num + 1) * seg_len]
                out_name = f"{patient_ID}_{location}_{grade}_{num}.wav"
                soundfile.write(os.path.join(out_directory, out_name), segment, fs)

if __name__ == "__main__":
    data_directory="E:/HZH/heart_data/2022_challenge_new/the-circor-digiscope-phonocardiogram-dataset-1.0.3/training_data"
    out_directory="data/stratified_data_new"
    test_size=0.2
    vali_size=0.16
    random_state=5678
    stratified_features = ["Absent", "Soft", "Loud"]
    stratified_test_vali_split(stratified_features,data_directory,out_directory,test_size,vali_size,random_state)