# 实现文件的合并功能，并提取文件中的需要的数据
import csv
import pandas as pd
import json
import  os
from collections import defaultdict
from difflib import SequenceMatcher


from function.fun_data_deal import group_by_prefix, find_elements_with_substrings
from other_function.info import file_dict

pd.set_option('display.max_columns', None)  # show every column when printing DataFrames
pd.set_option('display.width', 1000)        # total console width used for display
pd.set_option('display.max_colwidth', 100)  # maximum rendered width of a single column




# 将原始文件筛选有用信息后全部转为csv，其余原始文件fit，img等未作处理
# 将原始数据转为deal中合并后的数据
# Convert a subject's useful raw files into merged csv files; raw fit/img
# files are left for copying elsewhere and are not merged here.
def to_csv(subject_path, deal_path, person):
    """Merge one person's raw data files and save the results as csv.

    :param subject_path: root directory holding the raw per-person folders
    :param deal_path: root directory for the merged output
    :param person: sub-directory name of the subject; appended to both roots
    :return: the per-person output directory that the csv files were written to
    """
    subject_path = os.path.join(subject_path, person)
    deal_path = os.path.join(deal_path, person)

    # Create the output directory; exist_ok avoids the check-then-create race.
    os.makedirs(deal_path, exist_ok=True)
    file_list = os.listdir(subject_path)

    # Each group shares a common file-name prefix (one experiment run).
    for lst in group_by_prefix(file_list):
        test = lst[0][0:8]  # experiment id: first eight chars of the file name
        result = find_elements_with_substrings(lst, file_dict)
        print('实验合并test', test, lst)
        # Merge every recognised file type of this experiment.
        for key, value in result.items():
            if key == 'img' or key == 'fit':
                # Binary artefacts are not merged, only reported.
                print("复制", key)
            else:
                # Given the file type, file names and directory, build the merged df.
                df = merger(key, value, subject_path)
                try:
                    to_csv_path = os.path.join(deal_path, value[0][0:9] + key + '.csv')
                    print('合并保存', test, value)
                    df.to_csv(to_csv_path, index=False)
                except Exception as e:
                    # Narrowed from a bare except: keep the best-effort behaviour
                    # but report why saving failed instead of hiding the cause.
                    print('无法保存', test, value, e)

    return deal_path

# 合并总函数，无论是一个或是多个都可以合并
#给定文件类型，文件名，文件路径，得到合并后df
# Merge helper: works whether there is one file or several of the same type.
# Given a file type, file names and their directory, return the merged df.
def merger(key, list, subject_path):
    """Merge all files in *list* (all of type *key*) into one DataFrame.

    :param key: file-type tag; the parser named ``<key>_deal`` in this
        module's globals is used for every file
    :param list: file names to merge (name kept for caller compatibility,
        although it shadows the builtin ``list``)
    :param subject_path: directory containing the files
    :return: the concatenated DataFrame, or the sentinel ``[[]]`` when the
        file list is empty (preserved for backward compatibility)
    """
    if not list:
        # Historical empty sentinel; callers rely on try/except around saving.
        return [[]]
    # The per-type parser is loop-invariant: resolve it once, not per file.
    func = globals()[key + "_deal"]
    df_list = [func(subject_path + '\\' + file) for file in list]
    return pd.concat(df_list, ignore_index=True)




# ecg转df
# ecg file -> DataFrame
def ecg_deal(file):
    """Read an ECG csv file and return its first five columns as a DataFrame,
    sorted by ``ecg_timestamp`` (string order) and truncated to 13 characters
    (millisecond precision)."""
    with open(file, 'r', newline='') as fh:
        reader = csv.reader(fh)
        columns = next(reader)[:5]
        records = [line[:5] for line in reader]

    frame = pd.DataFrame(records, columns=columns)
    frame.sort_values(by='ecg_timestamp', ascending=True, inplace=True)
    # keep only the first 13 characters of the string timestamp
    frame['ecg_timestamp'] = frame['ecg_timestamp'].str[:13]
    return frame
# long-term ecg file -> DataFrame
def long_term_ecg_deal(file):
    """Read a long-term ECG csv file and return its first seven columns as a
    DataFrame, sorted by ``ecg_timestamp`` and truncated to 13 characters."""
    rows = []
    with open(file, 'r', newline='') as source:
        parser = csv.reader(source)
        header = next(parser)[:7]
        for record in parser:
            rows.append(record[:7])

    frame = pd.DataFrame(rows, columns=header)
    frame.sort_values(by='ecg_timestamp', ascending=True, inplace=True)
    # keep millisecond precision only: first 13 characters of the timestamp
    frame['ecg_timestamp'] = frame['ecg_timestamp'].str[:13]
    return frame
#rrdata 转df
#rrdata file -> DataFrame
def rrdata_deal(file):
    """Parse an RR-interval csv into a DataFrame sorted by timestamp.

    Every data row is padded with empty strings so it always has eight
    fields: timestamp, HR and up to six RR values.  The six RR columns are
    additionally collected into one list-valued column ``rr``.

    :param file: path to the rrdata csv file (first line is a header)
    :return: DataFrame with columns timestamp, HR, rr1..rr6 and rr
    """
    data = []
    with open(file, 'r', newline='') as fh:
        reader = csv.reader(fh)
        next(reader)  # skip the header line
        for row in reader:
            # Pad short rows with empty strings, then keep at most 8 fields
            # (replaces six repeated row.append('') calls).
            data.append((row + [''] * 6)[0:8])

    df = pd.DataFrame(data, columns=['timestamp', 'HR', 'rr1', 'rr2', 'rr3', 'rr4', 'rr5', 'rr6'])
    df.sort_values(by='timestamp', ascending=True, inplace=True)
    # Timestamps stay strings; truncate to millisecond precision (13 chars).
    df['timestamp'] = df['timestamp'].str[0:13]
    # Collapse the six RR columns into a single list column.  The original
    # dead store df['rr'] = '' (immediately overwritten) has been removed.
    df['rr'] = df[['rr1', 'rr2', 'rr3', 'rr4', 'rr5', 'rr6']].apply(lambda r: r.tolist(), axis=1)
    return df



# ppg转df

def ppg_deal(file):
    """Load a tab-separated PPG file straight into a DataFrame."""
    return pd.read_csv(file, sep='\t', encoding='utf-8', engine='python')


#rri转df

def rri_deal(file):
    """Parse an RRI export csv into a flattened, sorted DataFrame.

    The file is read without a header row; the column at index 5 is
    expected to hold a JSON-encoded list per row, where each element
    carries ``timeFrame.timestamp``, ``sqi`` and ``rri.value``.

    :param file: path to the rri csv file
    :return: DataFrame with columns ``timestamp``, ``sqi``, ``value``
        sorted by timestamp ascending, or ``None`` when reading or
        parsing fails (errors are printed, not raised)
    """
    try:
        df = pd.read_csv(file, sep=',', header=None, encoding='utf-8')
        # NOTE(review): iloc[:, 5] selects the column at index 5, i.e. the
        # SIXTH column — the original comment claimed the fifth (index 4);
        # confirm against the actual file layout.
        fifth_column = df.iloc[:, 5]

        # Accumulator for the flattened (timestamp, sqi, value) triples.
        data = []

        # [1:-1] skips the first and last rows — presumably partial or
        # sentinel records; TODO confirm this matches the file format.
        for k in fifth_column[1:-1]:
            for i in json.loads(k):
                timestamp = i['timeFrame']['timestamp']
                sqi = i['sqi']
                value = i['rri']['value']
                data.append([timestamp, sqi, value])

    # Build the flattened DataFrame and sort chronologically.
        df_rr = pd.DataFrame(data, columns=['timestamp', 'sqi', 'value'])
        df_rr.sort_values(by='timestamp', ascending=True, inplace=True)

        return df_rr
    except FileNotFoundError:
        print("文件 '{}' 不存在。".format(file))

    except Exception as e:
        print("处理数据时出错：{}".format(e))



# singwork 转df



def singlework_deal(file):
    """Load a workout-summary csv, keep the key columns and add a
    nanosecond-epoch ``timestamp`` column parsed from '数据时间'.

    :param file: path to the summary csv file
    :return: DataFrame sorted by timestamp ascending
    """
    frame = pd.read_csv(file, sep=',', encoding='utf-8')

    wanted = ['数据时间', '活动名称', '活动.测量开始时间', '活动.测量结束时间', '运动总时长']
    frame = frame.loc[:, wanted]
    # naive datetime string -> nanoseconds since the epoch
    frame['timestamp'] = pd.to_datetime(frame['数据时间']).astype('int64')
    frame.sort_values(by='timestamp', ascending=True, inplace=True)

    return frame



#singledetail 转df
#singledetail -> DataFrame
def singledetail_deal(file):
    """Load a workout-detail csv (time / speed / cadence / heart rate) and
    add a UTC nanosecond-epoch ``timestamp`` column.

    :param file: path to the detail csv file
    :return: DataFrame sorted by timestamp ascending
    """
    frame = pd.read_csv(file, sep=',', encoding='utf-8')

    frame = frame.loc[:, ['数据时间', '速度', '步频', '心率']]
    # '数据时间' is Shanghai wall time: localize, convert to UTC, then to ns
    local_time = pd.to_datetime(frame['数据时间']).dt.tz_localize('Asia/Shanghai')
    frame['timestamp'] = local_time.dt.tz_convert('UTC').astype('int64')
    frame.sort_values(by='timestamp', ascending=True, inplace=True)
    return frame






















