import pandas as pd
import os
import numpy as np
from datetime import datetime
from sklearn import preprocessing
import joblib

# Input directory of raw accelerometer CSVs and the Excel file for results.
input_folder = "/root/autodl-tmp/bdata/two"
output_file = "result_3.xlsx"

# Collect every CSV whose name starts with 'P' (one file per participant).
file_list = [
    name
    for name in os.listdir(input_folder)
    if name.startswith('P') and name.endswith('.csv')
]

def time_count(filtered_df):
    """Convert a frame's row count into an elapsed duration in hours.

    Each consecutive pair of rows is assumed to be 10 ms apart
    (i.e. a 100 Hz sampling rate — NOTE(review): confirm against the
    actual recording rate of the source CSVs).

    Args:
        filtered_df: DataFrame whose row count measures elapsed samples.

    Returns:
        Duration in hours, rounded to 4 decimal places. An empty frame
        yields 0.0 (the original's ``shape[0] - 1`` produced -0.0 here).
    """
    # Clamp at zero so an empty frame does not produce a negative span.
    elapsed_ms = max(filtered_df.shape[0] - 1, 0) * 10
    return round(elapsed_ms / (1000 * 60 * 60), 4)

def custom_parse(x):
    """Parse one timestamp string into a datetime.

    Expected shape: 'YYYY-MM-DD HH:MM:SS.ffffff' — the fractional-second
    part is mandatory because of the %f directive.
    """
    fmt = '%Y-%m-%d %H:%M:%S.%f'
    return datetime.strptime(x, fmt)

def df_pred(file_path):
    """Read one raw CSV and derive the columns needed for prediction.

    The CSV is expected to carry a timestamp in column 0 and an
    'annotation' column of the form '<class-id> ... MET <value>'.

    Args:
        file_path: Path to the CSV file.

    Returns:
        DataFrame sorted ascending by 'timestamp_ms', with numeric
        'class' and 'MET' columns added and 'annotation' dropped.
    """
    df = pd.read_csv(file_path, header=0)
    # Parse the timestamp column explicitly: read_csv's ``date_parser``
    # keyword is deprecated and removed in pandas 2.x; this is the
    # behavior-equivalent replacement for the old custom_parse hook.
    time_col = df.columns[0]
    df[time_col] = pd.to_datetime(df[time_col], format='%Y-%m-%d %H:%M:%S.%f')
    # The leading digits of the annotation are the activity class id
    # (e.g. '11585 ... MET 1.5' -> 11585).
    df['class'] = pd.to_numeric(df['annotation'].str.extract(r'(\d+)')[0])
    # 'MET <number>' carries the metabolic-equivalent value (e.g. 1.5).
    df['MET'] = pd.to_numeric(df['annotation'].str.extract(r'MET\s(\d+(?:\.\d+)?)')[0])
    # The raw annotation column is no longer needed.
    df.drop(columns=['annotation'], inplace=True)
    # datetime64[ns] -> int64 nanoseconds -> milliseconds since epoch.
    df['timestamp_ms'] = df[time_col].astype('int64') // 10**6
    # Sort chronologically.
    df.sort_values(by='timestamp_ms', inplace=True)
    return df

# Load the trained GMM and the feature scaler fitted at training time;
# both .joblib files must exist in the current working directory.
loaded_model = joblib.load('best_gmm_model.joblib')
loaded_scaler = joblib.load('scaler.joblib')

# Process files in small chunks, appending each chunk's rows to the Excel
# workbook so partial progress survives a mid-run failure.
# NOTE(review): chunk_size is 2 although the original comment said "five".
chunk_size = 2
for i in range(0, len(file_list), chunk_size):
    chunk_files = file_list[i:i + chunk_size]
    all_results = []
    for file_name in chunk_files:
        file_path = os.path.join(input_folder, file_name)
        try:
            # Load and preprocess one participant's recording.
            df = df_pred(file_path)
            print(f"begin:{file_name}")
            # Ensure MET is numeric; unparsable values become NaN and are
            # dropped by the comparison below.
            df['MET'] = pd.to_numeric(df['MET'], errors='coerce')
            # Rows with MET < 1.0 are treated as sleep. .copy() avoids a
            # SettingWithCopyWarning when predictions are assigned below.
            filtered_df = df[df['MET'] < 1.0].copy()
            # Columns 1-3 are the accelerometer axes fed to the model.
            features = filtered_df.iloc[:, 1:4]

            # Standardize with the scaler from training, then assign each
            # sleep sample to one of the five GMM clusters (labels 0-4).
            scaled_features = loaded_scaler.transform(features)
            predictions = loaded_model.predict(scaled_features)
            filtered_df['predicted_cluster'] = predictions

            # Hours spent in each of the five sleep modes.
            # BUGFIX: the original filtered on iloc[:, 6], which is the
            # timestamp_ms column (predicted_cluster sits at position 7),
            # so the comparison against labels 0-4 could never match the
            # intended column — select predicted_cluster by name instead.
            mode_times = [
                time_count(filtered_df[filtered_df['predicted_cluster'] == label])
                for label in range(5)
            ]
            result_all = sum(mode_times)
            all_results.append([file_name, result_all, *mode_times])

        except Exception as e:
            print(f"处理文件 {file_name} 时出错: {e}")

    columns = ['志愿者ID', '睡眠总时长(小时)', '睡眠模式一(小时)', '睡眠模式二(小时)', '睡眠模式三(小时)', '睡眠模式四(小时)', '睡眠模式五(小时)']
    result_df = pd.DataFrame(all_results, columns=columns)
    try:
        if i == 0:
            # First chunk creates the workbook (and writes the header row).
            result_df.to_excel(output_file, index=False)
        else:
            # Later chunks append below the existing rows, without a header.
            with pd.ExcelWriter(output_file, mode='a', engine='openpyxl', if_sheet_exists='overlay') as writer:
                result_df.to_excel(writer, index=False, header=False, startrow=writer.sheets['Sheet1'].max_row)
        print(f"已处理并保存 {chunk_files} 到 {output_file}")
    except PermissionError:
        print(f"权限不足，无法保存文件 {output_file}。请确保文件未被其他程序占用，并且你有写入权限。")
