import numpy as np
import os
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import random
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import joblib
# One summary row per processed CSV file; exported to Excel at the end.
all_results = []
output_file = "result_1.xlsx"


def time_count(filtered_df):
    """Return the time span (in hours) covered by the rows of ``filtered_df``.

    Consecutive rows are treated as 10 ms apart (hence ``* 10`` with a
    milliseconds-per-hour divisor -- TODO confirm the actual sampling
    interval), so ``n`` rows span ``(n - 1) * 10`` ms.

    Returns 0.0 for an empty frame; the original formula produced a
    negative duration in that case.
    """
    n_rows = filtered_df.shape[0]
    if n_rows == 0:
        return 0.0
    elapsed_ms = (n_rows - 1) * 10
    return round(elapsed_ms / (1000 * 60 * 60), 4)
# TimeSeriesDataset: sliding-window samples over a 6-column DataFrame.
class TimeSeriesDataset(Dataset):
    """Sliding-window dataset built from a DataFrame.

    Column layout (0-based): column 0 is ignored (presumably a timestamp --
    TODO confirm), columns 1-3 are the input features and columns 4-5 the
    prediction targets. Rows are subsampled by a factor of 50 before
    windows are built. A 5-column frame is padded with a NaN sixth column.
    """

    def __init__(self, df, seq_len=20, forecast_horizon=1, input_mean=None, input_std=None, target_mean=None,
                 target_std=None):
        super(TimeSeriesDataset, self).__init__()
        self.seq_len = seq_len
        self.forecast_horizon = forecast_horizon

        data = df.values
        # Keep every 50th row to reduce the effective sampling rate.
        data = data[::50]

        # Pad 5-column data with a NaN sixth column so the slicing below holds.
        # BUGFIX: the original per-row loop (`row = np.append(row, np.nan)`)
        # only rebound the loop variable and never changed the array, so any
        # 5-column input always hit the ValueError below.
        if data.ndim == 2 and data.shape[1] == 5:
            data = np.column_stack([data, np.full(data.shape[0], np.nan)])

        # Fail loudly on any other column count.
        if data.shape[1] != 6:
            raise ValueError(f"数据列数应为 6，但实际为 {data.shape[1]}")

        # Inputs are columns 2-4 (0-based 1:4) ...
        input_features = data[:, 1:4]
        # ... and targets are columns 5-6 (0-based 4:6).
        target_features = data[:, 4:6]

        # Standardise with the training-time statistics when provided,
        # otherwise fit mean/std on this data.
        self.input_mean = input_mean if input_mean is not None else input_features.mean(axis=0)
        self.input_std = input_std if input_std is not None else input_features.std(axis=0)
        self.input_data = (input_features - self.input_mean) / self.input_std

        self.target_mean = target_mean if target_mean is not None else target_features.mean(axis=0)
        self.target_std = target_std if target_std is not None else target_features.std(axis=0)
        self.target_data = (target_features - self.target_mean) / self.target_std

        # Build samples: each holds seq_len history steps plus the
        # forecast_horizon target steps that follow them.
        self.samples = []
        T = len(data) - seq_len - forecast_horizon + 1
        for i in range(T):
            x = self.input_data[i:i + seq_len, :]
            y = self.target_data[i + seq_len:i + seq_len + forecast_horizon]
            self.samples.append((x, y))

    def __len__(self):
        # Number of sliding windows built in __init__.
        return len(self.samples)

    def __getitem__(self, idx):
        # Return x as (features, seq_len) for Conv1d; squeeze the horizon
        # axis of y when forecast_horizon == 1.
        x, y = self.samples[idx]
        return torch.tensor(x, dtype=torch.float32).transpose(0, 1), torch.tensor(y, dtype=torch.float32).squeeze(0)

# 定义残差网络模型
class ResidualBlock(nn.Module):
    def __init__(self, channels, kernel_size=3, padding=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv1d(channels, channels, kernel_size, padding=padding)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv1d(channels, channels, kernel_size, padding=padding)

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        out = self.conv2(out)
        out += x
        out = self.relu(out)
        return out


class ResNetTimeSeries(nn.Module):
    """1-D ResNet regressor: conv stem -> residual blocks -> global average
    pool -> linear head.

    ``seq_len`` and ``forecast_horizon`` are accepted for interface
    compatibility but are not used internally -- the adaptive pooling makes
    the head independent of the input length.
    """

    def __init__(self, in_channels, num_filters, num_residual_blocks, seq_len, forecast_horizon, output_dim):
        super(ResNetTimeSeries, self).__init__()
        self.pre_conv = nn.Conv1d(in_channels, num_filters, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        blocks = [ResidualBlock(num_filters, kernel_size=3, padding=1)
                  for _ in range(num_residual_blocks)]
        self.res_blocks = nn.Sequential(*blocks)
        self.global_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(num_filters, output_dim)

    def forward(self, x):
        # Stem convolution + activation, then the residual stack.
        h = self.relu(self.pre_conv(x))
        h = self.res_blocks(h)
        # Average over the time axis and drop it before the linear head.
        pooled = self.global_pool(h).squeeze(-1)
        return self.fc(pooled)

# Load the trained model and the normalisation statistics saved at training
# time, then fill the missing target columns of every CSV under folder_path
# and accumulate per-activity duration summaries into all_results.
# NOTE(review): weights_only=True restricts torch.load to tensors/primitive
# containers on recent PyTorch; loading a fully pickled nn.Module this way
# may raise -- confirm how result_model.pth was saved.
model = torch.load('/root/bt/result/train/result_model.pth', weights_only=True)
model.eval()
# Presumably a dict with 'input_mean'/'input_std'/'target_mean'/'target_std'
# arrays -- verify against the training script.
scaler = joblib.load('/root/bt/result/train/scaler.pkl')

# Run on the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Inference parameters (must match the training configuration).
SEQ_LEN = 20
BATCH_SIZE = 32

# Folder containing the CSV files to process.
folder_path = '/root/autodl-tmp/bdata/one'  # replace with the actual folder path
# Process every CSV file in the folder.
for filename in os.listdir(folder_path):
    if filename.endswith('.csv'):
        file_path = os.path.join(folder_path, filename)
        df = pd.read_csv(file_path)

        # Pad 5-column files with an empty sixth column.
        if df.shape[1] == 5:
            df[df.columns[-1] + '_new'] = np.nan

        # Skip files whose column count is still wrong.
        if df.shape[1] != 6:
            print(f"文件 (unknown) 的列数应为 6，但实际为 {df.shape[1]}，跳过该文件。")
            continue

        # Rows where both target columns (0-based indices 4 and 5) are missing.
        empty_rows = df[df.iloc[:, 4].isnull() & df.iloc[:, 5].isnull()]

        if not empty_rows.empty:
            # Input features are columns 2-4 (0-based 1:4).
            input_features = empty_rows.iloc[:, 1:4].values

            # Standardise with the training-time statistics.
            input_features = (input_features - scaler['input_mean']) / scaler['input_std']

            # Build sliding windows of SEQ_LEN consecutive empty rows.
            # NOTE(review): the windows span consecutive *empty* rows, which
            # are not necessarily adjacent in the original file -- confirm
            # this matches the intended semantics.
            samples = []
            for i in range(len(input_features) - SEQ_LEN + 1):
                x = input_features[i:i + SEQ_LEN, :]
                samples.append(x)

            if samples:
                # (N, SEQ_LEN, 3) -> (N, 3, SEQ_LEN) for the Conv1d model.
                samples = np.array(samples)
                samples = torch.tensor(samples, dtype=torch.float32).transpose(1, 2).to(device)

                # Batched forward passes without gradient tracking.
                predictions = []
                with torch.no_grad():
                    for i in range(0, len(samples), BATCH_SIZE):
                        batch_x = samples[i:i + BATCH_SIZE]
                        preds = model(batch_x)
                        predictions.extend(preds.cpu().numpy())

                predictions = np.array(predictions)

                # Undo the target standardisation.
                predictions = predictions * scaler['target_std'] + scaler['target_mean']

                # Write each window's prediction to the last row of its window
                # (the first SEQ_LEN - 1 empty rows get no prediction).
                for i, index in enumerate(empty_rows.index[SEQ_LEN - 1:]):
                    df.loc[index, df.columns[4:6]] = predictions[i]

                # Coerce the sixth column (0-based index 5) to float.
                # BUGFIX: the original read df.iloc[:, 6], which is out of
                # bounds for a 6-column frame and raised IndexError.
                df.iloc[:, 5] = pd.to_numeric(df.iloc[:, 5], errors='coerce')
                # MET below 1: sleep.
                filtered_df = df[df.iloc[:, 5] < 1.0]
                result_sleep = time_count(filtered_df)
                # MET in [1, 1.6): sedentary.
                filtered_df = df[(df.iloc[:, 5] >=1) & (df.iloc[:, 5] < 1.6)]
                result_static = time_count(filtered_df)
                # MET in [1.6, 3.0): light activity.
                filtered_df = df[(df.iloc[:, 5] >=1.6) & (df.iloc[:, 5] < 3.0)]
                result_low = time_count(filtered_df)
                # MET in [3.0, 6.0): moderate activity.
                filtered_df = df[(df.iloc[:, 5] >=3.0) & (df.iloc[:, 5] < 6.0)]
                result_moderate = time_count(filtered_df)
                # MET of 6.0 or more: vigorous activity.
                filtered_df = df[df.iloc[:, 5] >=6.0]
                result_intensive = time_count(filtered_df)
                result_all = result_sleep + result_static + result_low +  result_moderate + result_intensive
                results = [filename, result_all, result_sleep, result_intensive, result_moderate, result_low, result_static]
                all_results.append(results)

        # Writing the predictions back to the source CSV is intentionally
        # disabled; re-enable the line below to update the files in place.
        # df.to_csv(file_path, index=False)

# Assemble the per-file summary rows into one table and export it to Excel.
column_names = ['志愿者ID', '记录总时长(小时)', '睡眠总时长(小时)', '高强度运动总时长(小时)', '中强度运动总时长(小时)', '低强度运动总时长(小时)', '静态运动总时长(小时)']
summary_df = pd.DataFrame(all_results, columns=column_names)
try:
    summary_df.to_excel(output_file, index=False)
    print(f"文件已成功保存到 {output_file}")
except PermissionError:
    # Typically raised on Windows when the target file is open in Excel.
    print(f"权限不足，无法保存文件 {output_file}。请确保文件未被其他程序占用，并且你有写入权限。")