import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import os
import matplotlib.pyplot as plt
import seaborn as sns

def load_data(file_path='../data/room_occupancy.csv'):
    """
    Load the room occupancy dataset.

    If *file_path* does not exist, fall back to looking for a file with the
    same basename under <module's parent dir>/data.

    Parameters:
    file_path: path to the CSV data file

    Returns:
    DataFrame: the loaded data

    Raises:
    FileNotFoundError: if neither the given nor the fallback path exists
    """
    print(f"加载数据: {file_path}")
    # Check that the file exists
    if not os.path.exists(file_path):
        print(f"文件不存在: {file_path}")
        # Try to locate the file relative to this module's parent directory.
        # Use the requested file's basename — the original code hard-coded
        # 'room_occupancy.csv', which broke the fallback for custom paths.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        parent_dir = os.path.dirname(current_dir)
        alternative_path = os.path.join(parent_dir, 'data', os.path.basename(file_path))
        print(f"尝试替代路径: {alternative_path}")
        if os.path.exists(alternative_path):
            file_path = alternative_path
        else:
            raise FileNotFoundError(f"无法找到数据文件: {file_path}")
    
    # Load the data
    df = pd.read_csv(file_path)
    print(f"成功加载数据，形状: {df.shape}")
    return df

def explore_data(df):
    """
    Print an exploratory summary of the dataset.

    Note: mutates *df* in place by adding a 'DateTime' column when both
    'Date' and 'Time' columns are present.

    Parameters:
    df: input DataFrame
    """
    print("\n数据基本信息:")
    # df.info() prints directly and returns None, so wrapping it in print()
    # emitted a spurious "None" line.
    df.info()
    
    print("\n数据统计描述:")
    print(df.describe())
    
    print("\n缺失值统计:")
    missing_values = df.isnull().sum()
    missing_percent = (missing_values / len(df)) * 100
    missing_stats = pd.DataFrame({
        '缺失值数量': missing_values,
        '缺失百分比': missing_percent
    })
    # Only show columns that actually have missing values
    print(missing_stats[missing_stats['缺失值数量'] > 0])
    
    # Combine date and time into a single timestamp column
    if 'Date' in df.columns and 'Time' in df.columns:
        print("\n时间范围:")
        df['DateTime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
        print(f"开始时间: {df['DateTime'].min()}")
        print(f"结束时间: {df['DateTime'].max()}")

def clean_data(df):
    """
    Data cleaning: handle missing values and outliers.

    Numeric missing values are filled with the column median, categorical
    ones with the column mode; numeric outliers are clamped to 1.5*IQR fences.

    Parameters:
    df: raw DataFrame

    Returns:
    DataFrame: cleaned copy of the data (caller's frame is not mutated)
    """
    print("\n开始数据清洗...")
    
    # Work on a copy to avoid mutating the caller's data
    df_clean = df.copy()
    
    # 1. Missing values
    before_missing = df_clean.isnull().sum().sum()
    print(f"处理前缺失值总数: {before_missing}")
    
    # Numeric columns: fill with the column median
    numeric_cols = df_clean.select_dtypes(include=['float64', 'int64']).columns
    for col in numeric_cols:
        if df_clean[col].isnull().sum() > 0:
            median_val = df_clean[col].median()
            # Direct assignment instead of inplace fillna on a column slice:
            # chained inplace ops are deprecated under pandas copy-on-write
            # and may silently not update the parent frame.
            df_clean[col] = df_clean[col].fillna(median_val)
            print(f"列 '{col}' 的缺失值已用中位数 {median_val} 填充")
    
    # Categorical columns: fill with the column mode
    categorical_cols = df_clean.select_dtypes(include=['object']).columns
    for col in categorical_cols:
        if df_clean[col].isnull().sum() > 0:
            mode_val = df_clean[col].mode()[0]
            df_clean[col] = df_clean[col].fillna(mode_val)
            print(f"列 '{col}' 的缺失值已用众数 '{mode_val}' 填充")
    
    after_missing = df_clean.isnull().sum().sum()
    print(f"处理后缺失值总数: {after_missing}")
    
    # 2. Outliers (IQR clipping)
    for col in numeric_cols:
        # Skip the target and the binary PIR sensors, where the IQR rule
        # does not make sense.
        if col not in ['Room_Occupancy_Count', 'S6_PIR', 'S7_PIR']:
            # Quartiles and inter-quartile range
            Q1 = df_clean[col].quantile(0.25)
            Q3 = df_clean[col].quantile(0.75)
            IQR = Q3 - Q1
            
            # Standard 1.5*IQR fences
            lower_bound = Q1 - 1.5 * IQR
            upper_bound = Q3 + 1.5 * IQR
            
            # Count values outside the fences
            outliers_count = ((df_clean[col] < lower_bound) | (df_clean[col] > upper_bound)).sum()
            
            if outliers_count > 0:
                print(f"列 '{col}' 中检测到 {outliers_count} 个异常值")
                
                # Winsorize: clamp to the fences rather than dropping rows
                df_clean.loc[df_clean[col] < lower_bound, col] = lower_bound
                df_clean.loc[df_clean[col] > upper_bound, col] = upper_bound
                print(f"已将 '{col}' 中的异常值替换为边界值")
    
    return df_clean

def normalize_data(df):
    """
    Standardize the numeric feature columns of *df* (zero mean, unit variance).

    Date/time columns, string-typed columns and the target column
    'Room_Occupancy_Count' are left untouched.

    Parameters:
    df: cleaned DataFrame

    Returns:
    tuple: (standardized DataFrame copy, fitted StandardScaler)
    """
    print("\n开始数据归一化...")

    df_norm = df.copy()

    # Columns that must never be scaled: raw date/time fields,
    # string-typed columns, and the prediction target.
    excluded = [name for name in ('Date', 'Time', 'DateTime') if name in df_norm.columns]
    excluded += df_norm.select_dtypes(include=['object']).columns.tolist()
    excluded.append('Room_Occupancy_Count')

    cols_to_normalize = [name for name in df_norm.columns if name not in excluded]

    print(f"将对以下列进行标准化: {cols_to_normalize}")

    # Fit on the feature columns and overwrite them with the scaled values
    scaler = StandardScaler()
    df_norm[cols_to_normalize] = scaler.fit_transform(df_norm[cols_to_normalize])

    print("数据归一化完成")
    return df_norm, scaler

def _add_sensor_stats(df_features, sensor_cols, prefix, label):
    """
    Add per-row mean and variance columns for one sensor family.

    Mutates *df_features* in place; does nothing when *sensor_cols* is empty.

    Parameters:
    df_features: DataFrame to extend
    sensor_cols: list of column names belonging to the sensor family
    prefix: column-name prefix for the new features (e.g. 'Temp')
    label: human-readable sensor label used in the log message
    """
    if sensor_cols:
        df_features[f'{prefix}_Mean'] = df_features[sensor_cols].mean(axis=1)
        df_features[f'{prefix}_Var'] = df_features[sensor_cols].var(axis=1)
        print(f"添加了{label}传感器统计特征: {prefix}_Mean, {prefix}_Var")


def add_features(df):
    """
    Feature engineering: derive time-based and sensor-aggregate features.

    Parameters:
    df: input DataFrame

    Returns:
    DataFrame: copy of *df* with the new feature columns appended
    """
    print("\n开始特征工程...")
    
    # Work on a copy so the caller's frame is untouched
    df_features = df.copy()
    
    # 1. Time features derived from the Date/Time columns
    if 'Date' in df.columns and 'Time' in df.columns:
        # Create the combined DateTime column if it does not exist yet
        if 'DateTime' not in df_features.columns:
            df_features['DateTime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
        
        df_features['Hour'] = df_features['DateTime'].dt.hour
        df_features['Minute'] = df_features['DateTime'].dt.minute
        df_features['DayOfWeek'] = df_features['DateTime'].dt.dayofweek
        # dayofweek: Saturday=5, Sunday=6
        df_features['IsWeekend'] = df_features['DayOfWeek'].isin([5, 6]).astype(int)
        
        print("添加了时间特征: Hour, Minute, DayOfWeek, IsWeekend")
    
    # 2. Per-row statistics over each sensor family (mean + variance).
    # One helper call per family replaces the three copy-pasted stanzas.
    _add_sensor_stats(df_features, [c for c in df.columns if 'Temp' in c], 'Temp', '温度')
    _add_sensor_stats(df_features, [c for c in df.columns if 'Light' in c], 'Light', '光线')
    _add_sensor_stats(df_features, [c for c in df.columns if 'Sound' in c], 'Sound', '声音')
    
    # Total motion detections across all PIR sensors
    pir_cols = [col for col in df.columns if 'PIR' in col]
    if pir_cols:
        df_features['PIR_Sum'] = df_features[pir_cols].sum(axis=1)
        print("添加了PIR传感器总和特征: PIR_Sum")
    
    print("特征工程完成")
    return df_features

def save_processed_data(df, output_path='../data/first_clean/processed_room_occupancy.csv'):
    """
    Persist the processed dataset to CSV, creating parent directories as needed.

    Parameters:
    df: processed DataFrame
    output_path: destination CSV path
    """
    # Guard against an empty dirname (bare filename) — os.makedirs('')
    # raises — and use exist_ok to be safe on re-runs / races.
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    
    # Write without the index so reloads round-trip cleanly
    df.to_csv(output_path, index=False)
    print(f"\n处理后的数据已保存至: {output_path}")
    print(f"数据形状: {df.shape}")

def preprocess_pipeline():
    """
    Run the end-to-end preprocessing workflow.

    Steps: load -> explore -> clean -> feature engineering -> normalize -> save.

    Returns:
    tuple: (fully processed DataFrame, fitted scaler)
    """
    print("开始数据预处理流程...")

    raw_df = load_data()
    explore_data(raw_df)
    cleaned = clean_data(raw_df)
    with_features = add_features(cleaned)
    normalized, scaler = normalize_data(with_features)
    save_processed_data(normalized)

    print("数据预处理流程完成!")
    return normalized, scaler

# Run the full preprocessing pipeline when executed as a script.
if __name__ == "__main__":
    preprocess_pipeline() 