import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

class DataPreprocessor:
    """Load, validate and feature-engineer the NWDAF cell-load dataset.

    The CSV is expected to contain the columns
    ``t, cell_id, cat_id, pe_id, load, has_anomaly`` where ``t`` is a time
    index (assumed to be in hours — TODO confirm) and ``has_anomaly`` is a
    strictly binary label.
    """

    def __init__(self, data_path):
        """Read the CSV at *data_path* and run basic schema validation.

        Raises:
            ValueError: if a required column is missing or ``has_anomaly``
                contains values other than 0/1.
            Exception: any read/validation error is logged and re-raised.
        """
        try:
            self.data = pd.read_csv(data_path)
            print(f"✓ 数据加载完成,共 {len(self.data)} 条记录")

            # Schema check: fail fast if a required column is absent.
            required_columns = ['t', 'cell_id', 'cat_id', 'pe_id', 'load', 'has_anomaly']
            missing_columns = [col for col in required_columns if col not in self.data.columns]
            if missing_columns:
                raise ValueError(f"数据集缺少必要的列: {missing_columns}")

            # Coerce numeric columns; unparsable entries become NaN and are
            # handled later by create_features' fill step.
            numeric_columns = ['t', 'cell_id', 'cat_id', 'pe_id', 'load']
            for col in numeric_columns:
                if not pd.api.types.is_numeric_dtype(self.data[col]):
                    self.data[col] = pd.to_numeric(self.data[col], errors='coerce')

            # The anomaly label must be strictly binary.
            if not self.data['has_anomaly'].isin([0, 1]).all():
                raise ValueError("has_anomaly列必须只包含0和1")

            # Shared scaler; fitted on the training split in the prepare_* methods.
            self.scaler = StandardScaler()

            print("\n数据基本信息:")
            print(self.data.info())
            print("\n数据预览:")
            print(self.data.head())
            print("\n数据统计:")
            print(self.data.describe())

        except Exception as e:
            print(f"数据加载失败: {str(e)}")
            raise

    def create_features(self, window_sizes=(2, 4, 8)):
        """Build time-series features per (cell_id, cat_id, pe_id) series.

        For each window size ``w`` this adds ``last{w}_mean`` (rolling mean
        of ``load``) and ``per_change_last{w}`` (percent change over ``w``
        steps), plus ``hour`` and ``is_peak_hour`` time features.

        Args:
            window_sizes: iterable of rolling-window lengths. A tuple default
                avoids the shared-mutable-default pitfall.

        Returns:
            A new DataFrame (``self.data`` is not modified) with the added
            feature columns, NaN/inf values filled.
        """
        df = self.data.copy()

        # Sort so that within every (cell, cat, pe) series the rows are in
        # chronological order — required for rolling/pct_change below.
        df = df.sort_values(['t', 'cell_id', 'cat_id', 'pe_id'])

        group_keys = ['cell_id', 'cat_id', 'pe_id']
        for window in window_sizes:
            # Rolling mean of the load; min_periods=1 keeps early rows defined.
            df[f'last{window}_mean'] = df.groupby(group_keys)['load'].transform(
                lambda x, w=window: x.rolling(window=w, min_periods=1).mean()
            )

            # Percent change over `window` steps. fill_method=None avoids the
            # deprecated implicit forward-fill inside pct_change.
            df[f'per_change_last{window}'] = df.groupby(group_keys)['load'].transform(
                lambda x, w=window: x.pct_change(periods=w, fill_method=None)
            )

        # Time-of-day features; assumes t is expressed in hours.
        df['hour'] = df['t'] % 24
        df['is_peak_hour'] = df['hour'].apply(lambda x: 1 if (7 <= x <= 9) or (17 <= x <= 19) else 0)

        # pct_change on a zero load yields +/-inf; treat those as missing.
        feature_cols = [c for c in df.columns if c.startswith(('last', 'per_change'))]
        df[feature_cols] = df[feature_cols].replace([np.inf, -np.inf], np.nan)

        # Forward-fill within each series only: rows are sorted by t first,
        # so a global ffill would propagate values across unrelated
        # (cell, cat, pe) series. Remaining leading NaNs become 0.
        df[feature_cols] = df.groupby(group_keys)[feature_cols].ffill()
        df = df.fillna(0)

        print("\n创建的特征列:")
        print(df.columns.tolist())

        return df

    def prepare_data(self, target_col='load', test_size=0.2, random_state=42):
        """Prepare scaled train/test splits for load regression.

        The data is split *before* scaling and the scaler is fitted on the
        training split only, so test-set statistics cannot leak into the
        transformation.

        Returns:
            (X_train, X_test, y_train, y_test, feature_cols)
        """
        df = self.create_features()

        # All engineered feature columns produced by create_features.
        feature_cols = [col for col in df.columns if col.startswith(('last', 'per_change', 'hour', 'is_peak'))]

        print(f"\n使用的特征列: {feature_cols}")

        X = df[feature_cols]
        y = df[target_col]

        # NOTE(review): a random split of time-series rows still mixes past
        # and future; consider a chronological split for honest evaluation.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state
        )

        # Fit on train only, then apply the same transform to test.
        X_train = self.scaler.fit_transform(X_train)
        X_test = self.scaler.transform(X_test)

        print(f"\n训练集大小: {X_train.shape}")
        print(f"测试集大小: {X_test.shape}")

        return X_train, X_test, y_train, y_test, feature_cols

    def prepare_anomaly_detection_data(self, test_size=0.2, random_state=42):
        """Prepare scaled, label-stratified train/test splits for anomaly
        detection on the binary ``has_anomaly`` label.

        As in :meth:`prepare_data`, the scaler is fitted on the training
        split only to avoid test-set leakage.

        Returns:
            (X_train, X_test, y_train, y_test, feature_cols)
        """
        df = self.create_features()

        # Same engineered feature set as prepare_data.
        feature_cols = [col for col in df.columns if col.startswith(('last', 'per_change', 'hour', 'is_peak'))]

        X = df[feature_cols]
        y = df['has_anomaly']

        # Stratify to preserve the (typically small) anomaly ratio in both splits.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )

        # Fit on train only, then apply the same transform to test.
        X_train = self.scaler.fit_transform(X_train)
        X_test = self.scaler.transform(X_test)

        print(f"\n异常检测数据:")
        print(f"异常样本比例: {y.mean():.2%}")
        print(f"训练集大小: {X_train.shape}")
        print(f"测试集大小: {X_test.shape}")

        return X_train, X_test, y_train, y_test, feature_cols

    def analyze_features(self, df, feature_cols):
        """Print random-forest feature importances and save a correlation
        heatmap of the features plus ``load`` to 'correlation_matrix.png'."""
        print("\n特征分析:")
        print("-"*30)

        # Quick importance estimate via a default random forest.
        rf = RandomForestRegressor(random_state=42)
        rf.fit(df[feature_cols], df['load'])
        importance = pd.DataFrame({
            'feature': feature_cols,
            'importance': rf.feature_importances_
        }).sort_values('importance', ascending=False)
        print("\n特征重要性:")
        print(importance)

        # Correlation heatmap, written to disk; close the figure to free memory.
        corr = df[feature_cols + ['load']].corr()
        plt.figure(figsize=(10, 8))
        sns.heatmap(corr, annot=True, cmap='coolwarm')
        plt.title('Feature Correlation Matrix')
        plt.tight_layout()
        plt.savefig('correlation_matrix.png')
        plt.close()

if __name__ == "__main__":
    # Smoke test. The dataset path may be overridden as the first CLI
    # argument; the original hard-coded path remains the default so the
    # script still runs unchanged on the author's machine.
    default_path = r'C:\Users\18499\Desktop\porject\nwdaf_data.csv'
    data_path = sys.argv[1] if len(sys.argv) > 1 else default_path
    preprocessor = DataPreprocessor(data_path)
    X_train, X_test, y_train, y_test, feature_cols = preprocessor.prepare_data()