import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import joblib
import os
from sklearn.impute import KNNImputer

class DataProcessor:
    """Load, clean, impute and scale the heart-disease (UCI Cleveland) dataset.

    Training flow:  load_data -> preprocess_data -> split_data
    Serving flow:   prepare_prediction_data (reuses the scaler persisted at
                    training time) or preprocess_data(is_prediction=True).
    """

    # Continuous measurements: the only columns where IQR-based outlier
    # screening is meaningful.  Running IQR over 0/1 or small-code columns
    # (sex, fbs, exang, cp, ca, thal, slope) gives IQR == 0 for a dominant
    # value and wipes out every minority value — so they are excluded.
    CONTINUOUS_FEATURES = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']

    # Continuous columns that must be strictly positive (0 is physiologically
    # impossible).  'oldpeak' is NOT listed: 0.0 means "no ST depression" and
    # is a common, valid value.
    STRICTLY_POSITIVE_FEATURES = ['age', 'trestbps', 'chol', 'thalach']

    def __init__(self, config):
        """Store config and resolve where the fitted scaler is persisted.

        Args:
            config: opaque configuration object kept for callers; not read
                    directly by this class (TODO confirm against callers).
        """
        self.config = config
        self.scaler = StandardScaler()
        # Persist the scaler next to this module so training and serving
        # processes agree on the path.
        self.base_dir = os.path.dirname(os.path.abspath(__file__))
        self.scaler_path = os.path.join(self.base_dir, 'models', 'scaler.joblib')
        # Expected feature columns, in the order fed to the scaler/model.
        self.feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg',
                            'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal']

    def load_data(self, file_path):
        """Load the dataset from a CSV file."""
        return pd.read_csv(file_path)

    def remove_outliers(self, data):
        """Replace impossible and extreme values with NaN in a copy of `data`.

        - Impossible values: non-positive entries of strictly-positive vitals,
          and negative `oldpeak`, become NaN (imputed later by
          `fill_missing_values`).
        - Extreme values: 1.5*IQR screening, restricted to continuous columns.
          BUGFIX: previously IQR ran over every numeric column, nulling
          minority values of binary/categorical codes and even the target.

        Returns:
            A cleaned copy; the input DataFrame is not modified.
        """
        cleaned_data = data.copy()

        # Physiologically impossible values -> NaN.
        for feature in self.STRICTLY_POSITIVE_FEATURES:
            if feature in cleaned_data.columns:
                cleaned_data.loc[cleaned_data[feature] <= 0, feature] = np.nan
        # oldpeak may legitimately be 0; only negatives are invalid.
        if 'oldpeak' in cleaned_data.columns:
            cleaned_data.loc[cleaned_data['oldpeak'] < 0, 'oldpeak'] = np.nan

        # 1.5*IQR screening on continuous measurements only.
        for feature in self.CONTINUOUS_FEATURES:
            if feature not in cleaned_data.columns:
                continue
            q1 = cleaned_data[feature].quantile(0.25)
            q3 = cleaned_data[feature].quantile(0.75)
            iqr = q3 - q1
            lower_bound = q1 - 1.5 * iqr
            upper_bound = q3 + 1.5 * iqr
            out_of_range = (cleaned_data[feature] < lower_bound) | (cleaned_data[feature] > upper_bound)
            cleaned_data.loc[out_of_range, feature] = np.nan

        return cleaned_data

    def fill_missing_values(self, data, n_neighbors=5):
        """Impute missing numeric values with K-nearest-neighbours.

        Args:
            data: DataFrame possibly containing NaNs.
            n_neighbors: neighbourhood size passed to `KNNImputer`.

        Returns:
            A copy with numeric NaNs imputed; returned unchanged (still a
            copy) when nothing is missing.
        """
        filled_data = data.copy()

        # Fast path: nothing to impute.
        if not filled_data.isnull().values.any():
            return filled_data

        imputer = KNNImputer(n_neighbors=n_neighbors)

        # Only numeric columns can be KNN-imputed.
        numeric_features = filled_data.select_dtypes(include=['float64', 'int64']).columns
        filled_data[numeric_features] = imputer.fit_transform(filled_data[numeric_features])

        return filled_data

    def preprocess_data(self, df, is_prediction=False):
        """Clean, impute and scale a dataset.

        Args:
            df: raw DataFrame containing the 13 feature columns (plus
                'target' when training).
            is_prediction: when True, no 'target' is required and the scaler
                persisted at training time is reused.

        Returns:
            X (scaled ndarray) when is_prediction, else (X, y) with y the
            binarised int target.

        Raises:
            ValueError: if required columns are missing, or (prediction only)
                if no trained scaler has been saved yet.
        """
        # BUGFIX: operate on a copy so the caller's DataFrame is not mutated.
        df = df.copy()

        # Fail fast if any required feature column is absent.
        missing_cols = [col for col in self.feature_names if col not in df.columns]
        if missing_cols:
            raise ValueError(f"缺少必需的列: {', '.join(missing_cols)}")

        if df.isnull().values.any():
            print("警告：数据集中存在缺失值")

        if not is_prediction:
            # Rows without a label are unusable; feature NaNs are imputed
            # below instead of dropping whole rows (BUGFIX: the old blanket
            # dropna() defeated the KNN imputer).
            df = df.dropna(subset=['target'])
            # Collapse the original 0-4 severity code to binary disease/no-disease.
            df['target'] = (df['target'] > 0).astype(int)

        df = self.remove_outliers(df)
        df = self.fill_missing_values(df)

        if is_prediction:
            # BUGFIX: reuse the scaler fitted at training time instead of
            # re-fitting on the prediction batch (train/serve skew).
            if not os.path.exists(self.scaler_path):
                raise ValueError("请先训练模型！")
            self.scaler = joblib.load(self.scaler_path)
            return self.scaler.transform(df[self.feature_names])

        # Fit the scaler on training data and persist it for serving.
        X = self.scaler.fit_transform(df[self.feature_names])
        os.makedirs(os.path.dirname(self.scaler_path), exist_ok=True)
        joblib.dump(self.scaler, self.scaler_path)

        # Re-cast: KNN imputation can upcast int columns to float.
        return X, df['target'].astype(int)

    def split_data(self, X, y, test_size=0.2):
        """Split into train/test sets with a fixed seed for reproducibility."""
        return train_test_split(X, y, test_size=test_size, random_state=42)

    def prepare_prediction_data(self, data):
        """Scale a prediction batch with the scaler persisted at training time.

        NOTE(review): unlike preprocess_data, this path performs no outlier
        removal or imputation — confirm callers pass already-clean data.

        Raises:
            ValueError: if required columns are missing or no scaler exists.
        """
        missing_cols = [col for col in self.feature_names if col not in data.columns]
        if missing_cols:
            raise ValueError(f"缺少必需的列: {', '.join(missing_cols)}")

        # Load the trained scaler; refuse to guess if training never ran.
        if os.path.exists(self.scaler_path):
            self.scaler = joblib.load(self.scaler_path)
        else:
            raise ValueError("请先训练模型！")

        # Enforce the column order the scaler was fitted with.
        data = data[self.feature_names]
        return self.scaler.transform(data)