import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from utils.log import Logger
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report, precision_score, recall_score, roc_auc_score, accuracy_score
import joblib
import os

# Module-level logger: root_path is the grandparent directory of this file
# (i.e. the project root, assuming this module lives one level below it —
# TODO confirm); log channel name 'common', level 'info'.
logger = Logger(root_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 
                log_name='common', level='info').get_logger()


def data_preprocessing(path):
    """
    Preprocess the dataset for model training.

    Steps:
    1. Drop meaningless features (constant columns / row identifier).
    2. Create weighted combined features.
    3. One-hot encode string (categorical) features.
    4. Standardize numeric features.
    5. Drop the raw columns consumed by the combined features.

    Parameters
    ----------
    path : str
        Path of the CSV dataset to preprocess.

    Returns
    -------
    tuple
        (preprocessed ``pandas.DataFrame``, fitted ``StandardScaler``) —
        the scaler is returned so the identical transform can be applied
        to new data at inference time.

    Raises
    ------
    FileNotFoundError
        If ``path`` does not point to an existing file.
    KeyError
        If an expected column is missing from the dataset.
    """
    try:
        logger.info(f"开始数据预处理，数据文件路径: {path}")

        # Load the raw dataset.
        data = pd.read_csv(path)
        logger.info(f"成功读取数据文件，数据形状: {data.shape}")

        # 1. Drop features that carry no signal: constant columns
        #    ('StandardHours', 'Over18') and the row identifier.
        drop_cols = ['StandardHours', 'Over18', 'EmployeeNumber']
        data = data.drop(columns=drop_cols)
        logger.info(f"已去除无意义特征: {drop_cols}，当前数据形状: {data.shape}")

        # 2. Create weighted combined features from the raw (pre-scaling)
        #    values. The weights appear hand-tuned — TODO confirm provenance.
        data['JobLevel_MonthlyIncome'] = data['JobLevel'] * 0.51 + data['MonthlyIncome'] * 0.49
        data['PercentSalaryHike_PerformanceRating'] = data['PercentSalaryHike'] * 0.57 + data['PerformanceRating'] * 0.43
        logger.info("已创建新特征: JobLevel_MonthlyIncome, PercentSalaryHike_PerformanceRating")

        # 3. One-hot encode categorical string features.
        str_cols = ['BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime']
        data = pd.get_dummies(data, columns=str_cols, drop_first=True)  # drop_first=True avoids multicollinearity
        logger.info(f"已完成字符串特征热编码，处理特征: {str_cols}，当前数据形状: {data.shape}")

        # 4. Standardize numeric features.
        #    NOTE(review): 'MonthlyIncome' is standardized here and dropped in
        #    step 5, so it still participates in the fitted scaler's column
        #    order — any inference-time transform must account for that.
        numeric_cols = ['Age', 'DistanceFromHome', 'MonthlyIncome', 'NumCompaniesWorked', 'PercentSalaryHike',
                        'TotalWorkingYears', 'TrainingTimesLastYear', 'YearsAtCompany', 'YearsInCurrentRole',
                        'YearsSinceLastPromotion', 'YearsWithCurrManager']

        scaler = StandardScaler()
        # Rebuild a DataFrame after scaling to preserve column names and index.
        scaled_data = scaler.fit_transform(data[numeric_cols])
        data[numeric_cols] = pd.DataFrame(scaled_data, columns=numeric_cols, index=data.index)
        logger.info(f"已完成数值特征标准化，处理特征数量: {len(numeric_cols)}")

        # 5. Drop the raw columns consumed by the combined features.
        #    (Fixed: the original list contained 'PerformanceRating' twice.)
        #    NOTE(review): 'PercentSalaryHike' also fed a combined feature but
        #    is intentionally kept here to preserve the output schema — confirm
        #    whether it should be dropped as well.
        data = data.drop(columns=['JobLevel', 'PerformanceRating', 'MonthlyIncome'])
        logger.info(f"已删除用于创建新特征的原始列，最终数据形状: {data.shape}")

        logger.info("数据预处理完成")
        return data, scaler

    except FileNotFoundError:
        logger.error(f"数据文件未找到: {path}")
        raise
    except KeyError as e:
        logger.error(f"数据预处理过程中缺少必要的列: {e}")
        raise
    except Exception as e:
        logger.error(f"数据预处理过程中出现未知错误: {e}")
        raise

