import pandas as pd
import mysql.connector
from datetime import datetime
import os
import json
import numpy as np

# Database connection configuration.
# Credentials may be overridden via environment variables so the real
# password does not have to live in source control; the defaults preserve
# the original hard-coded values for local development.
db_config = {
    'host': os.environ.get('TALENT_DB_HOST', 'localhost'),
    'user': os.environ.get('TALENT_DB_USER', 'root'),
    'password': os.environ.get('TALENT_DB_PASSWORD', '123456'),  # replace for production
    'database': os.environ.get('TALENT_DB_NAME', 'talent_frm_db'),
    'charset': 'utf8mb4'
}


def clean_data(df):
    """
    Clean the raw HR dataframe.

    Fills missing tenure/experience counters with 0, coerces the expected
    numeric columns to numbers (invalid values become 0), and strips
    surrounding whitespace from categorical string values.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw dataframe loaded from the attrition CSV.

    Returns
    -------
    pandas.DataFrame
        Cleaned dataframe (the input is not modified in place).
    """
    print("开始数据清洗...")

    # Missing tenure counters are treated as "none" (0 years / 0 companies).
    df = df.fillna({
        'NumCompaniesWorked': 0,
        'TotalWorkingYears': 0,
        'YearsAtCompany': 0,
        'YearsInCurrentRole': 0,
        'YearsSinceLastPromotion': 0,
        'YearsWithCurrManager': 0
    })

    # Columns expected to be numeric in the database schema.
    numeric_columns = [
        'Age', 'DistanceFromHome', 'Education', 'EmployeeNumber', 'EnvironmentSatisfaction',
        'JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome', 'NumCompaniesWorked',
        'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction', 'StockOptionLevel',
        'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany',
        'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager', 'Attrition'
    ]

    for col in numeric_columns:
        if col in df.columns:
            # errors='coerce' turns unparsable values into NaN, then fill with 0.
            df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)

    # Categorical columns whose string values are normalized.
    categorical_columns = [
        'BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole',
        'MaritalStatus', 'OverTime'
    ]

    # Strip surrounding whitespace from non-null values only.  The previous
    # blanket astype(str) silently converted missing values into the literal
    # strings 'nan'/'None'; real missing values are now preserved as NaN.
    for col in categorical_columns:
        if col in df.columns:
            df[col] = df[col].map(
                lambda v: str(v).strip() if pd.notna(v) else v
            )

    print("数据清洗完成")
    return df


def validate_data(df):
    """
    Validate the cleaned dataframe before it is loaded into the database.

    Verifies every required column is present and prints warnings for
    suspicious value ranges (out-of-range ages, negative monthly income).

    Raises
    ------
    ValueError
        If any required column is absent.

    Returns
    -------
    bool
        True when validation succeeds.
    """
    print("开始数据验证...")

    # Columns the downstream INSERT statement depends on.
    required_columns = [
        'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education',
        'EducationField', 'EmployeeNumber', 'EnvironmentSatisfaction', 'Gender',
        'JobInvolvement', 'JobLevel', 'JobRole', 'JobSatisfaction', 'MaritalStatus',
        'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike',
        'PerformanceRating', 'RelationshipSatisfaction', 'StockOptionLevel',
        'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance',
        'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
        'YearsWithCurrManager', 'Attrition'
    ]

    missing_columns = []
    for name in required_columns:
        if name not in df.columns:
            missing_columns.append(name)
    if missing_columns:
        raise ValueError(f"缺少必要的列: {missing_columns}")

    # Range sanity checks: warn only, do not fail the import.
    youngest, oldest = df['Age'].min(), df['Age'].max()
    if youngest < 18 or oldest > 100:
        print("警告: 年龄数据可能存在异常值")

    if df['MonthlyIncome'].min() < 0:
        print("警告: 月收入存在负值")

    print("数据验证完成")
    return True


def import_csv_to_mysql(csv_file_path, table_name):
    """
    Read a CSV file, clean and validate it, then bulk-insert it into MySQL.

    Existing rows in the target table are deleted first, so re-running the
    import is idempotent.

    Parameters
    ----------
    csv_file_path : str
        Path to the source CSV file.
    table_name : str
        Destination table.  NOTE: this identifier is interpolated directly
        into the SQL text (identifiers cannot be bound as parameters), so
        only call this function with trusted, hard-coded table names.

    Raises
    ------
    Exception
        Any read / clean / validate / database error is printed and re-raised.
    """
    conn = None
    cursor = None
    try:
        # Read the CSV file.
        df = pd.read_csv(csv_file_path)
        print(f"读取文件 {csv_file_path}，共 {len(df)} 行数据")

        # Clean, then validate (validate_data raises on missing columns).
        df = clean_data(df)
        validate_data(df)

        # Connect to the database.
        conn = mysql.connector.connect(**db_config)
        cursor = conn.cursor()

        # Clear existing rows so the import replaces, not appends.
        cursor.execute(f"DELETE FROM {table_name}")
        conn.commit()

        # Parameterized insert statement (29 columns).
        insert_query = f"""
        INSERT INTO {table_name} (
            age, business_travel, department, distance_from_home, education, 
            education_field, employee_number, environment_satisfaction, gender, 
            job_involvement, job_level, job_role, job_satisfaction, marital_status, 
            monthly_income, num_companies_worked, over_time, percent_salary_hike, 
            performance_rating, relationship_satisfaction, stock_option_level, 
            total_working_years, training_times_last_year, work_life_balance, 
            years_at_company, years_in_current_role, years_since_last_promotion, 
            years_with_curr_manager, attrition
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """

        # Build the rows, coercing to plain Python int/str so the MySQL
        # driver does not receive numpy scalar types.
        data_to_insert = []
        for _, row in df.iterrows():
            data_to_insert.append((
                int(row['Age']),
                str(row['BusinessTravel']),
                str(row['Department']),
                int(row['DistanceFromHome']),
                int(row['Education']),
                str(row['EducationField']),
                int(row['EmployeeNumber']),
                int(row['EnvironmentSatisfaction']),
                str(row['Gender']),
                int(row['JobInvolvement']),
                int(row['JobLevel']),
                str(row['JobRole']),
                int(row['JobSatisfaction']),
                str(row['MaritalStatus']),
                int(row['MonthlyIncome']),
                int(row['NumCompaniesWorked']),
                str(row['OverTime']),
                int(row['PercentSalaryHike']),
                int(row['PerformanceRating']),
                int(row['RelationshipSatisfaction']),
                int(row['StockOptionLevel']),
                int(row['TotalWorkingYears']),
                int(row['TrainingTimesLastYear']),
                int(row['WorkLifeBalance']),
                int(row['YearsAtCompany']),
                int(row['YearsInCurrentRole']),
                int(row['YearsSinceLastPromotion']),
                int(row['YearsWithCurrManager']),
                int(row['Attrition'])
            ))

        cursor.executemany(insert_query, data_to_insert)
        conn.commit()

        print(f"成功导入 {len(data_to_insert)} 条数据到 {table_name} 表")

    except Exception as e:
        print(f"导入数据时发生错误: {str(e)}")
        raise
    finally:
        # Always release database resources; the original implementation
        # leaked the connection and cursor when an exception occurred.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()


def analyze_categorical_data(csv_file_path):
    """
    Print a quick profile of the dataset to prepare for one-hot encoding:
    the unique values of each categorical column, followed by summary
    statistics (mean, std, range) for each numeric column.

    Parameters
    ----------
    csv_file_path : str
        Path to the CSV file to profile.
    """
    try:
        frame = pd.read_csv(csv_file_path)

        print("=== 分类变量分析 ===")
        for name in ('BusinessTravel', 'Department', 'EducationField', 'Gender',
                     'JobRole', 'MaritalStatus', 'OverTime'):
            if name not in frame.columns:
                continue
            distinct = frame[name].unique()
            print(f"{name}: {len(distinct)} 个唯一值")
            print(f"  值: {list(distinct)}")
            print()

        print("=== 数值变量统计 ===")
        numeric_names = (
            'Age', 'DistanceFromHome', 'Education', 'EnvironmentSatisfaction',
            'JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome',
            'NumCompaniesWorked', 'PercentSalaryHike', 'PerformanceRating',
            'RelationshipSatisfaction', 'StockOptionLevel', 'TotalWorkingYears',
            'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany',
            'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager'
        )
        for name in numeric_names:
            if name not in frame.columns:
                continue
            series = frame[name]
            print(
                f"{name}: 均值={series.mean():.2f}, 标准差={series.std():.2f}, 范围=[{series.min()}, {series.max()}]")

    except Exception as e:
        print(f"分析数据时发生错误: {str(e)}")


def create_onehot_mapping(csv_file_path):
    """
    Create a {column -> {value -> index}} encoding mapping for the
    categorical columns, for use in the feature-engineering stage.

    Values are sorted alphabetically so the indices are deterministic
    across runs.  Keys of the returned dict are lower-cased column names.

    Parameters
    ----------
    csv_file_path : str
        Path to the CSV file to derive the mapping from.

    Returns
    -------
    dict
        The encoding mapping, or {} when the file cannot be processed.
    """
    try:
        # Read the CSV file.
        df = pd.read_csv(csv_file_path)

        encoding_mapping = {}
        categorical_columns = [
            'BusinessTravel', 'Department', 'EducationField', 'Gender',
            'JobRole', 'MaritalStatus', 'OverTime'
        ]

        for col in categorical_columns:
            if col in df.columns:
                # dropna() first: sorted() raises TypeError when NaN (a
                # float) is compared against the string values, which the
                # except below would silently turn into an empty mapping.
                unique_values = sorted(df[col].dropna().unique())
                encoding_mapping[col.lower()] = {val: i for i, val in enumerate(unique_values)}

        return encoding_mapping

    except Exception as e:
        print(f"创建编码映射时发生错误: {str(e)}")
        return {}


def main():
    """
    Script entry point: profile the training data, persist the one-hot
    encoding mapping to JSON, then import the train/test CSVs into MySQL.
    """
    train_csv_path = "../data/train.csv"

    # Analyze the data structure.
    print("=== 数据结构分析 ===")
    if os.path.exists(train_csv_path):
        analyze_categorical_data(train_csv_path)

    # Create the encoding mapping (used later by feature engineering).
    print("\n=== 创建编码映射 ===")
    if os.path.exists(train_csv_path):
        mapping = create_onehot_mapping(train_csv_path)
        print("编码映射创建完成")
        # Persist the mapping for the feature-engineering stage.
        with open('../data/encoding_mapping.json', 'w', encoding='utf-8') as f:
            json.dump(mapping, f, ensure_ascii=False, indent=2)
        print("编码映射已保存到 ../data/encoding_mapping.json")

    # Import the training data.
    if os.path.exists(train_csv_path):
        import_csv_to_mysql(train_csv_path, "training_data")
    else:
        print(f"训练数据文件不存在: {train_csv_path}")

    # Import the test data.
    test_csv_path = "../data/test.csv"
    if os.path.exists(test_csv_path):
        import_csv_to_mysql(test_csv_path, "test_data")
    else:
        print(f"测试数据文件不存在: {test_csv_path}")


if __name__ == "__main__":
    main()
