import pandas as pd
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.preprocessing import OrdinalEncoder, StandardScaler
from imblearn.over_sampling import SMOTE
from net.bwie.gongdan.bean.FilePath import FILE1_TEST_FORMAT1_FILEPATH1, FILE1_TRAIN_FORMAT1_FILEPATH1, \
    FILE1_USER_INFO_FORMAT1_FILEPATH1, FILE1_USER_LOG_FORMAT1_FILEPATH1, FILE2_TEST_FORMAT2_FILEPATH2, \
    FILE2_TRAIN_FORMAT2_FILEPATH2, FILE3_SAMPLE_SUBMISSION_FILEPATH3

if __name__ == '__main__':
    # TODO: load raw data
    def read_data():
        """Load every raw CSV and down-sample each to 10% for faster iteration.

        Returns:
            tuple: the seven frames, in a fixed order —
            (file1_test, file1_train, file1_user_info, file1_user_log,
             file2_test, file2_train, file3_sample_submission)
        """
        def _load(path):
            # Uniform 10% sample with a fixed seed so every run sees the
            # same subset of rows (reproducibility across the pipeline).
            return pd.read_csv(path).sample(frac=0.1, random_state=42)

        return (
            _load(FILE1_TEST_FORMAT1_FILEPATH1),
            _load(FILE1_TRAIN_FORMAT1_FILEPATH1),
            _load(FILE1_USER_INFO_FORMAT1_FILEPATH1),
            _load(FILE1_USER_LOG_FORMAT1_FILEPATH1),
            _load(FILE2_TEST_FORMAT2_FILEPATH2),
            _load(FILE2_TRAIN_FORMAT2_FILEPATH2),
            _load(FILE3_SAMPLE_SUBMISSION_FILEPATH3),
        )


    # TODO: preprocessing
    def preprocess_data(file1_test, file1_train, file1_user_info, file1_user_log):
        """Merge user info/log onto the train and test frames, then impute,
        encode, and scale.

        Only users present in BOTH the info and the log tables are kept
        (same filter the original intersection computed).

        Returns:
            tuple: (test_merged, train_merged) — note test comes first.
        """
        # TODO: merge data — ids that appear in every auxiliary table.
        aux_ids = set(file1_user_info['user_id']) & set(file1_user_log['user_id'])

        def _merge(base):
            # One-line purpose: restrict to shared users, then left-join
            # the info and log tables onto the base frame.
            kept = base[base['user_id'].isin(aux_ids)]
            kept = pd.merge(kept, file1_user_info, on='user_id', how='left')
            return pd.merge(kept, file1_user_log, on='user_id', how='left')

        train_merged = _merge(file1_train)
        test_merged = _merge(file1_test)

        # TODO: fill missing values (0 acts as the "unknown" bucket for
        # age_range/gender; avoids chained-assignment pitfalls of inplace=True).
        train_merged = train_merged.fillna(0)
        test_merged = test_merged.fillna(0)

        # TODO: encode categorical features.
        # handle_unknown guards against categories that occur only in the test
        # split — the original plain transform() would raise ValueError there.
        cat_cols = ['age_range', 'gender']
        encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
        train_merged[cat_cols] = encoder.fit_transform(train_merged[cat_cols])
        test_merged[cat_cols] = encoder.transform(test_merged[cat_cols])

        # TODO: standardize numeric features — fit on train only, so no test
        # statistics leak into the scaler. (num_cols currently equals the
        # categorical columns; extend it when real numeric features exist.)
        num_cols = ['age_range', 'gender']
        scaler = StandardScaler()
        train_merged[num_cols] = scaler.fit_transform(train_merged[num_cols])
        test_merged[num_cols] = scaler.transform(test_merged[num_cols])
        return test_merged, train_merged

    # TODO: feature selection
    def feature_engineering(test_merged, train_merged):
        """Select features/target, split, and oversample the TRAIN split only.

        Fix: the original applied SMOTE *before* train_test_split, which lets
        synthetic near-duplicates of validation rows leak into the training
        set and inflates every validation metric. Resampling must happen
        after the split, on the training portion alone.

        Returns:
            tuple: (X_train_split, y_train_split, X_test, X_val, y_val)
        """
        feature_columns = ['age_range', 'gender']
        target_column = 'label'
        X_train = train_merged[feature_columns]
        y_train = train_merged[target_column]
        X_test = test_merged[feature_columns]

        # TODO: split first (stratified so the validation set keeps the true
        # class ratio — it must reflect the real, imbalanced distribution) ...
        X_train_split, X_val, y_train_split, y_val = train_test_split(
            X_train, y_train, test_size=0.2, random_state=42, stratify=y_train)

        # ... then balance only the training portion.
        smote = SMOTE(random_state=42)
        X_train_split, y_train_split = smote.fit_resample(X_train_split, y_train_split)
        return X_train_split, y_train_split, X_test, X_val, y_val

    # TODO: train and evaluate a logistic-regression model
    def train_and_evaluate_logistic_regression(X_train, y_train, X_val, y_val):
        """Tune LogisticRegression with randomized search, print validation
        metrics, and return the best estimator.

        Fix: ROC-AUC must be computed from probability scores, not from hard
        0/1 predictions — labels collapse the ROC curve to a single point.
        """
        param_dist = {
            'C': [0.1, 1, 10],
            'penalty': ['l1', 'l2'],
            'solver': ['liblinear']  # liblinear supports both l1 and l2
        }
        model = LogisticRegression()
        random_search = RandomizedSearchCV(model, param_distributions=param_dist, cv=3, n_iter=5, random_state=42)
        random_search.fit(X_train, y_train)
        best_model = random_search.best_estimator_
        y_pred = best_model.predict(X_val)
        # Positive-class probabilities for a meaningful AUC.
        y_score = best_model.predict_proba(X_val)[:, 1]
        # TODO: drop rows with missing values so metric calls cannot fail.
        valid_indices = ~pd.isnull(y_val) & ~pd.isnull(y_pred)
        y_val_valid = y_val[valid_indices]
        y_pred_valid = y_pred[valid_indices]
        y_score_valid = y_score[valid_indices]
        accuracy = accuracy_score(y_val_valid, y_pred_valid)
        precision = precision_score(y_val_valid, y_pred_valid, zero_division=1)
        recall = recall_score(y_val_valid, y_pred_valid)
        f1 = f1_score(y_val_valid, y_pred_valid)
        auc = roc_auc_score(y_val_valid, y_score_valid)
        print("逻辑回归模型评估指标：")
        print(f"准确率: {accuracy}")
        print(f"精确率: {precision}")
        print(f"召回率: {recall}")
        print(f"F1值: {f1}")
        print(f"AUC值: {auc}")
        return best_model

    # TODO: train and evaluate a decision-tree model
    def train_and_evaluate_decision_tree(X_train, y_train, X_val, y_val):
        """Tune DecisionTreeClassifier with randomized search, print validation
        metrics, and return the best estimator.

        Fix: ROC-AUC must be computed from probability scores, not from hard
        0/1 predictions — labels collapse the ROC curve to a single point.
        """
        param_dist = {
            'max_depth': [3, 5, 7],
            'min_samples_split': [2, 5],
            'min_samples_leaf': [1, 2]
        }
        model = DecisionTreeClassifier()
        random_search = RandomizedSearchCV(model, param_distributions=param_dist, cv=3, n_iter=5, random_state=42)
        random_search.fit(X_train, y_train)
        best_model = random_search.best_estimator_
        y_pred = best_model.predict(X_val)
        # Positive-class probabilities for a meaningful AUC.
        y_score = best_model.predict_proba(X_val)[:, 1]
        # TODO: drop rows with missing values so metric calls cannot fail.
        valid_indices = ~pd.isnull(y_val) & ~pd.isnull(y_pred)
        y_val_valid = y_val[valid_indices]
        y_pred_valid = y_pred[valid_indices]
        y_score_valid = y_score[valid_indices]

        accuracy = accuracy_score(y_val_valid, y_pred_valid)
        precision = precision_score(y_val_valid, y_pred_valid, zero_division=1)
        recall = recall_score(y_val_valid, y_pred_valid)
        f1 = f1_score(y_val_valid, y_pred_valid)
        auc = roc_auc_score(y_val_valid, y_score_valid)
        print("决策树模型评估指标：")
        print(f"准确率: {accuracy}")
        print(f"精确率: {precision}")
        print(f"召回率: {recall}")
        print(f"F1值: {f1}")
        print(f"AUC值: {auc}")
        return best_model

    # TODO: train and evaluate a random-forest model
    def train_and_evaluate_random_forest(X_train, y_train, X_val, y_val):
        """Tune RandomForestClassifier with randomized search, print validation
        metrics, and return the best estimator.

        Fix: ROC-AUC must be computed from probability scores, not from hard
        0/1 predictions — labels collapse the ROC curve to a single point.
        """
        param_dist = {
            'n_estimators': [50, 100],
            'max_depth': [3, 5, 7],
            'min_samples_split': [2, 5],
            'min_samples_leaf': [1, 2]
        }
        model = RandomForestClassifier()
        random_search = RandomizedSearchCV(model, param_distributions=param_dist, cv=3, n_iter=5, random_state=42)
        random_search.fit(X_train, y_train)
        best_model = random_search.best_estimator_
        y_pred = best_model.predict(X_val)
        # Positive-class probabilities for a meaningful AUC.
        y_score = best_model.predict_proba(X_val)[:, 1]
        # TODO: drop rows with missing values so metric calls cannot fail.
        valid_indices = ~pd.isnull(y_val) & ~pd.isnull(y_pred)
        y_val_valid = y_val[valid_indices]
        y_pred_valid = y_pred[valid_indices]
        y_score_valid = y_score[valid_indices]

        accuracy = accuracy_score(y_val_valid, y_pred_valid)
        precision = precision_score(y_val_valid, y_pred_valid, zero_division=1)
        recall = recall_score(y_val_valid, y_pred_valid)
        f1 = f1_score(y_val_valid, y_pred_valid)
        auc = roc_auc_score(y_val_valid, y_score_valid)
        print("随机森林模型评估指标：")
        print(f"准确率: {accuracy}")
        print(f"精确率: {precision}")
        print(f"召回率: {recall}")
        print(f"F1值: {f1}")
        print(f"AUC值: {auc}")
        return best_model


    # Pipeline driver. NOTE: this code already runs under the module-level
    # `if __name__ == '__main__':` guard that wraps the whole script body,
    # so the previous second, nested guard was redundant and is removed
    # (behavior is identical both when run as a script and when imported).
    file1_test, file1_train, file1_user_info, file1_user_log, file2_test, file2_train, file3_sample_submission = read_data()
    test_merged, train_merged = preprocess_data(file1_test, file1_train, file1_user_info, file1_user_log)
    X_train, y_train, X_test, X_val, y_val = feature_engineering(test_merged, train_merged)

    # TODO: train and evaluate the classification models
    logistic_model = train_and_evaluate_logistic_regression(X_train, y_train, X_val, y_val)
    decision_tree_model = train_and_evaluate_decision_tree(X_train, y_train, X_val, y_val)
    random_forest_model = train_and_evaluate_random_forest(X_train, y_train, X_val, y_val)
