# 1.加载相关库与数据集
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
import warnings

# Silence library warnings so the console output stays readable.
warnings.filterwarnings('ignore')
# NOTE(review): hard-coded absolute Windows path -- consider making the data
# directory configurable.
print(os.listdir("C:/Users/admin/PycharmProjects/pythonProject/数据集"))

# 2. Preview of the main datasets
# 2.1 Training data (inspect features and row count)
app_train = pd.read_csv("C:/Users/admin/PycharmProjects/pythonProject/数据集/application_train.csv")
print('Training data shape: ', app_train.shape)
print(app_train.head())

# 2.2 Testing data (inspect features and row count)
app_test = pd.read_csv("C:/Users/admin/PycharmProjects/pythonProject/数据集/application_test.csv")
print('Testing data shape: ', app_test.shape)
print(app_test.head())
# The test set lacks the TARGET column -- the value we must predict
# (0 = loan repaid on time, 1 = loan not repaid on time).
print(app_train['TARGET'].value_counts())  # class distribution of TARGET

# 3. Data processing
# 3.1 Missing-value handling
# 3.1.1 Data cleaning: drop every column where more than half the rows are blank.
# FIX: pandas documents `dropna`'s `thresh` parameter as an int, but
# len(app_train) / 2 is a float.  (len + 1) // 2 is the integer ceiling of
# len/2, so (non-NA counts being integers) exactly the same columns are
# kept/dropped while the documented parameter type is respected.
app_train = app_train.dropna(thresh=(len(app_train) + 1) // 2, axis=1)


# 定义一个函数，查看看剩余数据的缺失值情况

def missing_values_table(df):
    """Summarise the missing values of *df*, one row per affected column.

    Returns a DataFrame with columns 'Missing Values' (count) and
    '% of Total Values' (percentage, rounded to one decimal), containing
    only the columns that have at least one missing value, sorted by
    percentage in descending order.  Also prints a short summary.
    """
    # Count and percentage of missing entries per column.
    total_missing = df.isnull().sum()
    percent_missing = 100 * df.isnull().sum() / len(df)

    # Combine into one table and give the columns readable names.
    table = pd.concat([total_missing, percent_missing], axis=1)
    table = table.rename(columns={0: 'Missing Values', 1: '% of Total Values'})

    # Keep only columns that actually have missing values, most-missing first.
    has_missing = table.iloc[:, 1] != 0
    table = table[has_missing].sort_values('% of Total Values', ascending=False).round(1)

    summary = ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
               "There are " + str(table.shape[0]) + " columns that have missing values.")
    print(summary)

    return table


# Report the remaining missing-value situation after the first cleaning pass.
missing_values = missing_values_table(app_train)
print(missing_values.head(10))

# These columns still have a high missing ratio, so drop them outright.
drop_columns = [
    "FLOORSMAX_AVG",
    "FLOORSMAX_MODE",
    "FLOORSMAX_MEDI",
    "YEARS_BEGINEXPLUATATION_AVG",
    "YEARS_BEGINEXPLUATATION_MODE",
    "YEARS_BEGINEXPLUATATION_MEDI",
    "TOTALAREA_MODE",
    "EMERGENCYSTATE_MODE",
]
app_train = app_train.drop(columns=drop_columns)

# 3.1.2 Data type conversion
print(app_train.dtypes.value_counts())  # overview of how many columns of each dtype remain
# Twelve columns are 'object' typed and need converting to numbers;
# first inspect how many distinct categories each of them has.
print(app_train.select_dtypes('object').apply(pd.Series.nunique, axis=0))
# Features with only two categories are converted with Label Encoding.
from sklearn.preprocessing import LabelEncoder

# Create a label encoder object
le = LabelEncoder()
le_count = 0  # how many columns were label encoded

# Iterate through the columns
for col in app_train:
    if app_train[col].dtype == 'object':
        # Only encode columns with 2 or fewer unique categories
        # (NaN counts as a category here, matching the original check).
        if len(list(app_train[col].unique())) <= 2:
            # BUG FIX: fit on the union of train and test values instead of
            # the training column alone -- a category that appears only in
            # the test set would otherwise make `transform` raise ValueError.
            # When both columns hold the same categories the encoding is
            # identical to fitting on the training data only.
            le.fit(pd.concat([app_train[col], app_test[col]]))
            # Transform both training and testing data
            app_train[col] = le.transform(app_train[col])
            app_test[col] = le.transform(app_test[col])

            # Keep track of how many columns were label encoded
            le_count += 1
print('%d columns were label encoded.' % le_count)

# Convert the remaining multi-category object features with one-hot encoding.
app_test = pd.get_dummies(app_test)
app_train = pd.get_dummies(app_train)
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)

# Keep only the columns the two frames have in common (inner alignment).
# Stash the target first, because the test frame has no TARGET column and
# the alignment would otherwise discard it.
train_labels = app_train['TARGET']
app_train, app_test = app_train.align(app_test, join='inner', axis=1)
app_train['TARGET'] = train_labels
print('Training Features shape: ', app_train.shape)
print('Testing Features shape: ', app_test.shape)

# 3.1.3 Type conversion is done -- now fill every remaining missing value
# with the column median.
from sklearn.impute import SimpleImputer

simpleimputer = SimpleImputer(strategy='median')
train = app_train.drop(columns=['TARGET'])
column_list = train.columns.tolist()

# Fit on the training data only, then fill both training and testing data.
train = simpleimputer.fit_transform(train)
test = simpleimputer.transform(app_test)

# Rebuild DataFrames (the imputer returns plain arrays) and re-attach TARGET.
train = pd.DataFrame(train, columns=column_list)
app_test = pd.DataFrame(test, columns=column_list)
app_train = pd.concat([train, app_train['TARGET']], axis=1)
print('Training data shape: ', app_train.shape)
print('Testing data shape: ', app_test.shape)

# Verify that no missing values remain.
print(app_train.isnull().sum())
print(app_test.isnull().sum())
# Imputation turned "SK_ID_CURR" into float; restore it to int.
app_train["SK_ID_CURR"] = app_train["SK_ID_CURR"].astype(int)
app_test["SK_ID_CURR"] = app_test["SK_ID_CURR"].astype(int)

# 3.2 Outlier handling
print((app_train['DAYS_BIRTH'] / -365).describe())  # age in years -- no anomalies
print(app_train['DAYS_EMPLOYED'].describe())  # days employed -- contains the 365243 sentinel

# Treat the anomalous sentinel value as missing: flag it, replace it with NaN,
# and impute it before it reaches any model.
app_train['DAYS_EMPLOYED_ANOM'] = app_train["DAYS_EMPLOYED"] == 365243
# BUG FIX: assign the result of `replace` instead of calling it with
# `inplace=True` on a selected column.  That pattern is chained assignment:
# under pandas Copy-on-Write (default in pandas 3) the in-place call operates
# on a temporary and leaves the DataFrame unchanged.
app_train['DAYS_EMPLOYED'] = app_train['DAYS_EMPLOYED'].replace({365243: np.nan})
app_test['DAYS_EMPLOYED_ANOM'] = app_test["DAYS_EMPLOYED"] == 365243
app_test["DAYS_EMPLOYED"] = app_test["DAYS_EMPLOYED"].replace({365243: np.nan})
print('There are %d anomalies in the test data out of %d entries' % (app_test["DAYS_EMPLOYED_ANOM"].sum(), len(app_test)))

# Fill the NaNs created above with the training-set median.
# NOTE(review): this re-binds the module-level `train`/`test` names to
# single-column (n, 1) arrays, clobbering the full feature matrices built in
# section 3.1.3 -- the model-training sections below must not rely on them.
from sklearn.impute import SimpleImputer
simpleimputer = SimpleImputer(strategy='median')
train = app_train['DAYS_EMPLOYED'].values.reshape(-1, 1)
simpleimputer.fit(train)
train = simpleimputer.transform(app_train['DAYS_EMPLOYED'].values.reshape(-1, 1))
test = simpleimputer.transform(app_test['DAYS_EMPLOYED'].values.reshape(-1, 1))
app_train['DAYS_EMPLOYED'] = train
app_test['DAYS_EMPLOYED'] = test
print(app_train['DAYS_EMPLOYED'].describe())
print(app_test['DAYS_EMPLOYED'].describe())

# 4. Correlation analysis: rank every feature by its correlation with TARGET.
correlations = app_train.corr()['TARGET'].sort_values()
print('Most Positive Correlations:\n', correlations.tail(15))
print('\nMost Negative Correlations:\n', correlations.head(15))
# DAYS_BIRTH shows a comparatively large positive correlation with the
# target, so it is analysed further below.
# 5. Feature analysis
# 5.1 Relationship between client age and on-time repayment
plt.figure(figsize=(10, 8))
# KDE of loans repaid on time (DAYS_BIRTH is negative days, so / -365 = years)
sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, 'DAYS_BIRTH'] / -365, label = 'target == 0')
# KDE of loans not repaid on time
sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, 'DAYS_BIRTH'] / -365, label = 'target == 1')
plt.xlabel('Age (years)')
plt.ylabel('Density')
plt.title('Distribution of Ages')
plt.show()
# 5.2 Try another feature -- days employed (job stability).  Its correlation
# with the target is low and no clear pattern emerges, so other weakly
# correlated features are likewise not worth analysing.
plt.figure(figsize=(10, 8))
# KDE of loans repaid on time
sns.kdeplot(app_train.loc[app_train['TARGET'] == 0, 'DAYS_EMPLOYED'] / -365, label = 'target == 0')
# KDE of loans not repaid on time
sns.kdeplot(app_train.loc[app_train['TARGET'] == 1, 'DAYS_EMPLOYED'] / -365, label = 'target == 1')
# BUG FIX: the quantity plotted is DAYS_EMPLOYED / -365, i.e. *years*
# employed, and the old title 'Distribution of Ages' was copy-pasted from
# the age plot above -- both labels corrected.
plt.xlabel('Years employed')
plt.ylabel('Density')
plt.title('Distribution of Years Employed')
plt.show()

# 6. Model training
# 6.1 Baseline model: logistic regression
from sklearn.linear_model import LogisticRegression

# BUG FIX: the module-level `train`/`test` names were overwritten in section
# 3.2 with the single DAYS_EMPLOYED column reshaped to (n, 1), so the old code
# silently trained on one feature.  Rebuild the full feature matrices from the
# processed frames instead.
features = app_train.drop(columns=['TARGET'])
test_features = app_test[features.columns]  # enforce identical column order

# Higher iteration cap: the features are unscaled, so the default 100
# lbfgs iterations may stop before convergence.
log_reg = LogisticRegression(max_iter=1000)
log_reg.fit(features, train_labels)
log_reg_pred = log_reg.predict_proba(test_features)[:, 1]  # P(TARGET == 1)

# `.copy()` avoids writing into a slice of app_test (SettingWithCopyWarning).
submit = app_test[['SK_ID_CURR']].copy()
submit['TARGET'] = log_reg_pred
print(submit['TARGET'])
print(submit.head())
submit.to_csv('log_reg_baseline.csv', index=False)  # to_csv returns None; printing it was pointless

# 6.2 Add domain-knowledge features and retrain.
# Create ratios that are plausibly predictive of default: credit/income,
# annuity/income, annuity/credit (loan term proxy), employment/age.
app_train_domain = app_train.copy()
app_train['CREDIT_INCOME_PERCENT'] = app_train_domain['AMT_CREDIT'] / app_train_domain['AMT_INCOME_TOTAL']
app_train['ANNUITY_INCOME_PERCENT'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_INCOME_TOTAL']
app_train['CREDIT_TERM'] = app_train_domain['AMT_ANNUITY'] / app_train_domain['AMT_CREDIT']
app_train['DAYS_EMPLOYED_PERCENT'] = app_train_domain['DAYS_EMPLOYED'] / app_train_domain['DAYS_BIRTH']

app_test_domain = app_test.copy()
app_test['CREDIT_INCOME_PERCENT'] = app_test_domain['AMT_CREDIT'] / app_test_domain['AMT_INCOME_TOTAL']
app_test['ANNUITY_INCOME_PERCENT'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_INCOME_TOTAL']
app_test['CREDIT_TERM'] = app_test_domain['AMT_ANNUITY'] / app_test_domain['AMT_CREDIT']
app_test['DAYS_EMPLOYED_PERCENT'] = app_test_domain['DAYS_EMPLOYED'] / app_test_domain['DAYS_BIRTH']

# Retrain logistic regression with the enriched feature set.
from sklearn.linear_model import LogisticRegression

# BUG FIX: the old code refit on the stale module-level `train`/`test`
# arrays (the single DAYS_EMPLOYED column from section 3.2), so the domain
# features above had no effect and this model was identical to the baseline.
# Fit on the full frames, which now include the four new features.
features = app_train.drop(columns=['TARGET'])
test_features = app_test[features.columns]  # enforce identical column order

log_reg = LogisticRegression(max_iter=1000)  # unscaled features need more iterations
log_reg.fit(features, train_labels)
log_reg_pred = log_reg.predict_proba(test_features)[:, 1]  # P(TARGET == 1)

submit = app_test[['SK_ID_CURR']].copy()  # .copy() avoids SettingWithCopyWarning
submit['TARGET'] = log_reg_pred
print(submit['TARGET'])
print(submit.head())
submit.to_csv('log_reg_baseline_domain.csv', index=False)

