# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 14:43:53 2024

@author: Unknown
"""

import pandas as pd 
import matplotlib.pyplot as plt
import numpy as np
# Load the "Give Me Some Credit" data; the first CSV column is a row
# index, so drop it from both train and test frames.
df_train = pd.read_csv('cs-training.csv', header=0)
print(df_train)
df_test = pd.read_csv('cs-test.csv', header=0)
df_target = pd.read_csv('sampleEntry.csv', header=0)
df_train = df_train.iloc[:, 1:]
df_test = df_test.iloc[:, 1:]

# Bar chart of the target-label class distribution (POS = delinquent).
pos = sum(df_train['SeriousDlqin2yrs'] > 0.5)
neg = len(df_train) - pos
plt.figure(figsize=(14, 10))
# Renamed from `dict`: never shadow the builtin of the same name.
class_counts = {'POS': pos, 'NEG': neg}
for i, key in enumerate(class_counts):
    plt.bar(i, class_counts[key], width=0.2)
    plt.text(i - 0.05, class_counts[key] + 0.01, class_counts[key], fontsize=24)
plt.xticks(np.arange(len(class_counts)), class_counts.keys(), fontsize=24)
plt.yticks([10000, 70000, 130000], fontsize=24)

# Bar chart of the per-column missing-value fraction (only non-zero bars).
plt.figure(figsize=(14, 10))
loc = []
# Fraction of NaNs in each column of the training frame.
s = pd.isnull(df_train).sum() / len(df_train)
for i in range(df_train.shape[1]):
    # BUG FIX: `s[i]` with an integer on a string-indexed Series relies on
    # deprecated positional fallback; use .iloc for explicit positional access.
    if s.iloc[i] != 0:
        plt.bar(i, s.iloc[i], width=1)
        plt.text(i - 0.1, s.iloc[i] + 0.005, '%.3f' % s.iloc[i], fontsize=24)
        loc.append(i)
plt.xticks(loc, s.index[loc], fontsize=24)
plt.yticks([0, 0.1, 0.2], fontsize=24)
plt.ylim(0, 0.25)
# Handle missing values: drop MonthlyIncome (large NaN fraction, see plot
# above) and impute NumberOfDependents with the TRAINING-set mean so the
# test set sees no target leakage from its own distribution.
df_train = df_train.drop(['MonthlyIncome'], axis=1)
df_test = df_test.drop(['MonthlyIncome'], axis=1)
# BUG FIX: the test-set fillna line was duplicated; also avoid chained
# `.fillna(..., inplace=True)` on a column selection (deprecated — may act
# on a copy). Compute the mean once and assign explicitly.
dep_mean = df_train['NumberOfDependents'].mean()
df_train['NumberOfDependents'] = df_train['NumberOfDependents'].fillna(dep_mean)
df_test['NumberOfDependents'] = df_test['NumberOfDependents'].fillna(dep_mean)


# One scatter panel per column (5x2 grid) to eyeball outliers by row order.
fig = plt.figure(figsize=(14, 8))
n_columns = df_train.shape[1]
for col_idx in range(n_columns):
    ax = fig.add_subplot(5, 2, col_idx + 1)
    ax.set_title(df_train.columns[col_idx], fontsize=16)
    values = df_train.iloc[:, col_idx]
    ax.scatter(np.arange(len(values)), values, s=1)
    ax.set_xticks([])
    ax.tick_params(axis='y', labelsize=16)
fig.tight_layout()
# Remove outlier records: utilization ratios above 1 and implausible ages.
index = df_train['RevolvingUtilizationOfUnsecuredLines'] <= 1
df_train2 = df_train[index]
# BUG FIX: the original assigned to the mojibake name `inãex`, so the next
# line reused the OLD mask and the age filter was silently never applied.
# Build the mask from df_train2 so its index aligns with the frame it filters.
index = df_train2['age'] > 18
df_train2 = df_train2[index]




from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier 
from sklearn.model_selection import cross_validate 
import numpy as np
np.random.seed(10)
# First column is the label SeriousDlqin2yrs; the remaining columns are features.
X = np.array(df_train2.iloc[:, 1:])
y = np.array(df_train2.iloc[:, 0])
# Up-weight the minority (delinquent) class by the class-imbalance ratio.
weight = sum(y == 0) / sum(y == 1)
class_weight = {0: 1, 1: weight}
# BUG FIX: 'aceuracy' was a typo — sklearn raises on unknown scorer names.
scoring = ['accuracy', 'balanced_accuracy', 'roc_auc']
# Build a CART decision tree with class weighting to counter label imbalance.
cart = DecisionTreeClassifier(class_weight=class_weight,
                              min_samples_leaf=80, max_depth=8)

# BUG FIX: `scoring=scoring` was missing, so cross_validate only produced
# 'test_score' and the balanced_accuracy / roc_auc lookups below raised KeyError.
scores = cross_validate(cart, X, y, cv=10, scoring=scoring)
print('CART 决策树模型的信用评分结果：')
s = np.mean(scores['test_accuracy'])
print('accuracy: %s' % s)
s = np.mean(scores['test_balanced_accuracy'])
print('balanced_accuracy:%s' % s)
s = np.mean(scores['test_roc_auc'])
print('AUC: %s' % s)


# BUG FIX: the pipeline construction was commented out (and misspelled
# "Standardscaler"), leaving the bare name `svm` to raise NameError.
svm = make_pipeline(StandardScaler(),
                    SVC(gamma='auto', C=100, class_weight=class_weight))
scores = cross_validate(svm, X, y, cv=10, scoring=scoring)
print('SVM 模型的信用评分结果：')
# BUG FIX: cross_validate keys use underscores ('test_accuracy', ...);
# the originals with spaces raised KeyError.
s = np.mean(scores['test_accuracy'])
print('accuracy: %s' % s)
s = np.mean(scores['test_balanced_accuracy'])
print('balanced accuracy: %s' % s)
s = np.mean(scores['test_roc_auc'])
# BUG FIX: '&s' had no conversion specifier, so `% s` raised TypeError.
print('AUC:%s' % s)