import pandas
from sklearn.neighbors import KNeighborsClassifier
import graphviz
from sklearn.model_selection import train_test_split
from sklearn import tree
# The dataset contains no missing values, so NA detection is disabled
# (na_filter=False) to speed up parsing.
data = pandas.read_csv('./divorce.csv', header=0, sep=';', encoding='utf-8', na_filter=False)
# print(data.isna().sum())  # confirmed: no missing values
feature = data.drop(columns=data.columns[-1])  # all columns except the last
label = data[data.columns[-1]]                 # last column is the divorce label
# print(feature)
# print(label)
feature_name = feature.columns.tolist()
# Fit a decision tree on the whole dataset purely to identify the decisive
# features; generalization accuracy is estimated separately below.
classifier = tree.DecisionTreeClassifier(criterion='entropy')  # criterion: 'gini' or 'entropy'
classifier.fit(feature, label)
# export_graphviz returns a DOT-format string; fontname='SimHei' makes the
# Chinese class names render correctly.
dot_data = tree.export_graphviz(classifier, class_names=['不离婚','离婚']
                                    , feature_names=feature_name
                                    , filled=True
                                    , rounded=True, fontname='SimHei',
                                     impurity=True)
graph = graphviz.Source(dot_data)  # wrap the DOT string in a renderable object
# BUG FIX: Source.save() writes the raw DOT source text, not an image, so the
# original "feature_select.png" produced a text file with a misleading .png
# extension. Save it with the correct .dot extension instead.
graph.save("feature_select.dot")
graph.render(view=True, format="pdf", filename="tree_pdf")  # decisive features found: Atr18, Atr26, Atr40
# Estimate the decision tree's generalization accuracy: average the
# held-out test-set score over 100 random 70/30 train/test splits.
scores = []
for _ in range(100):
    x_train, x_test, y_train, y_test = train_test_split(feature, label, test_size=0.3)
    classifier.fit(x_train, y_train)
    scores.append(classifier.score(x_test, y_test))
print('直接使用决策树算法的预测准确率为{:.2%}'.format(sum(scores)/100))
classifier = KNeighborsClassifier(n_neighbors=20)  # k = 20
li = []   # accuracies using all features
li1 = []  # accuracies using only the tree-selected features
for i in range(100):
    Xtrain, Xtest, Ytrain, Ytest = train_test_split(feature, label, test_size=0.3)
    # BUG FIX: the original fit the model on the *test* split and then scored
    # it on that same data, which measures training accuracy (data leakage),
    # not generalization. Fit on the training split, score on the test split.
    classifier.fit(Xtrain, Ytrain)
    li.append(classifier.score(Xtest, Ytest))
    # Restrict both splits to the features the decision tree found decisive:
    # columns 17, 25, 39 (i.e. Atr18, Atr26, Atr40).
    classifier.fit(Xtrain.iloc[:, [17, 25, 39]], Ytrain)
    li1.append(classifier.score(Xtest.iloc[:, [17, 25, 39]], Ytest))
print('直接使用KNN算法的预测准确率为{:.2%}'.format(sum(li)/100))
print('使用决策树与KNN算法结合预测准确率为{:.2%}'.format(sum(li1)/100))  # average over 100 random splits
# 将决策树筛选出的特征用于KNN，可以降低维度、加快KNN的预测速度；在独立测试集上正确评估后，准确率也可能有所提升。对于非线性分类问题，KNN虽然简单，但有时效率和准确率都相当高
#511717356@qq.com