from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn import ensemble
import numpy as np

import warnings
warnings.filterwarnings(action="ignore")

# Load the training and test splits of the UCI Adult / Census Income dataset.
# NOTE(review): the original passed both sep='\t' and delimiter=',' to
# read_csv; `delimiter` is just an alias for `sep`, and modern pandas raises
# when both are given.  The files are comma-separated, so sep=',' alone is
# correct.  (A commented-out iris/decision-tree demo was removed here.)
adult_train = pd.read_csv('data/adult.data', sep=',')
adult_test = pd.read_csv('data/adult.test', sep=',')

# # 计数值
# adult_train['num'] = 1
# train_label = adult_train['income']
# label_group = adult_train.groupby('income').count().reset_index()


# 数据预处理部分
from sklearn import preprocessing
#-------------------------------------------------------------#


# 1. Resolve label inconsistency: the test file writes the income labels
# with a trailing dot (' <=50K.'), the training file without.
income_map = {' <=50K.': ' <=50K', ' >50K.': ' >50K'}
adult_test['income'] = adult_test['income'].replace(income_map)

# 2. Drop rows with missing values (encoded as '?' in the raw data).
# NOTE(review): the original collected the rows containing '?', appended them
# back with DataFrame.append and cancelled them via drop_duplicates(keep=False).
# Besides using DataFrame.append (deprecated and removed in pandas 2.0), that
# trick also silently deletes any legitimate duplicate rows.  A boolean mask
# removes exactly the rows with missing fields and nothing else.
_CATEGORICAL_COLS = ['workclass', 'education', 'marital-status', 'occupation',
                     'relationship', 'race', 'sex', 'native-country', 'income']

def _rows_with_missing(df):
    """Return a boolean Series marking rows whose categorical fields contain '?'."""
    mask = pd.Series(False, index=df.index)
    for column in _CATEGORICAL_COLS:
        # regex=False: match a literal '?', no escaping required
        mask |= df[column].str.contains('?', regex=False)
    return mask

adult_train = adult_train[~_rows_with_missing(adult_train)]
adult_test = adult_test[~_rows_with_missing(adult_test)]

# Split the training set into feature columns and the label column.
# NOTE(review): features are sliced [2:-1], i.e. the first two columns are
# deliberately excluded — confirm this matches the intended data layout.
column_names = list(adult_train.columns)
feature_cols = column_names[2:-1]
label_cols = column_names[-1:]

train_attr = adult_train[feature_cols]
train_label = adult_train[label_cols]

# Apply the same split to the test set
test_attr = adult_test[feature_cols]
test_label = adult_test[label_cols]

# 3. Convert nominal (string) attributes to integer codes.
# NOTE(review): the original repeated the same fit/transform triplet for each
# column; a loop removes the duplication while keeping the behaviour — each
# encoder is fitted on the TRAINING values of that column and then applied to
# both splits.  A category that appears only in the test data would raise
# ValueError in transform(); LabelEncoder sorts its classes, so the codes are
# deterministic for a given value set.
_NOMINAL_COLS = ['native-country', 'sex', 'education', 'marital-status',
                 'occupation', 'relationship', 'race']

for _column in _NOMINAL_COLS:
    le = preprocessing.LabelEncoder()
    le.fit(list(train_attr[_column].unique()))
    train_attr[_column] = le.transform(list(train_attr[_column]))
    test_attr[_column] = le.transform(list(test_attr[_column]))

# Encode the target label.  Train and test are fitted separately, as in the
# original; because LabelEncoder sorts its classes, ' <=50K' / ' >50K' map to
# the same integer codes in both splits.
le = preprocessing.LabelEncoder()
le.fit(list(train_label['income'].unique()))
train_label = le.transform(list(train_label['income']))
le.fit(list(test_label['income'].unique()))
test_label = le.transform(list(test_label['income']))


# # 使用信息熵模型
# decision_tree_model = tree.DecisionTreeClassifier(criterion='gini')
# decision_tree_model.fit(train_attr,train_label)
# print("decision tree cross val using gini without normalizaiton: ")
# print(np.mean(cross_val_score(decision_tree_model,test_attr,test_label,scoring='accuracy',cv=10,n_jobs=-1)))
# 去除无效字段
## 删除fnlwgt列，该列信息熵较高，不适宜用于分类
# drop_train_attr = train_attr.drop('fnlwgt',axis=1)


# 4. Feature scaling
## Z-score standardisation: zero mean and unit variance per column.
## StandardScaler().fit_transform(X) is equivalent to preprocessing.scale(X)
## with default arguments.
normal_train_attr = preprocessing.StandardScaler().fit_transform(train_attr)

#-------------------------------------------------------------#

# Train decision-tree models and compare performance with/without scaling.
# NOTE(review): the original wrapped the header prints in print(print(...)),
# which emitted a stray "None" line, and the messages claimed "entropy" while
# criterion='gini' was used.  Both messages now match the code.
decision_tree_model = tree.DecisionTreeClassifier(criterion='gini')
decision_tree_model.fit(train_attr, train_label)
print("decision tree cross val using gini without normalizaiton: ")
print(np.mean(cross_val_score(decision_tree_model, test_attr, test_label, scoring='f1', cv=10, n_jobs=-1)))

decision_tree_model = tree.DecisionTreeClassifier(criterion='gini')
decision_tree_model.fit(normal_train_attr, train_label)
print("decision tree cross val using gini with normalizaiton: ")
print(np.mean(cross_val_score(decision_tree_model, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=-1)))

## Decision-tree pruning experiment (best depth previously found to be 5-6).
# NOTE(review): the original body referenced a free variable `i` left over
# from a commented-out tuning loop, so a call would pick up a stale global
# (or raise NameError).  `i` is now an explicit parameter with a default.
def decision_tree(i=10):
    """Fit a pruned decision tree (min_samples_leaf=i) and print its CV F1 score.

    Uses the module-level normal_train_attr/train_label/test_attr/test_label.
    """
    decision_tree_model = tree.DecisionTreeClassifier(criterion='gini', max_depth=6,
                                                      min_samples_leaf=i,
                                                      min_samples_split=10,
                                                      max_leaf_nodes=18)
    decision_tree_model.fit(normal_train_attr, train_label)
    print("decision tree cross val using gini with normalizaiton: depth: " + str(i))
    print(np.mean(cross_val_score(decision_tree_model, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=1)))



# from sklearn import feature_selection
# # 特征筛选
# fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=5)
# train_attr_fs = fs.fit_transform(train_attr, train_label)
# # 使用gini指数模型
# decision_tree_model = tree.DecisionTreeClassifier(criterion='gini')
# decision_tree_model.fit(train_attr_fs,train_label)
# test_attr_fs = fs.transform(test_attr)
#
# print("decision tree cross val with gini: ")
# print(np.mean(cross_val_score(decision_tree_model,test_attr_fs,test_label,scoring='accuracy',cv=10,n_jobs=-1)))

# # 通过交叉验证的方法，按照固定间隔的百分比筛选特征，并作图展示性能随特征筛选比例的变化
# percentiles = range(1, 100, 2)
# results = []
#
# for i in percentiles:
#     fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)  # chi2卡方检验
#     X_train_fs = fs.fit_transform(train_attr, train_label)
#     scores = np.mean(cross_val_score(decision_tree_model,X_train_fs,train_label,scoring='accuracy',cv=10,n_jobs=-1))  # cv:交叉验证的迭代词数
#     results = np.append(results, scores.mean())
#
# print(results)

# Decision-tree visualisation: render the most recently trained
# decision_tree_model to "decision tree.pdf" via Graphviz.
# NOTE(review): pydotplus and IPython are third-party dependencies, and
# `Image` is imported but never used in this script.
import pydotplus
from IPython.display import Image
dot_data = tree.export_graphviz(decision_tree_model, out_file=None,filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("decision tree.pdf")

#-------------------------------------------------------------#

# SVM (support-vector machine)

## Effect of feature scaling on model performance
### Before scaling
clf_rbf = svm.SVC(kernel='rbf')
clf_rbf.fit(train_attr, train_label)
print("SVM cross val without normalization")
raw_scores = cross_val_score(clf_rbf, test_attr, test_label, scoring='f1', cv=10, n_jobs=-1)
print(raw_scores.mean())

### After scaling (clf_rbf is reused by the evaluation further below)
clf_rbf = svm.SVC(kernel='rbf')
clf_rbf.fit(normal_train_attr, train_label)
print("SVM cross val with normalization")
scaled_scores = cross_val_score(clf_rbf, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=-1)
print(scaled_scores.mean())

def svn():
    """Fit an RBF SVM on the raw training data and print its CV accuracy.

    Uses the module-level train_attr/train_label/test_attr/test_label.
    """
    model = svm.SVC(kernel='rbf')
    model.fit(train_attr, train_label)
    print("SVM cross val")
    scores = cross_val_score(model, test_attr, test_label, scoring='accuracy', cv=10, n_jobs=-1)
    print(np.mean(scores))

# Re-evaluate the scaled-fit SVM, this time with accuracy scoring
# (reuses the clf_rbf instance created in the comparison above).
clf_rbf.fit(normal_train_attr, train_label)
print("SVM cross val with preprocessing")
acc_scores = cross_val_score(clf_rbf, preprocessing.scale(test_attr), test_label, scoring='accuracy', cv=10, n_jobs=-1)
print(acc_scores.mean())

#-------------------------------------------------------------#

# K-NN (k-nearest neighbours)

# Compare K-NN with and without feature scaling
## Raw features
clf_knn = KNeighborsClassifier()
clf_knn.fit(train_attr, train_label)
print("K-NN cross val without normalization")
print(cross_val_score(clf_knn, test_attr, test_label, scoring='f1', cv=10, n_jobs=1).mean())

## Scaled features
clf_knn = KNeighborsClassifier()
clf_knn.fit(normal_train_attr, train_label)
print("K-NN cross val with normalization")
print(cross_val_score(clf_knn, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=1).mean())

# Sweep the neighbour count to find the best value
# (loop variable intentionally kept as the module-level `i`)
for i in range(1, 50):
    model = KNeighborsClassifier(n_neighbors=i)
    model.fit(normal_train_attr, train_label)
    print("K-NN cross val with i = " + str(i))
    print(cross_val_score(model, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=-1).mean())

# Timing-analysis helper
def knn():
    """Fit a 20-NN classifier on the scaled data and print its CV F1 score."""
    model = KNeighborsClassifier(n_neighbors=20)
    model.fit(normal_train_attr, train_label)
    scores = cross_val_score(model, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=-1)
    print(np.mean(scores))


### Ensemble methods

# Random forest
rf = ensemble.RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=7)
rf.fit(normal_train_attr, train_label)
print("Random forest cross val")
rf_scores = cross_val_score(rf, preprocessing.scale(test_attr), test_label, scoring='f1', cv=10, n_jobs=-1)
print(rf_scores.mean())

# AdaBoost: sweep the number of estimators on the scaled data
# (loop variable intentionally kept as the module-level `i`)
for i in range(1, 200):
    model = ensemble.AdaBoostClassifier(n_estimators=i)
    model.fit(normal_train_attr, train_label)
    print("Adaboost cross val with normalization with i = " + str(i))
    print(cross_val_score(model, preprocessing.scale(test_attr), test_label, scoring="f1", cv=10, n_jobs=-1).mean())
## AdaBoost with the tuned estimator count (72, from the sweep above)
def adaboost():
    """Fit AdaBoost with 72 estimators on the scaled data and print its CV F1.

    NOTE(review): the original message printed a leftover global loop
    variable `i` instead of the actual estimator count; it now reports 72.
    """
    n_estimators = 72
    ada = ensemble.AdaBoostClassifier(n_estimators=n_estimators)
    ada.fit(normal_train_attr, train_label)
    print("Adaboost cross val with normalization with i = " + str(n_estimators))
    print(np.mean(cross_val_score(ada, preprocessing.scale(test_attr), test_label, scoring="f1", cv=10, n_jobs=-1)))
# GBRT (gradient-boosted trees)
def gbrt():
    """Fit gradient boosting on the RAW (unscaled) features and print its CV F1.

    NOTE(review): the original message said "with normalization" although the
    model is trained on the unscaled train_attr; the message now matches the code.
    """
    model = ensemble.GradientBoostingClassifier(n_estimators=100)
    model.fit(train_attr, train_label)
    print("GBRT cross val without normalization")
    print(np.mean(cross_val_score(model, test_attr, test_label, scoring='f1', cv=10, n_jobs=-1)))






