import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn import datasets,linear_model
from sklearn import metrics
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import BernoulliRBM
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.externals import joblib


# Load the adult census dataset and integer-encode the categorical columns.
data = pd.read_csv('../adult.data')
for col in ['race', 'occupation', 'sex']:
    # Map each unique string to its index within the column's unique-value
    # array.  Same encoding as the original np.argwhere lookup, but built
    # once per column (O(1) per row instead of O(n_unique)).
    codes = {value: idx for idx, value in enumerate(data[col].unique())}
    data[col] = data[col].map(codes)

# Feature matrix and target.  Built AFTER the loop — the original rebuilt X
# on every iteration and was only correct because the last pass happened to
# see all transformed columns.
# NOTE(review): 'houers_per_week' looks misspelled ('hours_per_week'?) —
# confirm against the actual CSV header before renaming.
X = data[['race', 'occupation', 'houers_per_week']].copy()
y = data['sex'].copy()

# 85% train / 15% test split.
x_train, x_test, y_train, y_test = train_test_split(X, y, train_size=0.85)

# lrg = LogisticRegression()
# lrg.fit(x_train,y_train)
# print(lrg.score(x_test,y_test))

# 线性回归算法
def mx_line(train_x, train_y):
    """Fit and return a linear-regression model on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return LinearRegression().fit(train_x, train_y)

# 逻辑回归算法
def mx_log(train_x, train_y):
    """Fit and return a logistic-regression classifier on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return LogisticRegression().fit(train_x, train_y)

# 多项式朴素贝叶斯
def mx_bayes(train_x, train_y):
    """Fit and return a multinomial naive-Bayes classifier on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return MultinomialNB().fit(train_x, train_y)

# KNN 近邻算法
def mx_knn(train_x, train_y):
    """Fit and return a k-nearest-neighbours classifier on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return KNeighborsClassifier().fit(train_x, train_y)

# 随机森林算法
def mx_forest(train_x, train_y):
    """Fit and return a random-forest classifier (8 trees) on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return RandomForestClassifier(n_estimators=8).fit(train_x, train_y)

# 决策树算法
def mx_dtree(train_x, train_y):
    """Fit and return a decision-tree classifier on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return tree.DecisionTreeClassifier().fit(train_x, train_y)

# GBDT迭代决策树算法
def mx_gbdt(train_x, train_y):
    """Fit and return a gradient-boosting classifier (200 stages) on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return GradientBoostingClassifier(n_estimators=200).fit(train_x, train_y)

# SVM 向量机
def mx_svm(train_x, train_y):
    """Fit and return a support-vector classifier (default params) on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return SVC().fit(train_x, train_y)

# svm-cross向量机交叉算法,自动调参
def mx_svm_cross(train_x, train_y):
    """Grid-search an RBF SVM over C and gamma, returning the best model.

    Returns the estimator refit on the full training data with the best
    hyper-parameters found by cross-validation.
    """
    param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000],
                  'gamma': [0.001, 0.0001]}
    grid_search = GridSearchCV(SVC(kernel='rbf', probability=True),
                               param_grid, n_jobs=1, verbose=1)
    grid_search.fit(train_x, train_y)
    # GridSearchCV's default refit=True already refits the winning estimator
    # on the whole training set, so returning best_estimator_ avoids the
    # original's redundant (and expensive) third SVC fit with the same params.
    return grid_search.best_estimator_

# MLP神经网络算法
def mx_MLP(train_x, train_y):
    """Fit and return a multi-layer-perceptron classifier on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return MLPClassifier().fit(train_x, train_y)

# MLP神经网络回归算法
def mx_MLP_reg(train_x, train_y):
    """Fit and return a multi-layer-perceptron regressor on the training data."""
    # sklearn's fit() returns the estimator itself, so chain it directly.
    return MLPRegressor().fit(train_x, train_y)

# Dispatch table: short algorithm name -> model-builder function.
mxfunSgn = {'line': mx_line,
            'log': mx_log,
            'bayes': mx_bayes,
            'knn': mx_knn,
            'forest': mx_forest,
            'dtree': mx_dtree,
            'gbdt': mx_gbdt,
            'svm': mx_svm,
            'svmcr': mx_svm_cross,
            'mlp': mx_MLP,
            'mlpreg': mx_MLP_reg}

# Train each model and record its test-set score.
result = {}
for key, build in mxfunSgn.items():
    mx = build(x_train, y_train)
    result[key] = mx.score(x_test, y_test)
    print(key, result[key])

# BUG FIX: the original never populated `result` and called
# `result.values.max()` — `values` is a method that was not invoked, and
# dict values have no .max() anyway.  Report the best score instead.
print(max(result.values()))


