# -*- coding:utf-8 -*-
from __future__ import division
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB,BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sknn.mlp import Classifier, Layer, Convolution
import numpy as np
__author__ = 'shudongma.msd(风骐)'


# Random forest
def rand_forest(X_train, y_train,X_test, y_test):
    """Grid-search a RandomForestClassifier on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters, then returns the fitted GridSearchCV object so callers can
    reuse the refit best model (previously the search was discarded).

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = RandomForestClassifier(n_estimators=200, max_depth=8,
                                 min_samples_split=5, min_samples_leaf=5,
                                 random_state=1)
    # Only random_state is searched now; the other hyper-parameter ranges
    # were explored earlier and fixed in the constructor above.
    param_grid = {
        'random_state': [1, 6, 12],
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    return grid_search

# adaboost
def ada_boost(X_train, y_train,X_test, y_test):
    """Grid-search an AdaBoostClassifier on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters, then returns the fitted GridSearchCV object so callers can
    reuse the refit best model (previously the search was discarded).

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = AdaBoostClassifier(n_estimators=300, learning_rate=1,
                             random_state=1)
    # Only n_estimators is searched now; random_state and learning_rate
    # were explored earlier and fixed in the constructor above.
    param_grid = {
        'n_estimators': [250, 200, 300],
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    return grid_search

# GradientBoosting
def gradient_boost(X_train, y_train,X_test, y_test):
    """Grid-search a GradientBoostingClassifier on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters (verbose=2 shows per-fit progress), then returns the fitted
    GridSearchCV object so callers can reuse the refit best model
    (previously the search was discarded).

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = GradientBoostingClassifier(n_estimators=1000, learning_rate=0.05,
                                     random_state=1, max_depth=8,
                                     min_samples_split=3, min_samples_leaf=2)
    # Only learning_rate is searched now; the other hyper-parameter ranges
    # were explored earlier and fixed in the constructor above.
    param_grid = {
        'learning_rate': [0.1, 0.05, 0.01],
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid,
                               verbose=2)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    # NOTE(review): clf itself is never fitted here (GridSearchCV fits
    # clones), so this only echoes the constructor's max_features setting.
    print(clf.max_features)
    return grid_search

# knn
def knn(X_train, y_train,X_test, y_test):
    """Grid-search a KNeighborsClassifier on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters, then returns the fitted GridSearchCV object so callers can
    reuse the refit best model.  The redundant `clf.fit` before the grid
    search was removed: GridSearchCV fits its own clones, so the extra fit
    only wasted work and its result was never used.

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = KNeighborsClassifier(n_neighbors=10)
    # All candidate ranges are currently disabled, so the grid search just
    # cross-validates the single configuration built above.
    param_grid = {
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    return grid_search


# Decision tree
def decision_tree(X_train, y_train,X_test, y_test):
    """Grid-search a DecisionTreeClassifier on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters, then returns the fitted GridSearchCV object so callers can
    reuse the refit best model.  The redundant `clf.fit` before the grid
    search was removed: GridSearchCV fits its own clones, so the extra fit
    only wasted work and its result was never used.

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = DecisionTreeClassifier(criterion='entropy', random_state=1,
                                 max_depth=10, min_samples_split=5,
                                 min_samples_leaf=2)
    # criterion and max_depth were explored earlier and fixed above; only
    # the leaf/split minimums are still being tuned.
    param_grid = {
        'min_samples_split': [5, 3, 2],
        'min_samples_leaf': [5, 3, 2],
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    return grid_search

# Naive Bayes
def naive_bayes(X_train, y_train,X_test, y_test):
    """Fit a Gaussian naive Bayes classifier and return its test accuracy.

    The unused `predict_proba` call was removed — its result was computed
    and then discarded, doing pure dead work on every invocation.

    Returns the fraction of test samples predicted correctly
    (mean of the element-wise y_pred == y_test comparison).
    """
    clf = GaussianNB()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    return np.mean(y_pred == y_test)


# Support vector machine
def svm(X_train, y_train,X_test, y_test):
    """Grid-search an SVC (RBF kernel) on the training data.

    Prints the best estimator, its cross-validated score and the winning
    parameters, then returns the fitted GridSearchCV object so callers can
    reuse the refit best model.  The redundant `clf.fit` before the grid
    search was removed: GridSearchCV fits its own clones, so the extra fit
    only wasted work and its result was never used.

    NOTE(review): SVC is constructed without probability=True, so
    predict_proba on the resulting model would raise — confirm callers
    only need predict / decision_function.

    X_test / y_test are accepted for signature symmetry with the other
    helpers in this file but are not used here.
    """
    clf = SVC(C=2.0, kernel='rbf', degree=5, tol=1e-3, random_state=1)
    # C and kernel were explored earlier and fixed above.  degree only
    # affects the 'poly' kernel, so with 'rbf' it is inert here.
    param_grid = {
        'degree': [5, 3, 2],
        'tol': [1e-3, 1e-4, 1e-5],
    }
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid)
    grid_search.fit(X_train, y_train)
    # print(x) with a single argument is valid in both Python 2 and 3.
    print(grid_search.best_estimator_)
    print(grid_search.best_score_)
    print(grid_search.best_params_)
    return grid_search

# Classification with logistic regression
def logistic(X_train, y_train,X_test, y_test):
    """Fit a logistic-regression classifier and return its test accuracy.

    The unused `predict_proba` call was removed — its result was computed
    and then discarded, doing pure dead work on every invocation.

    Returns the fraction of test samples predicted correctly
    (mean of the element-wise y_pred == y_test comparison).
    """
    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    return np.mean(y_pred == y_test)


# Neural network
def neural_network(X_train, y_train,X_test, y_test):
    """Train a small sknn MLP (Sigmoid->Softmax) and return test accuracy.

    The unused `predict_proba` call was removed — its result was computed
    and then discarded, doing pure dead work on every invocation.

    Expects X_train / X_test as pandas objects (their `.values` arrays are
    passed to sknn).
    """
    # sknn works on plain numpy arrays, so strip the pandas wrappers.
    X_train = X_train.values
    X_test = X_test.values
    nn = Classifier(
        layers=[
            Layer('Sigmoid', units=100, dropout=0.25),
            Layer('Softmax', dropout=0.25),
        ],
        learning_rate=0.001,
        n_iter=100)
    nn.fit(X_train, y_train)
    y_pred = nn.predict(X_test)
    # NOTE(review): sknn's predict typically returns an (n, 1) array; if
    # y_test is 1-D the comparison broadcasts to (n, n) and this "accuracy"
    # is wrong — confirm the shapes match at the call site.
    return np.mean(y_pred == y_test)