#!/usr/bin/python
#coding:utf-8
'''
Implementation and evaluation of several classification methods.

Reference (evaluation metrics overview, in Chinese):
    https://blog.csdn.net/cherdw/article/details/55813071
'''
   
import time    
from sklearn import metrics    
import pickle as pickle    
import pandas as pd  
import pandas
from sklearn import model_selection
  
    
# Multinomial Naive Bayes Classifier
def naive_bayes_classifier(train_x, train_y):
    """Train a multinomial naive Bayes classifier (alpha=0.01) and return it."""
    from sklearn.naive_bayes import MultinomialNB
    clf = MultinomialNB(alpha=0.01)
    # sklearn's fit() returns the estimator itself, so this is the fitted model
    return clf.fit(train_x, train_y)
    
    
# KNN Classifier
def knn_classifier(train_x, train_y):
    """Train a k-nearest-neighbours classifier (sklearn defaults) and return it."""
    from sklearn.neighbors import KNeighborsClassifier
    clf = KNeighborsClassifier()
    return clf.fit(train_x, train_y)
    
    
# Logistic Regression Classifier
def logistic_regression_classifier(train_x, train_y):
    """Train an L2-regularised logistic regression classifier and return it."""
    from sklearn.linear_model import LogisticRegression
    clf = LogisticRegression(penalty='l2')
    return clf.fit(train_x, train_y)
    
    
# Random Forest Classifier
def random_forest_classifier(train_x, train_y):
    """Train a random forest with 8 trees and return the fitted model."""
    from sklearn.ensemble import RandomForestClassifier
    forest = RandomForestClassifier(n_estimators=8)
    return forest.fit(train_x, train_y)
    
    
# Decision Tree Classifier
def decision_tree_classifier(train_x, train_y):
    """Train a CART decision tree (sklearn defaults) and return it."""
    from sklearn.tree import DecisionTreeClassifier
    clf = DecisionTreeClassifier()
    return clf.fit(train_x, train_y)
    
    
# GBDT(Gradient Boosting Decision Tree) Classifier
def gradient_boosting_classifier(train_x, train_y):
    """Train a gradient-boosted decision-tree classifier (200 stages) and return it."""
    from sklearn.ensemble import GradientBoostingClassifier
    booster = GradientBoostingClassifier(n_estimators=200)
    return booster.fit(train_x, train_y)
    
    
# SVM Classifier
def svm_classifier(train_x, train_y):
    """Train an RBF-kernel SVM with probability estimates enabled and return it."""
    from sklearn.svm import SVC
    svm = SVC(kernel='rbf', probability=True)
    return svm.fit(train_x, train_y)
    
# SVM Classifier using cross validation
def svm_cross_validation(train_x, train_y):
    """Grid-search C and gamma for an RBF SVM, then refit with the best params.

    Prints every parameter of the best estimator found, then trains a fresh
    SVC with the winning C/gamma on the full training set and returns it.

    Note: ``sklearn.grid_search`` was removed in scikit-learn 0.20;
    ``GridSearchCV`` now lives in ``sklearn.model_selection``.
    """
    from sklearn.model_selection import GridSearchCV
    from sklearn.svm import SVC
    model = SVC(kernel='rbf', probability=True)
    param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000], 'gamma': [0.001, 0.0001]}
    grid_search = GridSearchCV(model, param_grid, n_jobs=1, verbose=1)
    grid_search.fit(train_x, train_y)
    best_parameters = grid_search.best_estimator_.get_params()
    for para, val in best_parameters.items():
        print(para, val)
    # Refit on the whole training set using the best hyper-parameters found.
    model = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True)
    model.fit(train_x, train_y)
    return model
    
def read_data(data_file):
    """Load a labelled CSV and split it 90/10 in row order into train/test.

    The 'label' column is the target; all remaining columns are features.
    Returns (train_x, train_y, test_x, test_y).
    """
    frame = pd.read_csv(data_file)
    split_at = int(len(frame) * 0.9)
    train, test = frame[:split_at], frame[split_at:]
    return (train.drop('label', axis=1), train.label,
            test.drop('label', axis=1), test.label)

def loadTxt():
    """Load the iris dataset and split it 75/25 into train and test sets.

    Returns (X_train, y_train, X_test, y_test). No random_state is fixed,
    so the split (and downstream scores) vary between runs.
    """
    from sklearn import datasets

    iris = datasets.load_iris()
    features, labels = iris.data, iris.target
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        features, labels, test_size=0.25)
    return X_train, y_train, X_test, y_test
        
if __name__ == '__main__':
    thresh = 0.5
    # Set to a path (e.g. 'models.pkl') to pickle every trained model at the end.
    model_save_file = None
    model_save = {}

    test_classifiers = ['NB', 'KNN', 'LR', 'RF', 'DT', 'SVM', 'GBDT']
    classifiers = {
        'NB': naive_bayes_classifier,
        'KNN': knn_classifier,
        'LR': logistic_regression_classifier,
        'RF': random_forest_classifier,
        'DT': decision_tree_classifier,
        'SVM': svm_classifier,
        # 'SVMCV': svm_cross_validation,  # disabled: grid search is slow
        'GBDT': gradient_boosting_classifier,
    }

    print('reading training and testing data...')
    train_x, train_y, test_x, test_y = loadTxt()
    test_y = [int(i) for i in test_y]

    for classifier in test_classifiers:
        print('******************* %s ********************' % classifier)
        start_time = time.time()
        model = classifiers[classifier](train_x, train_y)
        print('training took %fs!' % (time.time() - start_time))
        predict = model.predict(test_x)
        predict = [int(i) for i in predict]

        if model_save_file is not None:
            model_save[classifier] = model

        # `average` parameter for multi-class precision/recall:
        #   'macro'    - unweighted mean over classes; every class counts equally,
        #                so small classes influence the score as much as large ones
        #   'weighted' - per-class scores weighted by class support (for imbalance)
        #   'micro'    - global counts over all samples; large classes dominate
        #                (used in multilabel settings)
        #   'samples'  - per-sample averaging, for multilabel problems
        #   None       - return one score per class as an array
        precision = metrics.precision_score(test_y, predict, average='macro')
        # Recall: fraction of the true positives that were actually retrieved.
        recall = metrics.recall_score(test_y, predict, average='macro')
        print('precision: %.2f%%, recall: %.2f%%' % (100 * precision, 100 * recall))

        # accuracy_score: normalize=True (default) returns the fraction correct;
        # normalize=False would return the raw count of correct predictions.
        accuracy = metrics.accuracy_score(test_y, predict)
        print('accuracy: %.2f%%' % (100 * accuracy))

    if model_save_file is not None:
        # `with` ensures the file handle is closed even if pickling fails
        # (the original left the handle open).
        with open(model_save_file, 'wb') as f:
            pickle.dump(model_save, f)