{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9b6dc388",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "'usr' 不是内部或外部命令，也不是可运行的程序\n",
      "或批处理文件。\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "******************** Data Info *********************\n",
      "#training data: 492, #testing_data: 243, dimension: 7\n",
      "******************* RF ********************\n",
      "training took 1.219564s!\n",
      "precision: 50.00%, recall: 57.50%\n",
      "accuracy: 83.54%\n",
      "******************* DT ********************\n",
      "training took 0.012074s!\n",
      "precision: 40.62%, recall: 65.00%\n",
      "accuracy: 78.60%\n",
      "******************* GBDT ********************\n",
      "training took 0.128225s!\n",
      "precision: 52.27%, recall: 57.50%\n",
      "accuracy: 84.36%\n"
     ]
    }
   ],
   "source": [
    "!usr/bin/env python  \n",
    "#-*- coding: utf-8 -*-  \n",
    "\n",
    "import time  \n",
    "from sklearn import metrics  \n",
    "import numpy as np  \n",
    "from numpy import *\n",
    "from sklearn import model_selection\n",
    "import pandas as pd\n",
    "pre=[]\n",
     "# Disabled alternative classifier kept for reference; the triple-quoted\n",
     "# string below is a no-op expression at runtime.\n",
     "'''\n",
     "# Multinomial Naive Bayes Classifier  \n",
     "def naive_bayes_classifier(train_x, train_y):  \n",
     "    from sklearn.naive_bayes import MultinomialNB  \n",
     "    model = MultinomialNB(alpha=0.01)  \n",
     "    model.fit(train_x, train_y)  \n",
     "    return model  \n",
     "'''\n",
    "\n",
    "# KNN Classifier  \n",
    "def knn_classifier(train_x, train_y):  \n",
    "    from sklearn.neighbors import KNeighborsClassifier  \n",
    "    model = KNeighborsClassifier(n_neighbors=10)  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "\n",
    "# Logistic Regression Classifier  \n",
    "def logistic_regression_classifier(train_x, train_y):  \n",
    "    from sklearn.linear_model import LogisticRegression  \n",
    "    model = LogisticRegression(penalty='l2')  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "\n",
    "# Random Forest Classifier  \n",
    "def random_forest_classifier(train_x, train_y):  \n",
    "    from sklearn.ensemble import RandomForestClassifier  \n",
    "    model = RandomForestClassifier(n_estimators=100)  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "\n",
    "# Decision Tree Classifier  \n",
    "def decision_tree_classifier(train_x, train_y):  \n",
    "    from sklearn import tree  \n",
    "    model = tree.DecisionTreeClassifier()  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "\n",
    "# GBDT(Gradient Boosting Decision Tree) Classifier  \n",
    "def gradient_boosting_classifier(train_x, train_y):  \n",
    "    from sklearn.ensemble import GradientBoostingClassifier  \n",
    "    model = GradientBoostingClassifier(n_estimators=40)  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "\n",
    "# SVM Classifier  \n",
    "def svm_classifier(train_x, train_y):  \n",
    "    from sklearn.svm import SVC  \n",
    "    model = SVC(kernel='rbf', probability=True)  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
    "\n",
    "# SVM Classifier using cross validation  \n",
    "def svm_cross_validation(train_x, train_y):  \n",
    "    from sklearn.grid_search import GridSearchCV  \n",
    "    from sklearn.svm import SVC  \n",
    "    model = SVC(kernel='rbf', probability=True)  \n",
    "    param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000], 'gamma': [0.001, 0.0001]}  \n",
    "    grid_search = GridSearchCV(model, param_grid, n_jobs = 1, verbose=1)  \n",
    "    grid_search.fit(train_x, train_y)  \n",
    "    best_parameters = grid_search.best_estimator_.get_params()  \n",
    "    for para, val in best_parameters.items():  \n",
    "        print (para, val)  \n",
    "    model = SVC(kernel='rbf', C=best_parameters['C'], gamma=best_parameters['gamma'], probability=True)  \n",
    "    model.fit(train_x, train_y)  \n",
    "    return model  \n",
     "# Disabled XGBoost variant kept for reference; the triple-quoted string below\n",
     "# is a no-op at runtime (would require the optional xgboost package).\n",
     "'''\n",
     "# XGBoost Classfier\n",
     "def extreme_gradient_boosting_classifier(train_x,train_y):\n",
     "    import xgboost\n",
     "    model = xgboost.XGBClassifier()\n",
     "    model.fit(train_x,train_y)\n",
     "    return model\n",
     "'''\n",
    "data = pd.read_excel(r'./test1.xlsx',header=None)\n",
    "x = data.iloc[:,:7]\n",
    "y = data.iloc[:,7]\n",
    "# read dataset\n",
    "def read_data():  \n",
    "    data = pd.read_excel(r'./test1.xlsx',header=None)\n",
    "    x = data.iloc[:,:7]\n",
    "    y = data.iloc[:,7]\n",
    "    seed = 7\n",
    "    test_size = 0.33\n",
    "    # split the dataset \n",
    "    train_x,test_x,train_y,test_y = model_selection.train_test_split \\\n",
    "        (x,y,test_size=test_size,random_state=seed)\n",
    "    return train_x, test_x, train_y, test_y  \n",
    "\n",
    "\n",
    "if __name__ == '__main__':        \n",
    "    #test_classifiers = ['NB','RF','SVM','KNN','LR','DT','GBDT','XGBDT']\n",
    "    test_classifiers = ['RF','DT','GBDT']\n",
    "    classifiers = {#'NB':naive_bayes_classifier,   \n",
    "                  #'KNN':knn_classifier,  \n",
    "                  # 'LR':logistic_regression_classifier,  \n",
    "                   'RF':random_forest_classifier,  \n",
    "                   'DT':decision_tree_classifier,  \n",
    "                  #'SVM':svm_classifier,  \n",
    "                #'SVMCV':svm_cross_validation,  \n",
    "                 'GBDT':gradient_boosting_classifier,  \n",
    "                #'XGBDT':extreme_gradient_boosting_classifier\n",
    "    }  \n",
    "\n",
    "    #print 'reading training and testing data...'  \n",
    "    train_x, test_x, train_y, test_y = read_data()  \n",
    "    num_train, num_feat = train_x.shape  \n",
    "    num_test, num_feat = test_x.shape  \n",
    "    is_binary_class = (len(np.unique(train_y)) == 2)  \n",
    "    print ('******************** Data Info *********************'  )\n",
    "    print ('#training data: %d, #testing_data: %d, dimension: %d' % (num_train, num_test, num_feat) ) \n",
    "\n",
    "    for classifier in test_classifiers:  \n",
    "        print ('******************* %s ********************' % classifier ) \n",
    "        start_time = time.time()  \n",
    "        model = classifiers[classifier](train_x, train_y) \n",
    "        print ('training took %fs!' % (time.time() - start_time) ) \n",
    "        predict = model.predict(test_x)\n",
    "        predict_all = model.predict(x)\n",
    "        pre.append(predict_all)\n",
    "        if is_binary_class:  \n",
    "            precision = metrics.precision_score(test_y, predict)  \n",
    "            recall = metrics.recall_score(test_y, predict)  \n",
    "            print ('precision: %.2f%%, recall: %.2f%%' % (100 * precision, 100 * recall))  \n",
    "        accuracy = metrics.accuracy_score(test_y, predict)  \n",
    "        print ('accuracy: %.2f%%' % (100 * accuracy)) \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "9d8bf3be",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time  \n",
    "from sklearn import metrics  \n",
    "import pandas as pd\n",
    "from numpy import *\n",
    "from sklearn import model_selection\n",
    "#from sklearn import cross_validation\n",
    "import numpy as np  \n",
    "data = pd.read_excel(r'./test1.xlsx',header=None)\n",
    "x = data.iloc[:,:7]\n",
    "y = data.iloc[:,7]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "a113f324",
   "metadata": {},
   "outputs": [],
   "source": [
    "df = pd.DataFrame(pre)\n",
    "df = df.T\n",
    "df.to_excel(excel_writer=\"tmp.xlsx\",index=False,encoding='utf-8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "19ef9897",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RandomForestClassifier()\n"
     ]
    }
   ],
   "source": [
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86b138fe",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
