# coding:utf8
import cv2
import matplotlib
import pydotplus
import os
import pandas as pd
from keras import Sequential
from keras.layers import Dense, Dropout
from keras import regularizers
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
from keras.losses import mse,binary_crossentropy,sparse_categorical_crossentropy
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.svm import SVC
from sklearn.tree import export_graphviz
from sqlalchemy import create_engine
import numpy as np
import matplotlib.pyplot as plt
from keras.utils.vis_utils import plot_model

# Make Graphviz and the Oracle instant client discoverable via PATH.
for _extra_dir in (
        r"D:\Program Files\graphiz\bin",
        r'D:\hiicy\demad\instantclient-basic-windows.x64-12.2.0.1.0'):
    os.environ['PATH'] += os.pathsep + _extra_dir

# Fixed seed so the train/test split and model initialisation are reproducible.
SEED = 22
np.random.seed(SEED)


class Entry(object):
    """Load a tabular dataset and train/evaluate one of several classifiers.

    Supported trainers: linear SVM (``svm``), random forest
    (``random_forest``) and a small Keras neural network (``neurl_net``).
    The last column of the loaded table is treated as the label; every
    other column is a feature.
    """

    def __init__(self, addr=None, data_from=None, **kwargs):
        """
        :param addr: data location — a CSV file path or an SQLAlchemy URL.
        :param data_from: source kind: "csv" (default), "oracle"/"mysql",
            or "sdb" (SequoiaDB — currently not implemented).
        :param kwargs: extra reader arguments (e.g. ``table_name`` or
            ``sql`` for database sources).
        """
        self.addr = addr
        self.data_from = data_from
        self.kwargs = kwargs

    def read_data(self):
        """Load the dataset and split it 8:2 into train/test parts.

        :return: ``(x_train, x_test, y_train, y_test)`` as produced by
            ``sklearn.model_selection.train_test_split``.
        :raises ValueError: if the source yields no data.
        :raises NotImplementedError: if ``data_from == "sdb"``.
        """
        test_prop = 0.2  # train : test = 8 : 2
        if self.data_from == "csv":
            data = self._read_csv()
        elif self.data_from == "sdb":
            # BUGFIX: the _read_sdb implementation was commented out, so the
            # old ``self._read_sdb(**self.kwargs)`` call raised
            # AttributeError. Fail fast with a clear message instead.
            raise NotImplementedError("SequoiaDB ('sdb') source is not implemented")
        elif self.data_from in ("oracle", "mysql"):
            data = self._read_db(**self.kwargs)
        else:
            data = self._read_csv()
        if data is None:
            # BUGFIX: previously this printed a message and returned None,
            # which made every caller crash later while unpacking the tuple.
            raise ValueError("数据为空，请输入正确的数据源")
        labels = data.iloc[:, -1]   # last column holds the label
        data = data.iloc[:, :-1]    # remaining columns are the features
        return train_test_split(data, labels, test_size=test_prop, random_state=SEED)

    def _read_csv(self):
        """Read the dataset from the CSV file at ``self.addr``."""
        # data = pd.read_excel(self.addr)
        return pd.read_csv(filepath_or_buffer=self.addr)

    def neurl_net(self, **kwargs):
        """Train a small fully-connected Keras network and report ROC-AUC.

        :param kwargs: unused; kept for interface symmetry with the other
            trainers.
        :return: None; results are printed to the console.
        """
        x_train, x_test, y_train, y_test = self.read_data()
        print(y_train)

        def _grid_find_kera(model):
            # Grid-search epochs/batch_size for a KerasClassifier wrapper
            # (kept for manual experimentation; see the commented calls below).
            param_test = {
                "epochs": range(1, 20, 3),
                "batch_size": range(8, 64, 16)
            }
            gsearch1 = GridSearchCV(estimator=model,
                                    param_grid=param_test, n_jobs=3, scoring="roc_auc", cv=5)
            gsearch1 = gsearch1.fit(x_train, y_train)
            print(f'Best_score:{gsearch1.best_score_}  Best_param:{gsearch1.best_params_}')
            print(gsearch1.cv_results_.keys())

        # def create_model():
        model = Sequential()
        model.add(Dropout(rate=0.4, seed=SEED))
        # input_dim=5: the feature matrix is expected to have 5 columns
        model.add(Dense(32, input_dim=5, kernel_regularizer=regularizers.l2(0.3), activation="relu"))
        model.add(Dropout(rate=0.5, seed=SEED))
        model.add(Dense(2, kernel_regularizer=regularizers.l2(0.03), kernel_initializer='random_uniform', activation="relu"))
        model.add(Dropout(rate=0.5, seed=SEED))
        model.add(Dense(1, kernel_regularizer=regularizers.l2(0.03), kernel_initializer="random_uniform", activation='sigmoid'))
        model.compile(optimizer='adadelta', loss=binary_crossentropy, metrics=['accuracy'])
        model.fit(x_train.values, y_train.values, epochs=3, validation_split=0.3)
        p = model.predict_proba(x_test.values)
        p = np.array([a[0] for a in p])  # flatten (n, 1) sigmoid output to (n,)
        print("Neural Network ROC-AUC score: %.3f" % roc_auc_score(y_test, p))
        # plot_model(model, to_file='net.png', show_shapes=True)
        print(model.predict_classes(x_train.values))
        # return model
        # create_model()
        # model = KerasClassifier(build_fn=create_model)
        # _grid_find_kera(model)

    def svm(self, **kwargs):
        """Train a linear-kernel SVC, report ROC-AUC and persist the model.

        SVC parameter notes (translated from the original review comments):
        - C (default 1.0): a larger C penalises misclassified points more,
          keeps outliers, yields more support vectors and a more complex
          boundary (overfit risk); a smaller C underfits.
        - degree only matters for the polynomial kernel.
        - gamma: smaller gamma generalises better but too small degenerates
          to a near-linear model; larger gamma can fit any non-linearity but
          overfits and uses fewer support vectors. The support-vector count
          drives training/prediction speed.
        - remaining SVC defaults: kernel='rbf', coef0=0.0, shrinking=True,
          probability=False, tol=1e-3, cache_size=200, class_weight=None,
          verbose=False, max_iter=-1, decision_function_shape='ovr',
          random_state=None.

        :param kwargs: extra keyword arguments forwarded to ``SVC``.
        :return: None; side effects are console output and ``svm.pkl``.
        """
        from mpl_toolkits.mplot3d import Axes3D
        x_train, x_test, y_train, y_test = self.read_data()

        def _grid_search_svm():
            # Grid-search the penalty C (kept for manual experimentation).
            param_test = {
                'C': [0.5, 0.7, 1, 1.5, 1.9, 2.5, 3, 3.5, 4, 4.5, ]
            }
            gsearch1 = GridSearchCV(estimator=SVC(probability=True, kernel='linear', random_state=SEED),
                                    param_grid=param_test, n_jobs=3, scoring="roc_auc", cv=5)
            gsearch1.fit(x_train, y_train)
            print(gsearch1.best_params_, type(gsearch1.best_params_))
            return gsearch1.best_params_

        svc = SVC(probability=True, kernel='linear', C=4.5, random_state=SEED, **kwargs)
        svc.fit(x_train, y_train)
        p = svc.predict_proba(x_test)
        print("Support vector ROC-AUC score: %.3f" % roc_auc_score(y_test, p[:, 1]))
        # cm feeds the (currently disabled) confusion-matrix plot below.
        cm = confusion_matrix(y_test, svc.predict(x_test))
        # def plt_cm(cm):
        #     plt.imshow(cm, interpolation='nearest')
        #     plt.title('confusion matrix')
        #     plt.colorbar()
        #     plt.xlabel('predict value')
        #     tick_mark = np.arange(2)
        #     plt.xticks(tick_mark, tick_mark)
        #     plt.yticks(tick_mark, tick_mark)
        #     plt.ylabel('true value')
        #     plt.show()
        # Persist the fitted model.
        joblib.dump(svc, "svm.pkl")
        # bad_list = self._find_badcase(svc, x_test, y_test)
        # With a linear kernel the decision function is y = w.x + b, so the
        # fitted coefficients can be used by the plot helpers below.
        sv_ix = svc.support_  # indices of the support vectors
        w = svc.coef_         # normal vector of the hyperplane, shape (1, n_features)
        b = svc.intercept_    # intercept, shape (1,)

        def two_dim_plot():
            # Scatter the first two features and draw the separating line.
            ax = plt.subplot(111)
            x_array = x_train.values.astype('float32')
            y_array = y_train.values.astype('float32')
            pos = x_array[np.where(y_array == 1)]
            neg = x_array[np.where(y_array == 0)]
            ax.scatter(pos[:, 0], pos[:, 1], c='r', label='pos')
            ax.scatter(neg[:, 0], neg[:, 1], c='b', label='neg')

            a = -w[0, 0] / w[0, 1]  # slope of the separating line
            xx = np.linspace(-5, 5)
            xy = a * xx - b / w[0, 1]
            plt.plot(xx, xy, 'k-', label="分割线")
            plt.xlim(-0.5, 0.7)
            plt.ylim(-0.5, 0.7)
            plt.show()

        def three_dim_plot():
            # Plot the separating plane in the first three feature dimensions.
            fig = plt.figure()
            ax = Axes3D(fig)
            ax.set_xlim()
            # Axis ticks sized for data normalised to [0, 1].
            x, y = np.arange(0, 1, 0.01), np.arange(0, 1, 0.01)
            x, y = np.meshgrid(x, y)  # grid points: [i for i in zip(x.flat, y.flat)]
            z = (w[0, 0] * x + w[0, 1] * y + b) / (-w[0, 2])
            surf = ax.plot_surface(x, y, z, rstride=1, cstride=1)

            # Scatter plot of the training points.
            x_array = x_train.values.astype('float32')
            y_array = y_train.values.astype('float32')
            pos = x_array[np.where(y_array == 1)]
            neg = x_array[np.where(y_array == 0)]
            ax.scatter(pos[:, 0], pos[:, 1], pos[:, 2], c="r", label='pos')
            ax.scatter(neg[:, 0], neg[:, 1], neg[:, 2], c="b", label='neg')

            # Highlight the support vectors.
            X = x_train.values.astype('float32')
            for i in range(len(sv_ix)):
                ax.scatter(X[sv_ix[i], 0], X[sv_ix[i], 1],
                           X[sv_ix[i], 2], s=50, c='g', marker='o',)

            ax.set_zlabel('Z')
            ax.set_ylabel('Y')
            ax.set_xlabel('X')
            ax.set_zlim([0, 1])
            plt.legend(loc='upper left')

            ax.view_init(350, 200)
            plt.show()

    def _find_badcase(self, clf, X, Y):
        """Return the row positions in ``X`` that ``clf`` misclassifies.

        :param clf: fitted classifier exposing ``predict``.
        :param X: feature matrix (DataFrame or array).
        :param Y: true labels as a pandas Series.
        :return: list of integer positions where prediction != truth.
        """
        Y = Y.values
        y_pre = clf.predict(X)
        return [i for i in range(len(X)) if y_pre[i] != Y[i]]

    def random_forest(self, **kwargs):
        """Grid-search, train and persist a random forest; export trees as PDF.

        :param kwargs: ``RandomForestClassifier`` overrides (n_estimators,
            criterion, max_depth, min_samples_split, min_samples_leaf,
            max_features, bootstrap, oob_score, n_jobs, class_weight, ...);
            explicit overrides take precedence over grid-search results.
        :return: None; side effects are console output, one ``selN.pdf``
            per tree, and ``random_forest.pkl``.
        """
        x_train, x_test, y_train, y_test = self.read_data()

        def _grid_search__forest():
            # Stage 1: tune n_estimators; stage 2: tune depth/split with the
            # stage-1 winner fixed. Returns the merged best parameters.
            param_test = {
                'n_estimators': range(10, 71, 10)
            }
            gsearch1 = GridSearchCV(estimator=RandomForestClassifier(),
                                    param_grid=param_test, scoring="roc_auc", cv=5)
            gsearch1.fit(x_train, y_train)
            print(gsearch1.best_params_)
            param_test2 = {
                'max_depth': range(4, 15, 2), 'min_samples_split': range(5, 300, 25)
            }

            gsearch2 = GridSearchCV(estimator=RandomForestClassifier(
                **gsearch1.best_params_),
                param_grid=param_test2, iid=False, scoring="roc_auc", cv=5)

            gsearch2.fit(x_train, y_train)
            print(gsearch2.best_params_)
            gsearch1.best_params_.update(gsearch2.best_params_)
            return gsearch1.best_params_

        def _print_graph(clf, feature_names, **kwargs):
            # Export every tree of a fitted random forest to selN.pdf.
            if "random_forest" in kwargs.keys():
                trees = clf.estimators_  # the forest's decision-tree list
                print(trees[0].n_classes_)
                for index, model in enumerate(trees):
                    filename = 'sel' + str(index) + '.pdf'
                    dot_data = export_graphviz(
                        model,
                        label="root",
                        proportion=True,
                        impurity=False,
                        out_file=None,
                        feature_names=feature_names,
                        class_names=self._class_name,
                        filled=True,
                        rounded=True)
                    graph = pydotplus.graph_from_dot_data(dot_data)
                    # TODO: return Image(graph.create_png()) for notebook use
                    graph.write_pdf(filename)

        best_param = _grid_search__forest()
        par = {}
        par.update(best_param)
        par.update(kwargs)  # caller overrides win over grid-search results
        # BUGFIX: the tuned parameters were computed but never applied —
        # the classifier was built with defaults only. Pass them through.
        clf = RandomForestClassifier(random_state=SEED, **par)
        clf.fit(x_train, y_train)

        p = clf.predict_proba(x_test)  # column 1 = probability of class 1
        print("Random Forest ROC-AUC score: %.3f" % roc_auc_score(y_test, p[:, 1]))
        clf.score(x_test, y_test)
        _print_graph(clf, x_train.columns, random_forest=True)
        joblib.dump(clf, "random_forest.pkl")

    def _read_db(self, **kwargs):
        """Read a table or SQL query result from the database at ``self.addr``.

        :param kwargs: must contain ``table_name`` or ``sql``; any remaining
            entries are forwarded to ``create_engine``.
        :return: a DataFrame, or None when neither key is supplied.
        """
        # BUGFIX: table_name/sql used to be forwarded into create_engine as
        # well, which rejects unknown keyword arguments. Pop them out first.
        table_name = kwargs.pop('table_name', None)
        sql = kwargs.pop('sql', None)
        engine = create_engine(self.addr, **kwargs)
        if table_name is not None:
            return pd.read_sql_table(table_name=table_name, con=engine)
        if sql is not None:
            return pd.read_sql(sql=sql, con=engine)
        print('请输入表名以获得数据....')

        return None

    # NOTE(review): a SequoiaDB reader (_read_sdb) used to live here as
    # commented-out Python-2 code (``except SDBBaseError, e`` syntax); it was
    # removed. read_data() raises NotImplementedError for data_from == "sdb".

    @property
    def _class_name(self):
        # Class-index -> display-name mapping used when exporting trees.
        return {0: "D", 1: "R"}


if __name__ == "__main__":
    # addr = r'D:\hiicy\documents\train_data.xlsx'
    csv_path = r'D:\hiicy\documents\python_decision_tree\DT_test_data.csv'
    # Defaults to the CSV reader since data_from is not given.
    runner = Entry(addr=csv_path)
    # runner.random_forest()
    runner.svm()
    runner.neurl_net()
