from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn import metrics
import xgboost as xgb
import numpy as np


class Predict(object):
    """
    Model training and evaluation harness.

    Fits a variety of classifiers on a fixed train/test split and prints
    accuracy plus (where the model exposes probability estimates) ROC AUC.
    The numeric figures recorded in the method docstrings are historic
    scores observed under different preprocessing pipelines and are kept
    for reference.
    """

    def __init__(self, x_train, x_test, y_train, y_test):
        # Feature matrices and label vectors for the fixed train/test split.
        self.x_train = x_train
        self.x_test = x_test
        self.y_train = y_train
        self.y_test = y_test

    def model_evalution(self, model):
        """
        Evaluate a fitted classifier on the held-out test set.

        Prints the model's accuracy (``model.score``) and, when the model
        supports probability estimates, the ROC AUC computed from the
        predicted probability of the positive class.

        NOTE(review): the method name keeps its original misspelling
        ("evalution") so existing callers outside this file keep working.

        :param model: a fitted scikit-learn-style classifier.
        :return: None (results are printed).
        """
        # Accuracy on the test split.
        print('模型得分R值(准确率): {}'.format(model.score(self.x_test, self.y_test)))

        # AUC is the area under the ROC curve and reflects ranking quality:
        # auc > 0.5 is better than a random classifier, otherwise worse.
        # An explicit capability check replaces the original broad
        # try/except, which also issued a model.predict() call whose
        # result was silently discarded.
        if hasattr(model, 'predict_proba'):
            # Column 1 holds the predicted probability of class 1.
            pre_y = model.predict_proba(self.x_test)
            print('auc: {}'.format(metrics.roc_auc_score(self.y_test, pre_y[:, 1])))

    def logistic_model(self):
        """
        Logistic regression with built-in cross-validation over C.

        Historic accuracy per preprocessing pipeline:
        null: 0.8152305825242718(535) # missing_rate=0.8
        variance filter: 0.8191747572815534(522) # missing_rate=0.8, min_std=0.5 np.logspace(-2, 2, 10)
        PCA(80): 0.8021844660194175(594) # missing_rate=0.8, min_std=0.5
        IPCA(65): 0.8094660194174758(561) # missing_rate=0.8, min_std=1
        LDA: 0.8264563106796117(488)  # (0.8290214308791327) missing_rate=0.55, min_std=0.5, no Cs needed
        FA(63): 0.8121966019417476(596) # missing_rate=0.75, min_std=0.5
        ICA: 0.8094660194174758(561) # missing_rate=0.8, min_std=1
        NMF(75): 0.8100728155339806(626) # missing_rate=0.85, min_std=0.5
        LatentDA(70): 0.8112864077669903(617) # missing_rate=0.8, min_std=0.5
        TSNE: 0.8100728155339806(626) # missing_rate=0.8, min_std=0.5
        :return:
        """
        # The Cs grid has no observable effect for the LDA pipeline
        # (an earlier run used np.logspace(-2, 2, 10)).
        lr = LogisticRegressionCV(Cs=np.logspace(-4, 1, 10))
        lr.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=lr)

    def svm_model(self):
        """
        Support vector machine classifier.

        Historic accuracy:
        LDA: 0.8264563106796117(502) # 0.6789496356391571
        :return:
        """
        # probability=True enables predict_proba so AUC can be reported.
        svm = SVC(probability=True)
        model = svm.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def tree_model(self):
        """
        Decision tree classifier.

        Historic accuracy:
        LDA: 0.7572815533980582(502) # 0.617222481482811
        :return:
        """
        dtree = DecisionTreeClassifier()
        model = dtree.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def sgd_model(self):
        """
        Linear classifier trained with stochastic gradient descent.

        Historic accuracy: 0.8234223300970874
        (default hinge loss has no predict_proba, so no AUC is printed).
        :return:
        """
        clf = SGDClassifier()
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def knn_model(self):
        """
        k-nearest-neighbours classifier (k=2, uniform weights).

        Historic accuracy/AUC: 0.8088592233009708 / 0.6769040695935193
        :return:
        """
        # Keyword form: positional use of n_neighbors is deprecated
        # in recent scikit-learn releases.
        clf = KNeighborsClassifier(n_neighbors=2, weights='uniform')
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def gpc_model(self):
        """
        Gaussian process classifier with a fixed RBF kernel.

        Slow / expensive to fit.
        Historic accuracy/AUC: 0.8264563106796117 / 0.8286056167809409
        :return:
        """
        # optimizer=None keeps the kernel hyperparameters fixed
        # instead of optimising them during fit.
        kernel = 1.0 * RBF(length_scale=1.0)
        clf = GaussianProcessClassifier(kernel=kernel, optimizer=None)
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def naive_bayes_gaussina_model(self):
        """
        Gaussian naive Bayes classifier.

        Historic accuracy/AUC: 0.8212985436893204 / 0.8290214308791327
        NOTE(review): method name keeps its original misspelling
        ("gaussina") for backward compatibility.
        :return:
        """
        clf = GaussianNB()
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def naive_bayes_bernoul_model(self):
        """
        Bernoulli naive Bayes classifier.

        Historic accuracy/AUC: 0.8100728155339806 / 0.7662831604264637
        :return:
        """
        clf = BernoulliNB()
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def nn_model(self):
        """
        Multi-layer perceptron classifier (LBFGS solver).

        Historic accuracy/AUC: 0.8270631067961165 / 0.8290280121094638
        :return:
        """
        # Local import keeps the heavier neural_network module optional.
        from sklearn.neural_network import MLPClassifier
        clf = MLPClassifier(solver='lbfgs', alpha=1e-5, random_state=0)
        model = clf.fit(self.x_train, self.y_train)

        # Evaluate on the held-out split.
        self.model_evalution(model=model)

    def tf_logistic_model(self):
        """
        Logistic (softmax) regression implemented directly in TensorFlow 2.

        Trains with plain SGD on mini-batches and prints loss/accuracy on
        the training batch every ``display_step`` steps, then final
        loss/accuracy/AUC on the test set.

        Historic accuracy/AUC: 0.824029 / 0.8290214308791327
        :return:
        """
        import tensorflow as tf

        num_classes = 2
        learning_rate = 1e-5
        training_step = 5000
        display_step = 500
        batch_size = 1000

        # Endless shuffled mini-batch pipeline over the training split.
        train_data = tf.data.Dataset.from_tensor_slices((self.x_train, self.y_train))
        train_data = train_data.repeat().shuffle(50000).batch(batch_size).prefetch(1)

        # Model parameters: one weight column and bias per class.
        w = tf.Variable(tf.ones([len(self.x_train[0]), num_classes], dtype=tf.double), name='weight')
        b = tf.Variable(tf.zeros([num_classes], dtype=tf.double), name='bias')

        def logistic_regression(x):
            # tf.nn.softmax generalises the sigmoid to multiple classes.
            return tf.nn.softmax(tf.matmul(x, w) + b)

        def cross_entropy(predict_y, y_true):
            # One-hot encode labels, clip probabilities away from 0 so
            # log() stays finite.
            y_true = tf.one_hot(y_true, depth=num_classes, dtype=tf.float64)
            predict_y = tf.clip_by_value(predict_y, 1e-9, 1)

            return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(predict_y)))

        def accuracy(y_pre, y_true):
            # Fraction of batch rows whose argmax matches the label.
            correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.cast(y_true, tf.int64))
            return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        optimizer = tf.optimizers.SGD(learning_rate)

        def run_optimization(x, y):
            # One SGD step: forward pass under the tape, then apply
            # gradients w.r.t. the weights and bias.
            with tf.GradientTape() as g:
                pred_y = logistic_regression(x)
                loss_ = cross_entropy(pred_y, y)

            gradients = g.gradient(loss_, [w, b])

            optimizer.apply_gradients(zip(gradients, [w, b]))

        for step, (batch_x, batch_y) in enumerate(train_data.take(training_step), 1):
            run_optimization(batch_x, batch_y)

            if step % display_step == 0:
                pred = logistic_regression(batch_x)
                loss = cross_entropy(pred, batch_y)
                acc = accuracy(pred, batch_y)
                print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))

        # Final evaluation on the held-out split.
        pre_y = logistic_regression(self.x_test)
        loss = cross_entropy(pre_y, self.y_test)
        acc = accuracy(pre_y, self.y_test)
        print("\nloss: %f, accuracy: %f" % (loss, acc))
        # AUC from the predicted probability of class 1.
        print('auc: {}'.format(metrics.roc_auc_score(self.y_test, pre_y[:, 1])))

    def xgboost_model(self):
        """
        Gradient-boosted trees via the native xgboost training API.

        Historic AUC: 0.8244555527635185
        :return:
        """
        d_train = xgb.DMatrix(self.x_train, self.y_train)
        d_test = xgb.DMatrix(self.x_test)

        # NOTE(review): 'silent' is deprecated/removed in newer xgboost
        # releases in favour of 'verbosity'; kept as-is for the pinned
        # version this code was tuned on.
        params = {
            'booster': 'gbtree',
            'objective': 'binary:logistic',
            'eval_metric': 'auc',
            'max_depth': 4,
            'lambda': 10,
            'subsample': 0.75,
            'colsample_bytree': 0.75,
            'min_child_weight': 2,
            'eta': 0.025,
            'seed': 0,
            'nthread': 8,
            'silent': 1,
        }
        model = xgb.train(params, d_train, num_boost_round=1000, evals=[(d_train, 'train')])

        # binary:logistic predict() returns the probability of class 1
        # directly, so it feeds roc_auc_score without slicing.
        pre_y = model.predict(d_test)

        # AUC is the area under the ROC curve and reflects ranking
        # quality: auc > 0.5 is better than a random classifier.
        print('auc: {}'.format(metrics.roc_auc_score(self.y_test, pre_y)))

    def xgboost_classifier(self):
        """
        Gradient-boosted trees via the scikit-learn-compatible wrappers.

        Historic accuracy/AUC:
        XGBClassifier:   0.8264563106796117 / 0.8273773198836917
        XGBRFClassifier: 0.8264563106796117 / 0.826819710186548
        :return:
        """
        from xgboost import XGBClassifier, XGBRFClassifier

        model_1 = XGBClassifier()
        model_2 = XGBRFClassifier()

        model_1.fit(self.x_train, self.y_train)
        model_2.fit(self.x_train, self.y_train)

        # Evaluate both fitted models on the held-out split.
        self.model_evalution(model=model_1)
        self.model_evalution(model=model_2)