#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# @Time    : 2018/9/13 11:23
# @Author  : liujiantao
# @Site    :  https://www.imuo.com/a/3588be095549c760f965c71159e3aaf274c72ba0181970fc3cfe076db479b42f
# @File    : LogisticRegression_train.py
# @Software: PyCharm
from tiancheng.base.base_helper import *

from sklearn import svm

# Human-readable class names passed to classification_report; index position
# corresponds to the label value (0 -> 'label is 0', 1 -> 'label is 1').
target_names = ['label is 0', 'label is 1']


def lr_train01(X, Y, i, test01):
    """Train an SVC on a SMOTE-oversampled split of (X, Y) and score a holdout.

    Despite the file/function name, the model used here is an SVM
    (``svm.SVC``), not logistic regression.

    Parameters
    ----------
    X : array-like or DataFrame
        Training feature matrix.
    Y : array-like
        Binary labels (0/1) aligned with ``X``.
    i : int
        Random seed for the train/holdout split.
    test01 : array-like or DataFrame
        Submission-set features to produce probabilities for.

    Returns
    -------
    tuple (accuracy, sub)
        ``accuracy`` on the 10% holdout and ``sub`` = predicted
        P(label == 1) for each row of ``test01``.
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score, classification_report
    from imblearn.over_sampling import SMOTE

    # Hold out 10% of the rows for evaluation, seeded by the caller.
    X_train, X_test, y_train, y_test = train_test_split(
        X, Y, test_size=0.1, random_state=i)

    # BUGFIX: fit the scaler on the training split ONLY, then apply that
    # same transform to the holdout and submission sets. The original code
    # fit a fresh StandardScaler on X (before the split), then AGAIN on
    # X_test and AGAIN on test01 — double-scaling the holdout and scaling
    # the submission set with its own statistics, so the feature scales at
    # predict time did not match those seen at fit time.
    scaler = StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    test01 = scaler.transform(test01)

    # Oversample the minority class with SMOTE on the training split only
    # (never on the holdout). fit_resample replaces the deprecated
    # fit_sample, which was removed in imbalanced-learn >= 0.6.
    sm = SMOTE(random_state=2, k_neighbors=5)
    X_train_res, y_train_res = sm.fit_resample(X_train, y_train.ravel())

    clf = svm.SVC(probability=True)  # probability=True enables predict_proba
    clf.fit(X_train_res, y_train_res.ravel())

    # Holdout evaluation.
    y_test_pred = clf.predict(X_test)
    accuracy = accuracy_score(y_test, y_test_pred)
    print(classification_report(y_test, y_test_pred, target_names=target_names))

    # Project-specific weighted-TPR metric on holdout probabilities
    # (printed for inspection only; not returned).
    y_test_proba = clf.predict_proba(X_test)[:, 1]
    m = tpr_weight_funtion(y_test, y_test_proba)
    print(m)

    # P(label == 1) for the submission set.
    sub = clf.predict_proba(test01)[:, 1]

    return accuracy, sub


if __name__ == '__main__':
    # Load labels and raw train/test tables (helpers and path constants come
    # from the tiancheng.base.base_helper wildcard import).
    y = get_tag_train_new()[tag_hd.Tag].values
    sub = get_sub()
    train = pd.read_csv(train_data_path)
    test = pd.read_csv(test_data_path)
    train[tag_hd.Tag] = y

    cols = list(test.columns.values)
    print(cols)
    print(train.shape)
    print(test.shape)

    # res collects one probability column per round; weights collects the
    # holdout accuracy of each round for blending.
    res, weights = pd.DataFrame(), []
    for i in range(2):
        # Bagging-style round: random subset of 250 feature columns.
        cols_sample = random.sample(cols, 250)
        # All positive rows, plus an increasingly large sample of negatives
        # (1x positives in round 0, 2x in round 1).
        X_shixin = train[train[tag_hd.Tag] == 1]
        sample_size = (i + 1) * X_shixin.shape[0]
        X_normal = train[train[tag_hd.Tag] == 0].sample(sample_size, random_state=i)
        X = pd.concat([X_normal, X_shixin])
        label = X[tag_hd.Tag]
        X = X[cols_sample]
        test01 = test[cols_sample]
        m, sub = lr_train01(X, label, i, test01)
        res[i] = sub
        weights.append(m)
    print(weights)
    # Normalize column labels to 0..n-1 before blending.
    res.columns = list(range(res.shape[1]))
    blending_model(res, weights, "svc_")
    print(123)
