#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# @Time    : 2018/9/13 11:23
# @Author  : liujiantao
# @Site    :  https://www.imuo.com/a/3588be095549c760f965c71159e3aaf274c72ba0181970fc3cfe076db479b42f
# @File    : LogisticRegression_train.py
# @Software: PyCharm
from tiancheng.base.base_helper import *
from sklearn.linear_model import LogisticRegression
import pandas as pd
import matplotlib.pyplot as plt
target_names = ['label is 0', 'label is 1']


from sklearn.feature_selection import VarianceThreshold

threshold = 1

import itertools

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Plot the confusion matrix ``cm`` as an annotated heat map.

    Parameters
    ----------
    cm : 2-D array of counts, shape (n_classes, n_classes).
    classes : sequence of class names used as axis tick labels.
    normalize : bool; if True each row is divided by its sum so cells
        show per-class rates instead of raw counts.
    title : figure title.
    cmap : matplotlib colormap for the heat map.
    """
    # Normalize BEFORE drawing so the image and the per-cell text show
    # the same values (previously normalization ran after imshow, so
    # the colors reflected raw counts while the text was normalized).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=0)
    plt.yticks(tick_marks, classes)

    # Annotate every cell; white text on dark cells for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

def save_model(dx_tree, filename):
    """
    Persist a fitted estimator to ``<filename>.pkl`` with joblib.

    ``sklearn.externals.joblib`` was deprecated in scikit-learn 0.21
    and removed in 0.23, so import the standalone ``joblib`` package
    first and fall back to the legacy location only on old installs.
    """
    try:
        import joblib
    except ImportError:  # very old scikit-learn environments only
        from sklearn.externals import joblib
    joblib.dump(dx_tree, filename + ".pkl")


def ROC_AUC(y, pred):
    """
    Plot a ROC curve for binary labels ``y`` against the positive-class
    scores in column 1 of ``pred`` and print the area under the curve.
    """
    import matplotlib.pyplot as plt
    from sklearn.metrics import auc, roc_curve

    false_pos_rate, true_pos_rate, _ = roc_curve(y, pred[:, 1], pos_label=1)
    auc_area = auc(false_pos_rate, true_pos_rate)
    # plt.plot draws the curve itself; auc() just computes the area
    # recorded in the legend label.
    plt.plot(false_pos_rate, true_pos_rate, lw=1,
             label='ROC  (area = %0.2f)' % (auc_area))
    print("auc_area:" + str(auc_area))

def decode(input_str):
    """
    Serialize ``input_str`` to a pretty-printed JSON string, keeping
    non-ASCII (e.g. Chinese) characters readable instead of escaping
    them to ``\\uXXXX``.

    Values that are not JSON-serializable are stringified via
    ``default=str``. NOTE: despite the name, this *encodes* to JSON;
    the name is kept for backward compatibility with existing callers.
    """
    import json
    return json.dumps(input_str, ensure_ascii=False, indent=4, default=str)


# Variance-threshold feature selection: returns the input data
# restricted to the columns whose variance passes the threshold.
# Example: ll = VarianceThreshold(threshold=2).fit_transform(df_data.fillna(0.0))
def VarianceThreshold_selector(data, var_threshold=None):
    """
    Select features by variance.

    Parameters
    ----------
    data : pandas.DataFrame of numeric features (NaN-free).
    var_threshold : float or None; variance cut-off. Defaults to the
        module-level ``threshold`` so existing callers are unchanged.

    Returns
    -------
    ``data`` restricted to the surviving columns, after printing the
    selected column names (JSON) and their count.
    """
    if var_threshold is None:
        var_threshold = threshold  # module-level default
    selector = VarianceThreshold(threshold=var_threshold)
    selector.fit(data)
    select_columns = list(data.columns[selector.get_support(indices=True)].values)
    print(decode(select_columns))
    print(len(select_columns))
    return data[select_columns]


def lr_train01(X, Y, i):
    """
    Train a LogisticRegression on (X, Y) with SMOTE oversampling and
    score the module-level ``test`` frame.

    Parameters
    ----------
    X : feature matrix (array-like / DataFrame).
    Y : binary label vector aligned with X.
    i : int, seed for the train/validation split (varies per run).

    Returns
    -------
    (m, sub) : m is the competition metric from ``tpr_weight_funtion``
        on the 10% validation fold; sub is the positive-class
        probability for every row of the global ``test`` frame.
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.model_selection import train_test_split
    from imblearn.over_sampling import SMOTE
    from sklearn.metrics import accuracy_score, classification_report

    # Fit the scaler ONCE and reuse it for train, validation and test
    # so every split shares the same standardization. (Previously a
    # second scaler was re-fit on X_test alone -- double-scaling it
    # with test-fold statistics -- and ``test`` was never scaled at
    # all even though the model was trained on scaled features.)
    scaler = StandardScaler().fit(X)
    X = scaler.transform(X)
    # Hold out 10% for validation; random_state=i varies the split.
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=i)

    # SMOTE oversampling to balance the minority class in the train fold.
    sm = SMOTE(random_state=2, k_neighbors=5)
    # fit_sample was renamed fit_resample (imblearn 0.4) and removed
    # in 0.6; support both so the script runs on old and new versions.
    resample = getattr(sm, 'fit_resample', None) or sm.fit_sample
    X_train_res, y_train_res = resample(X_train, y_train.ravel())

    clf = LogisticRegression(penalty='l2', dual=False, C=2.0, fit_intercept=True, intercept_scaling=1,
                             class_weight=None, random_state=1, solver='liblinear', max_iter=3000, multi_class='ovr',
                             verbose=0, warm_start=False, n_jobs=1)
    clf.fit(X_train_res, y_train_res.ravel())

    ytestPre = clf.predict(X_test)
    # predict_proba (not predict) is required to take the positive-class
    # column -- predict returns a 1-D label array, so ``[:, 1]`` on it
    # raised IndexError in the original.
    sub = clf.predict_proba(scaler.transform(test))[:, 1]
    accuracy = accuracy_score(y_test, ytestPre)  # kept for parity; not returned

    print(classification_report(y_test, ytestPre, target_names=target_names))
    ytestPre = clf.predict_proba(X_test)[:, 1]
    m = tpr_weight_funtion(y_test, ytestPre)
    print(m)
    return m, sub


if __name__ == '__main__':
    # NOTE(review): 'thresholds' is never used below -- presumably left
    # over from an earlier experiment.
    thresholds = [0, 0.1, 0.2, 3, 4, 5]
    y = get_tag_train_new()  # pd.read_csv('input/tag_train_new.txt')
    sub = get_sub()
    train = pd.read_csv(train_data_path)
    label = get_tag_train_new()[tag_hd.Tag].values
    test = pd.read_csv(test_data_path)
    train['Tag'] = y[tag_hd.Tag]
    # tr_corr = train.corr()[tag_hd.Tag].reset_index()
    # tr_corr
    print(test.shape)
    # NOTE(review): 'Tag' is dropped from train here, yet the loop below
    # tries to filter rows by Tag -- confirm whether the drop should
    # happen after the per-class sampling instead.
    train = train.drop(['UID', 'Tag'], axis=1).fillna(-1)
    test = test.drop(['UID', 'Tag'], axis=1).fillna(-1)
    # train = train.values
    # test = test.values

    print(train.shape)
    print(test.shape)
    res, weights = pd.DataFrame(), []
    for i in range(5):
        # NOTE(review): 'train[tag_hd.Tag == 0]' indexes with the scalar
        # result of (tag_hd.Tag == 0); the intended row filter is likely
        # train[train[tag_hd.Tag] == 0] -- verify against the data schema.
        X_normal = train[train[tag_hd.Tag == 0]]
        sample_size = (i+1)*X_normal.shape[0]
        # NOTE(review): .sample raises ValueError when sample_size exceeds
        # the class size unless replace=True is passed -- confirm intent.
        X_shixin = train[train[tag_hd.Tag == 1]].sample(sample_size)
        X = pd.concat([X_normal,X_shixin])
        # NOTE(review): 'label' is the FULL label vector while X is a
        # resampled subset, so their lengths will not match inside
        # lr_train01 -- verify this is the intended pairing.
        m, sub = lr_train01(X, label,i)
        res[i] = sub
        weights.append(m)
    print(weights)
    res.columns = [i for i in range(res.shape[1])]
    # Blend the per-run probability columns, weighted by each run's metric.
    blending_model(res, weights, "logistic_")
    print(123)
