from matplotlib import colors
from matplotlib.colors import ListedColormap
# from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import numpy as np
import sklearn.cross_validation as cv
from sklearn.metrics import roc_curve,auc
from sklearn.model_selection import learning_curve
# Cobbled together from examples found online.
# Heat-style palette (light gray fading into orange) used as the default
# colormap for the classification-report heatmap below.
ddl_heat = [
    '#DBDBDB', '#DCD5CC', '#DCCEBE', '#DDC8AF', '#DEC2A0', '#DEBB91',
    '#DFB583', '#DFAE74', '#E0A865', '#E1A256', '#E19B48', '#E29539',
]
ddlheatmap = ListedColormap(ddl_heat)
def plot_classification_report(cr, title=None, cmap=ddlheatmap):
    """Render the text output of sklearn.metrics.classification_report as a heatmap.

    Parameters
    ----------
    cr : str
        The report string returned by ``classification_report(y_true, y_pred)``,
        i.e. the per-class precision / recall / f1-score / support table.
    title : str, optional
        Figure title; defaults to ``'Classification report'``.
    cmap : matplotlib colormap, optional
        Colormap used for the heatmap cells.

    Each class becomes one row; the columns are precision, recall and
    f1-score (the trailing support column is dropped).
    """
    title = title or 'Classification report'
    lines = cr.split('\n')
    classes = []
    matrix = []
    # Skip the two header lines and the trailing average/blank lines.
    # NOTE(review): this slice assumes the classic report layout; newer
    # sklearn versions append 'accuracy'/'macro avg' rows — verify if needed.
    for line in lines[2:(len(lines) - 3)]:
        s = line.split()
        if not s:
            # Tolerate stray blank lines inside the body instead of crashing.
            continue
        classes.append(s[0])
        # Keep precision/recall/f1 and drop the final 'support' column.
        matrix.append([float(x) for x in s[1:len(s) - 1]])
    measures = ['precision', 'recall', 'f1-score']
    fig, ax = plt.subplots(1)
    # Annotate every cell with its value.  The original looped over
    # range(len(matrix) + 1) columns, which only matched the 3 measure
    # columns when there were exactly two classes; iterate the actual
    # column count of each row instead.
    for row in range(len(classes)):
        for column in range(len(matrix[row])):
            ax.text(column, row, matrix[row][column], va='center', ha='center')
    plt.imshow(matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # Tick counts must match the label lists (3 measures, len(classes) rows);
    # the original used arange(len(classes) + 1) for the x axis.
    plt.xticks(np.arange(len(measures)), measures, rotation=45)
    plt.yticks(np.arange(len(classes)), classes)
    plt.ylabel('Classes')
    plt.xlabel('Measures')
    plt.show()
# cr = classification_report(y_true, y_pred)
# plot_classification_report(cr)
def get_preds(attributes, targets, model):
    """Fit *model* on a 70/30 train/test split and return (y_true, y_pred).

    Executes classification or regression using the specified model and
    returns expected and predicted values.  Useful for comparison plotting!

    Parameters
    ----------
    attributes : array-like
        Feature matrix.
    targets : array-like
        Target values aligned with *attributes*.
    model : estimator
        Any object implementing ``fit`` and ``predict``.

    Returns
    -------
    tuple
        ``(y_true, y_pred)`` for the held-out 30% test split.
    """
    # sklearn.cross_validation was removed in scikit-learn 0.20; the same
    # train_test_split lives in sklearn.model_selection (available since 0.18).
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        attributes, targets, test_size=0.3)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return (y_test, y_pred)
    
def roc_plot(y, yhats, models):
    """Plot the ROC curve (with AUC in the legend) for a single model.

    Parameters
    ----------
    y : array-like
        Ground-truth labels of the test set.
    yhats : array-like
        Scores/predictions produced by the model on the test set.
    models : list of one str
        Single-element list holding the model's display name.
    """
    f, ax = plt.subplots(1, sharey=True)
    yhat, m = yhats, models[0]
    false_positive_rate, true_positive_rate, thresholds = roc_curve(y, yhat)
    roc_auc = auc(false_positive_rate, true_positive_rate)
    ax.set_title('ROC for %s' % m)
    ax.plot(false_positive_rate, true_positive_rate,
            c='#2B94E9', label='AUC = %0.2f' % roc_auc)
    ax.legend(loc='lower right')
    # Diagonal reference line for a no-skill classifier.
    ax.plot([0, 1], [0, 1], 'm--', c='#666666')
    plt.xlim([0, 1])
    plt.ylim([0, 1.1])
    # Reuse the AUC computed above instead of recomputing it.
    print("auc score:", roc_auc)
    plt.show()
# y_true_svc, y_pred_svc = get_preds(stdfeatures, labels, LinearSVC())
# y_true_knn, y_pred_knn = get_preds(stdfeatures, labels, KNeighborsClassifier())
# roc_plot(y_true_svc, y_pred_svc, ['LinearSVC'])
# roc_plot(y_true_knn, y_pred_knn, ['KNeighborsClassifier'])

def error_mean_plot(mods, X, y):
    """Scatter cross-validated predictions against measured values.

    Parameters
    ----------
    mods : estimator
        Model implementing ``fit``/``predict`` (its repr is used in the title).
    X : array-like
        Feature matrix.
    y : array-like
        Ground-truth targets; must support ``.min()``/``.max()``
        (e.g. a numpy array) for the reference line.
    """
    # sklearn.cross_validation was removed in scikit-learn 0.20; the same
    # cross_val_predict lives in sklearn.model_selection (available since 0.18).
    from sklearn.model_selection import cross_val_predict
    f, ax = plt.subplots(1, sharex=True, sharey=True)
    predicted = cross_val_predict(mods, X, y, cv=12)
    ax.scatter(y, predicted, c='#F2BE2C')
    ax.set_title('Prediction Error for %s' % mods)
    # Identity line: a perfect model's points would all fall on it.
    ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4, c='#2B94E9')
    ax.set_ylabel('Predicted')
    plt.xlabel('Measured')
    plt.show()
# models = [Ridge(), SVR(), RANSACRegressor()]
# for model in models:
#     error_mean_plot(model, features, labels)

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds,
          - an object to be used as a cross-validation generator,
          - an iterable yielding train/test splits.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a
        classifier or if ``y`` is neither binary nor multiclass,
        :class:`KFold` is used.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1); -1 uses every
        CPU core. NOTE: when running in parallel, the calling code must
        be guarded by ``if __name__ == '__main__':``.

    train_sizes : array-like, optional
        Fractions of the training set used for each point on the curve.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")

    sizes, tr_scores, val_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    val_mean = np.mean(val_scores, axis=1)
    val_std = np.std(val_scores, axis=1)

    plt.grid()
    # Shaded bands show +/- one standard deviation around each curve.
    plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std,
                     alpha=0.1, color="r")
    plt.fill_between(sizes, val_mean - val_std, val_mean + val_std,
                     alpha=0.1, color="g")
    plt.plot(sizes, tr_mean, 'o-', color="r", label="Training score")
    plt.plot(sizes, val_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    plt.show()