# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 19:42:05 2018

@author: JimmyMo
"""

import numpy as np
import sklearn.datasets
import matplotlib.pyplot as plt
import sklearn.linear_model
import sklearn.metrics

def plot_decision_boundary(pred_func, X=None, y=None, h=0.01):
    """Plot a binary classifier's decision regions over 2-D data.

    Evaluates ``pred_func`` on a dense grid covering the data range,
    fills the two class regions with ``contourf``, then overlays the
    sample points colored by their true label.

    Parameters
    ----------
    pred_func : callable
        Maps an (n, 2) array of points to an array of predicted labels.
    X : array-like of shape (n_samples, 2), optional
        Feature matrix. Falls back to the module-level ``X`` when omitted,
        keeping the original global-based call working unchanged.
    y : array-like of shape (n_samples,), optional
        True class labels. Falls back to the module-level ``y`` when omitted.
    h : float, optional
        Grid step size; smaller values draw a smoother boundary.
    """
    # Backward-compatible fallback to the script's module-level data.
    if X is None:
        X = globals()["X"]
    if y is None:
        y = globals()["y"]

    # Pad the plotting window slightly beyond the data extent.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

    # meshgrid builds the coordinate matrices of the evaluation grid.
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    # Flatten the grid to an (n, 2) point list (np.c_ stacks the two
    # raveled coordinate arrays column-wise), predict every grid point,
    # then reshape the labels back to the grid shape for contour plotting.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Z holds 0/1 labels; contourf treats them as "heights" and fills
    # each class region, then the samples are scattered on top.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

# --- Experiment: logistic regression on a synthetic 2-D dataset ---
# Generate a small binary-classification problem, hold out the last 10%
# of samples for testing, fit a logistic regression, report predictions
# and accuracy, and draw the learned decision boundary.

n_samples = 10
sampleRatio = .9  # fraction of samples used for training
sampleBoundary = int(n_samples * sampleRatio)  # train/test split index

# Two informative features, one cluster per class, no redundant features,
# so the data is plottable in 2-D and roughly linearly separable.
X, y = sklearn.datasets.make_classification(n_samples, n_features=2, n_redundant=0,
                             n_clusters_per_class=1, n_classes=2)

# Simple positional split: head of the array trains, tail tests.
training_features = X[:sampleBoundary]
training_targets = y[:sampleBoundary]

test_features = X[sampleBoundary:]
test_targets = y[sampleBoundary:]

logisticRegression = sklearn.linear_model.LogisticRegression()
logisticRegression.fit(training_features, training_targets)

predict_targets = logisticRegression.predict(test_features)

print('actual  targets is ...' + str(test_targets))
print('predict targets is ...' + str(predict_targets))
print('logisticRegression _coef is ...' + str(logisticRegression.coef_))

# Draw the filled decision regions plus the sample points.
plot_decision_boundary(lambda x: logisticRegression.predict(x))

accuracy = sklearn.metrics.accuracy_score(test_targets, predict_targets)
print('accuracy is ...' + str(accuracy))

# Fix: the figure was built but never displayed when run as a script.
plt.show()