#! -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets, linear_model, metrics
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# cross_val_predict now lives in sklearn.model_selection.
from sklearn.model_selection import cross_val_predict
from sklearn.tree import DecisionTreeClassifier
'''
Disabled example: linear regression with cross-validated predictions.

lr = linear_model.LinearRegression()
boston = datasets.load_boston()
y = boston.target

# cross_val_predict returns an array of the same size as `y` where each entry
# is a prediction obtained by cross validation:
predicted = cross_val_predict(lr, boston.data, y, cv=10)
print(predicted)

fig, ax = plt.subplots()
ax.scatter(y, predicted)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
'''

# --- CART (decision tree) on the Pima Indians diabetes dataset ---
# Python 3: urllib.urlopen no longer exists; use urllib.request.urlopen.
from urllib.request import urlopen

# URL of the UCI "Pima Indians diabetes" dataset (CSV: 8 feature columns + 1 label column).
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data"
# Download the file and load the CSV as a numpy matrix.
raw_data = urlopen(url)
dataset = np.loadtxt(raw_data, delimiter=",")
#dataset = np.loadtxt("pima-indians-diabetes.data", delimiter=",")
# Separate the data from the target attribute: columns 0-7 are features,
# column 8 is the class label.
X = dataset[:, 0:8]
y = dataset[:, 8]

#print(X[1])
#print(y[1])

# Fit a CART model to the data.
model = DecisionTreeClassifier()
model.fit(X, y)
print("model", model)

# Make predictions on the training data itself (resubstitution estimate,
# so the reported accuracy is optimistic).
expected = y
predicted = model.predict(X)
# Summarize the fit of the model.
accuracy = metrics.accuracy_score(expected, predicted)
print('accuracy: %.2f%%' % (100 * accuracy))  #,"score:",model.score(X,y)

print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
#print("predict", model.predict_proba([[10,139,80,0,0,27.1,1.401,57]]))


'''
Compare KNN and linear (logistic regression) classifiers on the digits dataset.
'''
# --- KNN vs. logistic regression on the scikit-learn digits dataset ---
print(__doc__)

from sklearn import datasets, neighbors, linear_model

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

n_samples = len(X_digits)

# First 90% of the samples train, the last 10% test.
# Slice bounds must be integers: a float index such as `.9 * n_samples`
# raises TypeError on Python 3 / modern numpy.
split = int(0.9 * n_samples)
X_train = X_digits[:split]
y_train = y_digits[:split]
X_test = X_digits[split:]
y_test = y_digits[split:]

knn = neighbors.KNeighborsClassifier()
logistic = linear_model.LogisticRegression()

print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))
print('LogisticRegression score: %f'
      % logistic.fit(X_train, y_train).score(X_test, y_test))


# --- Minimal KNN example: three classes on a 1-D toy dataset ---
from sklearn.neighbors import KNeighborsClassifier

# Seven 1-D samples; classes 0, 1 and 2 occupy increasing ranges of x.
X = [[0], [1], [2], [3], [4], [4.1], [4.5]]
y = [0, 0, 1, 1, 2, 2, 2]

# 3-nearest-neighbour classifier fitted on the toy data.
classifier = KNeighborsClassifier(n_neighbors=3)
classifier.fit(X, y)

# Predicted label for a query point sitting among the class-2 samples.
print(classifier.predict([[4.21]]))
# Per-class probability estimates (one column per class) for a point
# lying between the class-1 and class-2 regions.
print(classifier.predict_proba([[2.86]]))