from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import random

# Build a toy binary-classification example: `actual` holds the true labels
# (1 = positive class) and `predictions` holds the classifier's probability
# scores for the positive class, in the same order.
actual = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
predictions = [0.78, 0.89, 0.1, 0.1, 0.1, 0.1, 0.8, 0.6, 0.7, 0.9,
               0.1, 0.2, 0.3, 0.4, 0.23, 0.4]

# roc_curve sweeps every classification threshold and returns the false- and
# true-positive rates at each one. auc condenses the curve into a single
# summary value in [0, 1] that is easier to report; a good classifier has an
# AUC close to 1.
false_positive_rate, true_positive_rate, thresholds = roc_curve(actual, predictions)
roc_auc = auc(false_positive_rate, true_positive_rate)

# Plot FPR vs. TPR with the AUC in the legend. The dashed red diagonal marks
# the expected performance of a random (no-skill) classifier.
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate, 'b',
         label=f'AUC = {roc_auc:0.2f}')
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1.0])
plt.ylim([0, 1.0])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()