import numpy as np

def accuracy(y_true, y_predict):
    """Print and return the fraction of predictions that match the labels.

    Parameters
    ----------
    y_true : array-like
        Ground-truth labels.
    y_predict : array-like
        Predicted labels; must have the same length as ``y_true``.

    Returns
    -------
    float
        The accuracy in [0, 1]; 0.0 for empty inputs.

    Raises
    ------
    ValueError
        If the two inputs differ in length.
    """
    y_true = np.asarray(y_true)
    y_predict = np.asarray(y_predict)
    if y_true.shape != y_predict.shape:
        raise ValueError("y_true and y_predict must have the same shape")
    # Guard against division by zero / nan-mean on empty input.
    if y_predict.size == 0:
        acc = 0.0
    else:
        # Element-wise equality averaged over all samples.
        acc = float((y_true == y_predict).mean())
    print(acc)  # kept for backward compatibility with script output
    return acc
# Demo: two of the four predictions match the labels, so this prints 0.5.
y_true = [0, 1, 1, 0]
y_predict = [1, 0, 1, 0]
accuracy(y_true, y_predict)