import sys
import math
from sklearn.datasets import load_svmlight_file
from sklearn import svm
from sklearn import cross_validation
from sklearn import preprocessing

def Output(msg, data, symbol):
    """Print the max, mean, and population standard deviation of ``data``.

    Parameters:
        msg: label inserted into the "Max"/"Mean" header lines.
        data: sequence of numbers (here: error magnitudes).
        symbol: unit suffix appended to each value, e.g. '%' or ' '.
    """
    # Guard: an empty sequence would divide by zero below.  This can
    # happen, e.g. when every actual value is 0 and relative_error
    # never receives an entry.
    if not data:
        print("No data for {0}".format(msg))
        return

    # Renamed from 'max' to avoid shadowing the builtin; the original
    # also floored the maximum at 0, which silently hid all-negative data.
    peak = max(data)
    # float() guards against Python 2 integer division when data are ints.
    mean = sum(data) / float(len(data))
    # Population (not sample) standard deviation, matching the original.
    sd = math.sqrt(sum((e - mean) ** 2 for e in data) / float(len(data)))

    # print(...) with a single argument works in both Python 2 and 3.
    print("Max {0} {1:.2f}{2}".format(msg, peak, symbol))
    print("Mean {0} {1:.2f}{2}".format(msg, mean, symbol))
    print("Standard Deviation {0:.2f}{1}".format(sd, symbol))

if len(sys.argv) != 2:
    print("cv.py <data file path>")
    sys.exit()

# Use support vector regression.
clf = svm.SVR(C=1)
X, y = load_svmlight_file(sys.argv[1])

# 5-fold cross validation over the row indices.
# NOTE(review): sklearn.cross_validation is deprecated; the same API
# lives in sklearn.model_selection on modern scikit-learn -- confirm
# the installed version before upgrading the import.
kfold = cross_validation.KFold(n=X.shape[0], n_folds=5)
relative_error = []  # per-sample |error| / actual * 100 (only when actual != 0)
absolute_error = []  # per-sample |predicted - actual|
for train, test in kfold:
    # Train on the training folds; SVR.fit needs a dense array.
    clf.fit(X[train].toarray(), y[train])
    # Predict on the held-out fold.
    predicted = clf.predict(X[test].toarray())

    actual = y[test]
    for i in range(len(predicted)):
        diff = abs(predicted[i] - actual[i])
        absolute_error.append(diff)
        if actual[i] != 0:
            relative_error.append(diff / actual[i] * 100)
            # The original printed this ratio unguarded and crashed
            # with ZeroDivisionError whenever the actual value was 0.
            print("{0} {1} {2} {3}".format(
                actual[i], predicted[i],
                actual[i] - predicted[i],
                (actual[i] - predicted[i]) / actual[i]))
        else:
            # Relative error is undefined for a zero actual value.
            print("{0} {1} {2} n/a".format(
                actual[i], predicted[i], actual[i] - predicted[i]))

Output("absolute error", absolute_error, ' ')
Output("relative error", relative_error, '%')
