from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.svm import SVC

def tune_parameters(X, y):
    """Grid-search SVC hyper-parameters for several scoring metrics.

    Splits (X, y) into two equal halves, then for each metric
    (precision, recall, f1) runs a 5-fold cross-validated grid search
    over RBF and linear SVC parameter grids, printing the best
    estimator, the per-candidate CV scores, and a classification
    report on the held-out half.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix.
    y : array-like of shape (n_samples,)
        Class labels.
    """
    # Local imports: sklearn.cross_validation and sklearn.grid_search
    # were removed in scikit-learn 0.20; their replacements live in
    # sklearn.model_selection.
    from sklearn.model_selection import GridSearchCV, train_test_split

    # Split the dataset in two equal parts.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0)

    # Set the parameters by cross-validation.
    tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                         'C': [1, 10, 100, 1000]},
                        {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

    # The removed score_func= argument took a metric callable; the
    # modern API takes a scoring string. Macro averaging keeps the
    # metrics well-defined for multiclass targets.
    scores = ['precision', 'recall', 'f1']

    for score_name in scores:
        print("# Tuning hyper-parameters for %s" % score_name)
        print()

        # cv= belongs on the GridSearchCV constructor, not on fit().
        clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
                           scoring='%s_macro' % score_name)
        clf.fit(X_train, y_train)

        print("Best parameters set found on development set:")
        print()
        print(clf.best_estimator_)
        print()
        print("Grid scores on development set:")
        print()
        # grid_scores_ was removed; cv_results_ exposes the same data
        # as parallel arrays, one entry per parameter candidate.
        means = clf.cv_results_['mean_test_score']
        stds = clf.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, clf.cv_results_['params']):
            print("%0.3f (+/-%0.03f) for %r" % (mean, std / 2, params))
        print()

        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()