# -*- coding: utf-8 -*-

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import csv_io
import sample
import matplotlib.pyplot as plt
from sklearn.cross_validation import StratifiedKFold

def plot_distribution(positive, negative):
    """Plot two collections of 2-D points on one figure and show it.

    positive is drawn as magenta dots joined by a dotted line ("mo:"),
    negative as cyan crosses joined by a dashed line ("cx--").  Each
    item is an indexable pair: item[0] is x, item[1] is y.
    """
    # Split each collection into parallel x / y coordinate lists.
    pos_xs = [point[0] for point in positive]
    pos_ys = [point[1] for point in positive]
    plt.plot(pos_xs, pos_ys, "mo:")

    neg_xs = [point[0] for point in negative]
    neg_ys = [point[1] for point in negative]
    plt.plot(neg_xs, neg_ys, 'cx--')

    plt.show()

def feature_importance():

    # header = ["driving_time", "driving_distance", "max_speed", "avg_speed", "max_deceleration", "max_acceleration", "avg_deceleration", "avg_acceleration", "intersection", \
    #           'at_count', 'at_duration', 'at_distance', 'acc_count', 'acc_duration', 'acc_distance', 'dec_count', 'dec_duration', 'dec_distance', 'os_count', 'os_duration', 'os_distance', 'hr', 'hc']

    # dataset = csv_io.read_csv("../../data/feat/try1/feature.csv", csv_io.parse_feature, delimiter=',', header=True)

    dataset = csv_io.load_csv("../../data/feat/try1/feature.csv", csv_io.parse_feature, delimiter=',', header=True)

    header = dataset[0]
    header = header[1:]

    print header

    dataset = dataset[1:]

    dataset = np.array(dataset)

    print dataset.shape

    # mark label
    feature, violation = dataset[:, 1:-2], dataset[:, -2:]

    violation = np.sum(violation, 1)

    negative = violation != 0

    label = np.ones(violation.shape)

    label[negative] = 0

    print "bad people {0}".format(np.count_nonzero(label==0))

    print "Good people {0}".format(np.count_nonzero(label))

    x_train, x_test, y_train, y_test = train_test_split(feature, label, test_size=0.5)

    x_train, y_train = sample.down_sample(x_train, y_train, 1.05)

    print "RandomForest ..."

    model = RandomForestClassifier(n_estimators=5000, n_jobs=10, criterion="entropy")

    skf = StratifiedKFold(label, 10)

    for k, (train, test) in enumerate(skf):
        x_train, y_train = sample.down_sample(feature[train], label[train])

        model.fit(x_train, y_train)

        predicted = model.predict_proba(feature[test])

        print ("[fold {0}] accuracy: {1:.5f}, auc score: {2:.5f}".format(k, model.score(feature[test], label[test]), metrics.roc_auc_score(label[test], predicted[:, 1])))

    # model.fit(x_train, y_train)
    #
    # print np.count_nonzero(y_test == 0)
    #
    # print "validation accruacy is {0}".format(model.score(x_test, y_test))
    #
    # predicted = model.predict_proba(x_test)
    #
    # print "auc score is {0}".format(metrics.roc_auc_score(y_test, predicted[:, 1]))
    #
    # print "confusion matrix"
    #
    # predicted = model.predict(x_test)
    #
    # print metrics.confusion_matrix(y_test, predicted)
    #
    # fname = "../../data/feat/try1/feature.w"
    #
    # print "write feature_importances to file {0}".format(fname)
    #
    # print model.feature_importances_
    #
    # # with open(fname, "wb") as f:
    # #     for i, value in enumerate(model.feature_importances_):
    # #         f.write("{0},{1},{2}\n".format(i, header[i], value))
    #
    # return model.feature_importances_


# Script entry point: run the cross-validated feature evaluation.
if __name__ == '__main__':

    feature_importance()
