#-*-encoding:utf-8-*-

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import csv_io
from sklearn.linear_model import LogisticRegression
import sample
import matplotlib.pyplot as plt
from sklearn.cross_validation import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB

def prepare():
    """Load the feature CSV and derive a binary label per sample.

    Returns a (feature, label) pair: `feature` is every column except the
    first (an id-like column) and the last two; `label` is 1.0 for samples
    whose last two columns sum to zero (no violations) and 0.0 otherwise.
    """
    rows = csv_io.read_csv("../../data/feat/try1/feature.csv", csv_io.parse_feature, delimiter=',', header=True)
    data = np.array(rows)

    # Column 0 is skipped; the trailing two columns hold violation counts.
    feature = data[:, 1:-2]
    violation_total = np.sum(data[:, -2:], 1)

    # Positive label (1.0) means a violation-free sample.
    label = np.where(violation_total == 0, 1.0, 0.0)

    return feature, label

def train_model_set(models, x_train, y_train, x_test, y_test):
    """Fit every model on the training split, then score each on the test split.

    Returns a list of `[index, confusion_matrix, auc]` entries, one per model,
    in the same order as `models`. AUC uses the positive-class probability.
    """
    # First pass: fit all models.
    for model in models:
        model.fit(x_train, y_train)

    # Second pass: collect confusion matrix and ROC-AUC per model.
    scores = []
    for idx, model in enumerate(models):
        cm = metrics.confusion_matrix(y_test, model.predict(x_test))

        probabilities = model.predict_proba(x_test)
        auc = metrics.roc_auc_score(y_test, probabilities[:, 1])

        scores.append([idx, cm, auc])

    return scores

def compare_different_models():
    """Run 10-fold stratified CV over four classifiers (RF, LR, DT, NB) and
    write per-fold AUC, accuracy, and three confusion-matrix-derived rates
    to CSV files under ../../data/.
    """
    X, y = prepare()

    # Candidate models, evaluated in parallel on identical folds.
    models = []
    models.append(RandomForestClassifier(n_estimators=5000, n_jobs=16, criterion='entropy'))
    models.append(LogisticRegression(penalty='l2', n_jobs=10))
    models.append(DecisionTreeClassifier())
    models.append(GaussianNB())


    # Old sklearn.cross_validation API: StratifiedKFold(labels, n_folds).
    skf = StratifiedKFold(y, 10)

    results = []
    names = ["RF", "LR", "DT", "NB"]
    for k, (train, test) in enumerate(skf):
        # data: down-sample the training split, leave the test split untouched.
        x_train, y_train = sample.down_sample(X[train], y[train])

        x_test, y_test = X[test], y[test]

        # Feature selection is fit on the training split only, then applied
        # to both splits (avoids leaking test data into selection).
        pool = sample.feature_filter(x_train, y_train)

        x_train = sample.feature_transform(pool, x_train)

        x_test = sample.feature_transform(pool, x_test)

        # Second down-sampling pass with an explicit 1.0 ratio;
        # NOTE(review): semantics of the ratio argument live in sample.py — verify.
        x_train, y_train = sample.down_sample(x_train, y_train, 1.0)

        confu_set = train_model_set(models, x_train, y_train, x_test, y_test)

        entryset = {}
        for i, item in enumerate(confu_set):
            # item = [model_index, confusion_matrix, auc]; the "{2}" slot
            # prints the whole confusion matrix despite the "accuracy" label.
            print "[fold {0}] model {1}, accuracy: {2}, auc score: {3:.5f}".format(k, names[i], item[1], item[2])
            entryset.setdefault(names[i], {})
            entry = {}
            entry.setdefault("roc", item[2])
            # Standard accuracy: (TN + TP) / total over the 2x2 matrix.
            accuracy = (item[1][0][0] + item[1][1][1]) * 1.0 / (item[1][0][0] + item[1][0][1] + item[1][1][0] + item[1][1][1])
            # Rates below are row-0 / column-1 ratios of the confusion matrix;
            # type2 is by construction 1 - type1, and type3 matches precision
            # of the positive class. NOTE(review): intended error-type
            # definitions are not documented here — confirm against the paper.
            type1 = (item[1][0][1]) * 1.0 / (item[1][0][1] + item[1][0][0])
            type2 = (item[1][0][0]) * 1.0 / (item[1][0][0] + item[1][0][1])
            type3 = (item[1][1][1]) * 1.0 / (item[1][1][1] + item[1][0][1])

            entry.setdefault("accuracy", accuracy)
            entry.setdefault("i", type1)
            entry.setdefault("ii", type2)
            entry.setdefault("iii", type3)
            entryset[names[i]] = entry
        results.append(entryset)

    # write roc: one CSV per metric, keyed by model name.
    roc_file = "../../data/multi-model-roc.csv"
    csv_io.write_roc_(roc_file, results, "roc", names)

    roc_file = "../../data/multi-model-accuracy.csv"
    csv_io.write_roc_(roc_file, results, "accuracy", names)

    roc_file = "../../data/multi-model-type1.csv"
    csv_io.write_roc_(roc_file, results, "i", names)

    roc_file = "../../data/multi-model-type2.csv"
    csv_io.write_roc_(roc_file, results, "ii", names)

    roc_file = "../../data/multi-model-type3.csv"
    csv_io.write_roc_(roc_file, results, "iii",names)

def train_ratio_set(x_train, y_train, x_test, y_test, ratios):
    """Train one RandomForest per down-sampling ratio and evaluate each.

    For every ratio, the training split is re-sampled via sample.down_sample,
    a fresh forest is fit, and accuracy, precision, and ROC-AUC are measured
    on the untouched test split.

    Returns a list of `[ratio_index, accuracy, precision, auc]` rows.
    """
    rows = []
    for idx, ratio in enumerate(ratios):
        clf = RandomForestClassifier(n_estimators=5000, criterion='entropy', n_jobs=16)
        xs, ys = sample.down_sample(x_train, y_train, ratio)
        clf.fit(xs, ys)

        # Hard predictions -> confusion matrix -> accuracy / precision.
        cm = metrics.confusion_matrix(y_test, clf.predict(x_test))
        total = cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1]
        accuracy = (cm[0][0] + cm[1][1]) * 1.0 / total
        precision = cm[1][1] * 1.0 / (cm[1][1] + cm[0][1])

        # Probabilities of the positive class drive the AUC score.
        auc = metrics.roc_auc_score(y_test, clf.predict_proba(x_test)[:, 1])

        rows.append([idx, accuracy, precision, auc])

    return rows

def compare_different_ratio():
    X, y = prepare()

    skf = StratifiedKFold(y, 10)

    results = []
    ratios = [0.1, 0.5, 1.0, 2.0, 4.0, 8.0, 10.0]
    for k, (train, test) in enumerate(skf):
        x_train, y_train = X[train], y[train]
        x_test, y_test = X[test], y[test]
        result = train_ratio_set(x_train, y_train, x_test, y_test, ratios)
        entryset = {}

        for i, item in enumerate(result):
            print "[fold {0}], ratio {1}, accuracy {2}".format(k, ratios[i], item[1])
            entry = dict()
            entry.setdefault("accuracy", item[1])
            entry.setdefault("precision", item[2])
            entry.setdefault("auc", item[3])
            entryset.setdefault(ratios[i], entry)
        results.append(entryset)

    imbalance_file = "../../data/imbalance-accuracy.csv"
    csv_io.write_roc_(imbalance_file, results, "accuracy", ratios)

    imbalance_file = "../../data/imbalance-precision.csv"
    csv_io.write_roc_(imbalance_file, results, "precision", ratios)

    imbalance_file = "../../data/imbalance-auc.csv"
    csv_io.write_roc_(imbalance_file, results, "auc", ratios)

if __name__ == '__main__':
    # Entry point: only the class-imbalance ratio sweep is enabled;
    # the model-comparison experiment is kept here for manual runs.
    # compare_different_models()
    compare_different_ratio()