#!/usr/bin/env python 
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, make_scorer, average_precision_score, recall_score
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPClassifier

K_FOLDS = 10  # number of cross-validation folds used by validate_f1_on_train
N_PERCENTAGE_DATA = 100  # % of the training rows to keep (100 = use everything)
POSITIVE_LABEL = 1  # label for the positive ('M' / malignant) class after mapping
FIGURE_COUNT = 1  # NOTE(review): never read in this file — possibly vestigial


def validate_f1_on_train(clf, x, y):
    """Cross-validate *clf* on (x, y) and return the mean F1 score.

    Runs K_FOLDS-fold cross-validation, scoring each fold with the F1
    of POSITIVE_LABEL, and averages the per-fold scores.
    """
    scorer = make_scorer(f1_score, pos_label=POSITIVE_LABEL)
    fold_scores = cross_val_score(clf, x, y, cv=K_FOLDS, scoring=scorer, n_jobs=-1)
    return sum(fold_scores) / len(fold_scores)


def draw_bar_plot(title, xlabel, ylabel, xticks, x, y):
    """Draw a bar chart of *y* and save it to a file named after *title*.

    *xticks* supplies the tick labels; *x* is used only for its length
    (one bar per entry). The figure is written to disk, never shown.
    """
    positions = np.arange(len(x))
    plt.xticks(positions, xticks)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    lo, hi = min(y), max(y)
    plt.ylim((0.9 * lo, 1.1 * hi))  # small margin around the data range
    plt.title(title)
    plt.bar(positions, y)
    plt.savefig(title)  # the chart title doubles as the output file name
    plt.close()


def calc_3_scores(ytest, ypred):
    """Return (precision, recall, f1) of *ypred* against ground truth *ytest*.

    Bug fix: every sklearn metric takes (y_true, y_pred) in that order;
    the original passed (ypred, ytest), silently swapping the roles of
    truth and prediction (which exchanges precision and recall).

    NOTE(review): average_precision_score expects continuous scores, but
    ypred here are hard 0/1 labels — precision_score may be the intended
    metric for the first value; confirm before relying on it.
    """
    precision = average_precision_score(ytest, ypred, pos_label=POSITIVE_LABEL)
    recall = recall_score(ytest, ypred, pos_label=POSITIVE_LABEL, average="binary")
    f1 = f1_score(ytest, ypred, pos_label=POSITIVE_LABEL, average="binary")
    return precision, recall, f1


# read data: 30 unnamed feature columns plus a class label, no header row
headers = [f'f{i}' for i in range(1, 31)] + ['label']
train = pd.read_csv('data/cancer-data-train.csv', header=None, names=headers)
test = pd.read_csv('data/cancer-data-test.csv', header=None, names=headers)

# binarize the label in both frames: malignant ('M') -> 1, anything else -> 0
for frame in (train, test):
    frame['label'] = frame['label'].map(lambda v: 1 if v == 'M' else 0)

# optionally subsample the training set (N_PERCENTAGE_DATA=100 keeps everything)
train = train.head(int(len(train) * (N_PERCENTAGE_DATA / 100)))

xtrain = train.drop(['label'], axis=1)
ytrain = train['label']
xtest = test.drop(['label'], axis=1)
ytest = test['label']

# Part A: linear SVM — sweep the regularization strength C and cross-validate
x_axis_c = (0.01, 0.1, 1, 10, 100)
f1_scores_with_different_c = [
    validate_f1_on_train(svm.SVC(kernel='linear', C=c, random_state=42),
                         xtrain, ytrain)
    for c in x_axis_c
]
print(f1_scores_with_different_c)
# one bar per candidate C, labelled with the value swept
draw_bar_plot('Part A', 'the values of C', 'F measure',
              [str(c) for c in x_axis_c], x_axis_c,
              f1_scores_with_different_c)

# Part B: MLP — sweep hidden-layer architectures and cross-validate each
layer_sizes = [(20,), (40,), (20, 20), (40, 40)]
f1_scores_with_different_layer_sizes = [
    validate_f1_on_train(MLPClassifier(hidden_layer_sizes=sizes, random_state=42),
                         xtrain, ytrain)
    for sizes in layer_sizes
]
print(f1_scores_with_different_layer_sizes)
draw_bar_plot('Part B', 'the size of hidden layers', 'F measure',
              [str(sizes) for sizes in layer_sizes], layer_sizes,
              f1_scores_with_different_layer_sizes)

# Part C: evaluate the tuned SVM, tuned MLP and LDA on the held-out test set.
# Best hyper-parameters per the comment and Part D: C=1 for the SVM and a
# (20,) hidden layer for the MLP — the original instantiated (40, 40) here,
# contradicting both its own comment and Part D. Dead `list_of_3_scores`
# (assigned, never used) removed.
clfs = [svm.SVC(kernel='linear', C=1, random_state=42),
        MLPClassifier(hidden_layer_sizes=(20,), random_state=42),
        LinearDiscriminantAnalysis()]
for clf in clfs:
    clf.fit(xtrain, ytrain)
    ypred = clf.predict(xtest)
    p, r, f = calc_3_scores(ytest, ypred)
    # prefix the title with the part name: draw_bar_plot saves to a file named
    # after the title, and Part D uses the same classifier names, which would
    # otherwise overwrite these figures
    draw_bar_plot('Part C - ' + clf.__class__.__name__, 'metrics', '',
                  ['precision', 'recall', 'F measure'],
                  ['precision', 'recall', 'F measure'], [p, r, f])

# Part D: same evaluation as Part C, with a random-forest baseline added.
# Dead `list_of_3_scores` (assigned, never used) removed; random_state added
# to RandomForestClassifier for reproducibility, consistent with the other
# seeded classifiers in this file.
clfs = [svm.SVC(kernel='linear', C=1, random_state=42),
        MLPClassifier(hidden_layer_sizes=(20,), random_state=42),
        LinearDiscriminantAnalysis(),
        RandomForestClassifier(random_state=42)]
for clf in clfs:
    clf.fit(xtrain, ytrain)
    ypred = clf.predict(xtest)
    p, r, f = calc_3_scores(ytest, ypred)
    # 'Part D - ' prefix keeps these figures from clobbering Part C's, which
    # would otherwise share the same classifier-name file names
    draw_bar_plot('Part D - ' + clf.__class__.__name__, 'metrics', '',
                  ['precision', 'recall', 'F measure'],
                  ['precision', 'recall', 'F measure'], [p, r, f])
