from sklearn.svm import SVC
import pylab as pl
import svm_tool_method as stm
import numpy as np
from sklearn import model_selection
from sklearn import metrics
import matplotlib.pyplot as plt
import matplotlib
# Use the KaiTi font so CJK labels render correctly in matplotlib figures.
matplotlib.rcParams['font.sans-serif'] = ['KaiTi']
import warnings
warnings.filterwarnings('ignore')  # "error", "ignore", "always", "default", "module" or "once"

# Candidate classifier grid over the C-SVC penalty C and the class-1 weight.
# A larger C punishes slack variables (misclassifications) harder: training
# accuracy rises but generalization weakens. A smaller C tolerates errors as
# noise and generalizes better. class_weight maps a class label to a
# multiplier on C for that class (weight * C).
clfs = [
    SVC(probability=True, C=penalty, class_weight=weight)
    for penalty in (1, 10, 100, 500, 1000)
    for weight in ({1: 5}, {1: 10}, {1: 15}, {1: 20}, {1: 25})
]

def svm_method(x, y):
    """Plot cross-validated AUC for the first 3 classifiers in ``clfs``.

    Computes the mean AUC of each classifier via ``ensemble_forward_pass``
    and plots a green line of AUC vs. (1-based) classifier index.

    Args:
        x: feature matrix, indexable with integer index arrays.
        y: label vector aligned with ``x``.
    """
    # Fix: the original built an unused balanced SVC (w_svm) here; removed.
    (scores, x_values) = ensemble_forward_pass(clfs, x, y, 'auc', 3)
    pl.plot(x_values, scores, color="green", label="weighted SVM")


def ensemble_forward_pass(clfs, x, y, draw, n_clfs=None):
    """Cross-validate the first ``n_clfs`` classifiers and report metrics.

    Each classifier is evaluated with 4-fold stratified cross-validation,
    scoring AUC on every held-out fold.

    Args:
        clfs: list of classifiers, wrapped by ``stm.EnsembleClfs``.
        x: feature matrix, indexable with integer index arrays (e.g. ndarray).
        y: label vector aligned with ``x``.
        draw: selects what to compute and return:
            'auc'     -> (mean-AUC array per classifier, 1-based index array)
            'roc'     -> (fpr, tpr, mean AUC) for the classifier at index 2
            otherwise -> (accuracy, recall, f1) for the classifier at index 2
        n_clfs: number of classifiers to evaluate; defaults to ``len(clfs)``.

    Returns:
        A tuple whose contents depend on ``draw`` (see above).
    """
    if n_clfs is None:  # fix: identity check instead of `== None`
        n_clfs = len(clfs)
    clf_list = stm.EnsembleClfs(clfs)
    auc_scores = np.zeros(n_clfs)
    false_positive_rate = 0
    true_positive_rate = 0
    roc_auc = 0  # fix: avoid NameError when draw == 'roc' but n_clfs <= 2
    accurancy = 0
    recall = 0
    f1 = 0
    for i in range(n_clfs):
        # Stratified K-fold cross-validation: 4 folds.
        skf = model_selection.StratifiedKFold(n_splits=4)

        # CROSS VALIDATE
        scores = []
        for train_index, test_index in skf.split(x, y):
            X_train, X_test = x[train_index], x[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # Fit the i-th classifier on the training fold.
            clf_list.fit(X_train, y_train, i)
            # Predict on the held-out fold.
            y_pred = clf_list.predict(X_test)

            scores += [metrics.roc_auc_score(y_test, y_pred)]
        # NOTE(review): the 'evolution' and 'roc' branches score only the
        # LAST fold's y_test/y_pred — confirm this is intentional rather
        # than an aggregate over all folds.
        if draw == 'evolution' and i == 2:
            y_test = y_test.astype(int)
            y_pred = y_pred.astype(int)
            recall = metrics.recall_score(y_test, y_pred, average='micro')
            f1 = metrics.f1_score(y_test, y_pred, average='weighted')
            accurancy = metrics.accuracy_score(y_test, y_pred)
        elif draw == 'roc' and i == 2:
            # thresholds from roc_curve are not needed downstream.
            false_positive_rate, true_positive_rate, _thresholds = metrics.roc_curve(y_test, y_pred)
            roc_auc = np.mean(scores)
        else:
            auc_scores[i] = np.mean(scores)

    if draw == 'auc':
        return auc_scores, np.arange(n_clfs) + 1
    elif draw == 'roc':
        return false_positive_rate, true_positive_rate, roc_auc
    else:
        return accurancy, recall, f1


def draw_roc(x, y):
    """Plot the ROC curve of the classifier at index 2 of ``clfs``.

    The legend carries the mean cross-validated AUC.

    Args:
        x: feature matrix, indexable with integer index arrays.
        y: label vector aligned with ``x``.
    """
    # Fix: the original built an unused balanced SVC (w_svm) here; removed.
    false_positive_rate, true_positive_rate, roc_auc = ensemble_forward_pass(clfs, x, y, 'roc', 3)
    plt.plot(false_positive_rate, true_positive_rate, 'g', label="AUC = %0.2f" % roc_auc)

def draw_evolution(x, y):
    """Bar-plot accuracy, precision, recall and F1 for classifier index 2.

    Precision is back-solved from F1 = 2*P*R / (P + R), giving
    P = R*F1 / (2*R - F1).

    Args:
        x: feature matrix, indexable with integer index arrays.
        y: label vector aligned with ``x``.
    """
    # Fix: the original built an unused balanced SVC (w_svm) here; removed.
    accurancy, recall, f1 = ensemble_forward_pass(clfs, x, y, 'evolution', 3)
    # Fix: guard against division by zero when 2*recall == f1.
    denominator = 2 * recall - f1
    precision = (recall * f1) / denominator if denominator else 0
    # Plot bars here; title/axis setup is done by the caller (main).
    plt.bar(['accurancy'], [accurancy], width=0.3)
    plt.bar(['precision'], [precision], width=0.3)
    plt.bar(['recall'], [recall], width=0.3)
    plt.bar(['F1'], [f1], width=0.3)

