# encoding:utf-8
# -----------------------------------------------------------
# "ML-Stealer: Stealing Prediction Functionality of Machine Learning Models with Mere Black-Box Access"
# @author: Shijie Wang, 2019.
# ------------------------------------------------------------

import numpy as np
import pandas as pd
from collections import Counter
from sklearn.externals import joblib
from sklearn import preprocessing
from scipy.signal import find_peaks


def preprocess(sample):
    """Standardize the sample column-wise (zero mean, unit variance)."""
    return preprocessing.scale(sample)


def synthetic_no(model_path, features, n_sample):
    """Generate synthetic data by sampling feature domains uniformly and
    keeping only samples the victim model predicts with high confidence.

    Parameters
    ----------
    model_path : str
        Path to a joblib-serialized victim model exposing ``predict_proba``.
    features : dict
        Column index -> array of candidate values for that feature.
    n_sample : int
        Number of candidate samples to draw per call.

    Returns
    -------
    list
        Retained sample rows (numpy arrays), ranked above the confidence
        cut-off of their predicted class.
    """
    num_features = len(features)
    sample = np.zeros([n_sample, num_features])
    prediction_sample = {}
    predict_class = {}
    gradient = {}
    THRp = {}
    synthetic_data = []
    model = joblib.load(model_path)

    # Draw each feature value uniformly from its observed domain.
    for i in range(n_sample):
        for j in range(num_features):
            sample[i, j] = np.random.choice(features[j], replace=True)

    # Query the victim model on the standardized candidates.
    prediction = model.predict_proba(preprocess(sample))
    # number of classes (i.e. for the bank dataset, 2)
    num_class = prediction.shape[1]

    # initialize one bucket per class
    for i in range(num_class):
        predict_class[i] = []

    # Group each sample's maximum class probability by predicted class.
    for k in range(n_sample):
        idx = np.argmax(prediction[k])
        predict_class[idx].append(prediction[k, idx])
        # NOTE(review): keying on the float probability silently overwrites
        # samples that share an identical maximum probability — confirm that
        # this loss is acceptable.
        prediction_sample[prediction[k, idx]] = sample[k]

    # Sort confidences descending and locate the peaks of the
    # absolute-gradient series; the first peak is the confidence cut-off.
    for ii in range(num_class):
        predict_class[ii] = np.sort(predict_class[ii])[::-1]
        gradient[ii] = gradient_series(predict_class[ii])
        THRp[ii], _ = find_peaks(gradient[ii])

    # Keep samples ranked above the first gradient peak of their class.
    for jj in range(num_class):
        if len(THRp[jj]) == 0:
            # No peak found (e.g. too few samples of this class, or a
            # monotone gradient): skip the class instead of raising
            # IndexError on THRp[jj][0].
            continue
        for kk in range(int(THRp[jj][0])):
            synthetic_data.append(prediction_sample[predict_class[jj][kk]])

    return synthetic_data


def synthetic_limit(model_path, features, n_sample, probability):
    """Generate synthetic data by sampling feature domains according to
    their empirical frequencies, keeping only high-confidence predictions.

    Parameters
    ----------
    model_path : str
        Path to a joblib-serialized victim model exposing ``predict_proba``.
    features : dict
        Column index -> array of candidate values for that feature.
    n_sample : int
        Number of candidate samples to draw per call.
    probability : dict
        Column index -> probability list aligned with ``features`` (used as
        ``p`` for ``np.random.choice``).

    Returns
    -------
    list
        Retained sample rows (numpy arrays), ranked above the confidence
        cut-off of their predicted class.
    """
    num_features = len(features)
    sample = np.zeros([n_sample, num_features])
    prediction_sample = {}
    predict_class = {}
    gradient = {}
    THRp = {}
    synthetic_data = []
    model = joblib.load(model_path)

    # Draw each feature value according to its empirical frequency.
    for i in range(n_sample):
        for j in range(num_features):
            sample[i, j] = np.random.choice(features[j], replace=True, p=probability[j])

    # Query the victim model on the standardized candidates.
    prediction = model.predict_proba(preprocess(sample))
    # number of classes (i.e. for the bank dataset, 2)
    num_class = prediction.shape[1]

    # initialize one bucket per class
    for i in range(num_class):
        predict_class[i] = []

    # Group each sample's maximum class probability by predicted class.
    for k in range(n_sample):
        idx = np.argmax(prediction[k])
        predict_class[idx].append(prediction[k, idx])
        # NOTE(review): keying on the float probability silently overwrites
        # samples that share an identical maximum probability — confirm that
        # this loss is acceptable.
        prediction_sample[prediction[k, idx]] = sample[k]

    # Sort confidences descending and locate the peaks of the
    # absolute-gradient series; the first peak is the confidence cut-off.
    for ii in range(num_class):
        predict_class[ii] = np.sort(predict_class[ii])[::-1]
        gradient[ii] = gradient_series(predict_class[ii])
        THRp[ii], _ = find_peaks(gradient[ii])

    # Keep samples ranked above the first gradient peak of their class.
    for jj in range(num_class):
        if len(THRp[jj]) == 0:
            # No peak found (e.g. too few samples of this class, or a
            # monotone gradient): skip the class instead of raising
            # IndexError on THRp[jj][0].
            continue
        for kk in range(int(THRp[jj][0])):
            synthetic_data.append(prediction_sample[predict_class[jj][kk]])

    return synthetic_data


def gradient_series(prediction):
    """Return the absolute first differences of *prediction*."""
    return np.abs(np.diff(prediction))


def feature_init(csv_file):
    """Build per-column value domains and matching empirical frequencies.

    Parameters
    ----------
    csv_file : str
        Path to a comma-separated file with a header row.

    Returns
    -------
    features : dict
        Column index -> sorted array of the column's unique values.
    prob : dict
        Column index -> list of relative frequencies aligned with
        ``features[i]`` (suitable as ``p`` for ``np.random.choice``).
    """
    features = {}
    prob = {}
    data = pd.read_csv(csv_file, sep=',').values
    n_rows = data.shape[0]
    for i in range(data.shape[1]):
        # np.unique(return_counts=True) keeps values and counts aligned.
        # The previous Counter-based version paired the SORTED unique values
        # with counts in FIRST-OCCURRENCE order, mis-aligning probabilities
        # whenever the two orders differed.
        values, counts = np.unique(data[:, i], return_counts=True)
        features[i] = values
        prob[i] = (counts / n_rows).tolist()

    return features, prob


if __name__ == '__main__':
    np.set_printoptions(suppress=True)
    features, prob = feature_init('X_adult_train_2.csv')
    adult_data = []

    # Repeatedly query the victim model in batches of 300 candidates,
    # deduplicating as we go, until >5000 unique synthetic samples are
    # collected (capped at 2000 rounds).
    for _ in range(2000):
        data = synthetic_no('./victim_models/svm_part_adult.m', features, 300)
        # deduplicate within the batch, then against the accumulated data
        # (rows are numpy arrays, so round-trip through tuples for hashing)
        data = list(map(list, set(map(tuple, data))))
        adult_data = adult_data + data
        adult_data = list(map(list, set(map(tuple, adult_data))))
        print(len(adult_data))
        if len(adult_data) > 5000:
            break

    df_adult = pd.DataFrame(adult_data, columns=['age', 'workclass', 'fnlwgt', 'education', 'education_number',
                                                 'marriage', 'occupation', 'relationship', 'race', 'sex', 'capital_gain',
                                                 'capital_loss', 'hours_per_work', 'native_country']).astype(int)
    df_adult.drop_duplicates(subset=None, keep='first', inplace=True)
    df_adult.to_csv('./synthetic_data/adult_no_svm.csv', index=False)