import numpy as np
import pandas as pd
from keras import callbacks
from keras.models import load_model
import keras
from keras import backend as K
import keras.optimizers as opt
from keras import Input,layers
from keras.models import Model
from keras import regularizers
import process_data_func as process_data
import bayes_opt
import tensorflow as tf

# Training callbacks shared by every fit() run:
#  - stop training when val_loss hasn't improved for 5 epochs
#  - keep only the best-so-far weights in model_1.h5
#  - multiply the learning rate by 0.7 after a single stagnant epoch
callback_list = [
        callbacks.EarlyStopping(monitor="val_loss", patience=5),
        callbacks.ModelCheckpoint(filepath="model_1.h5", monitor="val_loss", save_best_only=True),
        callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.7, verbose=1, patience=1)
    ]

def getData():
    """Load train/test features and labels from CSV files in the CWD.

    Returns:
        tuple: ``(data, label, data_test, label_test)`` as numpy arrays.

    Note:
        ``read_csv`` is given explicit ``names``, so the files' own header
        line is parsed as a data row; it is dropped with ``[1:, :]``.
    """
    # Single source of truth for the 36 feature columns (the original
    # duplicated this list verbatim for the train and test files).
    columns = ['prov_id', 'area_id', 'chnl_type', 'service_type', 'product_type',
               'innet_months', 'total_times', 'total_flux', 'total_fee',
               'pay_fee', 'sex', 'age', 'manu_name', 'term_type', 'max_rat_flag',
               'is_5g_base_cover', 'is_work_5g_cover', 'is_home_5g_cover',
               'is_work_5g_cover_l01', 'is_home_5g_cover_l01',
               'is_work_5g_cover_l02', 'is_home_5g_cover_l02', 'activity_type',
               'is_act_expire', 'comp_type', 'call_days', 're_call10',
               'short_call10', 'long_call10', 'bank_cnt', 'game_app_flux',
               'live_app_flux', 'video_app_flux', 'city_5g_ratio', 'city_level',
               'app_sum']

    def _read_features(path):
        # Drop row 0: with names= supplied, the file's header shows up as data.
        return np.array(pd.read_csv(path, names=columns))[1:, :]

    data = _read_features("data_1.csv")
    label = np.array(pd.read_csv("label_1.csv"))

    data_test = _read_features("data_test_1.csv")
    label_test = np.array(pd.read_csv("label_test_1.csv"))

    return data, label, data_test, label_test

def f1(y_true, y_pred):
    """Batch-wise F1 metric: harmonic mean of precision and recall.

    Both tensors are clipped to [0, 1] and rounded before counting, and
    K.epsilon() guards every division against a zero denominator.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))

    recall_value = true_positives / (actual_positives + K.epsilon())
    precision_value = true_positives / (predicted_positives + K.epsilon())

    return 2 * ((precision_value * recall_value) /
                (precision_value + recall_value + K.epsilon()))

def IoU(y_true, y_pred):
    """Intersection-over-Union (Jaccard index) of predictions vs. truths.

    Computes |A AND B| / |A OR B| over the whole batch; when the union is
    zero the result is defined as 1.0 to avoid a divide-by-zero.
    """
    overlap = K.sum(y_true * y_pred)                       # |intersection|
    combined = K.sum(y_true) + K.sum(y_pred) - overlap     # |union|
    return K.switch(K.equal(combined, 0), 1.0, overlap / combined)


def tversky_loss(y_true, y_pred):
    """Tversky loss summed over classes.

    ``alpha`` weights false positives and ``beta`` (= 1 - alpha) weights
    false negatives; both are tunable. With alpha == beta == 0.5 this
    reduces to the Dice loss.
    """
    alpha = 0.5        # extra penalty on false positives
    beta = 1 - alpha   # extra penalty on false negatives

    ones = K.ones(K.shape(y_true))
    prob_pos = y_pred           # probability a voxel belongs to the class
    prob_neg = ones - y_pred    # probability it does not
    truth_pos = y_true
    truth_neg = ones - y_true

    numerator = K.sum(prob_pos * truth_pos)
    denominator = numerator + alpha * K.sum(prob_pos * truth_neg) \
                            + beta * K.sum(prob_neg * truth_pos)

    # Summed over classes the index has dynamic range [0, Ncl]; subtracting
    # from the class count turns the similarity into a loss.
    tversky_index = K.sum(numerator / denominator)
    n_classes = K.cast(K.shape(y_true)[-1], 'float32')
    return n_classes - tversky_index

def focal_loss(y_true, y_pred):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    gamma decays the loss of well-classified samples — the higher a sample's
    confidence, the more it is down-weighted — so the total loss is dominated
    by the hard, low-accuracy samples. alpha is the positive/negative class
    weight: class 1 terms are scaled by alpha, class 0 terms by 1 - alpha.
    """
    # alpha is set LOW for the positive class on purpose: positives are rare
    # and hard, so gamma already boosts them; a small alpha rebalances that.
    alpha = 0.25
    gamma = 2

    # FIX: clip predictions away from exact 0/1 so K.log never receives 0.
    # Without this, a saturated sigmoid output yields log(0) = -inf and the
    # loss becomes NaN, silently destroying training.
    y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())

    # pt_1: prediction where the truth is 1, else 1 (contributes zero loss).
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    # pt_0: prediction where the truth is 0, else 0 (contributes zero loss).
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))

    return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
           - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))

def getNet():
    """Build and compile the binary classifier.

    Architecture: 36-feature input -> Dense 64/128/128 (ReLU, each followed
    by batch norm + 10% dropout) -> Dense 32 (ReLU, dropout) -> single
    sigmoid output. Compiled with Adam, the Tversky loss, and F1 + accuracy
    metrics.

    Returns:
        A compiled keras ``Model``.
    """
    data_input = Input(shape=(36,))

    # FIX: use the public layers.BatchNormalization instead of reaching into
    # the private layers.normalization submodule (removed in newer Keras).
    x = layers.Dense(64, activation="relu")(data_input)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.1)(x)

    x = layers.Dense(128, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.1)(x)

    x = layers.Dense(128, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.1)(x)

    x = layers.Dense(32, activation="relu")(x)
    x = layers.Dropout(0.1)(x)

    output = layers.Dense(1, activation="sigmoid")(x)

    model_1 = Model(inputs=data_input, outputs=output)
    # FIX: Adam (the optimizer class) replaces the deprecated lowercase
    # opt.adam alias.
    model_1.compile(optimizer=opt.Adam(), loss=tversky_loss, metrics=[f1, "acc"])

    return model_1


def _f1_numpy(y_true, y_pred, threshold=0.5):
    # Plain-numpy F1 so callers get a real Python float. Mirrors the f1()
    # metric: binarize predictions, then harmonic mean of precision/recall
    # with a small epsilon guarding each division.
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_hat = (np.asarray(y_pred, dtype=float).ravel() >= threshold).astype(float)
    eps = 1e-7
    true_pos = float(np.sum(y_true * y_hat))
    precision = true_pos / (np.sum(y_hat) + eps)
    recall = true_pos / (np.sum(y_true) + eps)
    return 2 * precision * recall / (precision + recall + eps)


def doFit(mulNum):
    """Regenerate training data, train a fresh network, return test-set F1.

    Args:
        mulNum: oversampling multiplier — process_data.genData is asked for
            ``process_data.posNum * mulNum`` samples (may be fractional).

    Returns:
        float: F1 score on the held-out set. The original returned
        ``f1(label_test, y_pred)``, a symbolic backend tensor, which the
        intended consumer (bayes_opt's ``maximize``) cannot use as a score;
        the score is now computed numerically.
    """
    model_1 = getNet()
    process_data.genData(process_data.posNum * mulNum)
    data, label, data_test, label_test = getData()
    # EarlyStopping (patience=5) terminates long before 40000 epochs.
    model_1.fit(data, label, epochs=40000, batch_size=2048,
                callbacks=callback_list,
                validation_data=(data_test, label_test))
    y_pred = model_1.predict(data_test)
    return _f1_numpy(label_test, y_pred)


if __name__ == '__main__':
    # Bayesian search over the oversampling multiplier, kept for reference;
    # re-enable it to re-tune mulNum.
    '''
    rf_bo = bayes_opt.BayesianOptimization(
        doFit,
        {'mulNum': (1, 50)}
    )
    rf_bo.maximize()
    '''
    # 9.17 — presumably the best mulNum found by the optimizer above; confirm.
    doFit(9.17)