# from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from deepctr.models import DeepFM
import pandas as pd
from sklearn.metrics import roc_auc_score, log_loss
# Directory for training artifacts (model checkpoints / TensorBoard logs).
BOARD_DIR = './model_ckpt'


class DeepFmModel:
    """Thin wrapper around deepctr's DeepFM for binary-classification CTR tasks.

    Builds the network at construction time; `model_fit` compiles and trains
    with a 10% validation split, `predict` scores a dataset and reports
    LogLoss / AUC.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, feature_names, cat_feats):
        """Create and initialize the underlying DeepFM network.

        Args:
            linear_feature_columns: deepctr feature columns for the linear part.
            dnn_feature_columns: deepctr feature columns for the DNN part.
            cat_feats: categorical feature names (kept for callers; unused here).
            feature_names: ordered model-input feature names (kept for callers).
        """
        self.feature_names = feature_names
        self.cat_feats = cat_feats
        self.model = None
        self.opt = None
        self.model_name = "DeepFm"
        # BUG FIX: the original concatenated BOARD_DIR and the filename with no
        # separator, producing './model_ckptcheckpoint_weights.hdf5' (a sibling
        # file, not a file inside the checkpoint dir). The commented-out
        # ModelCheckpoint / load_weights code clearly expects it under BOARD_DIR.
        self.checkpoint_path = BOARD_DIR + "/checkpoint_weights.hdf5"
        self.model_init(linear_feature_columns, dnn_feature_columns)

    def model_init(self, linear_feature_columns, dnn_feature_columns):
        """Build the optimizer and the DeepFM network (binary task)."""
        self.opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=True)
        # self.opt = Adagrad(learning_rate=0.001, initial_accumulator_value=0.1, epsilon=1e-7,)
        self.model = DeepFM(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(512, 128, 64),
                            l2_reg_linear=0.00003, l2_reg_embedding=0.00003, l2_reg_dnn=0.00002, seed=1024,
                            dnn_dropout=0.5,
                            dnn_activation='relu', dnn_use_bn=False, task='binary')

    def model_fit(self, train_x_model_input, train_y, initial_epoch=0):
        """Compile and train the model.

        Args:
            train_x_model_input: model input dict/list as expected by deepctr.
            train_y: pandas Series/DataFrame of binary labels (`.values` is used).
            initial_epoch: epoch to resume from (training runs 100 more epochs).

        Returns:
            int: the last epoch index actually trained (useful with early stop).
        """
        self.model.compile(optimizer=self.opt, loss="binary_crossentropy", metrics=["AUC"], )
        # if os.path.isfile(self.checkpoint_path):
        #     self.model.load_weights(self.checkpoint_path)
        # tb_callback = TensorBoard(
        #     log_dir=BOARD_DIR,
        #     histogram_freq=10,
        #     write_graph=True,
        #     write_images=False,
        #     update_freq="epoch")
        # ck_callback = ModelCheckpoint(
        #     filepath=self.checkpoint_path,
        #     monitor="val_loss",
        #     save_best_only=True,
        #     save_weights_only=True,
        #     verbose=1)
        # es_callback = EarlyStopping(
        #     monitor="val_loss",
        #     min_delta=1e-8,
        #     patience=5,
        #     restore_best_weights=True,
        #     verbose=1)
        # rr_callback = ReduceLROnPlateau(
        #     monitor="val_loss",
        #     min_delta=1e-8,
        #     factor=0.2,
        #     patience=3,
        #     verbose=1)
        # call_backs = [ck_callback, es_callback]
        history = self.model.fit(train_x_model_input, train_y.values, initial_epoch=initial_epoch,
                                 batch_size=256, epochs=initial_epoch + 100, verbose=2, callbacks=[],
                                 validation_split=0.1
                                 # validation_data=(test_x_model_input, test_y)
                                 )
        print("early_stop epoch = " + str(history.epoch[-1]))

        # Label distribution (debug aid):
        # print("\n【测试集标签 分布】")
        # print(pd.DataFrame(test_y).describe())
        return history.epoch[-1]

    def predict(self, eval_x_model_input, eval_y, save_path=None):
        """Score a dataset, optionally persist predictions, and report metrics.

        Args:
            eval_x_model_input: model input dict/list as expected by deepctr.
            eval_y: ground-truth binary labels.
            save_path: optional CSV path to dump raw predictions.

        Returns:
            float: ROC-AUC rounded to 4 decimals.
        """
        pred_ans = self.model.predict(eval_x_model_input, batch_size=256)
        if save_path:
            pd.DataFrame(pred_ans, columns=['deep_fm_pred']).to_csv(save_path, index=False)
        print("\n【预测结果 分布】")
        print(pd.DataFrame(pred_ans).describe())

        # Evaluation metrics
        print("{0} LogLoss = {1}".format(self.model_name, str(round(log_loss(eval_y, pred_ans), 4))))
        eval_auc = round(roc_auc_score(eval_y, pred_ans), 4)
        print("{0} AUC = {1}".format(self.model_name, str(eval_auc)))
        return eval_auc