import numpy as np
import pandas as pd
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
# %matplotlib inline
from EGES_model import EGES_Model

# Load the interaction data prepared for DeepFM training.
data = pd.read_csv('./data_cache/deepFM_data.csv', sep='\t')

# Read the pretrained EGES embeddings: one whitespace-separated float vector
# per line, where the line index is the node id ('target').  Rows are
# collected in a plain list and the DataFrame is built once at the end --
# DataFrame.append inside a loop is quadratic and was removed in pandas 2.0,
# and seeding from an empty frame left 'target' object-dtyped, which can
# prevent the integer-keyed merge below from matching.
rows = []
with open('./embedding/EGES.embed', 'r') as r:
    for index, line in enumerate(r):
        emb = [float(v) for v in line.split()]
        rows.append((index, emb))
api_embedding = pd.DataFrame(rows, columns=['target', 'emb'])

print(api_embedding)
# Attach each record's embedding vector by node id; how='left' keeps every
# interaction record (rows without an embedding get NaN).
data = pd.merge(data, api_embedding, on='target', how='left')

print(data)
cols = data.columns.values
print(cols)

# No continuous features are used in this run; every input is categorical.
dense_feats = []
# Categorical feature columns: graph node ids plus side information for both
# endpoints (the _x / _y suffixes come from the pandas merge above).
sparse_feats = [
    'source', 'target',
    'c_x', 'oac_x', 'ac_x',
    'c_y', 'oac_y', 'ac_y',
]

# NOTE(review): the original dense (log1p) and sparse (LabelEncoder)
# preprocessing steps were commented out here; the merged frame is used
# as-is.  The columns in `sparse_feats` are therefore assumed to already be
# integer-encoded -- TODO confirm upstream.
total_data = data

import tensorflow as tf

# Placeholder inputs for dense features.  dense_feats is empty in this run,
# so no dense branch is built and the first-order dense layer stays disabled.
dense_inputs = [Input([1], name=feat) for feat in dense_feats]

# concat_dense_inputs = Concatenate(axis=1)(dense_inputs)  # ?, 13
# fst_order_dense_layer = Dense(1)(concat_dense_inputs)  # ?, 1


# Load the pretrained EGES model and pull its embedding tables out of the
# TF1 checkpoint so they can warm-start the DeepFM embedding layers below.
print('----------------------------------------------------------------------------------------------------------------------------------------')
# Rebuild the EGES graph with fixed hyper-parameters (1027 nodes, 4 side
# information fields, per-field vocabulary sizes [1027, 108, 3, 3]) --
# presumably these match the saved checkpoint; verify against the EGES
# training script.
EGES = EGES_Model(1027, 4, [1027, 108, 3, 3], n_sampled=10,
                  embedding_dim=128,
                  lr=0.001)
pre_embedding = []  # one numpy weight matrix per side-information field
saver = tf.train.Saver()
with tf.Session() as sess:
    # Restore the trained variables into the freshly rebuilt graph.
    saver.restore(sess, "checkpoints/EGES")
    # print("weights:", sess.run(W))
    # print("biases:", sess.run(b))
    print(sess)
    for i in range(4):
        # Materialize each embedding table as a concrete numpy array.
        pre_embedding.append(sess.run(EGES.embedding[i]))
print(pre_embedding)

print(EGES.embedding[0])

print('--------------------------------------------------------------------------------------------------------------------------------')

# One Keras Input per categorical feature, named after the column.
sparse_inputs = []
for feat in sparse_feats:
    sparse_inputs.append(Input([1], name=feat))
    print(feat)
print(sparse_inputs)

# First-order (linear) term: a size-1 embedding per categorical feature,
# i.e. one learned scalar weight per category value.
sparse_1d_embed = []
for feat, feat_input in zip(sparse_feats, sparse_inputs):
    # Assumes the column already holds contiguous integer codes, so max()+1
    # bounds the vocabulary -- TODO confirm (nunique() was the commented
    # alternative in the original).
    voc_size = data[feat].max()
    embed = Embedding(voc_size + 1, 1,
                      embeddings_regularizer=tf.keras.regularizers.l2(0.5))(feat_input)
    sparse_1d_embed.append(Flatten()(embed))
print(sparse_1d_embed)
fst_order_sparse_layer = Add()(sparse_1d_embed)

# First-order part uses only the sparse branch (no dense features).
linear_part = fst_order_sparse_layer  # Add()([fst_order_dense_layer, fst_order_sparse_layer])

k = 128  # embedding size
# Second-order interactions are built from the sparse features only.
# These four features reuse the pretrained EGES tables; the mapping is
# feature name -> slot index into EGES.feature_lens / pre_embedding.
PRETRAINED_SLOT = {'target': 0, 'c_x': 1, 'oac_x': 2, 'ac_x': 3}
pre_flag = True  # set False to train every embedding from scratch

sparse_kd_embed = []
for feat, feat_input in zip(sparse_feats, sparse_inputs):
    slot = PRETRAINED_SLOT.get(feat) if pre_flag else None
    if slot is not None:
        # Warm-start from the restored EGES checkpoint weights.
        embed = Embedding(EGES.feature_lens[slot], k,
                          weights=[pre_embedding[slot]])(feat_input)
    else:
        voc_size = data[feat].max()  # data[feat].nunique()
        embed = Embedding(voc_size + 1, k,
                          embeddings_regularizer=tf.keras.regularizers.l2(0.7))(feat_input)
    sparse_kd_embed.append(embed)
print(sparse_kd_embed)

# FM second-order term via the (square-of-sum minus sum-of-squares) identity:
#   sum_{i<j} <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2)
# 1. Stack all k-dim sparse embeddings into an (n, k) matrix per sample,
#    where n is the number of features and k the embedding size.
concat_sparse_kd_embed = Concatenate(axis=1)(sparse_kd_embed)  # ?, n, k
# 2. Square of the sum over the feature axis.
sum_kd_embed = Lambda(lambda t: K.sum(t, axis=1))(concat_sparse_kd_embed)  # ?, k
square_sum_kd_embed = Multiply()([sum_kd_embed, sum_kd_embed])  # ?, k
# 3. Sum of the squares over the feature axis.
square_kd_embed = Multiply()([concat_sparse_kd_embed, concat_sparse_kd_embed])  # ?, n, k
sum_square_kd_embed = Lambda(lambda t: K.sum(t, axis=1))(square_kd_embed)  # ?, k
# 4. Halve the difference and reduce to a single logit per sample.
sub = Subtract()([square_sum_kd_embed, sum_square_kd_embed])  # ?, k
sub = Lambda(lambda t: t * 0.5)(sub)  # ?, k
snd_order_sparse_layer = Lambda(lambda t: K.sum(t, axis=1, keepdims=True))(sub)  # ?, 1


# Deep part: flatten the (n, k) embedding matrix and run it through a small
# MLP with decreasing dropout.
flatten_sparse_embed = Flatten()(concat_sparse_kd_embed)  # ?, n*k
hidden = flatten_sparse_embed
for rate in (0.5, 0.3, 0.1):
    hidden = Dropout(rate)(Dense(256, activation='relu')(hidden))  # ?, 256
fc_layer_output = Dense(1)(hidden)  # ?, 1

# Combine the three logits (linear + FM second-order + deep) and squash to a
# click probability.
print(linear_part)
print(snd_order_sparse_layer)
print(fc_layer_output)
output_layer = Add()([linear_part, snd_order_sparse_layer, fc_layer_output])
output_layer = Activation("sigmoid")(output_layer)
#编译模型
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model

# Only the sparse inputs feed the model (dense branch disabled above).
model = Model(sparse_inputs, output_layer)  # Model(dense_inputs+sparse_inputs, output_layer)
# plot_model(model, "./data_cache/deepfm.png")

model.summary()

# Binary classification: log loss plus AUC for monitoring.
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=["binary_crossentropy", tf.keras.metrics.AUC(name='auc')])

from tensorflow.keras.callbacks import TensorBoard

# TensorBoard logging; histogram and embedding dumps are disabled.
tbCallBack = TensorBoard(log_dir='./logs',
                         histogram_freq=0,
                         write_graph=True,
                         write_grads=True,
                         write_images=True,
                         embeddings_freq=0,
                         embeddings_layer_names=None,
                         embeddings_metadata=None)

# Random 80/20 train/validation split on the row index.
train_data = total_data.sample(frac=0.8)
print(train_data)
valid_data = total_data.drop(train_data.index)


def _xy(frame):
    # Keras expects one array per named Input, plus the binary label column.
    dense_x = [frame[f].values for f in dense_feats]
    sparse_x = [frame[f].values for f in sparse_feats]
    labels = [frame['label'].values]
    return dense_x, sparse_x, labels


train_dense_x, train_sparse_x, train_label = _xy(train_data)
val_dense_x, val_sparse_x, val_label = _xy(valid_data)

model.fit(train_sparse_x,  # train_dense_x +
          train_label, epochs=20, batch_size=256,
          validation_data=(val_sparse_x, val_label),  # val_dense_x +
          callbacks=[tbCallBack])